

Python layers.dropout Function Code Examples

This article collects typical usage examples of the Python function tensorflow.contrib.layers.dropout, drawn from open-source projects. If you have been wondering exactly what dropout does, how to call it, or what real-world usage looks like, the curated examples below should help.


The sections below present 15 code examples of the dropout function, sorted by popularity by default.
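
Before diving in, here is a minimal, self-contained sketch (assuming TensorFlow 1.x, where tf.contrib is still available) of the two facts every example below relies on: keep_prob is the probability of keeping a unit, not of dropping it, and is_training may be a Python bool or a bool tensor, so a single graph can enable dropout during training and disable it at inference:

import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers

x = tf.placeholder(tf.float32, [None, 8])
is_training = tf.placeholder(tf.bool, shape=())

# Keeps each unit with probability 0.75 and rescales survivors by 1/0.75
# when is_training is True; acts as the identity when it is False.
y = layers.dropout(x, keep_prob=0.75, is_training=is_training)

with tf.Session() as sess:
    data = np.ones((2, 8), dtype=np.float32)
    print(sess.run(y, {x: data, is_training: True}))   # random zeros, survivors scaled
    print(sess.run(y, {x: data, is_training: False}))  # unchanged input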

Example 1: model

def model():
    print("building model ...")
    with tf.variable_scope('train'):
        print("building model ...")
        X_pl = tf.placeholder(tf.float32, [None, num_features])
        X_expand = tf.expand_dims(X_pl, axis=2)
        print("X_pl", X_pl.get_shape())
        t_pl = tf.placeholder(tf.int32, [None,])
        print("t_pl", t_pl.get_shape())
        is_training_pl = tf.placeholder(tf.bool)
        cell_fw = tf.nn.rnn_cell.GRUCell(205)
        cell_bw = tf.nn.rnn_cell.GRUCell(205)
        seq_len = tf.reduce_sum(tf.ones(tf.shape(X_pl), dtype=tf.int32), axis=1)
        _, enc_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_fw,
            cell_bw=cell_bw, inputs=X_expand, sequence_length=seq_len,
            dtype=tf.float32)
        enc_states = tf.concat(enc_states, axis=1)  # TF 1.x signature: values first, then axis
        enc_states_drop = dropout(enc_states, is_training=is_training_pl) 
        l1 = fully_connected(enc_states_drop, 200, activation_fn=None)
        l1 = batch_norm(l1, is_training=is_training_pl)
        l1_relu = relu(l1)
        l1_dropout = dropout(l1_relu, is_training=is_training_pl)
        l2 = fully_connected(l1_dropout, 200, activation_fn=None)
        l2 = batch_norm(l2, is_training=is_training_pl)
        l2_relu = relu(l2)
        l_out = fully_connected(l2_relu, num_outputs=num_classes, activation_fn=None)
        l_out_softmax = tf.nn.softmax(l_out)
        tf.contrib.layers.summarize_variables()

    with tf.variable_scope('metrics'):
        loss = sparse_softmax_cross_entropy_with_logits(labels=t_pl, logits=l_out)
        print("loss", loss.get_shape())
        loss = tf.reduce_mean(loss)
        print("loss", loss.get_shape())
        tf.summary.scalar('train/loss', loss)
        argmax = tf.to_int32(tf.argmax(l_out, 1))
        print("argmax", argmax.get_shape())
        correct = tf.to_float(tf.equal(argmax, t_pl))
        print("correct,", correct.get_shape())
        accuracy = tf.reduce_mean(correct)
        print("accuracy", accuracy.get_shape())

    with tf.variable_scope('optimizer'):
        print("building optimizer ...")
        global_step = tf.Variable(0, name='global_step', trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        grads_and_vars = optimizer.compute_gradients(loss)
        gradients, variables = zip(*grads_and_vars)
        clipped_gradients, global_norm = (
            tf.clip_by_global_norm(gradients, clip_norm))
        clipped_grads_and_vars = zip(clipped_gradients, variables)

        tf.summary.scalar('train/global_gradient_norm', global_norm)

        train_op = optimizer.apply_gradients(clipped_grads_and_vars, global_step=global_step)

    return X_pl, t_pl, is_training_pl, l_out, l_out_softmax, loss, accuracy, train_op, global_step
Contributor: alrojo, Project: EEG_DauwelsLab, Lines: 57, Source: rnn_big2.py
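
Since model() returns is_training_pl, one graph serves both phases. A hypothetical driver loop (the session scaffolding and batch arrays below are assumed, not taken from the EEG_DauwelsLab repository) would toggle dropout through the feed dict:

X_pl, t_pl, is_training_pl, l_out, l_out_softmax, loss, accuracy, train_op, global_step = model()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # X_batch/t_batch/X_val/t_val: numpy arrays prepared by the caller (assumed)
    # training step: dropout and batch-norm updates active
    sess.run([train_op, loss], {X_pl: X_batch, t_pl: t_batch, is_training_pl: True})
    # validation step: dropout off, batch norm in inference mode
    val_loss, val_acc = sess.run([loss, accuracy],
                                 {X_pl: X_val, t_pl: t_val, is_training_pl: False})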

Example 2: define_sequence_model

 def define_sequence_model(self):
     seed=12345
     np.random.seed(seed)
     layer_list=[]
     with self.graph.as_default() as g:
         utt_length=tf.placeholder(tf.int32,shape=(None))
         g.add_to_collection(name="utt_length",value=utt_length)
         with tf.name_scope("input"):
              input_layer=tf.placeholder(dtype=tf.float32,shape=(None,None,self.n_in),name="input_layer")
              if self.dropout_rate!=0.0:
                 print "Using dropout to avoid overfitting and the dropout rate is",self.dropout_rate
                 is_training_drop=tf.placeholder(dtype=tf.bool,shape=(),name="is_training_drop")
                 input_layer_drop=dropout(input_layer,self.dropout_rate,is_training=is_training_drop)
                 layer_list.append(input_layer_drop)
                 g.add_to_collection(name="is_training_drop",value=is_training_drop)
              else:
                 layer_list.append(input_layer)
         g.add_to_collection("input_layer",layer_list[0])
         with tf.name_scope("hidden_layer"):
            basic_cell=[]
            if "tanh" in self.hidden_layer_type:
                is_training_batch=tf.placeholder(dtype=tf.bool,shape=(),name="is_training_batch")
                bn_params={"is_training":is_training_batch,"decay":0.99,"updates_collections":None}
                g.add_to_collection("is_training_batch",is_training_batch)
            for i in range(len(self.hidden_layer_type)):
                if self.dropout_rate!=0.0:
                    if self.hidden_layer_type[i]=="tanh":
                        new_layer=fully_connected(layer_list[-1],self.hidden_layer_size[i],activation_fn=tf.nn.tanh,normalizer_fn=batch_norm,normalizer_params=bn_params)
                        new_layer_drop=dropout(new_layer,self.dropout_rate,is_training=is_training_drop)
                        layer_list.append(new_layer_drop)
                    if self.hidden_layer_type[i]=="lstm":
                        basic_cell.append(MyDropoutWrapper(BasicLSTMCell(num_units=self.hidden_layer_size[i]),self.dropout_rate,self.dropout_rate,is_training=is_training_drop))
                    if self.hidden_layer_type[i]=="gru":
                        basic_cell.append(MyDropoutWrapper(GRUCell(num_units=self.hidden_layer_size[i]),self.dropout_rate,self.dropout_rate,is_training=is_training_drop))
                else:
                    if self.hidden_layer_type[i]=="tanh":
                       new_layer=fully_connected(layer_list[-1],self.hidden_layer_size[i],activation_fn=tf.nn.tanh,normalizer_fn=batch_norm,normalizer_params=bn_params)
                       layer_list.append(new_layer)
                    if self.hidden_layer_type[i]=="lstm":
                       basic_cell.append(LayerNormBasicLSTMCell(num_units=self.hidden_layer_size[i]))
                    if self.hidden_layer_type[i]=="gru":
                       basic_cell.append(LayerNormGRUCell(num_units=self.hidden_layer_size[i]))
            multi_cell=MultiRNNCell(basic_cell)
            rnn_outputs,rnn_states=tf.nn.dynamic_rnn(multi_cell,layer_list[-1],dtype=tf.float32,sequence_length=utt_length)
            layer_list.append(rnn_outputs)
         with tf.name_scope("output_layer"):
              if self.output_type=="linear":
                  output_layer=tf.layers.dense(rnn_outputs,self.n_out)
               #  stacked_rnn_outputs=tf.reshape(rnn_outputs,[-1,self.n_out])
               #  stacked_outputs=tf.layers.dense(stacked_rnn_outputs,self.n_out)
               #  output_layer=tf.reshape(stacked_outputs,[-1,utt_length,self.n_out])
              g.add_to_collection(name="output_layer",value=output_layer)
         with tf.name_scope("training_op"):
              if self.optimizer=="adam":
                  self.training_op=tf.train.AdamOptimizer()
Contributor: CSTR-Edinburgh, Project: merlin, Lines: 55, Source: model.py
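
MyDropoutWrapper above is merlin's own helper; it adds the is_training switch that the stock TF 1.x cell wrapper lacks. For reference, the standard way to apply dropout around a recurrent cell is tf.contrib.rnn.DropoutWrapper (a sketch; keep probabilities illustrative):

from tensorflow.contrib.rnn import GRUCell, DropoutWrapper

cell = GRUCell(num_units=256)
# Dropout on the cell's inputs and outputs. The keep probabilities may also be
# scalar tensors, so a placeholder fed with 1.0 at evaluation time can act as
# the on/off switch.
cell = DropoutWrapper(cell, input_keep_prob=0.9, output_keep_prob=0.9)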

Example 3: _init_body

  def _init_body(self, scope):
    with tf.variable_scope(scope):

      word_level_inputs = tf.reshape(self.inputs_embedded, [
        self.document_size * self.sentence_size,
        self.word_size,
        self.embedding_size
      ])
      word_level_lengths = tf.reshape(
        self.word_lengths, [self.document_size * self.sentence_size])

      with tf.variable_scope('word') as scope:
        word_encoder_output, _ = bidirectional_rnn(
          self.word_cell, self.word_cell,
          word_level_inputs, word_level_lengths,
          scope=scope)

        with tf.variable_scope('attention') as scope:
          word_level_output = task_specific_attention(
            word_encoder_output,
            self.word_output_size,
            scope=scope)

        with tf.variable_scope('dropout'):
          word_level_output = layers.dropout(
            word_level_output, keep_prob=self.dropout_keep_proba,
            is_training=self.is_training,
          )

      # sentence_level

      sentence_inputs = tf.reshape(
        word_level_output, [self.document_size, self.sentence_size, self.word_output_size])

      with tf.variable_scope('sentence') as scope:
        sentence_encoder_output, _ = bidirectional_rnn(
          self.sentence_cell, self.sentence_cell, sentence_inputs, self.sentence_lengths, scope=scope)

        with tf.variable_scope('attention') as scope:
          sentence_level_output = task_specific_attention(
            sentence_encoder_output, self.sentence_output_size, scope=scope)

        with tf.variable_scope('dropout'):
          sentence_level_output = layers.dropout(
            sentence_level_output, keep_prob=self.dropout_keep_proba,
            is_training=self.is_training,
          )

      with tf.variable_scope('classifier'):
        self.logits = layers.fully_connected(
          sentence_level_output, self.classes, activation_fn=None)

        self.prediction = tf.argmax(self.logits, axis=-1)
Contributor: siddrtm, Project: hierarchical-attention-networks, Lines: 53, Source: HAN_model.py

Example 4: dnn_logits_fn

 def dnn_logits_fn():
   """Builds the logits from the input layer."""
   previous_layer = input_layer
   for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
     with variable_scope.variable_scope(
         "hiddenlayer_%d" % layer_id,
         values=(previous_layer,)) as hidden_layer_scope:
       net = layers.fully_connected(
           previous_layer,
           num_hidden_units,
           activation_fn=dnn_activation_fn,
           variables_collections=[dnn_parent_scope],
           scope=hidden_layer_scope)
       if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN:
         net = layers.dropout(net, keep_prob=(1.0 - dnn_dropout))
     _add_hidden_layer_summary(net, hidden_layer_scope.name)
     previous_layer = net
   with variable_scope.variable_scope(
       "logits", values=(previous_layer,)) as logits_scope:
     dnn_logits = layers.fully_connected(
         previous_layer,
         head.logits_dimension,
         activation_fn=None,
         variables_collections=[dnn_parent_scope],
         scope=logits_scope)
   _add_hidden_layer_summary(dnn_logits, logits_scope.name)
   return dnn_logits
Contributor: Ajaycs99, Project: tensorflow, Lines: 27, Source: dnn_tree_combined_estimator.py
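
Two idioms for disabling dropout at inference recur across this page, and this example shows the second. A sketch contrasting them (names and values illustrative; note that these estimator-style hyperparameters store a drop probability, so each call site converts it to the keep probability layers.dropout expects):

import tensorflow as tf
from tensorflow.contrib import layers

drop_rate = 0.3              # probability of DROPPING a unit
net = tf.ones([4, 16])
mode = "train"               # stand-in for model_fn.ModeKeys.TRAIN

# Idiom 1 (Examples 1-3, 8, 10): one graph, runtime switch via a bool placeholder.
is_training = tf.placeholder(tf.bool, shape=())
net_a = layers.dropout(net, keep_prob=1.0 - drop_rate, is_training=is_training)

# Idiom 2 (Examples 4, 7, 9, 13-15): dropout is only built into the training
# graph; is_training defaults to True, and evaluation never constructs the op.
net_b = layers.dropout(net, keep_prob=1.0 - drop_rate) if mode == "train" else net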

Example 5: conv_model

def conv_model(X, Y_, mode):
    XX = tf.reshape(X, [-1, 28, 28, 1])
    biasInit = tf.constant_initializer(0.1, dtype=tf.float32)
    Y1 = layers.conv2d(XX,  num_outputs=6,  kernel_size=[6, 6], biases_initializer=biasInit)
    Y2 = layers.conv2d(Y1, num_outputs=12, kernel_size=[5, 5], stride=2, biases_initializer=biasInit)
    Y3 = layers.conv2d(Y2, num_outputs=24, kernel_size=[4, 4], stride=2, biases_initializer=biasInit)
    Y4 = layers.flatten(Y3)
    Y5 = layers.relu(Y4, 200, biases_initializer=biasInit)
    # to deactivate dropout on the dense layer, set keep_prob=1
    Y5d = layers.dropout(Y5, keep_prob=0.75, noise_shape=None, is_training=(mode == learn.ModeKeys.TRAIN))
    Ylogits = layers.linear(Y5d, 10)
    predict = tf.nn.softmax(Ylogits)
    classes = tf.cast(tf.argmax(predict, 1), tf.uint8)

    loss = conv_model_loss(Ylogits, Y_, mode)
    train_op = conv_model_train_op(loss, mode)
    eval_metrics = conv_model_eval_metrics(classes, Y_, mode)

    return learn.ModelFnOps(
        mode=mode,
        # You can name the fields of your predictions dictionary as you like.
        predictions={"predictions": predict, "classes": classes},
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metrics
    )
Contributor: spwcd, Project: QTML, Lines: 26, Source: task.py

Example 6: model_fn

def model_fn(x, target, mode, params):
    """Model function for Estimator."""

    y_ = tf.cast(target, tf.float32)

    x_image = tf.reshape(x, [-1, 28, 28, 1])

    # first convolutional layer
    h_conv1 = layers.convolution2d(x_image, 32, [5,5])
    h_pool1 = layers.max_pool2d(h_conv1, [2,2])

    # second convolutional layer
    h_conv2 = layers.convolution2d(h_pool1, 64, [5,5])
    h_pool2 = layers.max_pool2d(h_conv2, [2,2])

    # densely connected layer
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = layers.fully_connected(h_pool2_flat, 1024)
    h_fc1_drop = layers.dropout(
        h_fc1, keep_prob=params["dropout"],
        is_training=(mode == ModeKeys.TRAIN))

    # readout layer
    y_conv = layers.fully_connected(h_fc1_drop, 10, activation_fn=None)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_op = tf.contrib.layers.optimize_loss(
        loss=cross_entropy,
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=params["learning_rate"],
        optimizer="Adam")

    predictions = tf.argmax(y_conv, 1)
    return predictions, cross_entropy, train_op
Contributor: ccortezb, Project: pipeline, Lines: 35, Source: mnist_cnn_estim_layers.py
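
One caveat when reusing these snippets: a hyperparameter named "dropout" does not mean the same thing everywhere. Here params["dropout"] is fed straight into keep_prob, whereas Examples 4 and 7 store a drop probability and convert it at the call site. A small sketch of both conventions (values illustrative):

import tensorflow as tf
from tensorflow.contrib import layers

h = tf.ones([2, 4])
params = {"dropout": 0.5}    # Example 6's convention: a KEEP probability
dnn_dropout = 0.5            # Examples 4/7's convention: a DROP probability

h_a = layers.dropout(h, keep_prob=params["dropout"])    # keeps 50% of units
h_b = layers.dropout(h, keep_prob=(1.0 - dnn_dropout))  # also keeps 50%, after conversion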

Example 7: _dnn_logits

 def _dnn_logits(self, features, is_training=False):
   net = layers.input_from_feature_columns(
       features,
       self._get_dnn_feature_columns(),
       weight_collections=[self._dnn_weight_collection])
   for layer_id, num_hidden_units in enumerate(self._dnn_hidden_units):
     net = layers.legacy_fully_connected(
         net,
         num_hidden_units,
         activation_fn=self._dnn_activation_fn,
         weight_collections=[self._dnn_weight_collection],
         bias_collections=[self._dnn_weight_collection],
         name="hiddenlayer_%d" % layer_id)
     if self._dnn_dropout is not None and is_training:
       net = layers.dropout(
           net,
           keep_prob=(1.0 - self._dnn_dropout))
     self._add_hidden_layer_summary(net, "hiddenlayer_%d" % layer_id)
   logit = layers.legacy_fully_connected(
       net,
       self._num_label_columns(),
       weight_collections=[self._dnn_weight_collection],
       bias_collections=[self._dnn_weight_collection],
       name="dnn_logit")
   self._add_hidden_layer_summary(logit, "dnn_logit")
   return logit
Contributor: Ambier, Project: tensorflow, Lines: 26, Source: dnn_linear_combined.py

Example 8: define_feedforward_model

 def define_feedforward_model(self):
     layer_list=[]
     with self.graph.as_default() as g:
         is_training_batch=tf.placeholder(tf.bool,shape=(),name="is_training_batch")
         bn_params={"is_training":is_training_batch,"decay":0.99,"updates_collections":None}
         g.add_to_collection("is_training_batch",is_training_batch)
         with tf.name_scope("input"):
             input_layer=tf.placeholder(dtype=tf.float32,shape=(None,self.n_in),name="input_layer")
             if self.dropout_rate!=0.0:
                print "Using dropout to avoid overfitting and the dropout rate is",self.dropout_rate
                is_training_drop=tf.placeholder(dtype=tf.bool,shape=(),name="is_training_drop")
                input_layer_drop=dropout(input_layer,self.dropout_rate,is_training=is_training_drop)
                layer_list.append(input_layer_drop)
                g.add_to_collection(name="is_training_drop",value=is_training_drop)
             else:
                layer_list.append(input_layer)
         g.add_to_collection("input_layer",layer_list[0])
         for i in range(len(self.hidden_layer_size)):
             with tf.name_scope("hidden_layer_"+str(i+1)):
               if self.dropout_rate!=0.0:
                   last_layer=layer_list[-1]
                   if self.hidden_layer_type[i]=="tanh":
                      new_layer=fully_connected(last_layer,self.hidden_layer_size[i],activation_fn=tf.nn.tanh,normalizer_fn=batch_norm,\
                                 normalizer_params=bn_params)
                   if self.hidden_layer_type[i]=="sigmoid":
                       new_layer=fully_connected(last_layer,self.hidden_layer_size[i],activation_fn=tf.nn.sigmoid,normalizer_fn=batch_norm,\
                                 normalizer_params=bn_params)
                   new_layer_drop=dropout(new_layer,self.dropout_rate,is_training=is_training_drop)
                   layer_list.append(new_layer_drop)
               else:
                   last_layer=layer_list[-1]
                   if self.hidden_layer_type[i]=="tanh":
                      new_layer=fully_connected(last_layer,self.hidden_layer_size[i],activation_fn=tf.nn.tanh,normalizer_fn=batch_norm,\
                                normalizer_params=bn_params)
                   if self.hidden_layer_type[i]=="sigmoid":
                      new_layer=fully_connected(last_layer,self.hidden_layer_size[i],activation_fn=tf.nn.sigmoid,normalizer_fn=batch_norm,\
                                normalizer_params=bn_params)
                   layer_list.append(new_layer)
         with tf.name_scope("output_layer"):
             if self.output_type=="linear":
                output_layer=fully_connected(layer_list[-1],self.n_out,activation_fn=None)
             if self.output_type=="tanh":
                output_layer=fully_connected(layer_list[-1],self.n_out,activation_fn=tf.nn.tanh)
             g.add_to_collection(name="output_layer",value=output_layer)
         with tf.name_scope("training_op"):
              if self.optimizer=="adam":
                 self.training_op=tf.train.AdamOptimizer()
Contributor: CSTR-Edinburgh, Project: merlin, Lines: 47, Source: model.py

Example 9: build_model

  def build_model(self, features, feature_columns, is_training):
    """See base class."""
    self._feature_columns = feature_columns

    input_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas,
            min_slice_size=64 << 20))
    with variable_scope.variable_scope(
        self._scope + "/input_from_feature_columns",
        values=features.values(),
        partitioner=input_layer_partitioner) as scope:
      net = layers.input_from_feature_columns(
          features,
          self._get_feature_columns(),
          weight_collections=[self._scope],
          trainable=self._trainable,
          scope=scope)

    hidden_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas))
    for layer_id, num_hidden_units in enumerate(self._hidden_units):
      with variable_scope.variable_scope(
          self._scope + "/hiddenlayer_%d" % layer_id,
          values=[net],
          partitioner=hidden_layer_partitioner) as scope:
        net = layers.fully_connected(
            net,
            num_hidden_units,
            activation_fn=self._activation_fn,
            variables_collections=[self._scope],
            trainable=self._trainable,
            scope=scope)
        if self._dropout is not None and is_training:
          net = layers.dropout(
              net,
              keep_prob=(1.0 - self._dropout))
      self._add_hidden_layer_summary(net, scope.name)

    with variable_scope.variable_scope(
        self._scope + "/logits",
        values=[net],
        partitioner=hidden_layer_partitioner) as scope:
      logits = layers.fully_connected(
          net,
          self._num_label_columns,
          activation_fn=None,
          variables_collections=[self._scope],
          trainable=self._trainable,
          scope=scope)
    self._add_hidden_layer_summary(logits, "logits")
    return logits
Contributor: ComeOnGetMe, Project: tensorflow, Lines: 53, Source: composable_model.py

Example 10: model

def model():
    tf.set_random_seed(1)
    print("building model ...")
    with tf.variable_scope('train'):
        print("building model ...")
        X_pl = tf.placeholder(tf.float32, [None, num_features])
        print("X_pl", X_pl.get_shape())
        t_pl = tf.placeholder(tf.int32, [None,])
        print("t_pl", t_pl.get_shape())
        is_training_pl = tf.placeholder(tf.bool)
        X_bn = batch_norm(X_pl, is_training=is_training_pl)
        print("X_bn", X_bn.get_shape())
        l1 = fully_connected(X_pl, num_outputs=100, activation_fn=relu)#, normalizer_fn=batch_norm)
        print("l1", l1.get_shape())
        l1_drop = dropout(l1, is_training=is_training_pl)
        print("l1_drop", l1_drop.get_shape())
        l_out = fully_connected(l1_drop, num_outputs=num_classes, activation_fn=None)
        print("l_out", l_out.get_shape())
        l_out_softmax = tf.nn.softmax(l_out)
        tf.contrib.layers.summarize_variables()

    with tf.variable_scope('metrics'):
        loss = sparse_softmax_cross_entropy_with_logits(labels=t_pl, logits=l_out)
        print("loss", loss.get_shape())
        loss = tf.reduce_mean(loss)
        print("loss", loss.get_shape())
        tf.summary.scalar('train/loss', loss)
        argmax = tf.to_int32(tf.argmax(l_out, 1))
        print("argmax", argmax.get_shape())
        correct = tf.to_float(tf.equal(argmax, t_pl))
        print("correct,", correct.get_shape())
        accuracy = tf.reduce_mean(correct)
        print("accuracy", accuracy.get_shape())

    with tf.variable_scope('optimizer'):
        print("building optimizer ...")
        global_step = tf.Variable(0, name='global_step', trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        grads_and_vars = optimizer.compute_gradients(loss)
        gradients, variables = zip(*grads_and_vars)
        clipped_gradients, global_norm = (
            tf.clip_by_global_norm(gradients, clip_norm))
        clipped_grads_and_vars = zip(clipped_gradients, variables)

        tf.summary.scalar('train/global_gradient_norm', global_norm)

        train_op = optimizer.apply_gradients(clipped_grads_and_vars, global_step=global_step)

    return X_pl, t_pl, is_training_pl, l_out, l_out_softmax, loss, accuracy, train_op, global_step
Contributor: alrojo, Project: EEG_DauwelsLab, Lines: 49, Source: mlp.py

Example 11: general_module_end_operations

    def general_module_end_operations(self, tensor, dropout_on, strided_max_pool_on):
        """
        Common end of module operations.

        :param tensor: The tensor being processed.
        :type tensor: tf.Tensor
        :param dropout_on: Whether to include dropout or not.
        :type dropout_on: bool
        :param strided_max_pool_on: Whether to include a strided max pool at the end of the module.
        :type strided_max_pool_on: bool
        :return: The processed tensor.
        :rtype: tf.Tensor
        """
        if strided_max_pool_on:
            tensor = max_pool2d(tensor, kernel_size=3, stride=2, padding='VALID')
        if dropout_on:
            tensor = dropout(tensor, self.dropout_keep_probability_tensor)
        return tensor
Contributor: golmschenk, Project: go_net, Lines: 18, Source: net.py
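
This example illustrates a third idiom: instead of an is_training flag, the keep probability itself is a tensor (self.dropout_keep_probability_tensor), and evaluation can feed 1.0 to turn the op into an identity. A minimal sketch of that pattern (independent of the go_net code above):

import tensorflow as tf
from tensorflow.contrib.layers import dropout

net = tf.ones([2, 8])
# Defaults to 1.0 (no dropout) unless a smaller value is fed.
keep_prob = tf.placeholder_with_default(1.0, shape=())
net = dropout(net, keep_prob)

with tf.Session() as sess:
    print(sess.run(net, {keep_prob: 0.5}))  # training step: feed the real keep probability
    print(sess.run(net))                    # evaluation: no feed, keep_prob stays 1.0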

Example 12: conv_model

def conv_model(feature, target, mode):
  """2-layer convolution model."""
  # Convert the target to a one-hot tensor of shape (batch_size, 10), with an
  # on-value of 1 for each one-hot vector of length 10.
  target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)

  # Reshape feature to a 4d tensor, with the 2nd and 3rd dimensions being
  # image width and height, and the final dimension being the number of color channels.
  feature = tf.reshape(feature, [-1, 28, 28, 1])

  # First conv layer will compute 32 features for each 5x5 patch
  with tf.variable_scope('conv_layer1'):
    h_conv1 = layers.convolution(feature, 32, kernel_size=[5, 5],
                                 activation_fn=tf.nn.relu)
    h_pool1 = max_pool_2x2(h_conv1)

  # Second conv layer will compute 64 features for each 5x5 patch.
  with tf.variable_scope('conv_layer2'):
    h_conv2 = layers.convolution(h_pool1, 64, kernel_size=[5, 5],
                                 activation_fn=tf.nn.relu)
    h_pool2 = max_pool_2x2(h_conv2)
    # reshape tensor into a batch of vectors
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])

  # Densely connected layer with 1024 neurons.
  h_fc1 = layers.dropout(
      layers.fully_connected(
          h_pool2_flat, 1024, activation_fn=tf.nn.relu), keep_prob=0.5,
      is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)

  # Compute logits (1 per class) and compute loss.
  logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)

  # Create a tensor for training op.
  train_op = layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(), optimizer='SGD',
      learning_rate=0.001)

  return tf.argmax(logits, 1), loss, train_op
Contributor: ComeOnGetMe, Project: tensorflow, Lines: 40, Source: mnist.py

Example 13: build_model

  def build_model(self, features, feature_columns, is_training):
    """See base class."""
    features = self._get_feature_dict(features)
    self._feature_columns = feature_columns

    net = layers.input_from_feature_columns(
        features,
        self._get_feature_columns(),
        weight_collections=[self._weight_collection_name])
    for layer_id, num_hidden_units in enumerate(self._hidden_units):
      with variable_scope.variable_op_scope(
          [net], "hiddenlayer_%d" % layer_id,
          partitioner=partitioned_variables.min_max_variable_partitioner(
              max_partitions=self._config.num_ps_replicas)) as scope:
        net = layers.fully_connected(
            net,
            num_hidden_units,
            activation_fn=self._activation_fn,
            variables_collections=[self._weight_collection_name],
            scope=scope)
        if self._dropout is not None and is_training:
          net = layers.dropout(
              net,
              keep_prob=(1.0 - self._dropout))
      self._add_hidden_layer_summary(net, scope.name)
    with variable_scope.variable_op_scope(
        [net], "dnn_logits",
        partitioner=partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._config.num_ps_replicas)) as scope:
      logits = layers.fully_connected(
          net,
          self._num_label_columns,
          activation_fn=None,
          variables_collections=[self._weight_collection_name],
          scope=scope)
    self._add_hidden_layer_summary(logits, "dnn_logits")
    return logits
Contributor: Brandon-Tai, Project: tensorflow, Lines: 37, Source: dnn_linear_combined.py

Example 14: _dnn_logits

 def _dnn_logits(self, features, is_training=False):
     net = layers.input_from_feature_columns(
         features, self._get_dnn_feature_columns(), weight_collections=[self._dnn_weight_collection]
     )
     for layer_id, num_hidden_units in enumerate(self._dnn_hidden_units):
         with variable_scope.variable_op_scope(
             [net],
             "hiddenlayer_%d" % layer_id,
             partitioner=partitioned_variables.min_max_variable_partitioner(
                 max_partitions=self._config.num_ps_replicas
             ),
         ) as scope:
             net = layers.fully_connected(
                 net,
                 num_hidden_units,
                 activation_fn=self._dnn_activation_fn,
                 variables_collections=[self._dnn_weight_collection],
                 scope=scope,
             )
             if self._dnn_dropout is not None and is_training:
                 net = layers.dropout(net, keep_prob=(1.0 - self._dnn_dropout))
         self._add_hidden_layer_summary(net, scope.name)
     with variable_scope.variable_op_scope(
         [net],
         "dnn_logit",
         partitioner=partitioned_variables.min_max_variable_partitioner(max_partitions=self._config.num_ps_replicas),
     ) as scope:
         logit = layers.fully_connected(
             net,
             self._target_column.num_label_columns,
             activation_fn=None,
             variables_collections=[self._dnn_weight_collection],
             scope=scope,
         )
     self._add_hidden_layer_summary(logit, "dnn_logit")
     return logit
Contributor: 285219011, Project: liuwenfeng, Lines: 36, Source: dnn_linear_combined.py

Example 15: _dnn_model_fn

def _dnn_model_fn(features, labels, mode, params, config=None):
  """Deep Neural Net model_fn.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `_Head` instance.
      * hidden_units: List of hidden units per layer.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * optimizer: string, `Optimizer` object, or callable that defines the
          optimizer to use for training. If `None`, will use the Adagrad
          optimizer with a default learning rate of 0.05.
      * activation_fn: Activation function applied to each layer. If `None`,
          will use `tf.nn.relu`.
      * dropout: When not `None`, the probability we will drop out a given
          coordinate.
      * gradient_clip_norm: A float > 0. If provided, gradients are
          clipped to their global norm with this clipping ratio.
      * embedding_lr_multipliers: Optional. A dictionary from
          `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to
          multiply with learning rate for the embedding variables.
      * input_layer_min_slice_size: Optional. The min slice size of input layer
          partitions. If not provided, will use the default of 64M.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    predictions: A dict of `Tensor` objects.
    loss: A scalar containing the loss of the step.
    train_op: The op for training.
  """
  head = params["head"]
  hidden_units = params["hidden_units"]
  feature_columns = params["feature_columns"]
  optimizer = params.get("optimizer") or "Adagrad"
  activation_fn = params.get("activation_fn")
  dropout = params.get("dropout")
  gradient_clip_norm = params.get("gradient_clip_norm")
  input_layer_min_slice_size = (
      params.get("input_layer_min_slice_size") or 64 << 20)
  num_ps_replicas = config.num_ps_replicas if config else 0
  embedding_lr_multipliers = params.get("embedding_lr_multipliers", {})

  features = _get_feature_dict(features)
  parent_scope = "dnn"

  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas)
  with variable_scope.variable_scope(
      parent_scope,
      values=tuple(six.itervalues(features)),
      partitioner=partitioner):
    input_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=num_ps_replicas,
            min_slice_size=input_layer_min_slice_size))
    with variable_scope.variable_scope(
        "input_from_feature_columns",
        values=tuple(six.itervalues(features)),
        partitioner=input_layer_partitioner) as input_layer_scope:
      if all([
          isinstance(fc, feature_column._FeatureColumn)  # pylint: disable=protected-access
          for fc in feature_columns
      ]):
        net = layers.input_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=feature_columns,
            weight_collections=[parent_scope],
            scope=input_layer_scope)
      else:
        net = fc_core.input_layer(
            features=features,
            feature_columns=feature_columns,
            weight_collections=[parent_scope])

    for layer_id, num_hidden_units in enumerate(hidden_units):
      with variable_scope.variable_scope(
          "hiddenlayer_%d" % layer_id,
          values=(net,)) as hidden_layer_scope:
        net = layers.fully_connected(
            net,
            num_hidden_units,
            activation_fn=activation_fn,
            variables_collections=[parent_scope],
            scope=hidden_layer_scope)
        if dropout is not None and mode == model_fn.ModeKeys.TRAIN:
          net = layers.dropout(net, keep_prob=(1.0 - dropout))
      _add_hidden_layer_summary(net, hidden_layer_scope.name)

    with variable_scope.variable_scope(
        "logits",
        values=(net,)) as logits_scope:
      logits = layers.fully_connected(
          net,
          head.logits_dimension,
#......... remainder of this code omitted .........
Contributor: AlbertXiebnu, Project: tensorflow, Lines: 101, Source: dnn.py
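
The docstring above fully specifies the params contract, so a hypothetical caller (stand-in values only; head and feature-column construction omitted) might assemble it like this:

head = None             # stand-in: a _Head instance in real use
feature_columns = []    # stand-in: an iterable of feature columns
params = {
    "head": head,
    "hidden_units": [256, 128],
    "feature_columns": feature_columns,
    "optimizer": None,                  # falls back to Adagrad with learning rate 0.05
    "activation_fn": None,              # falls back to tf.nn.relu
    "dropout": 0.5,                     # drop probability; used as keep_prob=1.0-0.5
    "gradient_clip_norm": 5.0,
    "embedding_lr_multipliers": {},
    "input_layer_min_slice_size": None, # falls back to 64M
}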


Note: the tensorflow.contrib.layers.dropout examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are governed by each project's license. Do not republish without permission.