

Python layers.optimize_loss Method: Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.layers.optimize_loss, gathered from open-source code. If you are wondering what layers.optimize_loss does or how to use it in practice, the curated code examples below may help. You can also explore other usage examples from its containing module, tensorflow.contrib.layers.


The sections below present 10 code examples of layers.optimize_loss, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
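Before diving into the examples, here is a minimal, self-contained sketch of the typical call pattern (assuming TensorFlow 1.x with tf.contrib available; the loss below is a trivial stand-in, not taken from any of the projects):

import tensorflow as tf
from tensorflow.contrib import layers

# Minimal sketch of layers.optimize_loss (assumes TensorFlow 1.x).
x = tf.placeholder(tf.float32, [None, 4])
w = tf.Variable(tf.zeros([4, 1]))
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - 1.0))  # stand-in loss

global_step = tf.contrib.framework.get_or_create_global_step()
train_op = layers.optimize_loss(
    loss,
    global_step,            # incremented once each time train_op is run
    learning_rate=0.01,
    optimizer='SGD',        # string name ('SGD', 'Adam', ...), class, or instance
    clip_gradients=5.0)     # optional clipping by global norm

Running train_op in a session performs one update step and evaluates to the loss value, which is why the examples below return it directly as the training op.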

Example 1: dnn_tanh

# Required import: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import optimize_loss [as alias]
def dnn_tanh(features, target):
    target = tf.one_hot(target, 2, 1.0, 0.0)
    # Organize continuous features.
    final_features = [tf.expand_dims(tf.cast(features[var], tf.float32), 1) for var in continues_vars]
    # Embed categorical variables into distributed representation.
    for var in categorical_vars:
        feature = learn.ops.categorical_variable(
            features[var + '_ids'], len(categorical_var_encoders[var].classes_), 
            embedding_size=CATEGORICAL_EMBED_SIZE, name=var)
        final_features.append(feature)
    # Concatenate all features into one vector.
    features = tf.concat(final_features, 1)  # tf.concat(values, axis) in TF 1.x
    # Deep Neural Network
    logits = layers.stack(features, layers.fully_connected, [10, 20, 10],
        activation_fn=tf.tanh)
    prediction, loss = learn.models.logistic_regression(logits, target)
    train_op = layers.optimize_loss(loss,
        tf.contrib.framework.get_global_step(), optimizer='SGD', learning_rate=0.05)
    return tf.argmax(prediction, dimension=1), loss, train_op 
Author: ilblackdragon | Project: tf_examples | Lines: 21 | Source file: titanic_all_features_with_fc.py
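Model functions like dnn_tanh return a (predictions, loss, train_op) triple, which tf.contrib.learn's Estimator consumes directly. A hypothetical wiring sketch (X, y, and X_test stand for preprocessed NumPy arrays; they are not names from the original project):

from tensorflow.contrib import learn

# Hypothetical wiring: plug the model function above into an Estimator.
classifier = learn.Estimator(model_fn=dnn_tanh)
classifier.fit(X, y, steps=1000, batch_size=32)   # X, y assumed preprocessed
predictions = classifier.predict(X_test)          # X_test assumed available

The same pattern applies to the other model functions in this article that return this triple.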

Example 2: __init__

# Required import: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import optimize_loss [as alias]
def __init__(self, hidden_size, batch_size, learning_rate):
        self.input_tensor = tf.placeholder(tf.float32, [None, 28 * 28])

        with arg_scope([layers.conv2d, layers.conv2d_transpose],
                       activation_fn=concat_elu,
                       normalizer_fn=layers.batch_norm,
                       normalizer_params={'scale': True}):
            with tf.variable_scope("model"):
                D1 = discriminator(self.input_tensor)  # positive examples
                D_params_num = len(tf.trainable_variables())
                G = decoder(tf.random_normal([batch_size, hidden_size]))
                self.sampled_tensor = G

            with tf.variable_scope("model", reuse=True):
                D2 = discriminator(G)  # generated examples

        D_loss = self.__get_discrinator_loss(D1, D2)
        G_loss = self.__get_generator_loss(D2)

        params = tf.trainable_variables()
        D_params = params[:D_params_num]
        G_params = params[D_params_num:]
        # train_discrimator = optimizer.minimize(loss=D_loss, var_list=D_params)
        # train_generator = optimizer.minimize(loss=G_loss, var_list=G_params)
        global_step = tf.contrib.framework.get_or_create_global_step()
        self.train_discrimator = layers.optimize_loss(
            D_loss, global_step, learning_rate / 10, 'Adam', variables=D_params, update_ops=[])
        self.train_generator = layers.optimize_loss(
            G_loss, global_step, learning_rate, 'Adam', variables=G_params, update_ops=[])

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer()) 
Author: ikostrikov | Project: TensorFlow-VAE-GAN-DRAW | Lines: 34 | Source file: gan.py
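A hypothetical driver loop for this class (assuming the class is named GAN and mnist is a tf.examples.tutorials-style dataset; neither name comes from the snippet above). Note that optimize_loss returns the loss tensor with the update attached, so running each op also yields the current loss:

# Hypothetical driver loop; GAN and mnist are assumed names.
model = GAN(hidden_size=128, batch_size=64, learning_rate=1e-3)
for step in range(10000):
    images, _ = mnist.train.next_batch(64)
    # Alternate updates; each optimize_loss op also increments global_step.
    d_loss = model.sess.run(model.train_discrimator,
                            {model.input_tensor: images})
    g_loss = model.sess.run(model.train_generator,
                            {model.input_tensor: images})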

Example 3: __init__

# Required import: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import optimize_loss [as alias]
def __init__(self, hidden_size, batch_size, learning_rate):
        self.input_tensor = tf.placeholder(
            tf.float32, [None, 28 * 28])

        with arg_scope([layers.conv2d, layers.conv2d_transpose],
                       activation_fn=tf.nn.elu,
                       normalizer_fn=layers.batch_norm,
                       normalizer_params={'scale': True}):
            with tf.variable_scope("model") as scope:
                encoded = encoder(self.input_tensor, hidden_size * 2)

                mean = encoded[:, :hidden_size]
                stddev = tf.sqrt(tf.exp(encoded[:, hidden_size:]))

                epsilon = tf.random_normal([tf.shape(mean)[0], hidden_size])
                input_sample = mean + epsilon * stddev

                output_tensor = decoder(input_sample)

            with tf.variable_scope("model", reuse=True) as scope:
                self.sampled_tensor = decoder(tf.random_normal(
                    [batch_size, hidden_size]))

        vae_loss = self.__get_vae_cost(mean, stddev)
        rec_loss = self.__get_reconstruction_cost(
            output_tensor, self.input_tensor)

        loss = vae_loss + rec_loss
        self.train = layers.optimize_loss(loss, tf.contrib.framework.get_or_create_global_step(
        ), learning_rate=learning_rate, optimizer='Adam', update_ops=[])

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer()) 
Author: ikostrikov | Project: TensorFlow-VAE-GAN-DRAW | Lines: 35 | Source file: vae.py
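The two cost helpers are defined elsewhere in vae.py and not shown here; the sketch below gives one standard formulation of what such helpers compute (illustrative, not copied from the repo):

# Illustrative sketch of the VAE cost terms; the method names match the
# calls above, but the bodies are standard formulations, not the repo's code.
def __get_vae_cost(self, mean, stddev, epsilon=1e-8):
    # KL divergence between N(mean, stddev^2) and the standard normal prior.
    return tf.reduce_sum(0.5 * (tf.square(mean) + tf.square(stddev) -
                                2.0 * tf.log(stddev + epsilon) - 1.0))

def __get_reconstruction_cost(self, output_tensor, target_tensor, epsilon=1e-8):
    # Bernoulli cross-entropy between the decoder output and the input pixels.
    return tf.reduce_sum(
        -target_tensor * tf.log(output_tensor + epsilon) -
        (1.0 - target_tensor) * tf.log(1.0 - output_tensor + epsilon))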

Example 4: build_train_graph

# Required import: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import optimize_loss [as alias]
def build_train_graph(loss, learning_rate=0.001, clip_norm=5.0):
    """
    Builds the training graph with gradient clipping and an overridable learning rate.
    """
    train_args = {"learning_rate": learning_rate, "clip_norm": clip_norm}
    logger.debug("building training graph: %s.", train_args)

    learning_rate = tf.placeholder_with_default(learning_rate, [], "learning_rate")
    global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = layers.optimize_loss(loss, global_step, learning_rate, "Adam",
                                    clip_gradients=clip_norm)

    model = {"global_step": global_step, "train_op": train_op,
             "learning_rate": learning_rate, "train_args": train_args}
    return model 
Author: yxtay | Project: char-rnn-text-generation | Lines: 17 | Source file: tf_model.py
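Because the learning rate is a tf.placeholder_with_default, it can be overridden at run time, for example to implement manual decay. A hypothetical usage sketch (loss and the decay schedule are assumptions, not part of the original project):

# Hypothetical usage: override the default learning rate per epoch.
model = build_train_graph(loss, learning_rate=0.001, clip_norm=5.0)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(10):
        lr = 0.001 * (0.97 ** epoch)  # assumed exponential decay schedule
        sess.run(model["train_op"],   # input feeds for the loss omitted
                 feed_dict={model["learning_rate"]: lr})

If no value is fed, the placeholder falls back to the default passed at graph-build time, which is why training works unchanged without the feed.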

Example 5: conv_model

# Required import: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import optimize_loss [as alias]
def conv_model(feature, target, mode):
  """2-layer convolution model."""
  # Convert the target to a one-hot tensor of shape (batch_size, 10), with
  # an on-value of 1 for each one-hot vector of length 10.
  target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)

  # Reshape feature to a 4-D tensor, with the 2nd and 3rd dimensions being
  # image width and height, and the final dimension the number of color channels.
  feature = tf.reshape(feature, [-1, 28, 28, 1])

  # First conv layer will compute 32 features for each 5x5 patch
  with tf.variable_scope('conv_layer1'):
    h_conv1 = layers.convolution(feature, 32, kernel_size=[5, 5],
                                 activation_fn=tf.nn.relu)
    h_pool1 = max_pool_2x2(h_conv1)

  # Second conv layer will compute 64 features for each 5x5 patch.
  with tf.variable_scope('conv_layer2'):
    h_conv2 = layers.convolution(h_pool1, 64, kernel_size=[5, 5],
                                 activation_fn=tf.nn.relu)
    h_pool2 = max_pool_2x2(h_conv2)
    # reshape tensor into a batch of vectors
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])

  # Densely connected layer with 1024 neurons.
  h_fc1 = layers.dropout(
      layers.fully_connected(
          h_pool2_flat, 1024, activation_fn=tf.nn.relu), keep_prob=0.5,
      is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)

  # Compute logits (1 per class) and compute loss.
  logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)

  # Create a tensor for training op.
  train_op = layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(), optimizer='SGD',
      learning_rate=0.001)

  return tf.argmax(logits, 1), loss, train_op 
Author: tobegit3hub | Project: deep_image_model | Lines: 42 | Source file: mnist.py

Example 6: dnn_tanh

# Required import: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import optimize_loss [as alias]
def dnn_tanh(features, target):
    target = tf.one_hot(target, 2, 1.0, 0.0)
    logits = layers.stack(features, layers.fully_connected, [10, 20, 10],
        activation_fn=tf.tanh)
    prediction, loss = learn.models.logistic_regression(logits, target)
    train_op = layers.optimize_loss(loss,
        tf.contrib.framework.get_global_step(), optimizer='SGD', learning_rate=0.05)
    return tf.argmax(prediction, dimension=1), loss, train_op 
Author: ilblackdragon | Project: tf_examples | Lines: 10 | Source file: titanic.py

Example 7: categorical_model

# Required import: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import optimize_loss [as alias]
def categorical_model(features, target):
    target = tf.one_hot(target, 2, 1.0, 0.0)
    features = learn.ops.categorical_variable(
        features, n_classes, embedding_size=EMBEDDING_SIZE, name='embarked')
    prediction, loss = learn.models.logistic_regression(tf.squeeze(features, [1]), target)
    train_op = layers.optimize_loss(loss,
        tf.contrib.framework.get_global_step(), optimizer='SGD', learning_rate=0.05)
    return tf.argmax(prediction, dimension=1), loss, train_op 
Author: ilblackdragon | Project: tf_examples | Lines: 10 | Source file: titanic_categorical_variables.py

Example 8: one_hot_categorical_model

# Required import: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import optimize_loss [as alias]
def one_hot_categorical_model(features, target):
    target = tf.one_hot(target, 2, 1.0, 0.0)
    features = tf.one_hot(features, n_classes, 1.0, 0.0)
    prediction, loss = learn.models.logistic_regression(
      tf.squeeze(features, [1]), target)
    train_op = layers.optimize_loss(loss,
        tf.contrib.framework.get_global_step(), optimizer='SGD',
        learning_rate=0.01)
    return tf.argmax(prediction, dimension=1), loss, train_op 
Author: ilblackdragon | Project: tf_examples | Lines: 11 | Source file: titanic_categorical_variables.py

Example 9: conv_model

# Required import: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import optimize_loss [as alias]
def conv_model(features, target):
    target = tf.one_hot(target, 10, 1.0, 0.0)
    features = tf.expand_dims(features, 3)
    features = tf.reduce_max(layers.conv2d(features, 12, [3, 3]), [1, 2])
    features = tf.reshape(features, [-1, 12])
    prediction, loss = learn.models.logistic_regression(features, target)
    train_op = layers.optimize_loss(loss,
        tf.contrib.framework.get_global_step(), optimizer='SGD',
        learning_rate=0.01)
    return tf.argmax(prediction, dimension=1), loss, train_op

# Create a classifier, train and predict. 
Author: ilblackdragon | Project: tf_examples | Lines: 14 | Source file: digits.py

Example 10: conv_learn

# Required import: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import optimize_loss [as alias]
def conv_learn(X, y, mode):
    # Ensure our images are 2d 
    X = tf.reshape(X, [-1, 36, 36, 1])
    # We'll need these in one-hot format
    y = tf.one_hot(tf.cast(y, tf.int32), 5, 1, 0)

    # conv layer will compute 4 kernels for each 5x5 patch
    with tf.variable_scope('conv_layer'):
        # 5x5 convolution, pad with zeros on edges
        h1 = layers.convolution2d(X, num_outputs=4,
                kernel_size=[5, 5], 
                activation_fn=tf.nn.relu)
        # 2x2 Max pooling, no padding on edges
        p1 = tf.nn.max_pool(h1, ksize=[1, 2, 2, 1],
                strides=[1, 2, 2, 1], padding='VALID')
                
    # Flatten the conv output for use in the dense layer.
    p1_size = np.product([s.value for s in p1.get_shape()[1:]])
    p1f = tf.reshape(p1, [-1, p1_size])
    
    # Densely connected layer (5 neurons, matching the code below) with dropout.
    h_fc1 = layers.fully_connected(p1f, 5, activation_fn=tf.nn.relu)
    drop = layers.dropout(h_fc1, keep_prob=0.5, is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
    
    logits = layers.fully_connected(drop, 5, activation_fn=None)
    loss = tf.losses.softmax_cross_entropy(y, logits)
    # Setup the training function manually
    train_op = layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adam',
        learning_rate=0.01)
    return tf.argmax(logits, 1), loss, train_op
# Use a generic Estimator with our model function
Author: PacktPublishing | Project: Hands-On-Deep-Learning-with-TensorFlow | Lines: 39 | Source file: extracting_weights.py
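The source file name (extracting_weights.py) suggests the point of this example is inspecting learned parameters after training. A hypothetical sketch using the contrib.learn Estimator API (train_X, train_y, and the exact variable name are assumptions, not names from the book's code):

# Hypothetical sketch: fit the model function, then read back its weights.
classifier = tf.contrib.learn.Estimator(model_fn=conv_learn)
classifier.fit(train_X, train_y, steps=1000)   # data assumed available
print(classifier.get_variable_names())         # list checkpointed variables
# The exact name depends on variable scoping; this one is an assumption.
conv_weights = classifier.get_variable_value('conv_layer/Conv/weights')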


Note: The tensorflow.contrib.layers.optimize_loss examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors. When distributing or using this code, please refer to the corresponding project's license. Do not repost without permission.