

Python prettytensor.defaults_scope Method Code Examples

This article collects typical usage examples of the Python method prettytensor.defaults_scope. If you are wondering what exactly prettytensor.defaults_scope does, how to use it, or what real-world examples look like, the curated code samples below should help. You can also explore further usage examples from the prettytensor module where this method is defined.


A total of 15 code examples of the prettytensor.defaults_scope method are shown below, listed in order of popularity.
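
Before diving into the examples, here is a minimal, self-contained sketch of what defaults_scope does: keyword defaults set on the scope (an activation function and an L2 weight penalty in this case) are inherited by every Pretty Tensor layer created inside it. The placeholder shapes and layer sizes below are illustrative assumptions, not taken from any of the cited projects.

import tensorflow as tf
import prettytensor as pt

# Illustrative inputs (shapes chosen arbitrarily for this sketch).
images = tf.placeholder(tf.float32, [None, 28, 28, 1], name='images')
labels = tf.placeholder(tf.float32, [None, 10], name='labels')

# Layers built inside the scope inherit activation_fn and l2loss,
# so they do not have to be repeated on every call.
with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
    softmax, loss = (pt.wrap(images).
                     flatten().
                     fully_connected(100).
                     softmax_classifier(10, labels))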

Example 1: build_model

# Required import: import prettytensor [as alias]
# Or: from prettytensor import defaults_scope [as alias]
def build_model(sess, embedding_dim, batch_size):
    model = CondGAN(
        lr_imsize=cfg.TEST.LR_IMSIZE,
        hr_lr_ratio=int(cfg.TEST.HR_IMSIZE/cfg.TEST.LR_IMSIZE))

    embeddings = tf.placeholder(
        tf.float32, [batch_size, embedding_dim],
        name='conditional_embeddings')
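    # phase=Phase.test is inherited by every phase-aware layer built inside this scope
    # (e.g. batch normalization), so the graph is assembled in inference mode.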
    with pt.defaults_scope(phase=pt.Phase.test):
        with tf.variable_scope("g_net"):
            c = sample_encoded_context(embeddings, model)
            z = tf.random_normal([batch_size, cfg.Z_DIM])
            fake_images = model.get_generator(tf.concat(1, [c, z]))
        with tf.variable_scope("hr_g_net"):
            hr_c = sample_encoded_context(embeddings, model)
            hr_fake_images = model.hr_get_generator(fake_images, hr_c)

    ckt_path = cfg.TEST.PRETRAINED_MODEL
    if ckt_path.find('.ckpt') != -1:
        print("Reading model parameters from %s" % ckt_path)
        saver = tf.train.Saver(tf.all_variables())
        saver.restore(sess, ckt_path)
    else:
        print("Input a valid model path.")
    return embeddings, fake_images, hr_fake_images 
Author: hanzhanggit, Project: StackGAN, Lines: 27, Source: demo.py

Example 2: image2feature

# Required import: import prettytensor [as alias]
# Or: from prettytensor import defaults_scope [as alias]
def image2feature(self, image_tensor):

        if self.patch_feature_dim == 0:
            return None

        hgd = [
            {"type": "conv2d", "depth": 32, "decoder_depth": 64},
            {"type": "conv2d", "depth": 64, "decoder_depth": 64},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 40 x 40
            {"type": "conv2d", "depth": 128},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 20x20
            {"type": "conv2d", "depth": 256},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 10x10
            {"type": "conv2d", "depth": 512},
        ]

        with pt.defaults_scope(**self.pt_defaults_scope_value()):
            feature_map = hourglass(
                image_tensor, hgd,
                net_type=self.options["hourglass_type"] if "hourglass_type" in self.options else None
            )
        return feature_map 
Author: YutingZhang, Project: lmdis-rep, Lines: 27, Source: general_64x64.py

Example 3: image2feature

# Required import: import prettytensor [as alias]
# Or: from prettytensor import defaults_scope [as alias]
def image2feature(self, image_tensor):

        if self.patch_feature_dim == 0:
            return None
        
        hgd = [
            {"type": "conv2d", "depth": 32, "decoder_depth": 64},
            {"type": "conv2d", "depth": 64, "decoder_depth": 64},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 40 x 40
            {"type": "conv2d", "depth": 128},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 20x20
            {"type": "conv2d", "depth": 256},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 10x10
            {"type": "conv2d", "depth": 512},
        ]

        with pt.defaults_scope(**self.pt_defaults_scope_value()):
            feature_map = hourglass(
                image_tensor, hgd,
                net_type=self.options["hourglass_type"] if "hourglass_type" in self.options else None
            )
        return feature_map 
Author: YutingZhang, Project: lmdis-rep, Lines: 27, Source: general_80x80.py

Example 4: encoder

# Required import: import prettytensor [as alias]
# Or: from prettytensor import defaults_scope [as alias]
def encoder(self, inputs, latent_size, activ=tf.nn.elu, phase=pt.Phase.train):
        with pt.defaults_scope(activation_fn=activ,
                               batch_normalize=True,
                               learned_moments_update_rate=0.0003,
                               variance_epsilon=0.001,
                               scale_after_normalization=True,
                               phase=phase):
            params = (pt.wrap(inputs).
                      reshape([-1, self.input_shape[0], self.input_shape[1], 1]).
                      conv2d(5, 32, stride=2).
                      conv2d(5, 64, stride=2).
                      conv2d(5, 128, edges='VALID').
                      #dropout(0.9).
                      flatten().
                      fully_connected(self.latent_size * 2, activation_fn=None)).tensor

        mean = params[:, :self.latent_size]
        stddev = params[:, self.latent_size:]
        return [mean, stddev] 
Author: jramapuram, Project: CVAE, Lines: 21, Source: cvae.py

Example 5: main_network

# Required import: import prettytensor [as alias]
# Or: from prettytensor import defaults_scope [as alias]
def main_network(images, training):
    x_pretty = pt.wrap(images)
    if training:
        phase = pt.Phase.train
    else:
        phase = pt.Phase.infer
    with pt.defaults_scope(activation_fn=tf.nn.relu, phase=phase):
        y_pred, loss = x_pretty.\
        conv2d(kernel=5, depth=64, name="layer_conv1", batch_normalize=True).\
        max_pool(kernel=2, stride=2).\
        conv2d(kernel=5, depth=64, name="layer_conv2").\
        max_pool(kernel=2, stride=2).\
        flatten().\
        fully_connected(size=256, name="layer_fc1").\
        fully_connected(size=128, name="layer_fc2").\
        softmax_classifier(num_classes, labels=y_true)
    return y_pred, loss 
Author: lawlite19, Project: MachineLearning_TensorFlow, Lines: 19, Source: cnn_for_CIFAR-10.py

Example 6: init_opt

# Required import: import prettytensor [as alias]
# Or: from prettytensor import defaults_scope [as alias]
def init_opt(self):
        self.build_placeholder()

        with pt.defaults_scope(phase=pt.Phase.train):
            with tf.variable_scope("g_net"):
                # ####get output from G network################################
                c, kl_loss = self.sample_encoded_context(self.embeddings)
                z = tf.random_normal([self.batch_size, cfg.Z_DIM])
                self.log_vars.append(("hist_c", c))
                self.log_vars.append(("hist_z", z))
                fake_images = self.model.get_generator(tf.concat(1, [c, z]))

            # ####get discriminator_loss and generator_loss ###################
            discriminator_loss, generator_loss =\
                self.compute_losses(self.images,
                                    self.wrong_images,
                                    fake_images,
                                    self.embeddings)
            generator_loss += kl_loss
            self.log_vars.append(("g_loss_kl_loss", kl_loss))
            self.log_vars.append(("g_loss", generator_loss))
            self.log_vars.append(("d_loss", discriminator_loss))

            # #######Total loss for build optimizers###########################
            self.prepare_trainer(generator_loss, discriminator_loss)
            # #######define self.g_sum, self.d_sum,....########################
            self.define_summaries()

        with pt.defaults_scope(phase=pt.Phase.test):
            with tf.variable_scope("g_net", reuse=True):
                self.sampler()
            self.visualization(cfg.TRAIN.NUM_COPY)
            print("success") 
Author: hanzhanggit, Project: StackGAN, Lines: 35, Source: trainer.py

Example 7: _make_encoder_template

# Required import: import prettytensor [as alias]
# Or: from prettytensor import defaults_scope [as alias]
def _make_encoder_template(self):
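        # 'phase' is left as an UnboundVariable so the caller can choose train/test
        # later, at the time the template is actually constructed.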
        defaults_scope = {
            'phase': pt.UnboundVariable('phase', default=pt.Phase.train),
            'scale_after_normalization': True,
            }
        with pt.defaults_scope(**defaults_scope):
          with tf.variable_scope("encoder"):
            if self.network_type=="fully-connected":
                z_dim = self.latent_dist.dist_flat_dim
                self.encoder_template = (pt.template("x_in").
                                         custom_fully_connected(1000).
                                         batch_normalize().
                                         apply(tf.nn.elu).
                                         custom_fully_connected(1000).
                                         batch_normalize().
                                         apply(tf.nn.elu).
                                         custom_fully_connected(z_dim))

            elif self.network_type=="convolutional":
                z_dim = self.latent_dist.dist_flat_dim
                self.encoder_template = (pt.template("x_in").
                                         reshape([-1] + list(self.image_shape)).
                                         custom_conv2d(64, k_h=4, k_w=4).
                                         apply(tf.nn.elu).
                                         custom_conv2d(128, k_h=4, k_w=4).
                                         batch_normalize().
                                         apply(tf.nn.elu).
                                         custom_fully_connected(1024).
                                         batch_normalize().
                                         apply(tf.nn.elu).
                                         custom_fully_connected(z_dim)) 
Author: gitmatti, Project: AAE-tensorflow, Lines: 33, Source: adversarial_autoencoder.py

Example 8: _make_decoder_template

# Required import: import prettytensor [as alias]
# Or: from prettytensor import defaults_scope [as alias]
def _make_decoder_template(self):
        defaults_scope = {
            'phase': pt.UnboundVariable('phase', default=pt.Phase.train),
            'scale_after_normalization': True,
            }
        image_size = self.image_shape[0]
        with pt.defaults_scope(**defaults_scope):
          with tf.variable_scope("decoder"):
            if self.network_type=="fully-connected":
                self.decoder_template = (pt.template("z_in").
                                         custom_fully_connected(1000).
                                         apply(tf.nn.relu).
                                         custom_fully_connected(1000).
                                         batch_normalize().
                                         apply(tf.nn.relu).
                                         custom_fully_connected(self.image_dim))

            elif self.network_type=="convolutional":
                self.decoder_template = \
                    (pt.template("z_in").
                     custom_fully_connected(1024).
                     batch_normalize().
                     apply(tf.nn.relu).
                     custom_fully_connected(image_size/4 * image_size/4 * 128).
                     batch_normalize().
                     apply(tf.nn.relu).
                     reshape([-1, image_size/4, image_size/4, 128]).
                     custom_deconv2d([0, image_size/2, image_size/2, 64],
                                     k_h=4, k_w=4).
                     batch_normalize().
                     apply(tf.nn.relu).
                     custom_deconv2d([0] + list(self.image_shape),
                                     k_h=4, k_w=4).
                     flatten()) 
Author: gitmatti, Project: AAE-tensorflow, Lines: 36, Source: adversarial_autoencoder.py

Example 9: _make_discriminator_template

# Required import: import prettytensor [as alias]
# Or: from prettytensor import defaults_scope [as alias]
def _make_discriminator_template(self):
        defaults_scope = {
            'phase': pt.UnboundVariable('phase', default=pt.Phase.train),
            'scale_after_normalization': True,
            }
        with pt.defaults_scope(**defaults_scope):
          with tf.variable_scope("discriminator"):
            self.discriminator_template = (pt.template("z_in").
                                           custom_fully_connected(1000).
                                           apply(tf.nn.relu).
                                           custom_fully_connected(1000).
                                           batch_normalize().
                                           apply(tf.nn.relu).
                                           custom_fully_connected(1)) 
Author: gitmatti, Project: AAE-tensorflow, Lines: 16, Source: adversarial_autoencoder.py

Example 10: multilayer_fully_connected

# Required import: import prettytensor [as alias]
# Or: from prettytensor import defaults_scope [as alias]
def multilayer_fully_connected(images, labels):
    images = pt.wrap(images)
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        return (images.flatten().\
                fully_connected(100).\
                fully_connected(100).\
                softmax_classifier(10, labels)) 
Author: PacktPublishing, Project: Deep-Learning-with-TensorFlow-Second-Edition, Lines: 9, Source: pretty_tensor_digit.py

Example 11: lenet5

# Required import: import prettytensor [as alias]
# Or: from prettytensor import defaults_scope [as alias]
def lenet5(images, labels):
    images = pt.wrap(images)
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        return (images.conv2d(5, 20).\
                max_pool(2, 2).\
                conv2d(5, 50).\
                max_pool(2, 2).\
                flatten().\
                fully_connected(500).\
                softmax_classifier(10, labels)) 
Author: PacktPublishing, Project: Deep-Learning-with-TensorFlow-Second-Edition, Lines: 13, Source: pretty_tensor_digit.py

Example 12: build_model

# Required import: import prettytensor [as alias]
# Or: from prettytensor import defaults_scope [as alias]
def build_model(self):
    tf.reset_default_graph()
    self._batch_shape = inp.get_batch_shape(FLAGS.batch_size, FLAGS.input_path)
    self._current_step = tf.Variable(0, trainable=False, name='global_step')
    self._step = tf.assign(self._current_step, self._current_step + 1)
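    # Nested defaults_scope calls compose: layers created below inherit both the
    # activation default and phase=Phase.train.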
    with pt.defaults_scope(activation_fn=self._activation.func):
      with pt.defaults_scope(phase=pt.Phase.train):
        with tf.variable_scope(self.encoder_scope):
          self._build_encoder()
        with tf.variable_scope(self.decoder_scope):
          self._build_decoder() 
Author: yselivonchyk, Project: TensorFlow_DCIGN, Lines: 13, Source: IGNModel.py

Example 13: image2heatmap

# Required import: import prettytensor [as alias]
# Or: from prettytensor import defaults_scope [as alias]
def image2heatmap(self, image_tensor):
        hgd = [
            {"type": "conv2d", "depth": 32, "decoder_depth": self.options["keypoint_num"] + 1,
             "decoder_activation_fn": None},
            # plus one for bg
            {"type": "conv2d", "depth": 32},
            {"type": "skip", "layer_num": 3, },
            {"type": "pool", "pool": "max"},
            {"type": "conv2d", "depth": 64},
            {"type": "conv2d", "depth": 64},
            {"type": "skip", "layer_num": 3, },
            {"type": "pool", "pool": "max"},
            {"type": "conv2d", "depth": 64},
            {"type": "conv2d", "depth": 64},
            {"type": "skip", "layer_num": 3, },
            {"type": "pool", "pool": "max"},
            {"type": "conv2d", "depth": 64},
            {"type": "conv2d", "depth": 64},
        ]

        with pt.defaults_scope(**self.pt_defaults_scope_value()):
            raw_heatmap = hourglass(
                image_tensor, hgd,
                net_type=self.options["hourglass_type"] if "hourglass_type" in self.options else None
            )
            # raw_heatmap = pt.wrap(raw_heatmap).pixel_bias(activation_fn=None).tensor

        return raw_heatmap 
Author: YutingZhang, Project: lmdis-rep, Lines: 30, Source: general_64x64.py

Example 14: image2heatmap

# Required import: import prettytensor [as alias]
# Or: from prettytensor import defaults_scope [as alias]
def image2heatmap(self, image_tensor):
        mid_tensor = (
            pt.wrap(image_tensor).
            conv2d(3, 32).
            max_pool(2, 2)
        ).tensor 

        hgd = [
            {"type": "conv2d", "depth": 64},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 32 x 32
            {"type": "conv2d", "depth": 128},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 16 x 16
            {"type": "conv2d", "depth": 256},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 8 x 8
            {"type": "conv2d", "depth": 512},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 4 x 4
            {"type": "conv2d", "depth": 512},
        ]

        with pt.defaults_scope(**self.pt_defaults_scope_value()):
            raw_heatmap_feat = hourglass(
                mid_tensor, hgd,
                net_type = self.options["hourglass_type"] if "hourglass_type" in self.options else None
            )

        return raw_heatmap_feat 
Author: YutingZhang, Project: lmdis-rep, Lines: 32, Source: general_128x128_landmark.py

Example 15: bg_feature

# Required import: import prettytensor [as alias]
# Or: from prettytensor import defaults_scope [as alias]
def bg_feature(self, image_tensor):
        with pt.defaults_scope(**self.pt_defaults_scope_value()):
            return (
                pt.wrap(image_tensor).
                conv2d(3, 32).max_pool(2, 2).   # 64
                conv2d(3, 64).max_pool(2, 2).   # 32
                conv2d(3, 128).max_pool(2, 2).  # 16
                conv2d(3, 256).max_pool(2, 2).  # 8
                conv2d(3, 256).max_pool(2, 2).  # 4
                conv2d(3, 512).max_pool(2, 2).  # 2
                conv2d(3, 512)
            ) 
Author: YutingZhang, Project: lmdis-rep, Lines: 14, Source: general_128x128_landmark.py


Note: The prettytensor.defaults_scope examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code, and do not reproduce this article without permission.