

Python ops.fc method code examples

This article collects typical usage examples of the ops.fc method in Python. If you are wondering what ops.fc does, how to call it, or what real-world usages look like, the curated code examples below may help. You can also explore further usage examples from the ops module that the method belongs to.


Five code examples of the ops.fc method are shown below, sorted by popularity by default.
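
Note that ops is a project-local helper module rather than part of TensorFlow itself, so the exact signature of fc differs between the projects below. As a rough orientation only, a minimal sketch of such a helper in TF 1.x style might look like this (an assumption for illustration, not the implementation used by any of these projects):

import numpy as np
import tensorflow as tf

def fc(inputs, num_outputs, activation_fn=tf.nn.relu, name='fc'):
    """Minimal, hypothetical fully connected helper (illustrative only)."""
    with tf.variable_scope(name):
        # Flatten everything except the batch dimension.
        in_dim = int(np.prod(inputs.get_shape().as_list()[1:]))
        flat = tf.reshape(inputs, [-1, in_dim])
        w = tf.get_variable('weights', [in_dim, num_outputs],
                            initializer=tf.truncated_normal_initializer(stddev=0.01))
        b = tf.get_variable('biases', [num_outputs],
                            initializer=tf.zeros_initializer())
        out = tf.matmul(flat, w) + b
        return activation_fn(out) if activation_fn is not None else out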

Example 1: inception_v3_parameters

# Required import: import ops [as alias]
# Or: from ops import fc [as alias]
def inception_v3_parameters(weight_decay=0.00004, stddev=0.1,
                            batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
  """Yields the scope with the default parameters for inception_v3.

  Args:
    weight_decay: the weight decay for weights variables.
    stddev: standard deviation of the truncated Gaussian weight distribution.
    batch_norm_decay: decay for the moving average of batch_norm momentums.
    batch_norm_epsilon: small float added to variance to avoid dividing by zero.

  Yields:
    an arg_scope with the parameters needed for inception_v3.
  """
  # Set weight_decay for weights in Conv and FC layers.
  with scopes.arg_scope([ops.conv2d, ops.fc],
                        weight_decay=weight_decay):
    # Set stddev, activation and parameters for batch_norm.
    with scopes.arg_scope([ops.conv2d],
                          stddev=stddev,
                          activation=tf.nn.relu,
                          batch_norm_params={
                              'decay': batch_norm_decay,
                              'epsilon': batch_norm_epsilon}) as arg_scope:
      yield arg_scope 
Author: MasazI, Project: InceptionV3_TensorFlow, Lines: 26, Source: inception_model.py
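
As written, inception_v3_parameters is a plain generator; to use it in a with statement it has to be exposed as a context manager (the project presumably wraps it, e.g. with contextlib.contextmanager or an equivalent scopes helper, which is not shown in the snippet). A hypothetical usage sketch:

import contextlib

# Assumption: wrap the generator so it can drive a with block.
inception_v3_scope = contextlib.contextmanager(inception_v3_parameters)

with inception_v3_scope(weight_decay=0.00004, stddev=0.1):
    # ops.conv2d and ops.fc calls made inside this block inherit the shared
    # defaults (weight decay, stddev, activation, batch_norm parameters).
    logits = build_inception_v3(images, num_classes=1001)  # hypothetical builder and inputs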

Example 2: __call__

# Required import: import ops [as alias]
# Or: from ops import fc [as alias]
def __call__(self, input):
        if self._deconv_type == 'bilinear':
            from ops import bilinear_deconv2d as deconv2d
        elif self._deconv_type == 'nn':
            from ops import nn_deconv2d as deconv2d
        elif self._deconv_type == 'transpose':
            from ops import deconv2d
        else:
            raise NotImplementedError
        with tf.variable_scope(self.name, reuse=self._reuse):
            if not self._reuse:
                print('\033[93m'+self.name+'\033[0m')
            _ = tf.reshape(input, [input.get_shape().as_list()[0], 1, 1, -1])
            _ = fc(_, 1024, self._is_train, info=not self._reuse, norm='None', name='fc')
            for i in range(int(np.ceil(np.log2(max(self._h, self._w))))):
                _ = deconv2d(_, max(self._c, int(_.get_shape().as_list()[-1]/2)), 
                             self._is_train, info=not self._reuse, norm=self._norm_type,
                             name='deconv{}'.format(i+1))
            _ = deconv2d(_, self._c, self._is_train, k=1, s=1, info=not self._reuse,
                         activation_fn=tf.tanh, norm='None',
                         name='deconv{}'.format(i+2))
            _ = tf.image.resize_bilinear(_, [self._h, self._w])

            self._reuse = True
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
            return _ 
Author: clvrai, Project: SSGAN-Tensorflow, Lines: 28, Source: generator.py
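
A hypothetical usage sketch for this generator (the latent size and batch size are made-up values, and generator is assumed to be an instance of the class above; the project's actual constructor arguments may differ):

import tensorflow as tf

batch_size = 16
z = tf.random_normal([batch_size, 128])  # latent code fed to the generator
fake_images = generator(z)    # first call builds the variables under self.name
fake_images2 = generator(z)   # later calls reuse them because self._reuse is now True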

Example 3: inference

# Required import: import ops [as alias]
# Or: from ops import fc [as alias]
def inference(inputs, dropout_keep_prob, label_cnt):
    # todo: change lrn parameters
    # conv layer 1
    with tf.name_scope('conv1layer'):
        conv1 = op.conv(inputs, 7, 96, 3)
        conv1 = op.lrn(conv1)
        conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='VALID')

    # conv layer 2
    with tf.name_scope('conv2layer'):
        conv2 = op.conv(conv1, 5, 256, 1, 1.0)
        conv2 = op.lrn(conv2)
        conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='VALID')

    # conv layer 3
    with tf.name_scope('conv3layer'):
        conv3 = op.conv(conv2, 3, 384, 1)

    # conv layer 4
    with tf.name_scope('conv4layer'):
        conv4 = op.conv(conv3, 3, 384, 1, 1.0)

    # conv layer 5
    with tf.name_scope('conv5layer'):
        conv5 = op.conv(conv4, 3, 256, 1, 1.0)
        conv5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')

    # fc layer 1
    with tf.name_scope('fc1layer'):
        fc1 = op.fc(conv5, 4096, 1.0)
        fc1 = tf.nn.dropout(fc1, dropout_keep_prob)

    # fc layer 2
    with tf.name_scope('fc2layer'):
        fc2 = op.fc(fc1, 4096, 1.0)
        fc2 = tf.nn.dropout(fc2, dropout_keep_prob)

    # fc layer 3 - output
    with tf.name_scope('fc3layer'):
        return op.fc(fc2, label_cnt, 1.0, None) 
Author: jireh-father, Project: tensorflow-alexnet, Lines: 42, Source: kaggle_mnist_alexnet_model.py
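
A hypothetical usage sketch (placeholder shapes and the label count are illustrative, not taken from the project):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)

logits = inference(images, keep_prob, label_cnt=10)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))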

Example 4: __call__

# Required import: import ops [as alias]
# Or: from ops import fc [as alias]
def __call__(self, input):
        if self._deconv_type == 'bilinear':
            from ops import bilinear_deconv2d as deconv2d
        elif self._deconv_type == 'nn':
            from ops import nn_deconv2d as deconv2d
        elif self._deconv_type == 'transpose':
            from ops import deconv2d
        else:
            raise NotImplementedError
        with tf.variable_scope(self.name, reuse=self._reuse):
            if not self._reuse:
                log.warn(self.name)
            _ = fc(input, self.start_dim_x * self.start_dim_y * self.start_dim_ch,
                   self._is_train, info=not self._reuse, norm='none', name='fc')
            _ = tf.reshape(_, [_.shape.as_list()[0], self.start_dim_y,
                               self.start_dim_x, self.start_dim_ch])
            if not self._reuse:
                log.info('reshape {} '.format(_.shape.as_list()))
            num_deconv_layer = int(np.ceil(np.log2(
                max(float(self._h/self.start_dim_y), float(self._w/self.start_dim_x)))))
            for i in range(num_deconv_layer):
                _ = deconv2d(_, max(self._c, int(_.get_shape().as_list()[-1]/2)),
                             self._is_train, info=not self._reuse, norm=self._norm_type,
                             name='deconv{}'.format(i+1))
                if num_deconv_layer - i <= self._num_res_block:
                    _ = conv2d_res(
                            _, self._is_train, info=not self._reuse,
                            name='res_block{}'.format(self._num_res_block - num_deconv_layer + i + 1))
            _ = deconv2d(_, self._c, self._is_train, k=1, s=1, info=not self._reuse,
                         activation_fn=tf.tanh, norm='none',
                         name='deconv{}'.format(i+2))
            _ = tf.image.resize_bilinear(_, [self._h, self._w])

            self._reuse = True
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
            return _ 
Author: shaohua0116, Project: WGAN-GP-TensorFlow, Lines: 38, Source: generator.py
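
The number of deconvolution layers is chosen so that repeatedly doubling the starting feature map reaches the target resolution. A small worked example of that formula, with made-up sizes:

import numpy as np

# Illustrative values only: a 4x4 starting feature map upsampled to 32x32.
start_dim_y, start_dim_x = 4, 4
h, w = 32, 32
num_deconv_layer = int(np.ceil(np.log2(max(h / start_dim_y, w / start_dim_x))))
print(num_deconv_layer)  # 3 -> each deconv doubles the spatial size: 4 -> 8 -> 16 -> 32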

Example 5: build

# Required import: import ops [as alias]
# Or: from ops import fc [as alias]
def build(self, is_train=True):

        n = self.a_dim
        conv_info = self.conv_info

        # build loss and accuracy {{{
        def build_loss(logits, labels):
            # Cross-entropy loss
            loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)

            # Classification accuracy
            correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            return tf.reduce_mean(loss), accuracy
        # }}}

        # Classifier: takes images as input and outputs class label [B, m]
        def C(img, q, scope='Classifier'):
            with tf.variable_scope(scope) as scope:
                log.warn(scope.name)
                conv_1 = conv2d(img, conv_info[0], is_train, s_h=3, s_w=3, name='conv_1')
                conv_2 = conv2d(conv_1, conv_info[1], is_train, s_h=3, s_w=3, name='conv_2')
                conv_3 = conv2d(conv_2, conv_info[2], is_train, name='conv_3')
                conv_4 = conv2d(conv_3, conv_info[3], is_train, name='conv_4')
                conv_q = tf.concat([tf.reshape(conv_4, [self.batch_size, -1]), q], axis=1)
                fc_1 = fc(conv_q, 256, name='fc_1')
                fc_2 = fc(fc_1, 256, name='fc_2')
                fc_2 = slim.dropout(fc_2, keep_prob=0.5, is_training=is_train, scope='fc_3/')
                fc_3 = fc(fc_2, n, activation_fn=None, name='fc_3')
                return fc_3

        logits = C(self.img, self.q, scope='Classifier')
        self.all_preds = tf.nn.softmax(logits)
        self.loss, self.accuracy = build_loss(logits, self.a)

        # Add summaries
        def draw_iqa(img, q, target_a, pred_a):
            fig, ax = tfplot.subplots(figsize=(6, 6))
            ax.imshow(img)
            ax.set_title(question2str(q))
            ax.set_xlabel(answer2str(target_a)+answer2str(pred_a, 'Predicted'))
            return fig

        try:
            tfplot.summary.plot_many('IQA/',
                                     draw_iqa, [self.img, self.q, self.a, self.all_preds],
                                     max_outputs=3,
                                     collections=["plot_summaries"])
        except Exception:
            # tfplot summaries are optional; skip them if plotting is unavailable.
            pass

        tf.summary.scalar("loss/accuracy", self.accuracy)
        tf.summary.scalar("loss/cross_entropy", self.loss)
        log.warn('Successfully loaded the model.') 
Author: clvrai, Project: Relation-Network-Tensorflow, Lines: 56, Source: model_baseline.py
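
A hypothetical training wiring for the model built above (the optimizer and learning rate are illustrative choices, not taken from the Relation-Network-Tensorflow project):

import tensorflow as tf

model.build(is_train=True)  # 'model' is assumed to be an instance of the class above
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
train_op = optimizer.minimize(model.loss)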


Note: The ops.fc examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's license before redistributing or using the code, and do not republish without permission.