

Python conv.global_avg_pool Method Code Examples

This article collects typical usage examples of the Python method tflearn.layers.conv.global_avg_pool. If you are wondering what conv.global_avg_pool does, how to call it, or what it looks like in practice, the curated code examples below should help. You can also explore further usage examples of the containing module, tflearn.layers.conv.


The following presents 7 code examples of the conv.global_avg_pool method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
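Before the examples, here is a minimal sketch of what global_avg_pool does: it averages a 4-D feature map over its height and width, turning a [batch, H, W, C] tensor into a [batch, C] tensor. The network below is illustrative only (layer sizes and hyperparameters are assumptions), assuming tflearn on top of TensorFlow 1.x:

# Minimal sketch, assuming tflearn on TensorFlow 1.x; shapes and hyperparameters are illustrative.
import tflearn
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.conv import conv_2d, global_avg_pool
from tflearn.layers.estimator import regression

net = input_data(shape=[None, 32, 32, 3])              # [batch, H, W, C]
net = conv_2d(net, 64, 3, activation='relu')           # [batch, 32, 32, 64]
net = global_avg_pool(net, name='gap')                 # [batch, 64]: averaged over H and W
net = fully_connected(net, 10, activation='softmax')   # classifier head
net = regression(net, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=0)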

Example 1: finalize_get_model

# Required import: from tflearn.layers import conv [as alias]
# Or: from tflearn.layers.conv import global_avg_pool [as alias]
def finalize_get_model(net, flags):
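    # dup() is a helper defined elsewhere in arch.py (not shown here); judging from its
    # use, it returns the same layer twice so it can be stored in net['gap'] and also
    # chained onward as curr.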
    net['gap'], curr = dup(global_avg_pool(net['conv_final'], name='gap'))

    net['final'] = regression(curr,
                              optimizer='adam',
                              learning_rate=flags.lr,
                              batch_size=flags.bs,
                              loss='softmax_categorical_crossentropy',
                              name='target',
                              n_classes=flags.nc,
                              shuffle_batches=True)

    model = tflearn.DNN(net['final'],
                        tensorboard_verbose=0,
                        tensorboard_dir=flags.logdir,
                        best_checkpoint_path=os.path.join(flags.logdir,
                                                          flags.run_id,
                                                          flags.run_id),
                        best_val_accuracy=flags.acc_save)

    model.net_dict = net
    model.flags = flags

    return model 
Developer: daniilidis-group, Project: polar-transformer-networks, Lines: 26, Source file: arch.py

Example 2: Global_Average_Pooling

# Required import: from tflearn.layers import conv [as alias]
# Or: from tflearn.layers.conv import global_avg_pool [as alias]
def Global_Average_Pooling(x, stride=1):
    """
    Pure-TensorFlow alternative (without tflearn):

        width = np.shape(x)[1]
        height = np.shape(x)[2]
        pool_size = [width, height]
        return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride)

    The stride value does not matter because the pooling window already covers the whole feature map.
    """

    return global_avg_pool(x, name='Global_avg_pooling')
    # Note: tflearn may additionally require h5py and curses to be installed.
Developer: taki0112, Project: Densenet-Tensorflow, Lines: 13, Source file: Densenet_MNIST.py
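The docstring above shows a tflearn-free variant built on tf.layers.average_pooling2d. An even simpler equivalent, shown here as a hedged sketch assuming a TensorFlow 1.x NHWC tensor, is to average directly over the spatial axes:

import tensorflow as tf

def global_average_pooling(x):
    # Average over height and width (axes 1 and 2 of an NHWC tensor); the result has
    # shape [batch, channels], matching what tflearn's global_avg_pool returns.
    return tf.reduce_mean(x, axis=[1, 2])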

Example 3: Global_Average_Pooling

# Required import: from tflearn.layers import conv [as alias]
# Or: from tflearn.layers.conv import global_avg_pool [as alias]
def Global_Average_Pooling(x):
    return global_avg_pool(x, name='Global_avg_pooling') 
Developer: taki0112, Project: ResNeXt-Tensorflow, Lines: 4, Source file: ResNeXt.py

Example 4: Global_Average_Pooling

# Required import: from tflearn.layers import conv [as alias]
# Or: from tflearn.layers.conv import global_avg_pool [as alias]
def Global_Average_Pooling(x, stride=1):
    """
    Pure-TensorFlow alternative (without tflearn):

        width = np.shape(x)[1]
        height = np.shape(x)[2]
        pool_size = [width, height]
        return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride)

    The stride value does not matter because the pooling window already covers the whole feature map.
    """
    return global_avg_pool(x, name='Global_avg_pooling')
    # Note: tflearn may additionally require h5py and curses to be installed.
Developer: zfxxfeng, Project: cnn_lstm_ctc_ocr_for_ICPR, Lines: 12, Source file: denseNet.py

Example 5: squeeze_excitation_layer

# Required import: from tflearn.layers import conv [as alias]
# Or: from tflearn.layers.conv import global_avg_pool [as alias]
def squeeze_excitation_layer(input_x, out_dim, middle):
    squeeze = global_avg_pool(input_x)
    excitation = tf.layers.dense(squeeze, use_bias=True, units=middle)
    excitation = tf.nn.relu(excitation)
    excitation = tf.layers.dense(excitation, use_bias=True, units=out_dim)
    excitation = tf.nn.sigmoid(excitation)
    excitation = tf.reshape(excitation, [-1, 1, 1, out_dim])
    scale = input_x * excitation
    return scale 
Developer: 491506870, Project: PRIDNet, Lines: 11, Source file: network.py
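A hedged usage sketch for the squeeze-and-excitation block above; the feature-map shape and bottleneck width are illustrative (middle is typically out_dim divided by a reduction ratio, e.g. 64 // 16 = 4), assuming TensorFlow 1.x:

import tensorflow as tf

# Hypothetical 64-channel feature map.
feature_map = tf.placeholder(tf.float32, [None, 56, 56, 64])
# Recalibrate channels: each of the 64 channels is rescaled by a learned weight in (0, 1).
recalibrated = squeeze_excitation_layer(feature_map, out_dim=64, middle=4)
# 'recalibrated' keeps the input shape [None, 56, 56, 64].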

Example 6: selective_kernel_layer

# Required import: from tflearn.layers import conv [as alias]
# Or: from tflearn.layers.conv import global_avg_pool [as alias]
def selective_kernel_layer(sk_conv1, sk_conv2, sk_conv3, middle, out_dim):
    sum_u = sk_conv1 + sk_conv2 + sk_conv3
    squeeze = global_avg_pool(sum_u)
    squeeze = tf.reshape(squeeze, [-1, 1, 1, out_dim])
    z = tf.layers.dense(squeeze, use_bias=True, units=middle)
    z = tf.nn.relu(z)
    a1 = tf.layers.dense(z, use_bias=True, units=out_dim)
    a2 = tf.layers.dense(z, use_bias=True, units=out_dim)
    a3 = tf.layers.dense(z, use_bias=True, units=out_dim)

    before_softmax = tf.concat([a1, a2, a3], 1)
    after_softmax = tf.nn.softmax(before_softmax, axis=1)  # 'dim' is deprecated; 'axis' is the equivalent argument
    a1 = after_softmax[:, 0, :, :]
    a1 = tf.reshape(a1, [-1, 1, 1, out_dim])
    a2 = after_softmax[:, 1, :, :]
    a2 = tf.reshape(a2, [-1, 1, 1, out_dim])
    a3 = after_softmax[:, 2, :, :]
    a3 = tf.reshape(a3, [-1, 1, 1, out_dim])

    select_1 = sk_conv1 * a1
    select_2 = sk_conv2 * a2
    select_3 = sk_conv3 * a3

    out = select_1 + select_2 + select_3

    return out 
Developer: 491506870, Project: PRIDNet, Lines: 28, Source file: network.py
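A hedged usage sketch for the selective-kernel fusion above; the kernel sizes and channel count are illustrative, assuming TensorFlow 1.x, and the three branches must share the same shape:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 56, 56, 32])
# Three parallel branches with different receptive fields but identical output shapes.
branch1 = tf.layers.conv2d(x, 32, 3, padding='same', activation=tf.nn.relu)
branch2 = tf.layers.conv2d(x, 32, 5, padding='same', activation=tf.nn.relu)
branch3 = tf.layers.conv2d(x, 32, 7, padding='same', activation=tf.nn.relu)
# Softmax-weighted blend of the three branches, shape [None, 56, 56, 32].
fused = selective_kernel_layer(branch1, branch2, branch3, middle=8, out_dim=32)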

Example 7: construct_inceptionv4onfire

# Required import: from tflearn.layers import conv [as alias]
# Or: from tflearn.layers.conv import global_avg_pool [as alias]
def construct_inceptionv4onfire(x,y, training=True, enable_batch_norm=True):

    network = input_data(shape=[None, y, x, 3])

    # stem of InceptionV4

    conv1_3_3 = conv_2d(network,32,3,strides=2,activation='relu',name='conv1_3_3_s2',padding='valid')
    conv2_3_3 = conv_2d(conv1_3_3,32,3,activation='relu',name='conv2_3_3')
    conv3_3_3 = conv_2d(conv2_3_3,64,3,activation='relu',name='conv3_3_3')
    b_conv_1_pool = max_pool_2d(conv3_3_3,kernel_size=3,strides=2,padding='valid',name='b_conv_1_pool')
    if enable_batch_norm:
        b_conv_1_pool = batch_normalization(b_conv_1_pool)
    b_conv_1_conv = conv_2d(conv3_3_3,96,3,strides=2,padding='valid',activation='relu',name='b_conv_1_conv')
    b_conv_1 = merge([b_conv_1_conv,b_conv_1_pool],mode='concat',axis=3)

    b_conv4_1_1 = conv_2d(b_conv_1,64,1,activation='relu',name='conv4_3_3')
    b_conv4_3_3 = conv_2d(b_conv4_1_1,96,3,padding='valid',activation='relu',name='conv5_3_3')

    b_conv4_1_1_reduce = conv_2d(b_conv_1,64,1,activation='relu',name='b_conv4_1_1_reduce')
    b_conv4_1_7 = conv_2d(b_conv4_1_1_reduce,64,[1,7],activation='relu',name='b_conv4_1_7')
    b_conv4_7_1 = conv_2d(b_conv4_1_7,64,[7,1],activation='relu',name='b_conv4_7_1')
    b_conv4_3_3_v = conv_2d(b_conv4_7_1,96,3,padding='valid',name='b_conv4_3_3_v')
    b_conv_4 = merge([b_conv4_3_3_v, b_conv4_3_3],mode='concat',axis=3)

    b_conv5_3_3 = conv_2d(b_conv_4,192,3,padding='valid',activation='relu',name='b_conv5_3_3',strides=2)
    b_pool5_3_3 = max_pool_2d(b_conv_4,kernel_size=3,padding='valid',strides=2,name='b_pool5_3_3')
    if enable_batch_norm:
        b_pool5_3_3 = batch_normalization(b_pool5_3_3)
    b_conv_5 = merge([b_conv5_3_3,b_pool5_3_3],mode='concat',axis=3)
    net = b_conv_5

    # inceptionV4 modules

    net=inception_block_a(net)

    net=inception_block_b(net)

    net=inception_block_c(net)

    pool5_7_7 = global_avg_pool(net)
    if training:
        pool5_7_7 = dropout(pool5_7_7, 0.4)
    loss = fully_connected(pool5_7_7, 2, activation='softmax')

    if training:
        network = regression(loss, optimizer='rmsprop',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
    else:
        network = loss

    model = tflearn.DNN(network, checkpoint_path='inceptionv4onfire',
                        max_checkpoints=1, tensorboard_verbose=0)

    return model

################################################################################ 
Developer: tobybreckon, Project: fire-detection-cnn, Lines: 59, Source file: inceptionVxOnFire.py
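A hedged usage sketch for the constructor above; the 224x224 input size, checkpoint path, and dummy batch are assumptions, and it presumes TensorFlow 1.x, tflearn, and the inception_block_a/b/c helpers defined in the same file:

import numpy as np

# Build the network in inference mode (no dropout, no regression head).
model = construct_inceptionv4onfire(224, 224, training=False)
# model.load('path/to/inceptionv4onfire-weights')      # hypothetical checkpoint path
frames = np.zeros((1, 224, 224, 3), dtype=np.float32)  # dummy batch for illustration
predictions = model.predict(frames)                     # shape [1, 2]: softmax scores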


Note: The tflearn.layers.conv.global_avg_pool examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.