

Python data_preprocessing.ImagePreprocessing Method Code Examples

This article collects typical usage examples of the Python method tflearn.data_preprocessing.ImagePreprocessing. If you are wondering what data_preprocessing.ImagePreprocessing does or how to use it in practice, the curated examples below may help. You can also browse further usage examples from the containing module, tflearn.data_preprocessing.


Two code examples of the data_preprocessing.ImagePreprocessing method are shown below, ordered by popularity by default.
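
Before the full examples, here is a minimal sketch of the typical usage pattern, assuming an arbitrary 32x32 RGB input shape chosen purely for illustration: create the preprocessor, register featurewise zero-centering (mean subtraction) and standard-deviation normalization, and attach it to the input layer so the transforms are applied to every batch at both training and inference time.

from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.layers.core import input_data

img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()  # subtract the dataset-wide mean
img_prep.add_featurewise_stdnorm()      # divide by the dataset-wide std

# Attaching the preprocessor to input_data applies it to every fed batch.
net = input_data(shape=[None, 32, 32, 3], data_preprocessing=img_prep)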

Example 1: build_model

# Required import: from tflearn import data_preprocessing [as alias]
# Alternatively: from tflearn.data_preprocessing import ImagePreprocessing [as alias]
import logging

import numpy as np
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.layers.core import input_data, dropout
from tflearn.layers.conv import conv_2d, max_pool_2d, upsample_2d
from tflearn.layers.estimator import regression

# IMAGE_INPUT_SIZE is a (width, height) constant defined elsewhere in
# train_autoencoder.py.
def build_model():
    logging.info('building model')
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    encoder = input_data(shape=(None, IMAGE_INPUT_SIZE[0], IMAGE_INPUT_SIZE[1],
                                3), data_preprocessing=img_prep)
    encoder = conv_2d(encoder, 16, 7, activation='relu')
    encoder = dropout(encoder, 0.25)  # you can have noisy input instead
    encoder = max_pool_2d(encoder, 2)
    encoder = conv_2d(encoder, 16, 7, activation='relu')
    encoder = max_pool_2d(encoder, 2)
    encoder = conv_2d(encoder, 8, 7, activation='relu')
    encoder = max_pool_2d(encoder, 2)
    
    decoder = conv_2d(encoder, 8, 7, activation='relu')
    decoder = upsample_2d(decoder, 2)
    decoder = conv_2d(decoder, 16, 7, activation='relu')
    decoder = upsample_2d(decoder, 2)
    decoder = conv_2d(decoder, 16, 7, activation='relu')
    decoder = upsample_2d(decoder, 2)
    decoder = conv_2d(decoder, 3, 7)

    # Flattened size of the encoded representation, skipping the batch dimension.
    encoded_size = np.prod([int(d) for d in encoder.get_shape().as_list()[1:]])
    
    original_img_size = np.prod(IMAGE_INPUT_SIZE) * 3
    
    percentage = round(100.0 * encoded_size / original_img_size, 2)
    logging.debug('the encoded representation is {}% of the original '
                  'image'.format(percentage))
    
    return regression(decoder, optimizer='adadelta',
                      loss='binary_crossentropy', learning_rate=0.005) 
Developer: OliverEdholm, Project: Convolutional-Autoencoder, Source file: train_autoencoder.py
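
A hedged usage sketch (not from the original repository): the network returned by build_model can be wrapped in tflearn.DNN and trained with the same images as both inputs and targets, the usual autoencoder setup. X below is an assumed array of shape (n, IMAGE_INPUT_SIZE[0], IMAGE_INPUT_SIZE[1], 3) with values scaled to [0, 1], so the binary cross-entropy loss is well defined.

import tflearn

net = build_model()
model = tflearn.DNN(net, tensorboard_verbose=0)
# An autoencoder learns to reproduce its input, so X is both data and target.
model.fit(X, X, n_epoch=10, batch_size=64, show_metric=True)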

Example 2: run

# Required import: from tflearn import data_preprocessing [as alias]
# Alternatively: from tflearn.data_preprocessing import ImagePreprocessing [as alias]
import cv2
import tflearn
from tflearn.data_preprocessing import ImagePreprocessing
def run(self):
        # Real-time pre-processing of the image data
        img_prep = ImagePreprocessing()
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()

        # Real-time data augmentation
        img_aug = tflearn.ImageAugmentation()
        img_aug.add_random_flip_leftright()
        # img_aug.add_random_crop([48, 48], padding=8)

        # Building Residual Network
        net = tflearn.input_data(shape=[None, 48, 48, 1], data_preprocessing=img_prep, data_augmentation=img_aug)
        net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, regularizer='L2', weight_decay=0.0001)
        net = tflearn.residual_block(net, self.n, 16)
        net = tflearn.residual_block(net, 1, 32, downsample=True)
        net = tflearn.residual_block(net, self.n - 1, 32)
        net = tflearn.residual_block(net, 1, 64, downsample=True)
        net = tflearn.residual_block(net, self.n - 1, 64)
        net = tflearn.batch_normalization(net)
        net = tflearn.activation(net, 'relu')
        net = tflearn.global_avg_pool(net)

        # Regression
        net = tflearn.fully_connected(net, 7, activation='softmax')
        mom = tflearn.Momentum(learning_rate=0.1, lr_decay=0.0001, decay_step=32000, staircase=True, momentum=0.9)
        net = tflearn.regression(net, optimizer=mom,
                                 loss='categorical_crossentropy')

        self.model = tflearn.DNN(net, checkpoint_path='models/model_resnet_emotion',
                                 max_checkpoints=10, tensorboard_verbose=0,
                                 clip_gradients=0.)

        self.model.load('current_model/model_resnet_emotion-42000')

        face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        cap = cv2.VideoCapture(0)

        while True:
            ret, img = cap.read()
            if not ret:
                # Stop if the camera frame could not be read.
                break
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = img[y:y + h, x:x + w]
                self.process_image(roi_gray, img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows() 
Developer: safreita1, Project: Resnet-Emotion-Recognition, Source file: face_tracking.py
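
The loop above delegates to self.process_image(roi_gray, img), whose body is not shown on this page. The following is a hypothetical sketch of what such a method might look like, assuming the 48x48x1 input declared in input_data above; the EMOTIONS label list, the text position, and the window name are illustrative assumptions, not code from the original project.

import numpy as np

# Hypothetical label order; the original project may differ.
EMOTIONS = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']

def process_image(self, roi_gray, img):
    # Resize the face crop to the 48x48 grayscale input the network expects.
    face = cv2.resize(roi_gray, (48, 48)).astype('float32')
    # Add batch and channel dimensions: (1, 48, 48, 1).
    face = face.reshape(1, 48, 48, 1)
    prediction = self.model.predict(face)[0]
    label = EMOTIONS[int(np.argmax(prediction))]
    cv2.putText(img, label, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
    cv2.imshow('emotion', img)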


Note: the tflearn.data_preprocessing.ImagePreprocessing examples in this article were compiled by 纯净天空 from open-source projects hosted on platforms such as GitHub. The code snippets remain the copyright of their original authors; consult each project's License before redistributing them.