This article collects typical usage examples of the Python method tflearn.data_preprocessing.ImagePreprocessing. If you are wondering what exactly data_preprocessing.ImagePreprocessing does, how to use it, or where to find examples of it in practice, the curated code samples below may help. You can also explore the rest of the module tflearn.data_preprocessing for related usage.
The following shows 2 code examples of data_preprocessing.ImagePreprocessing, sorted by popularity by default.
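Before the examples, here is a minimal, self-contained sketch of the typical pattern: declare an ImagePreprocessing object, register normalization steps on it, and hand it to the input layer. The layer sizes and the random data below are placeholders, not taken from the examples:

import numpy as np
import tflearn
from tflearn.data_preprocessing import ImagePreprocessing

# Preprocessing declared once is applied to every batch at training
# and inference time alike.
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()  # subtract the dataset mean
img_prep.add_featurewise_stdnorm()      # divide by the dataset std

net = tflearn.input_data(shape=[None, 32, 32, 3], data_preprocessing=img_prep)
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net)

model = tflearn.DNN(net)
X = np.random.rand(8, 32, 32, 3)             # placeholder images
Y = np.eye(10)[np.random.randint(0, 10, 8)]  # placeholder one-hot labels
model.fit(X, Y, n_epoch=1)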
Example 1: build_model
# Required import: from tflearn import data_preprocessing [as alias]
# Or: from tflearn.data_preprocessing import ImagePreprocessing [as alias]
import logging

import numpy as np
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.layers.core import input_data, dropout
from tflearn.layers.conv import conv_2d, max_pool_2d, upsample_2d
from tflearn.layers.estimator import regression

# IMAGE_INPUT_SIZE is a module-level (width, height) tuple defined elsewhere
# in the source project.

def build_model():
    logging.info('building model')
    # Zero-center and scale the input images using statistics computed
    # over the whole dataset at fit time.
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    encoder = input_data(shape=(None, IMAGE_INPUT_SIZE[0], IMAGE_INPUT_SIZE[1], 3),
                         data_preprocessing=img_prep)
    encoder = conv_2d(encoder, 16, 7, activation='relu')
    encoder = dropout(encoder, 0.25)  # you can have noisy input instead
    encoder = max_pool_2d(encoder, 2)
    encoder = conv_2d(encoder, 16, 7, activation='relu')
    encoder = max_pool_2d(encoder, 2)
    encoder = conv_2d(encoder, 8, 7, activation='relu')
    encoder = max_pool_2d(encoder, 2)
    decoder = conv_2d(encoder, 8, 7, activation='relu')
    decoder = upsample_2d(decoder, 2)
    decoder = conv_2d(decoder, 16, 7, activation='relu')
    decoder = upsample_2d(decoder, 2)
    decoder = conv_2d(decoder, 16, 7, activation='relu')
    decoder = upsample_2d(decoder, 2)
    decoder = conv_2d(decoder, 3, 7)
    # Size of the bottleneck: all encoder dimensions except the batch axis.
    # (The original parsed this out of str(encoder.get_shape) with a regex,
    # which is fragile; as_list() is the direct route.)
    encoded_size = np.prod(encoder.get_shape().as_list()[1:])
    original_img_size = np.prod(IMAGE_INPUT_SIZE) * 3
    percentage = round(100.0 * encoded_size / original_img_size, 2)
    logging.debug('the encoded representation is {}% of the original '
                  'image'.format(percentage))
    return regression(decoder, optimizer='adadelta',
                      loss='binary_crossentropy', learning_rate=0.005)
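A usage note not part of the original snippet: build_model returns a regression layer, so a minimal training sketch (assuming IMAGE_INPUT_SIZE is defined and using placeholder data) would wrap it in tflearn.DNN and fit the images against themselves, since an autoencoder reconstructs its input:

import numpy as np
import tflearn

model = tflearn.DNN(build_model(), tensorboard_verbose=0)
# Placeholder batch of random images; substitute your real dataset.
X = np.random.rand(16, IMAGE_INPUT_SIZE[0], IMAGE_INPUT_SIZE[1], 3)
model.fit(X, X, n_epoch=1, batch_size=4)  # autoencoder: target == input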
Example 2: run
# Required import: from tflearn import data_preprocessing [as alias]
# Or: from tflearn.data_preprocessing import ImagePreprocessing [as alias]
import cv2
import tflearn
from tflearn.data_preprocessing import ImagePreprocessing

# `run` is a method of a class that provides `self.n` (the residual-block
# depth) and `self.process_image` (per-face handling, not shown here).

def run(self):
    # Real-time pre-processing of the image data
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    # Real-time data augmentation
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_flip_leftright()
    # img_aug.add_random_crop([48, 48], padding=8)
    # Building the residual network (48x48 grayscale input)
    net = tflearn.input_data(shape=[None, 48, 48, 1],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug)
    net = tflearn.conv_2d(net, nb_filter=16, filter_size=3,
                          regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, self.n, 16)
    net = tflearn.residual_block(net, 1, 32, downsample=True)
    net = tflearn.residual_block(net, self.n - 1, 32)
    net = tflearn.residual_block(net, 1, 64, downsample=True)
    net = tflearn.residual_block(net, self.n - 1, 64)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Classification head: 7 emotion classes
    net = tflearn.fully_connected(net, 7, activation='softmax')
    mom = tflearn.Momentum(learning_rate=0.1, lr_decay=0.0001,
                           decay_step=32000, staircase=True, momentum=0.9)
    net = tflearn.regression(net, optimizer=mom,
                             loss='categorical_crossentropy')
    self.model = tflearn.DNN(net, checkpoint_path='models/model_resnet_emotion',
                             max_checkpoints=10, tensorboard_verbose=0,
                             clip_gradients=0.)
    self.model.load('current_model/model_resnet_emotion-42000')
    # Detect faces on a live webcam feed and classify each one.
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    cap = cv2.VideoCapture(0)
    while True:
        ret, img = cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = img[y:y + h, x:x + w]  # color ROI (unused below)
            self.process_image(roi_gray, img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
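process_image is not defined in this snippet. Purely as a hypothetical sketch of what such a helper on the same class could look like (the EMOTIONS label order and the display logic are assumptions, not from the original source):

import cv2
import numpy as np

# Assumed FER-2013 label order; the real project may differ.
EMOTIONS = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']

def process_image(self, roi_gray, img):
    # Resize the face ROI to the 48x48 grayscale input the network expects.
    face = cv2.resize(roi_gray, (48, 48)).reshape(1, 48, 48, 1) / 255.0
    prediction = self.model.predict(face)  # shape (1, 7)
    label = EMOTIONS[int(np.argmax(prediction))]
    cv2.putText(img, label, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                1, (255, 0, 0), 2)
    cv2.imshow('emotion', img)  # refreshed by the waitKey call in run()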