

Python inception_resnet_v2.preprocess_input Method Code Examples

This article collects typical usage examples of the Python method keras.applications.inception_resnet_v2.preprocess_input. If you are wondering what inception_resnet_v2.preprocess_input does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing module, keras.applications.inception_resnet_v2.


Six code examples of the inception_resnet_v2.preprocess_input method are shown below, sorted by popularity by default.
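
Before the examples, here is a minimal standalone sketch of what the method actually does (the random dummy batch and the 299x299 size are illustrative assumptions, not taken from the examples below): for the InceptionResNetV2 family, preprocess_input rescales pixel values from [0, 255] to the [-1, 1] range the network expects.

# Minimal standalone sketch (dummy data; values are assumptions for illustration)
import numpy as np
from keras.applications.inception_resnet_v2 import preprocess_input

# A dummy batch of one 299x299 RGB image with pixel values in [0, 255]
dummy_batch = np.random.randint(0, 256, size=(1, 299, 299, 3)).astype(np.float32)

# preprocess_input scales the input to roughly [-1, 1] for InceptionResNetV2
preprocessed = preprocess_input(dummy_batch)
print(preprocessed.min(), preprocessed.max())  # close to -1.0 and 1.0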

Example 1: data

# Required import: from keras.applications import inception_resnet_v2 [as alias]
# Or: from keras.applications.inception_resnet_v2 import preprocess_input [as alias]
def data():
    train_datagen = ImageDataGenerator(shear_range=0.2,
                                       rotation_range=20.,
                                       width_shift_range=0.3,
                                       height_shift_range=0.3,
                                       zoom_range=0.2,
                                       horizontal_flip=True,
                                       preprocessing_function=preprocess_input)
    test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)

    train_generator = train_datagen.flow_from_directory(train_data, (img_width, img_height), batch_size=batch_size,
                                                        class_mode='categorical', shuffle=True)
    validation_generator = test_datagen.flow_from_directory(valid_data, (img_width, img_height), batch_size=batch_size,
                                                            class_mode='categorical', shuffle=True)

    return train_generator, validation_generator 
Developer: foamliu, Project: Scene-Classification, Lines of code: 18, Source: hp_search.py
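
As a usage note (not part of the original project), the two generators returned by data() would typically be consumed roughly as follows, assuming a compiled Keras model named model for the same classes and the same module-level batch_size:

# Hypothetical usage sketch; `model` and `batch_size` are assumed to exist
train_generator, validation_generator = data()
model.fit_generator(train_generator,
                    steps_per_epoch=train_generator.samples // batch_size,
                    validation_data=validation_generator,
                    validation_steps=validation_generator.samples // batch_size,
                    epochs=10)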

Example 2: __getitem__

# Required import: from keras.applications import inception_resnet_v2 [as alias]
# Or: from keras.applications.inception_resnet_v2 import preprocess_input [as alias]
def __getitem__(self, idx):
        i = idx * batch_size

        length = min(batch_size, (len(self.samples) - i))
        batch_inputs = np.empty((3, length, img_size, img_size, channel), dtype=np.float32)
        batch_dummy_target = np.zeros((length, embedding_size * 3), dtype=np.float32)

        for i_batch in range(length):
            sample = self.samples[i + i_batch]
            for j, role in enumerate(['a', 'p', 'n']):
                image_name = sample[role]
                filename = os.path.join(self.image_folder, image_name)
                image = cv.imread(filename)  # BGR
                image = image[:, :, ::-1]  # RGB
                dets = self.detector(image, 1)

                num_faces = len(dets)
                if num_faces > 0:
                    # Find the 5 face landmarks we need to do the alignment.
                    faces = dlib.full_object_detections()
                    for detection in dets:
                        faces.append(self.sp(image, detection))
                    image = dlib.get_face_chip(image, faces[0], size=img_size)
                else:
                    image = cv.resize(image, (img_size, img_size), interpolation=cv.INTER_CUBIC)

                if self.usage == 'train':
                    image = aug_pipe.augment_image(image)

                batch_inputs[j, i_batch] = preprocess_input(image)

        return [batch_inputs[0], batch_inputs[1], batch_inputs[2]], batch_dummy_target 
Developer: foamliu, Project: FaceNet, Lines of code: 34, Source: data_generator.py

Example 3: run

# Required import: from keras.applications import inception_resnet_v2 [as alias]
# Or: from keras.applications.inception_resnet_v2 import preprocess_input [as alias]
def run(self):
        # set environment
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpuid)
        print("InferenceWorker init, GPU ID: {}".format(self.gpuid))

        from model import build_model

        # load models
        model_weights_path = 'models/model.00-0.0296.hdf5'
        model = build_model()
        model.load_weights(model_weights_path)

        while True:
            try:
                try:
                    item = self.in_queue.get(block=False)
                except queue.Empty:
                    continue

                image_name_0, image_name_1, image_name_2 = item

                filename = os.path.join(image_folder, image_name_0)
                image_bgr = cv.imread(filename)
                image_bgr = cv.resize(image_bgr, (img_size, img_size), interpolation=cv.INTER_CUBIC)
                image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)
                image_rgb_0 = preprocess_input(image_rgb)
                filename = os.path.join(image_folder, image_name_1)
                image_bgr = cv.imread(filename)
                image_bgr = cv.resize(image_bgr, (img_size, img_size), interpolation=cv.INTER_CUBIC)
                image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)
                image_rgb_1 = preprocess_input(image_rgb)
                filename = os.path.join(image_folder, image_name_2)
                image_bgr = cv.imread(filename)
                image_bgr = cv.resize(image_bgr, (img_size, img_size), interpolation=cv.INTER_CUBIC)
                image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)
                image_rgb_2 = preprocess_input(image_rgb)

                batch_inputs = np.empty((3, 1, img_size, img_size, 3), dtype=np.float32)
                batch_inputs[0] = image_rgb_0
                batch_inputs[1] = image_rgb_1
                batch_inputs[2] = image_rgb_2
                y_pred = model.predict([batch_inputs[0], batch_inputs[1], batch_inputs[2]])

                a = y_pred[0, 0:128]
                p = y_pred[0, 128:256]
                n = y_pred[0, 256:384]

                self.out_queue.put({'image_name': image_name_0, 'embedding': a})
                self.out_queue.put({'image_name': image_name_1, 'embedding': p})
                self.out_queue.put({'image_name': image_name_2, 'embedding': n})
                if self.in_queue.qsize() == 0:
                    break
            except Exception as e:
                print(e)

        import keras.backend as K
        K.clear_session()
        print('InferenceWorker done, GPU ID {}'.format(self.gpuid)) 
Developer: foamliu, Project: FaceNet, Lines of code: 61, Source: inference.py

Example 4: run

# Required import: from keras.applications import inception_resnet_v2 [as alias]
# Or: from keras.applications.inception_resnet_v2 import preprocess_input [as alias]
def run(self):
        # set environment
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpuid)
        print("InferenceWorker init, GPU ID: {}".format(self.gpuid))

        from model import build_model

        # load models
        model = build_model()
        model.load_weights(get_best_model())

        while True:
            try:
                sample = {}
                try:
                    sample['a'] = self.in_queue.get(block=False)
                    sample['p'] = self.in_queue.get(block=False)
                    sample['n'] = self.in_queue.get(block=False)
                except queue.Empty:
                    break

                batch_inputs = np.empty((3, 1, img_size, img_size, channel), dtype=np.float32)

                for j, role in enumerate(['a', 'p', 'n']):
                    image_name = sample[role]
                    filename = os.path.join(image_folder, image_name)
                    image_bgr = cv.imread(filename)
                    image_bgr = cv.resize(image_bgr, (img_size, img_size), interpolation=cv.INTER_CUBIC)
                    image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)
                    batch_inputs[j, 0] = preprocess_input(image_rgb)

                y_pred = model.predict([batch_inputs[0], batch_inputs[1], batch_inputs[2]])
                a = y_pred[0, 0:128]
                p = y_pred[0, 128:256]
                n = y_pred[0, 256:384]

                self.out_queue.put({'image_name': sample['a'], 'embedding': a})
                self.out_queue.put({'image_name': sample['p'], 'embedding': p})
                self.out_queue.put({'image_name': sample['n'], 'embedding': n})
                self.signal_queue.put(SENTINEL)

                if self.in_queue.qsize() == 0:
                    break
            except Exception as e:
                print(e)

        import keras.backend as K
        K.clear_session()
        print('InferenceWorker done, GPU ID {}'.format(self.gpuid)) 
Developer: foamliu, Project: FaceNet, Lines of code: 52, Source: train_eval.py

Example 5: train

# Required import: from keras.applications import inception_resnet_v2 [as alias]
# Or: from keras.applications.inception_resnet_v2 import preprocess_input [as alias]
def train(task):
    if (task == 'design'):
        task_list = task_list_design
    else:
        task_list = task_list_length

    label_names = list(task_list.keys())
    print(n)
    y = [np.zeros((n, task_list[x])) for x in task_list.keys()]
    for i in range(n):
        label_name = df.label_name[i]
        label = df.label[i]
        y[label_names.index(label_name)][i, label.find('y')] = 1

    X = getX()
    n_train = int(n * 0.9)
    X_train = X[:n_train]
    X_valid = X[n_train:]
    y_train = [x[:n_train] for x in y]
    y_valid = [x[n_train:] for x in y]
    gen_train = Generator(X_train, y_train, batch_size=40, aug=True)

    base_model = inception_v4.create_model(weights='imagenet', width=width, include_top=False)
    input_tensor = Input((width, width, 3))
    x = input_tensor
    x = Lambda(preprocess_input, name='preprocessing')(x)
    x = base_model(x)
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.5)(x)
    x = [Dense(count, activation='softmax', name=name)(x) for name, count in task_list.items()]

    model = Model(input_tensor, x)
    # model.load_weights('models/base.h5',by_name=True)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
    model2 = multi_gpu_model(model, 2)

    model2.compile(optimizer=Adam(0.0001), loss='categorical_crossentropy', metrics=[acc])
    model2.fit_generator(gen_train.generator, steps_per_epoch=gen_train.steps, epochs=3, validation_data=(X_valid, y_valid))

    model2.compile(optimizer=Adam(0.000025), loss='categorical_crossentropy', metrics=[acc])
    model2.fit_generator(gen_train.generator, steps_per_epoch=gen_train.steps, epochs=2, validation_data=(X_valid, y_valid))

    model2.compile(optimizer=Adam(0.00000625), loss='categorical_crossentropy', metrics=[acc])
    model2.fit_generator(gen_train.generator, steps_per_epoch=gen_train.steps, epochs=3, validation_data=(X_valid, y_valid))

    model2.compile(optimizer=Adam(0.00000425), loss='categorical_crossentropy', metrics=[acc])
    model2.fit_generator(gen_train.generator, steps_per_epoch=gen_train.steps, epochs=1, validation_data=(X_valid, y_valid))

    model2.compile(optimizer=Adam(0.000001), loss='categorical_crossentropy', metrics=[acc])
    model2.fit_generator(gen_train.generator, steps_per_epoch=gen_train.steps, epochs=1, validation_data=(X_valid, y_valid))
    model.save_weights('models/%s.h5' % model_name)

    del X
    del model
    gc.collect()

# load the label file and split it into two portions 
Developer: Jeremyczhj, Project: FashionAI_Tianchi_2018, Lines of code: 59, Source: Multitask_train.py
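
A design note on the example above: unlike the earlier examples, it wraps preprocess_input in a Lambda layer inside the model graph (x = Lambda(preprocess_input, name='preprocessing')(x)), so raw pixel values in [0, 255] can be fed to the model directly and the scaling happens as part of the forward pass rather than in the data pipeline.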

Example 6: calculate_inception_score

# Required import: from keras.applications import inception_resnet_v2 [as alias]
# Or: from keras.applications.inception_resnet_v2 import preprocess_input [as alias]
def calculate_inception_score(images_path, batch_size=1, splits=10):
    # Create an instance of InceptionResNetV2
    model = InceptionResNetV2()

    images = None
    for image_ in glob.glob(images_path):
        # Load image
        loaded_image = image.load_img(image_, target_size=(299, 299))

        # Convert PIL image to numpy ndarray
        loaded_image = image.img_to_array(loaded_image)

        # Add another dimension (the batch dimension)
        loaded_image = np.expand_dims(loaded_image, axis=0)

        # Concatenate all images into one tensor
        if images is None:
            images = loaded_image
        else:
            images = np.concatenate([images, loaded_image], axis=0)

    # Calculate number of batches
    num_batches = (images.shape[0] + batch_size - 1) // batch_size

    probs = None

    # Use InceptionResNetV2 to calculate class probabilities
    for i in range(num_batches):
        image_batch = images[i * batch_size:(i + 1) * batch_size, :, :, :]
        prob = model.predict(preprocess_input(image_batch))

        if probs is None:
            probs = prob
        else:
            probs = np.concatenate([prob, probs], axis=0)

    # Calculate Inception scores
    divs = []
    split_size = probs.shape[0] // splits

    for i in range(splits):
        prob_batch = probs[(i * split_size):((i + 1) * split_size), :]
        p_y = np.expand_dims(np.mean(prob_batch, 0), 0)
        div = prob_batch * (np.log(prob_batch / p_y))
        div = np.mean(np.sum(div, 1))
        divs.append(np.exp(div))

    return np.mean(divs), np.std(divs) 
Developer: PacktPublishing, Project: Generative-Adversarial-Networks-Projects, Lines of code: 50, Source: run.py
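
For reference, the split loop above implements the standard Inception Score: for each split it computes the exponential of the average KL divergence between the per-image class distribution p(y|x) and the split's marginal p(y), and the function returns the mean and standard deviation of these per-split scores.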


Note: The keras.applications.inception_resnet_v2.preprocess_input examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to each project's license before redistributing or using the code. Do not reproduce without permission.