

Python preprocessing.image Method Code Examples

This article collects typical usage examples of the Python method keras.preprocessing.image. If you have been wondering how exactly to use preprocessing.image, what it does, or what real examples look like, the curated code samples below may help. You can also explore further usage examples from the containing module, keras.preprocessing.


The following presents 14 code examples of the preprocessing.image method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.

Example 1: detection_as_classification

# Required import: from keras import preprocessing [as alias]
# Or: from keras.preprocessing import image [as alias]
def detection_as_classification(model, test_generator):
    """
    Given a test_generator that is a regular Keras image generator (for classification tasks), run a DAC evaluate using
    the given model, and return the toal number of TP's and FP's
    :param model: model to run predictions
    :param test_generator: Keras ImageGenerator iterator
    :return: true positive number, and false positive number (detections)
    """
    i = 0
    TP = 0
    FP = 0
    
    for X,Y in test_generator:
        if i >= len(test_generator):
            break # otherwise will run indefinitely
        X = rgb2bgr(X)
        X = preprocess_image(X)
        boxes, scores, labels = model.predict_on_batch(X)
        tp, fp = evaluate(filter(scores, labels, score_threshold), Y)
        i += 1
        TP += tp
        FP += fp

    return TP, FP 
Developer: 921kiyo, Project: 3d-dl, Lines: 26, Source: train_keras_retinanet.py
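For context, a minimal sketch of how a compatible test_generator could be built with keras.preprocessing.image; the directory layout, target size, and batch size are illustrative assumptions, not part of the original project:

from keras.preprocessing.image import ImageDataGenerator

# hypothetical directory of per-class subfolders, e.g. data/test/<class>/*.jpg
test_datagen = ImageDataGenerator()
test_generator = test_datagen.flow_from_directory(
    'data/test',
    target_size=(224, 224),
    batch_size=8,
    class_mode='categorical',
    shuffle=False)

# TP, FP = detection_as_classification(model, test_generator)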

Example 2: add_salt_pepper_noise

# Required import: from keras import preprocessing [as alias]
# Or: from keras.preprocessing import image [as alias]
def add_salt_pepper_noise(X_img):
    """
    Custom Image Augmentation Function which can be added to the keras
    fit_generator function call
    Takes an numpy array as input and returns the same array with salt & pepper
    noise (similar to what one might expect from bad quality images)
    """

    # Produce a copy so as not to modify the original image
    X_img_copy = X_img.copy()
    row, col, _ = X_img_copy.shape
    salt_vs_pepper = 0.2
    amount = 0.004
    num_salt = np.ceil(amount * X_img_copy.size * salt_vs_pepper)
    num_pepper = np.ceil(amount * X_img_copy.size * (1.0 - salt_vs_pepper))

    # Add salt noise (applied to the copy so the input image stays untouched)
    coords = [np.random.randint(0, i - 1, int(num_salt)) for i in X_img_copy.shape]
    X_img_copy[coords[0], coords[1], :] = 1

    # Add pepper noise
    coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in X_img_copy.shape]
    X_img_copy[coords[0], coords[1], :] = 0
    return X_img_copy 
Developer: 921kiyo, Project: 3d-dl, Lines: 27, Source: retrain.py
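As the docstring suggests, a function like this is typically attached through the preprocessing_function hook of keras.preprocessing.image.ImageDataGenerator; a hedged usage sketch, with paths and sizes as illustrative assumptions:

from keras.preprocessing.image import ImageDataGenerator

# the noise function runs on each rank-3 image array after loading/resizing
datagen = ImageDataGenerator(preprocessing_function=add_salt_pepper_noise)
train_generator = datagen.flow_from_directory(
    'data/train',
    target_size=(224, 224),
    batch_size=32)

# model.fit_generator(train_generator, steps_per_epoch=100, epochs=10)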

Example 3: ResNet50_resize

# Required import: from keras import preprocessing [as alias]
# Or: from keras.preprocessing import image [as alias]
def ResNet50_resize(imgs, scope):
    # bicubic doesn't have gradient defined
    return Lambda(lambda x: tf.image.resize_images(x, (224,224), method=tf.image.ResizeMethod.BILINEAR), \
            name=scope+'resnet50_resize')(imgs) 
Developer: sangxia, Project: nips-2017-adversarial, Lines: 6, Source: network_utils.py
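A minimal sketch of wiring this resize layer into a Keras model; the input shape and scope prefix are assumptions. Bilinear interpolation is used because, as the comment above notes, bicubic resizing has no gradient defined:

import tensorflow as tf               # tf and Lambda are needed by
from keras.layers import Input, Lambda  # ResNet50_resize itself
from keras.models import Model

imgs = Input(shape=(299, 299, 3))
resized = ResNet50_resize(imgs, scope='adv_')  # output shape: (None, 224, 224, 3)
model = Model(inputs=imgs, outputs=resized)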

Example 4: plot_bar

# Required import: from keras import preprocessing [as alias]
# Or: from keras.preprocessing import image [as alias]
def plot_bar(self, x, heights, heights2=None, title='Bar Chart', xlabel='X', ylabel='Y'):
        """
        Draw a precision and sensitivity bar plot.
        :param x: x-axis positions, one per label encoding
        :param heights: precision values
        :param heights2: sensitivity values (optional)
        :param title: title of the plot
        :param xlabel: x label for the plot
        :param ylabel: y label for the plot
        :return: tensor image to be exported to TensorBoard
        """
        bar_width = 0.4
        x = np.array(x)
        plt.bar(x, heights, bar_width)
        if heights2 is not None:
            plt.bar(x - bar_width, heights2, bar_width)
        plt.title(title)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)

        plt.tight_layout()

        # convert to tf image
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)
        image = tf.image.decode_png(buf.getvalue(), channels=4)
        image = tf.expand_dims(image, 0)
        plt.clf()

        return image 
Developer: 921kiyo, Project: 3d-dl, Lines: 32, Source: keras_eval.py
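The returned tensor is intended for TensorBoard. A hedged sketch of exporting it with the TF 1.x summary API that the surrounding code implies; the evaluator instance, inputs, and log directory are hypothetical:

import tensorflow as tf

image_tensor = evaluator.plot_bar(x=[0, 1, 2], heights=[0.9, 0.8, 0.7])  # hypothetical instance
summary_op = tf.summary.image('precision_sensitivity', image_tensor)
with tf.Session() as sess:
    writer = tf.summary.FileWriter('./logs')
    writer.add_summary(sess.run(summary_op))
    writer.close()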

Example 5: create_generators

# Required import: from keras import preprocessing [as alias]
# Or: from keras.preprocessing import image [as alias]
def create_generators(args, preprocess_image):
    """Create generators for training and validation.

    Args:
        args             : parseargs object containing configuration for generators.
        preprocess_image : Function that preprocesses an image for the network.
    """
    common_args = {
        'batch_size': args.batch_size,
        'config': args.config,
        'image_min_side': args.image_min_side,
        'image_max_side': args.image_max_side,
        'preprocess_image': preprocess_image,
    }

    # create random transform generator for augmenting training data
    if args.random_transform:
        transform_generator = random_transform_generator(
            min_rotation=-0.1,
            max_rotation=0.1,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_shear=-0.1,
            max_shear=0.1,
            min_scaling=(0.9, 0.9),
            max_scaling=(1.1, 1.1),
            flip_x_chance=0.5,
            flip_y_chance=0.5,
        )
        visual_effect_generator = random_visual_effect_generator(
            contrast_range=(0.9, 1.1),
            brightness_range=(-.1, .1),
            hue_range=(-0.05, 0.05),
            saturation_range=(0.95, 1.05))
    else:
        transform_generator = random_transform_generator(flip_x_chance=0.5)
        visual_effect_generator = None
    if args.dataset_type == 'csv':
        train_generator = CSVGenerator(args.annotations,
                                       args.classes,
                                       transform_generator=transform_generator,
                                       visual_effect_generator=visual_effect_generator,
                                       **common_args)

        if args.val_annotations:
            validation_generator = CSVGenerator(args.val_annotations,
                                                args.classes,
                                                shuffle_groups=False,
                                                **common_args)
        else:
            validation_generator = None
    else:
        raise ValueError('Invalid data type received: {}'.format(args.dataset_type))

    return train_generator, validation_generator 
Developer: weecology, Project: DeepForest, Lines: 57, Source: retinanet_train.py
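A hedged sketch of invoking this directly with a hand-built namespace instead of the command line; the field values are illustrative, and preprocess_image would come from the chosen backbone:

import argparse

args = argparse.Namespace(
    batch_size=1, config=None, image_min_side=800, image_max_side=1333,
    random_transform=True, dataset_type='csv',
    annotations='train.csv', classes='classes.csv', val_annotations=None)

# train_generator, validation_generator = create_generators(args, preprocess_image)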

Example 6: img_seg

# Required import: from keras import preprocessing [as alias]
# Or: from keras.preprocessing import image [as alias]
def img_seg(volpath,
            segpath,
            batch_size=1,
            verbose=False,
            nb_restart_cycle=None,
            name='img_seg', # name, optional
            ext='.png',
            vol_rand_seed=None,
            **kwargs):
    """
    generator for (image, segmentation)
    """

    def imggen(path, ext, nb_restart_cycle=None):
        """
        TODO: should really use the volume generators for this
        """
        files = _get_file_list(path, ext, vol_rand_seed)
        if nb_restart_cycle is None:
            nb_restart_cycle = len(files)

        idx = -1
        while 1:
            idx = np.mod(idx+1, nb_restart_cycle)
            im = scipy.misc.imread(os.path.join(path, files[idx]))[:, :, 0]
            yield im.reshape((1,) + im.shape)

    img_gen = imggen(volpath, ext, nb_restart_cycle)
    seg_gen = imggen(segpath, ext)

    # on each call to next():
    while 1:
        input_vol = np.vstack([next(img_gen).astype('float16')/255 for i in range(batch_size)])
        input_vol = np.expand_dims(input_vol, axis=-1)

        output_vols = [np_utils.to_categorical(next(seg_gen).astype('int8'), num_classes=2) for i in range(batch_size)]
        output_vol = np.vstack([np.expand_dims(f, axis=0) for f in output_vols])

        # yield the input and output volumes
        yield (input_vol, output_vol)


# Some internal use functions 
Developer: voxelmorph, Project: voxelmorph, Lines: 45, Source: generators.py
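A hedged usage sketch; the directory names and batch size are illustrative assumptions:

gen = img_seg('data/vols/', 'data/segs/', batch_size=4)
input_vol, output_vol = next(gen)
# input_vol: batch of normalised grayscale images, shape (4, H, W, 1)
# output_vol: the matching one-hot segmentation batch (2 classes)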

Example 7: parse_args

# Required import: from keras import preprocessing [as alias]
# Or: from keras.preprocessing import image [as alias]
def parse_args(args):
    """ Parse the arguments.
    """
    parser     = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
    subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
    subparsers.required = True

    coco_parser = subparsers.add_parser('coco')
    coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')

    pascal_parser = subparsers.add_parser('pascal')
    pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')

    kitti_parser = subparsers.add_parser('kitti')
    kitti_parser.add_argument('kitti_path', help='Path to dataset directory (ie. /tmp/kitti).')

    def csv_list(string):
        return string.split(',')

    oid_parser = subparsers.add_parser('oid')
    oid_parser.add_argument('main_dir', help='Path to dataset directory.')
    oid_parser.add_argument('--version',  help='The current dataset version is v4.', default='v4')
    oid_parser.add_argument('--labels-filter',  help='A list of labels to filter.', type=csv_list, default=None)
    oid_parser.add_argument('--annotation-cache-dir', help='Path to store annotation cache.', default='.')
    oid_parser.add_argument('--parent-label', help='Use the hierarchy children of this label.', default=None)

    csv_parser = subparsers.add_parser('csv')
    csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for training.')
    csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')
    csv_parser.add_argument('--val-annotations', help='Path to CSV file containing annotations for validation (optional).')

    group = parser.add_mutually_exclusive_group()
    group.add_argument('--snapshot',          help='Resume training from a snapshot.')
    group.add_argument('--imagenet-weights',  help='Initialize the model with pretrained imagenet weights. This is the default behaviour.', action='store_const', const=True, default=True)
    group.add_argument('--weights',           help='Initialize the model with weights from a file.')
    group.add_argument('--no-weights',        help='Don\'t initialize the model with any weights.', dest='imagenet_weights', action='store_const', const=False)

    parser.add_argument('--backbone',        help='Backbone model used by retinanet.', default='resnet50', type=str)
    parser.add_argument('--batch-size',      help='Size of the batches.', default=1, type=int)
    parser.add_argument('--gpu',             help='Id of the GPU to use (as reported by nvidia-smi).')
    parser.add_argument('--mode',            help='Evaluate as classifier or detector.', default='detector', type=str)
    parser.add_argument('--multi-gpu',       help='Number of GPUs to use for parallel processing.', type=int, default=0)
    parser.add_argument('--multi-gpu-force', help='Extra flag needed to enable (experimental) multi-gpu support.', action='store_true')
    parser.add_argument('--epochs',          help='Number of epochs to train.', type=int, default=50)
    parser.add_argument('--stratified_folds',help='Path to file with fold information', type=str)
    parser.add_argument('--fold',            help='Specify current validation fold.', type=int)
    parser.add_argument('--data_dir', 	     help='Directory containing validation images.', type=str)
    parser.add_argument('--steps',           help='Number of steps per epoch.', type=int, default=10000)
    parser.add_argument('--snapshot-path',   help='Path to store snapshots of models during training (defaults to \'./snapshots\')', default='./snapshots')
    parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output', default='./logs')
    parser.add_argument('--no-snapshots',    help='Disable saving snapshots.', dest='snapshots', action='store_false')
    parser.add_argument('--no-evaluation',   help='Disable per epoch evaluation.', dest='evaluation', action='store_false')
    parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')
    parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')
    parser.add_argument('--image_min_side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)
    parser.add_argument('--image_max_side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)

    return check_args(parser.parse_args(args)) 
Developer: i-pan, Project: kaggle-rsna18, Lines: 60, Source: train_kaggle.py
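A minimal usage sketch with the csv subcommand; the file paths are placeholders, and it assumes the project's check_args passes the validated namespace through:

args = parse_args(['csv', 'annotations.csv', 'classes.csv',
                   '--backbone', 'resnet50', '--batch-size', '2'])
print(args.dataset_type, args.annotations, args.batch_size)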

Example 8: parse_args

# Required import: from keras import preprocessing [as alias]
# Or: from keras.preprocessing import image [as alias]
def parse_args(args):
    """ Parse the arguments.
    """
    parser     = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
    subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
    subparsers.required = True

    coco_parser = subparsers.add_parser('coco')
    coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')

    pascal_parser = subparsers.add_parser('pascal')
    pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')

    kitti_parser = subparsers.add_parser('kitti')
    kitti_parser.add_argument('kitti_path', help='Path to dataset directory (ie. /tmp/kitti).')

    def csv_list(string):
        return string.split(',')

    oid_parser = subparsers.add_parser('oid')
    oid_parser.add_argument('main_dir', help='Path to dataset directory.')
    oid_parser.add_argument('--version',  help='The current dataset version is v4.', default='v4')
    oid_parser.add_argument('--labels-filter',  help='A list of labels to filter.', type=csv_list, default=None)
    oid_parser.add_argument('--annotation-cache-dir', help='Path to store annotation cache.', default='.')
    oid_parser.add_argument('--parent-label', help='Use the hierarchy children of this label.', default=None)

    csv_parser = subparsers.add_parser('csv')
    csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for training.')
    csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')
    csv_parser.add_argument('--val-annotations', help='Path to CSV file containing annotations for validation (optional).')

    group = parser.add_mutually_exclusive_group()
    group.add_argument('--snapshot',          help='Resume training from a snapshot.')
    group.add_argument('--imagenet-weights',  help='Initialize the model with pretrained imagenet weights. This is the default behaviour.', action='store_const', const=True, default=True)
    group.add_argument('--weights',           help='Initialize the model with weights from a file.')
    group.add_argument('--no-weights',        help='Don\'t initialize the model with any weights.', dest='imagenet_weights', action='store_const', const=False)

    parser.add_argument('--backbone',        help='Backbone model used by retinanet.', default='resnet50', type=str)
    parser.add_argument('--batch-size',      help='Size of the batches.', default=1, type=int)
    parser.add_argument('--gpu',             help='Id of the GPU to use (as reported by nvidia-smi).')
    parser.add_argument('--multi-gpu',       help='Number of GPUs to use for parallel processing.', type=int, default=0)
    parser.add_argument('--multi-gpu-force', help='Extra flag needed to enable (experimental) multi-gpu support.', action='store_true')
    parser.add_argument('--epochs',          help='Number of epochs to train.', type=int, default=50)
    parser.add_argument('--steps',           help='Number of steps per epoch.', type=int, default=10000)
    parser.add_argument('--snapshot-path',   help='Path to store snapshots of models during training (defaults to \'./snapshots\')', default='./snapshots')
    parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output', default='./logs')
    parser.add_argument('--no-snapshots',    help='Disable saving snapshots.', dest='snapshots', action='store_false')
    parser.add_argument('--no-evaluation',   help='Disable per epoch evaluation.', dest='evaluation', action='store_false')
    parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')
    parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')
    parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)
    parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)
    parser.add_argument('--weighted-average',   help='Compute the mAP using the weighted average of precisions among classes.', action='store_true')

    return check_args(parser.parse_args(args)) 
Developer: i-pan, Project: kaggle-rsna18, Lines: 57, Source: train.py

Example 9: model_with_weights

# Required import: from keras import preprocessing [as alias]
# Or: from keras.preprocessing import image [as alias]
def model_with_weights(model, weights, skip_mismatch, config=None, num_classes=None):
    """ Load weights for model.

    :param model:           <keras.Model>       The model to load weights for
    :param weights:         <string>            Path to the weights file to load
    :param skip_mismatch:   <bool>              If True, skips layers whose shape of weights doesn't match with the model.

    :return model:          <keras.Model>       The model with loaded weights
    """

    if weights is not None:
        model.load_weights(weights, by_name=True, skip_mismatch=skip_mismatch)
        if len(config.channels) > 3:
            config.channels = [0,1,2]
            img_backbone = architectures.backbone('vgg16')
            ## get img weights
            # create img model
            img_model, _, _ = create_models(
                backbone_retinanet=img_backbone.retinanet,
                num_classes=num_classes,
                weights=weights,
                multi_gpu=0, 
                freeze_backbone=False,
                lr=config.learning_rate,
                inputs=(None,None,3),
                cfg=config,
                distance = config.distance_detection,
                distance_alpha = config.distance_alpha
            )

            img_model.load_weights(weights, by_name=True, skip_mismatch=skip_mismatch)
            # layers with mismatch
            if 'max' in config.network:
                layers = ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1', 'block5_conv1']
            else:
                layers = ['block1_conv1']
            for layer_name in layers:
                model_weights = model.get_layer(layer_name).get_weights()
                img_weights = img_model.get_layer(layer_name).get_weights()
                # [0] is weights
                model_weights[0][:,:,:img_weights[0].shape[2],:] = img_weights[0]
                # [1] is bias
                model_weights[1] = img_weights[1]
                model.get_layer(layer_name).set_weights(model_weights)
                print('Loaded available image weights for layer {}'.format(layer_name))
    return model 
Developer: TUMFTM, Project: CameraRadarFusionNet, Lines: 48, Source: train_crfnet.py

Example 10: parse_args

# Required import: from keras import preprocessing [as alias]
# Or: from keras.preprocessing import image [as alias]
def parse_args(args):
    """ Parse the arguments.
    """
    parser     = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
    subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
    subparsers.required = True

    rsna_parser = subparsers.add_parser('rsna')
    rsna_parser.add_argument('rsna_path', help='Path to dataset directory (ie. /tmp/COCO).')
    rsna_parser.add_argument('rsna_train_json', help='Path to training json.')
    rsna_parser.add_argument('rsna_val_json', help='Path to validation json.')

    group = parser.add_mutually_exclusive_group()
    group.add_argument('--snapshot',          help='Resume training from a snapshot.')
    group.add_argument('--imagenet-weights',  help='Initialize the model with pretrained imagenet weights. This is the default behaviour.', action='store_const', const=True, default=True)
    group.add_argument('--weights',           help='Initialize the model with weights from a file.')
    group.add_argument('--no-weights',        help='Don\'t initialize the model with any weights.', dest='imagenet_weights', action='store_const', const=False)

    parser.add_argument('--backbone',        help='Backbone model used by retinanet.', default='resnet50', type=str)
    parser.add_argument('--batch-size',      help='Size of the batches.', default=1, type=int)
    parser.add_argument('--gpu',             help='Id of the GPU to use (as reported by nvidia-smi).')
    parser.add_argument('--multi-gpu',       help='Number of GPUs to use for parallel processing.', type=int, default=0)
    parser.add_argument('--multi-gpu-force', help='Extra flag needed to enable (experimental) multi-gpu support.', action='store_true')
    parser.add_argument('--epochs',          help='Number of epochs to train.', type=int, default=50)
    parser.add_argument('--steps',           help='Number of steps per epoch.', type=int, default=10000)
    parser.add_argument('--val_steps',           help='Number of validation steps per epoch.', type=int, default=400)
    parser.add_argument('--snapshot-path',   help='Path to store snapshots of models during training (defaults to \'./snapshots\')', default='./snapshots')
    parser.add_argument('--tensorboard_dir', help='Log directory for Tensorboard output', default='./logs')
    parser.add_argument('--no-snapshots',    help='Disable saving snapshots.', dest='snapshots', action='store_false')
    parser.add_argument('--no-evaluation',   help='Disable per epoch evaluation.', dest='evaluation', action='store_false')
    parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')
    parser.add_argument('--data-aug', help='Enables random-transforms and image-only-transforms.', action='store_true')
    parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')
    parser.add_argument('--image_only_transformations', help='Randomly perform image-only transformations.', action='store_true')
    parser.add_argument('--noise_aug_std', help='Defines the std of the random noise added during training. If noise_aug_std=None, no noise is added.', type=float,default=None)
    parser.add_argument('--bbox_aug_std', help='Defines the std of the bounding box augmentations (no augmentation if not set).', type=float,default=None)
    parser.add_argument('--dropout_rate', help='Defines the dropout rate.', type=float,default=None)

    parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)
    parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)
    parser.add_argument('--dicom_load_mode', help='Decide to load only image (image) or sex and view position as well (image_sex_view).', type=str, default='image')
    parser.add_argument('--hist_eq', help='Perform histogram equalization', action='store_true')
    
    parser.add_argument('--anchor_boxes', help='List of anchor boxes', type=str, default='0.5,1,2')
    parser.add_argument('--anchor_scales', help='List of anchor scales', type=str, default='1, 1.25992105, 1.58740105')
    parser.add_argument('--score_threshold', help='Threshold on score to filter detections with (defaults to 0.05).', default=0.2, type=float)
    parser.add_argument('--nms_threshold',   help='Non maximum suppression threshold',type=float, default=0.1)

    return check_args(parser.parse_args(args)) 
Developer: alessonscap, Project: rsna-challenge-2018, Lines: 51, Source: rsna_train.py
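The anchor options arrive as comma-separated strings; a hedged sketch of splitting them into floats downstream, with placeholder dataset paths:

args = parse_args(['rsna', '/data/rsna', 'train.json', 'val.json'])
ratios = [float(r) for r in args.anchor_boxes.split(',')]   # [0.5, 1.0, 2.0]
scales = [float(s) for s in args.anchor_scales.split(',')]  # [1.0, 1.25992105, 1.58740105]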

Example 11: parse_args

# Required import: from keras import preprocessing [as alias]
# Or: from keras.preprocessing import image [as alias]
def parse_args(args):
    """
    Parse the arguments.
    """
    today = str(date.today() + timedelta(days=0))
    parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
    subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
    subparsers.required = True

    coco_parser = subparsers.add_parser('coco')
    coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')

    pascal_parser = subparsers.add_parser('pascal')
    pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')

    csv_parser = subparsers.add_parser('csv')
    csv_parser.add_argument('annotations_path', help='Path to CSV file containing annotations for training.')
    csv_parser.add_argument('classes_path', help='Path to a CSV file containing class label mapping.')
    csv_parser.add_argument('--val-annotations-path',
                            help='Path to CSV file containing annotations for validation (optional).')

    parser.add_argument('--snapshot', help='Resume training from a snapshot.',
                        default='/home/adam/.keras/models/ResNet-50-model.keras.h5')
    parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')

    parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)
    parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')
    parser.add_argument('--num_gpus', help='Number of GPUs to use for parallel processing.', type=int, default=0)
    parser.add_argument('--multi-gpu-force', help='Extra flag needed to enable (experimental) multi-gpu support.',
                        action='store_true')
    parser.add_argument('--epochs', help='Number of epochs to train.', type=int, default=200)
    parser.add_argument('--steps', help='Number of steps per epoch.', type=int, default=10000)
    parser.add_argument('--snapshot-path',
                        help='Path to store snapshots of models during training',
                        default='checkpoints/{}'.format(today))
    parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output',
                        default='logs/{}'.format(today))
    parser.add_argument('--no-snapshots', help='Disable saving snapshots.', dest='snapshots', action='store_false')
    parser.add_argument('--no-evaluation', help='Disable per epoch evaluation.', dest='evaluation',
                        action='store_false')
    parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')
    parser.add_argument('--input-size', help='Rescale the image so the smallest side is min_side.', type=int,
                        default=512)
    parser.add_argument('--multi-scale', help='Multi-Scale training', default=False, action='store_true')
    parser.add_argument('--compute-val-loss', help='Compute validation loss during training', dest='compute_val_loss',
                        action='store_true')

    # Fit generator arguments
    parser.add_argument('--multiprocessing', help='Use multiprocessing in fit_generator.', action='store_true')
    parser.add_argument('--workers', help='Number of generator workers.', type=int, default=1)
    parser.add_argument('--max-queue-size', help='Queue length for multiprocessing workers in fit_generator.', type=int,
                        default=10)
    print(vars(parser.parse_args(args)))
    return check_args(parser.parse_args(args)) 
Developer: xuannianz, Project: keras-CenterNet, Lines: 56, Source: train.py

Example 12: create_generators

# Required import: from keras import preprocessing [as alias]
# Or: from keras.preprocessing import image [as alias]
def create_generators(args, preprocess_image):
    """ Create generators for training and validation.

    Args
        args             : parseargs object containing configuration for generators.
        preprocess_image : Function that preprocesses an image for the network.
    """
    common_args = {
        'batch_size': args.batch_size,
        'image_min_side': args.image_min_side,
        'image_max_side': args.image_max_side,
        'preprocess_image': preprocess_image,
    }

    # create random transform generator for augmenting training data
    if args.random_transform:
        transform_generator = random_transform_generator(
            min_rotation=-0.1,
            max_rotation=0.1,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_shear=-0.1,
            max_shear=0.1,
            min_scaling=(0.9, 0.9),
            max_scaling=(1.1, 1.1),
            flip_x_chance=0.5,
            flip_y_chance=0.5,
        )
    else:
        transform_generator = random_transform_generator(flip_x_chance=0.5)

    if args.dataset_type == 'oid':
        train_generator = OpenImagesGenerator(
            args.main_dir,
            subset='train',
            version=args.version,
            labels_filter=args.labels_filter,
            annotation_cache_dir=args.annotation_cache_dir,
            parent_label=args.parent_label,
            transform_generator=transform_generator,
            **common_args
        )

        validation_generator = OpenImagesGenerator(
            args.main_dir,
            subset='validation',
            version=args.version,
            labels_filter=args.labels_filter,
            annotation_cache_dir=args.annotation_cache_dir,
            parent_label=args.parent_label,
            **common_args
        )

    else:
        raise ValueError('Invalid data type received: {}'.format(args.dataset_type))

    return train_generator, validation_generator 
Developer: mukeshmithrakumar, Project: RetinaNet, Lines: 59, Source: model.py

Example 13: seg_data_generator

# Required import: from keras import preprocessing [as alias]
# Or: from keras.preprocessing import image [as alias]
def seg_data_generator(stride,n_classes,img_dir,label_dir,img_list,preprocess = True):
	while 1:
		LUT = np.eye(n_classes)

		for img_id in img_list:

			# load image
			img_path = img_dir + img_id
			x = skimage.io.imread(img_path)

			# load label
			label_path = label_dir + img_id[:-3] + 'png'
			y = skimage.io.imread(label_path) # interprets the image as a colour image
			
			# only yield if the images exist
			is_img = type(x) is np.ndarray and type(y) is np.ndarray
			not_empty = len(x.shape) > 0 and len(y.shape) > 0 

			if  is_img and not_empty:
				# deal with grayscale images
				if len(x.shape) == 2:
					x = skimage.color.gray2rgb(x)

				# only take one channel
				if len(y.shape) > 2:
					y = y[...,0] 

				# treat binary images
				if np.max(y) == 255:
					y = np.clip(y,0,1)

				# crop if image dims do not match stride
				w_rest = x.shape[0] % stride
				h_rest = x.shape[1] % stride
				
				if w_rest > 0:
					w_crop_1 = int(np.round(w_rest / 2))
					w_crop_2 = w_rest - w_crop_1
					
					x = x[w_crop_1:-w_crop_2,:,:]
					y = y[w_crop_1:-w_crop_2,:]
				if h_rest > 0:
					h_crop_1 = int(np.round(h_rest / 2))
					h_crop_2 = h_rest - h_crop_1

					x = x[:,h_crop_1:-h_crop_2,:]
					y = y[:,h_crop_1:-h_crop_2]

				# prepare for NN
				x = np.array(x,dtype='float')
				x = x[np.newaxis,...]

				if preprocess:
					x = preprocess_input(x)

				y = LUT[y]
				y = y[np.newaxis,...] # make it a 4D tensor

				yield x, y 
Developer: theduynguyen, Project: Keras-FCN, Lines: 61, Source: data_generator.py
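A hedged usage sketch; the paths, stride, and image list are illustrative (labels are expected as .png files named like the images):

img_list = ['img_001.jpg', 'img_002.jpg']
train_gen = seg_data_generator(stride=32, n_classes=2,
                               img_dir='data/imgs/', label_dir='data/labels/',
                               img_list=img_list)
# model.fit_generator(train_gen, steps_per_epoch=len(img_list), epochs=20)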

Example 14: plot_confusion_matrix

# Required import: from keras import preprocessing [as alias]
# Or: from keras.preprocessing import image [as alias]
def plot_confusion_matrix(self, cm, classes, normalize=False,
                              title='Confusion matrix',
                              cmap=plt.cm.Blues):
        """
        This function prints and plots the confusion matrix.
        Normalization can be applied by setting `normalize=True`.
        :param cm: Confusion Matrix plot
        :param classes: list for classes with labels and encoding of each label
        :param normalize: Boolean for applying normalisation
        :param title: Title of the plot
        :param cmap: color map for plot
        :return: tensor image to be exported to Tensorboard
        """
        if not check_confusion_matrix(cm):
            raise InvalidInputError('Confusion Matrix Invalid!')
        if len(classes) != cm.shape[0]:
            raise InvalidInputError('Number of classes incompatible with CM!')
        if normalize:
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        plt.colorbar()
        tick_marks = np.arange(len(classes))
        plt.xticks(tick_marks, classes, rotation=45)
        plt.yticks(tick_marks, classes)

        fmt = '.2f' if normalize else 'd'
        thresh = cm.max() / 2.
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            plt.text(j, i, format(cm[i, j], fmt),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")

        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')

        # convert to tf image
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)
        image = tf.image.decode_png(buf.getvalue(), channels=4)
        image = tf.expand_dims(image, 0)
        plt.clf()

        return image 
Developer: 921kiyo, Project: 3d-dl, Lines: 49, Source: keras_eval.py
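A hedged sketch of producing the input confusion matrix with scikit-learn and calling the method; the evaluator instance and class labels are hypothetical:

import numpy as np
from sklearn.metrics import confusion_matrix

y_true = np.array([0, 0, 1, 1, 2])
y_pred = np.array([0, 1, 1, 1, 2])
cm = confusion_matrix(y_true, y_pred)
image_tensor = evaluator.plot_confusion_matrix(cm, classes=['cat', 'dog', 'bird'],
                                               normalize=True)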


Note: The keras.preprocessing.image method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's License; do not reproduce without permission.