

Python feature.hog Method Code Examples

This article collects typical usage examples of the skimage.feature.hog method in Python. If you are wondering how exactly to use feature.hog, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the skimage.feature module.


Fifteen code examples of the feature.hog method are shown below, sorted by popularity by default.
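
Before the project examples, here is a minimal, self-contained sketch of calling skimage.feature.hog directly. It assumes a recent scikit-image release, where the visualization keyword is spelled visualize (several of the older projects below still use visualise):

from skimage import color, data
from skimage.feature import hog

# Built-in sample image, converted to a 2-D grayscale array
image = color.rgb2gray(data.astronaut())

# Compute the HOG descriptor; visualize=True also returns an image of the gradients
features, hog_image = hog(image,
                          orientations=9,
                          pixels_per_cell=(8, 8),
                          cells_per_block=(2, 2),
                          block_norm='L2-Hys',
                          visualize=True,
                          feature_vector=True)

print(features.shape)  # flattened 1-D feature vector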

Example 1: extract_features

# Required import: from skimage import feature [as alias]
# Or: from skimage.feature import hog [as alias]
def extract_features(image_dir_path, feature_dir_path, n_samples, ext='.feat'):
    progress_bar = tqdm(total=n_samples)
    i = 0
    for image_path in os.listdir(image_dir_path):
        if i == n_samples:
            break

        image = scipy.misc.imread(os.path.join(image_dir_path, image_path))
        image = rgb2gray(image)

        features = hog(image, orientations=ORIENTATIONS, pixels_per_cell=PIXELS_PER_CELL,
                       cells_per_block=CELLS_PER_BLOCK, visualise=VISUALISE, normalise=NORMALISE)

        features_file_name = image_path.split('.')[0] + ext
        features_dir_path = feature_dir_path
        features_file_path = os.path.join(features_dir_path, features_file_name)
        joblib.dump(features, features_file_path, compress=3)

        i += 1
        progress_bar.update(1) 
Author: VladKha | Project: object_detector | Lines: 22 | Source: extract_features.py
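
Note: Example 1 depends on APIs that have since been removed. scipy.misc.imread is gone from recent SciPy releases, and the visualise/normalise keywords of skimage.feature.hog were replaced by visualize/transform_sqrt. Below is a hedged sketch of an equivalent call with current libraries; the parameter values and the file name are assumptions, since the module constants (ORIENTATIONS, PIXELS_PER_CELL, ...) are not shown here.

from skimage import io
from skimage.color import rgb2gray
from skimage.feature import hog

# Hypothetical input path; skimage.io.imread stands in for the removed scipy.misc.imread
image = rgb2gray(io.imread('some_image.png'))

features = hog(image,
               orientations=9,           # assumed value of ORIENTATIONS
               pixels_per_cell=(8, 8),   # assumed value of PIXELS_PER_CELL
               cells_per_block=(2, 2),   # assumed value of CELLS_PER_BLOCK
               visualize=False,          # replaces the removed visualise keyword
               transform_sqrt=False)     # replaces the removed normalise keyword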

Example 2: get_hog_features

# Required import: from skimage import feature [as alias]
# Or: from skimage.feature import hog [as alias]
def get_hog_features(img, orient, pix_per_cell, cell_per_block,
                        vis=False, feature_vec=True):
    # Call with two outputs if vis==True
    if vis == True:
        features, hog_image = hog(img, orientations=orient,
                                  pixels_per_cell=(pix_per_cell, pix_per_cell),
                                  cells_per_block=(cell_per_block, cell_per_block),
                                  transform_sqrt=True,
                                  visualise=vis, feature_vector=feature_vec)
        return features, hog_image
    # Otherwise call with one output
    else:
        features = hog(img, orientations=orient,
                       pixels_per_cell=(pix_per_cell, pix_per_cell),
                       cells_per_block=(cell_per_block, cell_per_block),
                       transform_sqrt=True,
                       visualise=vis, feature_vector=feature_vec)
        return features 
Author: JunshengFu | Project: vehicle-detection | Lines: 20 | Source: svm_pipeline.py
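
A hedged usage sketch for get_hog_features; the input frame and parameter values are assumptions, not taken from the vehicle-detection project, and the visualise keyword used above requires an older scikit-image release.

import cv2

img = cv2.imread('test_frame.png')            # hypothetical input frame
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Feature vector only
features = get_hog_features(gray, orient=9, pix_per_cell=8,
                            cell_per_block=2, vis=False, feature_vec=True)

# Feature array plus a visualization image
features, hog_image = get_hog_features(gray, orient=9, pix_per_cell=8,
                                       cell_per_block=2, vis=True,
                                       feature_vec=False)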

Example 3: predict

# Required import: from skimage import feature [as alias]
# Or: from skimage.feature import hog [as alias]
def predict(image, model, shape_predictor=None):
    # get landmarks
    if NETWORK.use_landmarks or NETWORK.use_hog_and_landmarks or NETWORK.use_hog_sliding_window_and_landmarks:
        face_rects = [dlib.rectangle(left=0, top=0, right=NETWORK.input_size, bottom=NETWORK.input_size)]
        face_landmarks = np.array([get_landmarks(image, face_rects, shape_predictor)])
        features = face_landmarks
        if NETWORK.use_hog_sliding_window_and_landmarks: 
            hog_features = sliding_hog_windows(image)
            hog_features = np.asarray(hog_features)
            face_landmarks = face_landmarks.flatten()
            features = np.concatenate((face_landmarks, hog_features))
        else:
            hog_features, _ = hog(image, orientations=8, pixels_per_cell=(16, 16),
                                    cells_per_block=(1, 1), visualise=True)
            hog_features = np.asarray(hog_features)
            face_landmarks = face_landmarks.flatten()
            features = np.concatenate((face_landmarks, hog_features))
        tensor_image = image.reshape([-1, NETWORK.input_size, NETWORK.input_size, 1])
        predicted_label = model.predict([tensor_image, features.reshape((1, -1))])
        return get_emotion(predicted_label[0])
    else:
        tensor_image = image.reshape([-1, NETWORK.input_size, NETWORK.input_size, 1])
        predicted_label = model.predict(tensor_image)
        return get_emotion(predicted_label[0])
    return None 
Author: amineHorseman | Project: facial-expression-recognition-using-cnn | Lines: 27 | Source: predict.py

Example 4: describe

# Required import: from skimage import feature [as alias]
# Or: from skimage.feature import hog [as alias]
def describe(self, image):
        # Compute HOG for the image
        hist = feature.hog(image, orientations=self.orienations, pixels_per_cell=self.pixels_per_cell,
                           cells_per_block=self.cells_per_block, transform_sqrt=self.transform)

        # Return the HOG features
        return hist 
Author: hsSam | Project: PracticalPythonAndOpenCV_CaseStudies | Lines: 9 | Source: hog.py
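
A hedged usage sketch for the describe method. It assumes the enclosing class is named HOG and that its constructor stores the attributes referenced above (including the orienations attribute, whose spelling is taken from the source); the class definition is not shown here, so the constructor signature is hypothetical.

from skimage import color, io

# Hypothetical construction; the real constructor signature may differ
desc = HOG(orientations=9, pixels_per_cell=(8, 8),
           cells_per_block=(2, 2), transform=True)

image = color.rgb2gray(io.imread('digit.png'))  # hypothetical input image
hist = desc.describe(image)
print(hist.shape)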

Example 5: read_images

# Required import: from skimage import feature [as alias]
# Or: from skimage.feature import hog [as alias]
def read_images(f_pos, f_neg):

    print ("Reading Images")

    array_pos_features = []
    array_neg_features = []
    global total_pos_samples
    global total_neg_samples
    for imgfile in f_pos:
        img = cv2.imread(os.path.join(pos_img_dir, imgfile))
        cropped = crop_centre(img)
        gray = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
        features = hog(gray, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), block_norm="L2", feature_vector=True)
        array_pos_features.append(features.tolist())

        total_pos_samples += 1

    for imgfile in f_neg:
        img = cv2.imread(os.path.join(neg_img_dir, imgfile))
        cropped = crop_centre(img)
        gray = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
        features = hog(gray, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), block_norm="L2", feature_vector=True)
        array_neg_features.append(features.tolist())
        total_neg_samples += 1

    return array_pos_features, array_neg_features 
Author: vinay0410 | Project: Pedestrian_Detection | Lines: 28 | Source: test.py
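
The crop_centre helper is not shown in this snippet. A hedged sketch of a typical implementation follows, cropping a centered patch of the standard 64x128 pedestrian-detection window; the exact dimensions are an assumption.

def crop_centre(img, width=64, height=128):
    """Return the centered width x height patch of an image (assumed helper)."""
    h, w = img.shape[:2]
    y0 = (h - height) // 2
    x0 = (w - width) // 2
    return img[y0:y0 + height, x0:x0 + width]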

Example 6: read_images

# Required import: from skimage import feature [as alias]
# Or: from skimage.feature import hog [as alias]
def read_images(pos_files, neg_files):

    X = []
    Y = []

    pos_count = 0

    for img_file in pos_files:
        print(os.path.join(pos_img_dir, img_file))
        img = cv2.imread(os.path.join(pos_img_dir, img_file))

        cropped = crop_centre(img)

        gray = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
        features = hog(gray, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), block_norm="L2", transform_sqrt=True, feature_vector=True)
        pos_count += 1

        X.append(features)
        Y.append(1)


    neg_count = 0

    for img_file in neg_files:
        print(os.path.join(neg_img_dir, img_file))
        img = cv2.imread(os.path.join(neg_img_dir, img_file))
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        windows = ten_random_windows(gray_img)

        for win in windows:
            features = hog(win, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), block_norm="L2", transform_sqrt=True, feature_vector=True)
            neg_count += 1
            X.append(features)
            Y.append(0)


    return X, Y, pos_count, neg_count 
Author: vinay0410 | Project: Pedestrian_Detection | Lines: 39 | Source: train.py
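
The ten_random_windows helper is likewise not shown. A hedged sketch under the same assumptions, sampling ten random 64x128 windows from a grayscale negative image:

import random

def ten_random_windows(gray_img, width=64, height=128, n=10):
    """Sample n random width x height windows from a grayscale image (assumed helper)."""
    h, w = gray_img.shape[:2]
    windows = []
    for _ in range(n):
        y = random.randint(0, h - height)
        x = random.randint(0, w - width)
        windows.append(gray_img[y:y + height, x:x + width])
    return windows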

Example 7: hard_negative_mine

# Required import: from skimage import feature [as alias]
# Or: from skimage.feature import hog [as alias]
def hard_negative_mine(f_neg, winSize, winStride):

    hard_negatives = []
    hard_negative_labels = []

    count = 0
    num = 0
    for imgfile in f_neg:

        img = cv2.imread(os.path.join(neg_img_dir, imgfile))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        for (x, y, im_window) in sliding_window(gray, winSize, winStride):
            features = hog(im_window, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), block_norm="L2", transform_sqrt=True, feature_vector=True)
            if (clf1.predict([features]) == 1):
                hard_negatives.append(features)
                hard_negative_labels.append(0)

                count = count + 1

            if (count == MAX_HARD_NEGATIVES):
                return np.array(hard_negatives), np.array(hard_negative_labels)

        num = num + 1

        sys.stdout.write("\r" + "\tHard Negatives Mined: " + str(count) + "\tCompleted: " + str(round((count / float(MAX_HARD_NEGATIVES))*100, 4)) + " %" )

        sys.stdout.flush()

    return np.array(hard_negatives), np.array(hard_negative_labels) 
Author: vinay0410 | Project: Pedestrian_Detection | Lines: 31 | Source: train.py
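
The sliding_window generator used above is not included in this snippet. A hedged sketch of a typical implementation follows; the argument order matches this example (image, window size, stride), whereas Example 13 below passes the stride before the window size, and both sizes are treated as (width, height) pairs.

def sliding_window(image, win_size, step):
    """Yield (x, y, window) tuples over image; win_size and step are (width, height) pairs (assumed helper)."""
    for y in range(0, image.shape[0] - win_size[1] + 1, step[1]):
        for x in range(0, image.shape[1] - win_size[0] + 1, step[0]):
            yield (x, y, image[y:y + win_size[1], x:x + win_size[0]])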

Example 8: hog_char

# Required import: from skimage import feature [as alias]
# Or: from skimage.feature import hog [as alias]
def hog_char(image, luminance):
    rows = image.shape[0]
    cols = image.shape[1]
    rows_cellsize = int(image.shape[0]/BROWS)
    cols_cellsize = int(image.shape[1]/BCOLS)
    fd, img = feature.hog(image,
                          orientations=8,
                          pixels_per_cell=(rows_cellsize, cols_cellsize),  # (16, 16),
                          cells_per_block=(1, 1),
                          block_norm='L1',
                          visualize=True,
                          feature_vector=False)

    # With 1x1 blocks we don't care about some of the fd dimensions
    # Remove them for easier coding
    fd = np.squeeze(fd)

    # Normalize each histogram to the luminance of the block it derived from
    n_cells_row = int(rows // rows_cellsize)  # number of cells along row-axis
    n_cells_col = int(cols // cols_cellsize)  # number of cells along col-axis

    for iy in range(0, n_cells_row):
        for ix in range(0, n_cells_col):
            px = ix * cols_cellsize
            py = iy * rows_cellsize
            # cell = image[py: py + rows_cellsize, px: px + cols_cellsize]
            # luminance = cell.mean()

            hog_cell = img[py: py + rows_cellsize, px: px + cols_cellsize]
            hog_cell *= luminance / (hog_cell.mean() + sys.float_info.epsilon)

            fd_cell = fd[iy, ix]
            fd_cell *= luminance / (fd_cell.mean() + sys.float_info.epsilon)

    return (fd, img) 
Author: hughpyle | Project: ASR33 | Lines: 37 | Source: prep_overstrike.py
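
A hedged usage sketch for hog_char. BROWS and BCOLS are module-level constants in the ASR33 project; the values below are assumptions chosen so that the cell grid divides the image evenly.

import numpy as np

BROWS, BCOLS = 4, 3                   # assumed cell-grid shape
cell_img = np.random.rand(64, 48)     # placeholder character cell, grayscale in [0, 1]

fd, hog_img = hog_char(cell_img, luminance=cell_img.mean())
print(fd.shape)                       # (BROWS, BCOLS, 8) after np.squeeze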

Example 9: hog_char

# Required import: from skimage import feature [as alias]
# Or: from skimage.feature import hog [as alias]
def hog_char(image, count, luminance):
    rows = image.shape[0]
    cols = image.shape[1]
    rows_cellsize = int(image.shape[0]/BROWS)
    cols_cellsize = int(image.shape[1]/BCOLS)
    fd, img = feature.hog(image,
                          orientations=8,
                          pixels_per_cell=(rows_cellsize, cols_cellsize),  # (16, 16),
                          cells_per_block=(1, 1),
                          block_norm='L2-Hys',
                          visualize=True,
                          feature_vector=False)

    # With 1x1 blocks we don't care about some of the fd dimensions
    # Remove them for easier coding
    fd = np.squeeze(fd)

    # Normalize each histogram to the luminance of the block it derived from
    n_cells_row = int(rows // rows_cellsize)  # number of cells along row-axis
    n_cells_col = int(cols // cols_cellsize)  # number of cells along col-axis

    for iy in range(0, n_cells_row):
        for ix in range(0, n_cells_col):
            px = ix * cols_cellsize
            py = iy * rows_cellsize
            # cell = image[py: py + rows_cellsize, px: px + cols_cellsize]
            # luminance = cell.mean()

            hog_cell = img[py: py + rows_cellsize, px: px + cols_cellsize]
            hog_cell *= luminance / (hog_cell.mean() + sys.float_info.epsilon)

            fd_cell = fd[iy, ix]
            fd_cell *= luminance / (fd_cell.mean() + sys.float_info.epsilon)

    return (fd, img) 
Author: hughpyle | Project: ASR33 | Lines: 37 | Source: prep_ascii.py

Example 10: sliding_hog_windows

# Required import: from skimage import feature [as alias]
# Or: from skimage.feature import hog [as alias]
def sliding_hog_windows(image):
    hog_windows = []
    for y in range(0, image_height, window_step):
        for x in range(0, image_width, window_step):
            window = image[y:y+window_size, x:x+window_size]
            hog_windows.extend(hog(window, orientations=8, pixels_per_cell=(8, 8),
                                            cells_per_block=(1, 1), visualise=False))
    return hog_windows 
Author: amineHorseman | Project: facial-expression-recognition-svm | Lines: 10 | Source: convert_fer2013_to_images_and_landmarks.py

Example 11: sliding_hog_windows

# Required import: from skimage import feature [as alias]
# Or: from skimage.feature import hog [as alias]
def sliding_hog_windows(image):
    hog_windows = []
    for y in range(0, NETWORK.input_size, window_step):
        for x in range(0, NETWORK.input_size, window_step):
            window = image[y:y+window_size, x:x+window_size]
            hog_windows.extend(hog(window, orientations=8, pixels_per_cell=(8, 8),
                                            cells_per_block=(1, 1), visualise=False))
    return hog_windows 
Author: amineHorseman | Project: facial-expression-recognition-using-cnn | Lines: 10 | Source: predict.py

Example 12: hog

# Required import: from skimage import feature [as alias]
# Or: from skimage.feature import hog [as alias]
def hog(img, bins=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), transform_sqrt=False, feature_vector=True):
    """
    Extract HOG features from an image.
    See details at https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/_hog.py
    """
    from skimage.feature import hog
    # Pass the wrapper's parameters through instead of hard-coding them
    return hog(img,
               orientations=bins,
               pixels_per_cell=pixels_per_cell,
               cells_per_block=cells_per_block,
               visualise=False,
               transform_sqrt=transform_sqrt,
               feature_vector=feature_vector)
Author: HLIG | Project: HUAWEIOCR-2019 | Lines: 15 | Source: feature.py
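
A hedged usage sketch for the wrapper above, using one of scikit-image's built-in grayscale test images. Note that the visualise keyword inside the wrapper only exists in older scikit-image releases; current releases spell it visualize.

from skimage import data

gray = data.camera()                  # built-in 512x512 grayscale image
features = hog(gray, bins=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2))
print(features.shape)                 # flattened 1-D feature vector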

Example 13: detect

# Required import: from skimage import feature [as alias]
# Or: from skimage.feature import hog [as alias]
def detect(self, image):
        clone = image.copy()

        image = rgb2gray(image)

        # list to store the detections
        detections = []
        # current scale of the image
        downscale_power = 0

        # downscale the image and iterate
        for im_scaled in pyramid(image, downscale=self.downscale, min_size=self.window_size):
            # if the width or height of the scaled image is less than
            # the width or height of the window, then end the iterations
            if im_scaled.shape[0] < self.window_size[1] or im_scaled.shape[1] < self.window_size[0]:
                break
            for (x, y, im_window) in sliding_window(im_scaled, self.window_step_size,
                                                    self.window_size):
                if im_window.shape[0] != self.window_size[1] or im_window.shape[1] != self.window_size[0]:
                    continue

                # calculate the HOG features
                feature_vector = hog(im_window)
                X = np.array([feature_vector])
                prediction = self.clf.predict(X)
                if prediction == 1:
                    x1 = int(x * (self.downscale ** downscale_power))
                    y1 = int(y * (self.downscale ** downscale_power))
                    detections.append((x1, y1,
                                       x1 + int(self.window_size[0] * (
                                               self.downscale ** downscale_power)),
                                       y1 + int(self.window_size[1] * (
                                               self.downscale ** downscale_power))))

            # Move to the next scale
            downscale_power += 1

        # Display the results before performing NMS
        clone_before_nms = clone.copy()
        for (x1, y1, x2, y2) in detections:
            # Draw the detections
            cv2.rectangle(clone_before_nms, (x1, y1), (x2, y2), (0, 255, 0), thickness=2)

        # Perform Non Maxima Suppression
        detections = non_max_suppression(np.array(detections), self.threshold)

        clone_after_nms = clone
        # Display the results after performing NMS
        for (x1, y1, x2, y2) in detections:
            # Draw the detections
            cv2.rectangle(clone_after_nms, (x1, y1), (x2, y2), (0, 255, 0), thickness=2)

        return clone_before_nms, clone_after_nms 
Author: VladKha | Project: object_detector | Lines: 55 | Source: test_classifier.py
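
The pyramid generator used in detect() is not shown here. A hedged sketch of a typical implementation built on skimage.transform.rescale follows (an assumption, not the original project's code); min_size is treated as a (width, height) pair, matching the shape checks above.

from skimage.transform import rescale

def pyramid(image, downscale=1.25, min_size=(64, 128)):
    """Yield successively downscaled copies of image until smaller than min_size (assumed helper)."""
    yield image
    while True:
        image = rescale(image, 1.0 / downscale, anti_aliasing=True)
        if image.shape[0] < min_size[1] or image.shape[1] < min_size[0]:
            break
        yield image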

Example 14: process

# Required import: from skimage import feature [as alias]
# Or: from skimage.feature import hog [as alias]
def process(image):
    (rows, cols) = image.shape
    cellsize = CELLPX

    # Make sure the image is a multiple of 4x3 x cellsize in both dimensions
    rn = cellsize * BROWS
    cn = cellsize * BCOLS
    newshape = (((rows + rn - 1) // rn) * rn, ((cols + cn - 1) // cn) * cn)
    if newshape != image.shape:
        image = np.resize(image, newshape)
        (rows, cols) = image.shape
    n_cells_row = int(rows // cellsize)  # number of cells along row-axis
    n_cells_col = int(cols // cellsize)  # number of cells along col-axis

    # Put a dot in the middle of each cell, so that the HOG
    # doesn't end up as exactly zero for areas with no gradient
    for iy in range(0, n_cells_row):
        for ix in range(0, n_cells_col):
            px = ix * cellsize
            py = iy * cellsize
            image[py + int(cellsize / 2), px + int(cellsize / 2)] += 0.001
            image[py + int(cellsize / 2), px + int(cellsize / 2)+1] += 0.001
            image[py + int(cellsize / 2)+1, px + int(cellsize / 2)] += 0.001
            image[py + int(cellsize / 2)+1, px + int(cellsize / 2)+1] += 0.001

    # HOG the whole image
    fd, img = feature.hog(image,
                          orientations=HOG_ORIENTATIONS,
                          pixels_per_cell=(cellsize, cellsize),
                          cells_per_block=(1, 1),
                          block_norm='L1',  # alternative: 'L2-Hys'
                          visualize=True,  # VISUALIZE,
                          feature_vector=False)

    # With 1x1 blocks we don't care about some of the fd dimensions
    # Remove them for easier coding
    fd = np.squeeze(fd)

    # Normalize each histogram to the luminance of the block it derived from
    for iy in range(0, n_cells_row):
        for ix in range(0, n_cells_col):
            px = ix * cellsize
            py = iy * cellsize
            cell = image[py: py + cellsize, px: px + cellsize]
            luminance = cell.mean()
            if VISUALIZE:
                hog_cell = img[py: py + cellsize, px: px + cellsize]
                hcm = hog_cell.mean()
                hog_cell *= luminance / (hcm + sys.float_info.epsilon)

            fd_cell = fd[iy, ix]
            fdm = fd_cell.mean()
            fd_cell *= luminance / (fdm + sys.float_info.epsilon)

    if VISUALIZE:
        # Normalize the image-of-HOG and save it just so we can see
        img *= 1 / img.max()
        imageio.imsave("hog.png", img)

    return fd 
Author: hughpyle | Project: ASR33 | Lines: 62 | Source: image2.py

Example 15: process

# Required import: from skimage import feature [as alias]
# Or: from skimage.feature import hog [as alias]
def process(image):
    (rows, cols) = image.shape
    cellsize = 16

    # Make sure the image is a multiple of 3x5 x cellsize in both dimensions
    rn = cellsize * BROWS
    cn = cellsize * BCOLS
    newshape = (((rows + rn - 1) // rn) * rn, ((cols + cn - 1) // cn) * cn)
    if newshape != image.shape:
        image = np.resize(image, newshape)

    # HOG the whole image
    fd, img = feature.hog(image,
                          orientations=HOG_ORIENTATIONS,
                          pixels_per_cell=(cellsize, cellsize),
                          cells_per_block=(1, 1),
                          block_norm='L2-Hys',
                          visualize=VISUALIZE,
                          feature_vector=False)

    # With 1x1 blocks we don't care about some of the fd dimensions
    # Remove them for easier coding
    fd = np.squeeze(fd)

    # Normalize each histogram to the luminance of the block it derived from
    n_cells_row = int(rows // cellsize)  # number of cells along row-axis
    n_cells_col = int(cols // cellsize)  # number of cells along col-axis
    for iy in range(0, n_cells_row):
        for ix in range(0, n_cells_col):
            px = ix * cellsize
            py = iy * cellsize
            cell = image[py: py + cellsize, px: px + cellsize]
            luminance = cell.mean()
            if VISUALIZE:
                hog_cell = img[py: py + cellsize, px: px + cellsize]
                hog_cell *= luminance / (hog_cell.mean() + sys.float_info.epsilon)

            fd_cell = fd[iy, ix]
            fd_cell *= luminance / (fd_cell.mean() + sys.float_info.epsilon)

    if VISUALIZE:
        # Normalize the image-of-HOG and save it just so we can see
        img *= 1 / img.max()
        imageio.imsave("hog.png", img)

    return fd 
Author: hughpyle | Project: ASR33 | Lines: 48 | Source: image1.py


Note: The skimage.feature.hog examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; the source code copyright belongs to the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.