

Python feature.hog Function Code Examples

This article collects typical usage examples of Python's skimage.feature.hog function. If you are wondering what hog does, how to call it, or what real code using it looks like, the curated examples below may help.


Fifteen code examples of the hog function are shown below, sorted by popularity by default.
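Before the collected examples, here is a minimal, self-contained sketch of the basic call. It assumes a recent scikit-image release, where the keyword is spelled visualize; several of the older examples below use the since-removed spelling visualise.

from skimage import color, data
from skimage.feature import hog

image = color.rgb2gray(data.astronaut())   # any 2-D grayscale array works
fd, hog_image = hog(image,
                    orientations=9,
                    pixels_per_cell=(8, 8),
                    cells_per_block=(2, 2),
                    visualize=True)        # also return a rendering of the gradients
print(fd.shape)                            # flat 1-D feature vector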

Example 1: getHOG3

def getHOG3(imgs, ori=8, ppc=(4, 4), cpb=(4, 4)):
    # assumed context: sys and numpy as np imported; hog from skimage.feature
    # (an older release: note the visualise keyword)
    # determine the output shape from the first image's first channel
    fd = hog(imgs[0, :, :, 0], orientations=ori, pixels_per_cell=ppc, cells_per_block=cpb, visualise=False)
    # print fd.shape
    hogs = np.zeros((imgs.shape[0], fd.shape[0] * 3))
    # HOG
    for i in range(imgs.shape[0]):
        # zimgs[i,:] = exposure.equalize_hist(imgs[i,:])
        # imgs[i,:] = rank.equalize(imgs[i,:]/255,selem=disk(0))
        # plt.imshow(imgs[i,:]),plt.show()
        hogs[i, 0 : fd.shape[0]] = hog(
            imgs[i, :, :, 0], orientations=ori, pixels_per_cell=ppc, cells_per_block=cpb, visualise=False
        )
        hogs[i, fd.shape[0] : (2 * fd.shape[0])] = hog(
            imgs[i, :, :, 1], orientations=ori, pixels_per_cell=ppc, cells_per_block=cpb, visualise=False
        )
        hogs[i, 2 * fd.shape[0] : (3 * fd.shape[0])] = hog(
            imgs[i, :, :, 2], orientations=ori, pixels_per_cell=ppc, cells_per_block=cpb, visualise=False
        )
        sys.stdout.write("\rIteration {0}/{1}".format((i + 1), imgs.shape[0]))
        sys.stdout.flush()

    mean = np.mean(hogs, axis=0)
    hogs -= mean
    return hogs
Author: pjh5 | Project: spring2016_comp540 | Lines: 25 | Source: utils2.py
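A hypothetical driver for getHOG3, assuming the context noted in the snippet; the random batch is purely to illustrate the (N, H, W, 3) layout the indexing above expects.

import numpy as np

imgs = np.random.rand(10, 32, 32, 3)   # batch of 10 RGB images, (N, H, W, 3)
features = getHOG3(imgs, ori=8, ppc=(4, 4), cpb=(4, 4))
print(features.shape)                  # (10, 3 * per-channel HOG length)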

Example 2: ComputeDescriptors

    def ComputeDescriptors(self, RGB, Depth, dep_mask, h):
        # assumed context: cv2 and numpy as np imported; hog from
        # skimage.feature (an older release: the fifth and sixth positional
        # arguments are visualise and normalise); Python 2 (xrange)
        dep = np.float32(Depth)
        dep_mask = cv2.bitwise_not(dep_mask)
        # keep only pixels whose depth lies in (0.01, 1.7]
        ret, mask = cv2.threshold(dep, 1.7, 1, cv2.THRESH_BINARY_INV)
        mask = np.uint8(mask)
        ret, mask2 = cv2.threshold(dep, 0.01, 1, cv2.THRESH_BINARY)
        mask2 = np.uint8(mask2)
        mask = cv2.bitwise_and(mask, mask2)
        mask = cv2.bitwise_and(mask, dep_mask)
        masked_data = None  # defined up front so the return below is always bound
        if h:
            masked_data = cv2.bitwise_and(RGB, RGB, mask=mask)
            masked_data = cv2.bitwise_and(masked_data, masked_data, mask=mask2)
            sp = cv2.cvtColor(masked_data, cv2.COLOR_RGB2GRAY)
            sp = cv2.GaussianBlur(sp, (5, 5), 10)
            fd, imn = hog(dep, self.orientations, self.pixels_per_cell,
                          self.cells_per_block, self.visualize, self.normalize)
            if self.HogDepth:
                fdn, im = hog(sp, self.orientations, self.pixels_per_cell,
                              self.cells_per_block, self.visualize, self.normalize)
                fd = np.concatenate((fd, fdn))
        else:
            fd = []
        # color histogram (ComputeHC) over a 4x4 grid of 25x25-pixel patches
        fgrid = np.array([])
        for i in xrange(4):
            for j in xrange(4):
                sub = RGB[25*i:25*(i+1), 25*j:25*(j+1)]
                sub_mask = mask[25*i:25*(i+1), 25*j:25*(j+1)]
                fsub = self.ComputeHC(sub, sub_mask)
                fgrid = np.concatenate((fgrid, fsub))
        fd2 = fgrid.copy()
        return fd, fd2, masked_data
Author: pazagra | Project: catkin_ws | Lines: 31 | Source: Interaction_Recogn.py

Example 3: extract_features

def extract_features():
    # assumed context: os, glob and joblib imported; imread from skimage.io;
    # hog from skimage.feature; paths and HOG settings are module globals
    # (see the sketch after this example); Python 2 print statements
    des_type = 'HOG'

    # If feature directories don't exist, create them
    if not os.path.isdir(pos_feat_ph):
        os.makedirs(pos_feat_ph)

    # Likewise for the negative features directory
    if not os.path.isdir(neg_feat_ph):
        os.makedirs(neg_feat_ph)

    print "Calculating the descriptors for the positive samples and saving them"
    for im_path in glob.glob(os.path.join(pos_im_path, "*")):
        #print im_path
        
        im = imread(im_path, as_grey=True)
        if des_type == "HOG":
            fd = hog(im, orientations, pixels_per_cell, cells_per_block, visualize, normalize)
        fd_name = os.path.split(im_path)[1].split(".")[0] + ".feat"
        fd_path = os.path.join(pos_feat_ph, fd_name)
        joblib.dump(fd, fd_path)
    print "Positive features saved in {}".format(pos_feat_ph)

    print "Calculating the descriptors for the negative samples and saving them"
    for im_path in glob.glob(os.path.join(neg_im_path, "*")):
        im = imread(im_path, as_grey=True)
        if des_type == "HOG":
            fd = hog(im,  orientations, pixels_per_cell, cells_per_block, visualize, normalize)
        fd_name = os.path.split(im_path)[1].split(".")[0] + ".feat"
        fd_path = os.path.join(neg_feat_ph, fd_name)
    
        joblib.dump(fd, fd_path)
    print "Negative features saved in {}".format(neg_feat_ph)

    print "Completed calculating features from training images"
Author: BUPTLdy | Project: object-detector | Lines: 35 | Source: extract_features.py
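The snippet relies on module-level configuration. A hypothetical set of the globals it assumes (names taken from the code, values illustrative only) might be:

pos_im_path, neg_im_path = "data/pos", "data/neg"          # input images
pos_feat_ph, neg_feat_ph = "features/pos", "features/neg"  # output .feat dirs
orientations, pixels_per_cell, cells_per_block = 9, (8, 8), (2, 2)
visualize, normalize = False, True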

Example 4: SingleDecisionTreeClassifier

def SingleDecisionTreeClassifier(pix):
	# assumed context: time imported; hog from skimage.feature;
	# DecisionTreeClassifier from sklearn.tree; the MNIST arrays
	# training_image_data, training_label_data, testing_image_data and
	# testing_label_data are module globals; Python 2 print statements
	print "\nCreating HOG Dataset from MNIST Data"
	start_time = time.time()
	training_image_data_hog = [hog(img, orientations=9, pixels_per_cell=(pix,pix), cells_per_block=(3, 3))
					for img in training_image_data]
	testing_image_data_hog = [hog(img, orientations=9, pixels_per_cell=(pix, pix), cells_per_block=(3, 3))
					for img in testing_image_data]
	end_time = time.time() - start_time
	print "It took "+ str(end_time) + " to make the HOG Images"

	print '\nTraining data'
	start_time = time.time()
	single_decision_tree_classifier = DecisionTreeClassifier()
	single_decision_tree_classifier.fit(training_image_data_hog, training_label_data)
	end_time = time.time() - start_time
	print "It took "+ str(end_time) + " to train the classifier"
	print 'Training Completed'

	print '\nTesting data '
	start_time = time.time()
	single_decision_tree_classifier_accuracy = single_decision_tree_classifier.score(testing_image_data_hog, testing_label_data)
	end_time = time.time() - start_time
	print "It took "+ str(end_time) + " to test the data "
	print '\nPrinting Accuracy'
	print "\nTesting for Single Decision Tree Classifier with pixels per cell = ("+str(pix)+','+str(pix)+') :'
	print "-------------------------------------------------"
	print "\nSingleDecisionTreeClassifier accuracy for ("+str(pix)+','+str(pix)+") : "+ str(single_decision_tree_classifier_accuracy)

	return single_decision_tree_classifier_accuracy
Author: luckysahani | Project: Machine-Learning-Decision-trees-and-forests | Lines: 30 | Source: 1.py
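A self-contained Python 3 variant of the same pattern (HOG features feeding a decision tree), substituting scikit-learn's bundled 8x8 digits set for the MNIST arrays the snippet assumes; the HOG parameters are illustrative only.

from skimage.feature import hog
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

digits = load_digits()
feats = [hog(img, orientations=9, pixels_per_cell=(2, 2), cells_per_block=(2, 2))
         for img in digits.images]            # 8x8 images -> HOG vectors
X_train, X_test, y_train, y_test = train_test_split(feats, digits.target,
                                                    random_state=0)
clf = DecisionTreeClassifier().fit(X_train, y_train)
print(clf.score(X_test, y_test))              # held-out accuracy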

Example 5: transfer_to_hog

def transfer_to_hog(file_name):
	# assumed context: csv and numpy as np imported; hog from skimage.feature;
	# Python 2 (binary csv modes, .next(), map returning a list)
	if file_name == 'train':
		csv_file_object = csv.reader(open('train.csv', 'rb'))
		hog_write = csv.writer(open('hog.csv', 'wb'))      
		header = csv_file_object.next()
		imsize=(28,28)                                                                                  
		for row in csv_file_object:
			hogr=[]
			hogr.append(int(row[0]))
			image=map(int,row[1:])
			image=np.reshape(image, imsize)
			fd= hog(image, orientations=8, pixels_per_cell=(4, 4),
                    cells_per_block=(1, 1))
			hogr.extend(fd)
			fd= hog(image[2:26,2:26], orientations=8, pixels_per_cell=(4, 4),
                    cells_per_block=(1, 1))
			hogr.extend(fd)
			hog_write.writerow(hogr)
	if file_name == 'test':
		csv_file_object = csv.reader(open('test.csv', 'rb'))
		hog_write = csv.writer(open('hog_test.csv', 'wb'))      
		header = csv_file_object.next()
		imsize=(28,28)                                                                                  
		for row in csv_file_object:
			hogr=[]
			image=map(int,row)
			image=np.reshape(image, imsize)
			fd= hog(image, orientations=8, pixels_per_cell=(4, 4),
                    cells_per_block=(1, 1))
			hogr.extend(fd)
			fd= hog(image[2:26,2:26], orientations=8, pixels_per_cell=(4, 4),
                    cells_per_block=(1, 1))
			hogr.extend(fd)
			hog_write.writerow(hogr)
Author: yunhaolucky | Project: digit_recog | Lines: 34 | Source: input_helper.py
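The per-row transform itself, restated as a Python 3 sketch on synthetic data, is two concatenated HOG vectors: one over the full 28x28 digit and one over its central 24x24 crop.

import numpy as np
from skimage.feature import hog

row = np.random.randint(0, 256, 784)          # one flattened 28x28 digit
image = row.reshape(28, 28)
fd_full = hog(image, orientations=8, pixels_per_cell=(4, 4), cells_per_block=(1, 1))
fd_crop = hog(image[2:26, 2:26], orientations=8, pixels_per_cell=(4, 4),
              cells_per_block=(1, 1))
features = np.concatenate([fd_full, fd_crop])  # 392 + 288 values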

Example 6: extract

    def extract(self, image):
        # assumed context: numpy as np imported; hog, local_binary_pattern and
        # daisy from skimage.feature; self.features names the feature blocks
        # to stack; self.centers holds texton cluster centers
        features = np.array([])
        vec = []
        if 'raw' in self.features:
            vec = image.flatten()
        features = np.append(features, vec)
        vec = []
        if 'textons' in self.features:
            import gen_histogram as tx
            vec = np.array(tx.histogram(image, self.centers))
        features = np.append(features, vec)
        vec = []
        if 'hog' in self.features:
            vec = hog(image, cells_per_block=(3, 3))
            vec = np.append(vec, hog(image, cells_per_block=(4, 4)))
            vec = np.append(vec, hog(image, cells_per_block=(1, 1)))
            vec = np.append(vec, hog(image, cells_per_block=(2, 2)))
        features = np.append(features, vec)
        vec = []
        if 'lbp' in self.features:
            vec = local_binary_pattern(image, 24, 3).flatten()
        features = np.append(features, vec)
        vec = []
        if 'daisy' in self.features:
            vec = daisy(image).flatten()
        features = np.append(features, vec)

        return features
Author: DuongHoangThuy | Project: iris-recognition-1 | Lines: 28 | Source: features.py

Example 7: get_hog_features

def get_hog_features(img, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True):
    """
    Extract the HOG features from the input image.
        Parameters:
            img: Input image.
            orient: Number of orientation bins.
            pix_per_cell: Size (in pixels) of a cell.
            cell_per_block: Number of cells in each block.
            vis: Visualization flag.
            feature_vec: Return the data as a feature vector.
    """
    if vis:
        features, hog_image = hog(img, orientations=orient, 
                                  pixels_per_cell=(pix_per_cell, pix_per_cell),
                                  cells_per_block=(cell_per_block, cell_per_block), 
                                  transform_sqrt=True, 
                                  visualise=vis, feature_vector=feature_vec)
        return features, hog_image
    else:
        features = hog(img, orientations=orient, 
                       pixels_per_cell=(pix_per_cell, pix_per_cell),
                       cells_per_block=(cell_per_block, cell_per_block), 
                       transform_sqrt=True, 
                       visualise=vis, feature_vector=feature_vec)
        return features
Author: muhammedabdelnasser | Project: Vehicle-Detection-and-Tracking | Lines: 25 | Source: VehicleDetectionModel.py
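Hypothetical calls on a synthetic grayscale frame, assuming an older scikit-image release that still accepts the visualise keyword used above:

import numpy as np

frame = np.random.randint(0, 255, (64, 64), dtype=np.uint8)
features = get_hog_features(frame, orient=9, pix_per_cell=8, cell_per_block=2)
features, hog_img = get_hog_features(frame, orient=9, pix_per_cell=8,
                                     cell_per_block=2, vis=True)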

Example 8: classifier

def classifier():
	# assumed context: hog from skimage.feature; RandomForestClassifier from
	# sklearn.ensemble; processData and refineSets defined elsewhere in the
	# project; Python 2 print statements
	train_images, train_labels = processData('train')
	test_images, test_labels = processData('test')
	# sss = StratifiedShuffleSplit(train_labels, 3, test_size=0.5, random_state=0)
	# print sss
	# raw_input()
	# for train_index, test_index in sss:
	# 	print train_labels[train_index]
	# 	raw_input()


	train_images, train_labels = refineSets(train_images, train_labels, 1111)
	test_images, test_labels = refineSets(test_images, test_labels, 111)
	
	hog_train_images = [ hog(image) for image in train_images]
	hog_test_images = [ hog(image) for image in test_images]

	print 'Accuracy on test data : '
		#DistanceMetric.get_metric(metric)

	forest_sizes = [100,200,300,400,500]
	for size in forest_sizes:
		clf = RandomForestClassifier(n_estimators=size,criterion='entropy',n_jobs=-1)
		clf.fit(hog_train_images,train_labels)

		print size,"->",clf.score(hog_test_images,test_labels)
Author: aayushmudgal | Project: CS771-MLT | Lines: 26 | Source: df.py

Example 9: getHOG

def getHOG(imgs, ori=8, ppc=(4, 4), cpb=(4, 4), vis=True):
    # assumed context: sys and numpy as np imported; hog from skimage.feature
    # (an older release: the visualise keyword)
    # determine the output shape from the first image
    if vis:
        fd, im = hog(imgs[0, :], orientations=ori, pixels_per_cell=ppc, cells_per_block=cpb, visualise=vis)
        imgs2 = imgs
    else:
        fd = hog(imgs[0, :], orientations=ori, pixels_per_cell=ppc, cells_per_block=cpb, visualise=vis)

    hogs = np.zeros((imgs.shape[0], fd.shape[0]))
    # HOG
    for i in range(imgs.shape[0]):
        # zimgs[i,:] = exposure.equalize_hist(imgs[i,:])
        # imgs[i,:] = rank.equalize(imgs[i,:]/255,selem=disk(0))
        # plt.imshow(imgs[i,:]),plt.show()
        if vis:
            hogs[i, :], imgs2[i] = hog(
                imgs[i, :], orientations=ori, pixels_per_cell=ppc, cells_per_block=cpb, visualise=vis
            )
        else:
            hogs[i, :] = hog(imgs[i, :], orientations=ori, pixels_per_cell=ppc, cells_per_block=cpb, visualise=vis)
        sys.stdout.write("\rIteration {0}/{1}".format((i + 1), imgs.shape[0]))
        sys.stdout.flush()
    mean = np.mean(hogs, axis=0)
    hogs -= mean

    if vis:
        return hogs, imgs2
    else:
        return hogs
Author: pjh5 | Project: spring2016_comp540 | Lines: 29 | Source: utils2.py

Example 10: extractHOG

def extractHOG(inputimg, showHOG=False):
    # assumed context: matplotlib.pyplot as plt imported; color, exposure and
    # feature from skimage (an older release: the visualise keyword)

    # convert image to single-channel grayscale
    image = color.rgb2gray(inputimg)

    #extract HOG features
    if showHOG:
        fd, hog_image = feature.hog(image, orientations=36,
                                    pixels_per_cell=(16, 16),
                                    cells_per_block=(2, 2),
                                    visualise=showHOG)
    else:
        fd = feature.hog(image, orientations=8, pixels_per_cell=(16, 16),
                         cells_per_block=(1, 1), visualise=showHOG)
    if showHOG:
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)
        ax1.axis('off')
        ax1.imshow(image, cmap=plt.cm.gray)
        ax1.set_title('Input image')
        ax1.set_adjustable('box-forced')
        # Rescale histogram for better display
        hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
        ax2.axis('off')
        ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
        ax2.set_title('Histogram of Oriented Gradients')
        ax2.set_adjustable('box-forced')
        plt.show()
    return fd
Author: Gozel | Project: autoemoji | Lines: 28 | Source: autoemoji.py
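A hypothetical call, again assuming an older scikit-image release (the visualise keyword); any RGB array works, since the function converts to grayscale first.

from skimage import data

fd = extractHOG(data.astronaut())   # 8 orientations, 16x16 cells, 1x1 blocks
print(fd.shape)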

Example 11: test_img

def test_img(svm, img_path, scales, subwindow=None):
    # assumed context: cv2 (OpenCV 2.x-era API) and numpy as np imported;
    # hog from skimage.feature (an older release: the visualise keyword);
    # utils is a project-local helper module
    base_img = cv2.imread(img_path)

    prev_img_path = utils.get_prev_img(img_path)
    base_prev_img = cv2.imread(prev_img_path)

    windows = []
    windows_features = []
    sc = []

    for scale in scales:
        img = cv2.resize(base_img, (0, 0), fx=scale, fy=scale)
        img_bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        prev_img = cv2.resize(base_prev_img, (0, 0), fx=scale, fy=scale)
        prev_img_bw = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)

        height, width, _ = img.shape

        flow = cv2.calcOpticalFlowFarneback(prev_img_bw, img_bw, 0.5, 3, 15, 3, 5, 1.2, 0)

        hsv = np.zeros_like(img)
        hsv[..., 1] = 255

        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        flowRGB = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        flow_bw = cv2.cvtColor(flowRGB, cv2.COLOR_BGR2GRAY)

        if subwindow is None:
            nsx, nsy, nw, nh = 0, 0, width, height
        else:
            nsx, nsy, nw, nh = utils.getDetectionWindow(subwindow, width, height, scale)

        for x in range(nsx, nsx + nw - 64, 16):
            for y in range(nsy, nsy + nh - 128, 16):
                img_crop = img_bw[y:y + 128, x:x + 64]
                hog_gray = hog(img_crop, orientations=9, pixels_per_cell=(8, 8),
                         cells_per_block=(2, 2), visualise=False)

                flow_crop = flow_bw[y:y + 128, x:x + 64]
                fd_flow = hog(flow_crop, orientations=9, pixels_per_cell=(8, 8),
                              cells_per_block=(2, 2), visualise=False)
                fd = hog_gray + fd_flow  # elementwise sum of the two HOG vectors

                windows.append((x, y))
                windows_features.append(fd)
                sc.append(scale)

    classes = svm.predict(windows_features)

    results = []
    for i in range(0, len(windows)):
        if classes[i] == 1:
            scale = sc[i]
            results.append((int(windows[i][0] / scale), int(windows[i][1] / scale),
                            int(64 / scale), int(128 / scale)))
    return results
Author: mataevs | Project: persondetector | Lines: 58 | Source: tester_hog.py

Example 12: test_hog_output_equivariance_multichannel

def test_hog_output_equivariance_multichannel():
    # assumed context: numpy as np imported; assert_almost_equal from
    # numpy.testing; data and feature from skimage (a release where hog
    # still accepts multichannel=)
    img = data.astronaut()
    img[:, :, (1, 2)] = 0
    hog_ref = feature.hog(img, multichannel=True, block_norm='L1')

    for n in (1, 2):
        hog_fact = feature.hog(np.roll(img, n, axis=2), multichannel=True,
                               block_norm='L1')
        assert_almost_equal(hog_ref, hog_fact)
Author: jarrodmillman | Project: scikit-image | Lines: 9 | Source: test_hog.py
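For reference, multichannel= was deprecated in scikit-image 0.19 in favor of channel_axis=. Assuming a 0.19+ release, the same equivariance check might read:

import numpy as np
from numpy.testing import assert_almost_equal
from skimage import data, feature

img = data.astronaut()
img[:, :, (1, 2)] = 0                 # keep only the red channel
hog_ref = feature.hog(img, channel_axis=-1, block_norm='L1')
for n in (1, 2):
    hog_fact = feature.hog(np.roll(img, n, axis=2), channel_axis=-1,
                           block_norm='L1')
    assert_almost_equal(hog_ref, hog_fact)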

Example 13: get_spat_arrng_ftrs

def get_spat_arrng_ftrs(gray_img):
	# assumed context: numpy as np imported; feature and transform from skimage
	# resize img to 600 x 600
	resized_img = transform.resize(gray_img, (600, 600))
	left = resized_img.transpose()[:300].transpose()
	right = resized_img.transpose()[300:].transpose()

	I_anti = np.identity(600)[::-1]  # exchange matrix: left-multiplying reverses rows
	inner = feature.hog(left) - feature.hog(I_anti.dot(right))

	return dict(symmetry = np.linalg.norm(inner))
Author: smenon8 | Project: AnimalWildlifeEstimator | Lines: 10 | Source: ExtractBtyFtrs.py
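A hypothetical call on a sample image. The returned dict holds a single 'symmetry' value: the norm of the difference between the left half's HOG and the HOG of the row-reversed right half.

from skimage import color, data

score = get_spat_arrng_ftrs(color.rgb2gray(data.astronaut()))
print(score)   # {'symmetry': ...}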

Example 14: test_img_new

def test_img_new(svm, img_path, scales, subwindow=None):
    # assumed context: cv2 (OpenCV 2.x-era API) and numpy imported; hog from
    # skimage.feature (an older release: the visualise keyword); utils is a
    # project-local helper module
    base_img = cv2.imread(img_path)

    prev_img_path = utils.get_prev_img(img_path)
    base_prev_img = cv2.imread(prev_img_path)

    windows = []
    windows_features = []
    sc = []

    for scale in scales:
        img = cv2.resize(base_img, (0, 0), fx=scale, fy=scale)
        img_bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        prev_img = cv2.resize(base_prev_img, (0, 0), fx=scale, fy=scale)
        prev_img_bw = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)

        height, width, _ = img.shape

        flow = cv2.calcOpticalFlowFarneback(prev_img_bw, img_bw, 0.5, 3, 15, 3, 5, 1.2, 0)

        flowx, flowy = flow[..., 0], flow[..., 1]

        if subwindow is None:
            nsx, nsy, nw, nh = 0, 0, width, height
        else:
            nsx, nsy, nw, nh = utils.getDetectionWindow(subwindow, width, height, scale)

        for x in range(nsx, nsx + nw - 64, 16):
            for y in range(nsy, nsy + nh - 128, 16):
                img_crop = img_bw[y:y + 128, x:x + 64]
                hog_gray = hog(img_crop, orientations=9, pixels_per_cell=(8, 8),
                         cells_per_block=(2, 2), visualise=False)

                flowx_crop, flowy_crop = flowx[y:y+128, x:x+64], flowy[y:y+128, x:x+64]

                hog_flow_x = hog(flowx_crop, orientations=9, pixels_per_cell=(8, 8),
                                 cells_per_block=(2, 2), visualise=False)
                hog_flow_y = hog(flowy_crop, orientations=9, pixels_per_cell=(8, 8),
                                 cells_per_block=(2, 2), visualise=False)

                fd = numpy.concatenate((hog_gray, hog_flow_x, hog_flow_y))

                windows.append((x, y))
                windows_features.append(fd)
                sc.append(scale)

    classes = svm.predict(windows_features)

    results = []
    for i in range(0, len(windows)):
        if classes[i] == 1:
            scale = sc[i]
            results.append((int(windows[i][0] / scale), int(windows[i][1] / scale),
                            int(64 / scale), int(128 / scale)))
    return results
Author: mataevs | Project: persondetector | Lines: 55 | Source: tester_hog.py

Example 15: get_set

def get_set(metadataFile, classType):
    # assumed context: cv2 (OpenCV 2.x-era API) and numpy imported; hog from
    # skimage.feature (an older release: the visualise keyword); utils is a
    # project-local helper module
    samples = []  # avoid shadowing the built-in set

    with open(metadataFile, "r") as f:
        entries = f.readlines()

    for entry in entries:
        entry = entry.split()
        filePath = entry[0]
        x, y, scale = int(entry[1]), int(entry[2]), float(entry[3])

        img = cv2.imread(filePath)
        img = cv2.resize(img, (0, 0), fx=scale, fy=scale)
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_gray_crop = img_gray[y:y+128, x:x+64]

        hog_gray = hog(img_gray_crop, orientations=9, pixels_per_cell=(8, 8),
                         cells_per_block=(2, 2), visualise=False)

        prevFilePath = utils.get_prev_img(filePath)


        prev_img = cv2.imread(prevFilePath)
        prev_img = cv2.resize(prev_img, (0, 0), fx=scale, fy=scale)
        prev_img_gray = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)

        flow = cv2.calcOpticalFlowFarneback(prev_img_gray, img_gray, 0.5, 3, 15, 3, 5, 1.2, 0)


        # flowx, flowy = flow[..., 0], flow[..., 1]
        # flowx_crop, flowy_crop = flowx[y:y+128, x:x+64], flowy[y:y+128, x:x+64]
        #
        # hog_flow_x = hog(flowx_crop, orientations=9, pixels_per_cell=(8, 8),
        #                  cells_per_block=(2, 2), visualise=False)
        # hog_flow_y = hog(flowy_crop, orientations=9, pixels_per_cell=(8, 8),
        #                  cells_per_block=(2, 2), visualise=False)

        hsv = numpy.zeros_like(img)
        hsv[..., 1] = 255

        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / numpy.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        flowRGB = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        flow_gray = cv2.cvtColor(flowRGB, cv2.COLOR_BGR2GRAY)

        flow_gray_crop = flow_gray[y:y+128, x:x+64]

        hog_flow = hog(flow_gray_crop, orientations=9, pixels_per_cell=(8, 8),
                         cells_per_block=(2, 2), visualise=False)

        desc = hog_gray + hog_flow  # elementwise sum, as in test_img above

        samples.append(desc)
    return samples, [classType] * len(entries)
Author: mataevs | Project: persondetector | Lines: 55 | Source: hog_train_set.py


Note: The skimage.feature.hog examples above were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects by their respective contributors; copyright remains with the original authors, and any use or redistribution must follow the corresponding project's license. Do not reproduce without permission.