This article collects typical usage examples of the cv2.TERM_CRITERIA_COUNT attribute in Python. If you are wondering what cv2.TERM_CRITERIA_COUNT does, how to use it, or want to see it in real code, the hand-picked examples below should help. You can also explore further usage of the cv2 module it belongs to.
The following 15 code examples of cv2.TERM_CRITERIA_COUNT are listed, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
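cv2.TERM_CRITERIA_COUNT (an alias of cv2.TERM_CRITERIA_MAX_ITER) selects the iteration-count part of an OpenCV termination-criteria tuple of the form (type, max_iter, epsilon). It is usually combined with cv2.TERM_CRITERIA_EPS so that an iterative routine stops at whichever limit is reached first. A minimal sketch of the idea, using an illustrative image and corner that are not taken from any of the examples below:

import cv2
import numpy as np

# Stop after 30 iterations OR once the correction is smaller than 0.01, whichever comes first.
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01)

# Illustrative corner refinement; 'img' and 'corners' are made-up placeholders.
img = np.random.randint(0, 256, (480, 640), dtype=np.uint8)
corners = np.array([[[100.0, 120.0]]], dtype=np.float32)
corners = cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), criteria)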
Example 1: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_COUNT [as alias]
def __init__(self, min_area=400, min_shift2=5):
    """Constructor

    This method initializes the multiple-objects tracking algorithm.

    :param min_area: Minimum area for a proto-object contour to be
                     considered a real object
    :param min_shift2: Minimum distance for a proto-object to drift
                       from frame to frame to be considered a real
                       object
    """
    self.object_roi = []
    self.object_box = []
    self.min_cnt_area = min_area
    self.min_shift2 = min_shift2

    # Set up the termination criteria: either 100 iterations or move by
    # at least 1 pt
    self.term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                      100, 1)
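Example 1 only builds the criteria; in trackers of this kind, term_crit is typically the criteria tuple passed to cv2.meanShift or cv2.CamShift on each frame. A hedged sketch of that hand-off, in which the back-projection image and initial window are made-up placeholders rather than part of Example 1:

import cv2
import numpy as np

term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 1)

# Hypothetical back-projection image and initial search window (x, y, w, h).
prob_image = np.random.randint(0, 256, (480, 640), dtype=np.uint8)
track_window = (200, 150, 80, 120)

ret, track_window = cv2.meanShift(prob_image, track_window, term_crit)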
Example 2: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_COUNT [as alias]
def __init__(self):
    self.of_params = {'st_pars': dict(maxCorners=200, qualityLevel=0.2,
                                      minDistance=7, blockSize=21),
                      'lk_pars': dict(winSize=(20, 20), maxLevel=2,
                                      criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0))}

    self.extrapolation = "linear"
    self.warper = "affine"
    self.input_data = None
    self.scaler = RYScaler
    self.inverse_scaler = inv_RYScaler
    self.lead_steps = 12
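In Example 2 the two parameter dictionaries are only stored: st_pars is the kind of argument set given to cv2.goodFeaturesToTrack and lk_pars to cv2.calcOpticalFlowPyrLK. A hedged sketch of how they would be consumed, using made-up frames in place of the data this class actually processes:

import cv2
import numpy as np

st_pars = dict(maxCorners=200, qualityLevel=0.2, minDistance=7, blockSize=21)
lk_pars = dict(winSize=(20, 20), maxLevel=2,
               criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0))

# Hypothetical pair of consecutive grayscale frames.
prev_frame = np.random.randint(0, 256, (256, 256), dtype=np.uint8)
next_frame = np.random.randint(0, 256, (256, 256), dtype=np.uint8)

p0 = cv2.goodFeaturesToTrack(prev_frame, mask=None, **st_pars)
if p0 is not None:
    p1, status, err = cv2.calcOpticalFlowPyrLK(prev_frame, next_frame, p0, None, **lk_pars)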
Example 3: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_COUNT [as alias]
def __init__(self, piscopeController):
    Thread.__init__(self)
    self.mutex = Lock()
    self.piscopeController = piscopeController
    self.setDaemon(True)  # terminate on exit
    self.status = "Initial"
    self.reset()
    self.lk_params = dict(winSize=(15, 15),
                          maxLevel=2,
                          criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    self.feature_params = dict(maxCorners=5,
                               qualityLevel=0.3,
                               minDistance=7,
                               blockSize=7)
Example 4: align
# Required module: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_COUNT [as alias]
def align(self, blob):
    """Aligns the positions of active and inactive tracks depending on camera motion."""
    if self.im_index > 0:
        im1 = np.transpose(self.last_image.cpu().numpy(), (1, 2, 0))
        im2 = np.transpose(blob['img'][0].cpu().numpy(), (1, 2, 0))
        im1_gray = cv2.cvtColor(im1, cv2.COLOR_RGB2GRAY)
        im2_gray = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)
        warp_matrix = np.eye(2, 3, dtype=np.float32)
        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, self.number_of_iterations, self.termination_eps)
        cc, warp_matrix = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix, self.warp_mode, criteria)
        warp_matrix = torch.from_numpy(warp_matrix)

        for t in self.tracks:
            t.pos = warp_pos(t.pos, warp_matrix)
            # t.pos = clip_boxes(Variable(pos), blob['im_info'][0][:2]).data

        if self.do_reid:
            for t in self.inactive_tracks:
                t.pos = warp_pos(t.pos, warp_matrix)

        if self.motion_model_cfg['enabled']:
            for t in self.tracks:
                for i in range(len(t.last_pos)):
                    t.last_pos[i] = warp_pos(t.last_pos[i], warp_matrix)
Example 5: feature_tracking
# Required module: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_COUNT [as alias]
def feature_tracking(img1, img2, points1, points2, status):  # track matching features
    err = np.array([])
    winSize = (15, 15)
    maxLevel = 3
    termcriteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01)
    points2, status, err = cv2.calcOpticalFlowPyrLK(img1, img2, points1, points2, status, err,
                                                    winSize, maxLevel, termcriteria, 0, 0.001)
    indexcorrection = 0
    # remove bad points: lost tracks or points that drifted outside the image
    for i in range(len(status)):
        pt = points2[i - indexcorrection]
        if status[i] == 0 or pt[0, 0] < 0 or pt[0, 1] < 0:
            if pt[0, 0] < 0 or pt[0, 1] < 0:
                status[i] = 0
            points1 = np.delete(points1, i - indexcorrection, axis=0)
            points2 = np.delete(points2, i - indexcorrection, axis=0)
            indexcorrection += 1
    return points1, points2
Example 6: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_COUNT [as alias]
def __init__(self):
    self.term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    self.tracks = []
    self.current_track = 0
Example 7: train
# Required module: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_COUNT [as alias]
def train(self, samples, responses):
    sample_n, var_n = samples.shape
    new_responses = self.unroll_responses(responses).reshape(-1, self.class_n)
    layer_sizes = np.int32([var_n, 100, 100, self.class_n])

    self.model.setLayerSizes(layer_sizes)
    self.model.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP)
    self.model.setBackpropMomentumScale(0.0)
    self.model.setBackpropWeightScale(0.001)
    self.model.setTermCriteria((cv2.TERM_CRITERIA_COUNT, 20, 0.01))
    self.model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 2, 1)

    self.model.train(samples, cv2.ml.ROW_SAMPLE, np.float32(new_responses))
Example 8: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_COUNT [as alias]
def __init__(self):
    self.track_len = 5
    self.tracks = []
    self.lk_params = dict(winSize=(15, 15),
                          maxLevel=2,
                          criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    self.feature_params = dict(maxCorners=500,
                               qualityLevel=0.3,
                               minDistance=7,
                               blockSize=7)
Example 9: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_COUNT [as alias]
def __init__(self, videoSource, featurePtMask=None, verbosity=0):
    # cap the length of optical flow tracks
    self.maxTrackLength = 10

    # detect feature points in intervals of frames; adds robustness for
    # when feature points disappear.
    self.detectionInterval = 5

    # Params for Shi-Tomasi corner (feature point) detection
    self.featureParams = dict(
        maxCorners=500,
        qualityLevel=0.3,
        minDistance=7,
        blockSize=7
    )

    # Params for Lucas-Kanade optical flow
    self.lkParams = dict(
        winSize=(15, 15),
        maxLevel=2,
        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
    )

    # # Alternatively use a fast feature detector
    # self.fast = cv2.FastFeatureDetector_create(500)

    self.verbosity = verbosity

    (self.videoStream,
     self.width,
     self.height,
     self.featurePtMask) = self._initializeCamera(videoSource)
Example 10: estimate_loop
# Required module: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_COUNT [as alias]
def estimate_loop(self):
    opt_flow_params = dict(winSize=(15, 15), maxLevel=2,
                           criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    while not self.stopped:
        frame = self.video_feed.read()
        frame_grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Pull data from each human and body part -> put into an np array with shape
        # (num_humans, 18, 2) and reshape to (num_humans, 18, 1, 2) for use by optical flow
        with self.lock:
            all_human_points = np.asarray([np.asarray([[[body_part.x * self.frame_shape[1],
                                                          body_part.y * self.frame_shape[0]]]
                                                       for key, body_part in human.body_parts.iteritems()],
                                                      dtype=np.float32)
                                           for human in self.humans])
            for idx, human_points in enumerate(all_human_points):
                p1, st, err = cv2.calcOpticalFlowPyrLK(self.old_frame_grey, frame_grey, human_points, None, **opt_flow_params)
                self.repack_humans(p1, idx)
                # Grab the points that have gone out of frame
                oof_points = p1[st != 1]
                if oof_points.shape[0] != 0:
                    # Get all the matches
                    tmp = np.isin(human_points, oof_points)
                    # Get the indexes of those matches
                    msng_idxz = [msng for msng in range(len(human_points)) if tmp[msng].all()]
                    # print "msng_idxz %s" % str(msng_idxz)
                    cur_part_exist = self.humans[idx].body_parts.keys()
                    for foo_idx in range(len(msng_idxz)):
                        del self.humans[idx].body_parts[cur_part_exist[msng_idxz[foo_idx]]]
                    if len(self.humans[idx].body_parts.keys()) == 0:
                        del self.humans[idx]
        self.old_frame = frame
        self.old_frame_grey = frame_grey.copy()
Example 11: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_COUNT [as alias]
def __init__(self, flags_handler, points_to_track, input_image):
    self.logger = logging.getLogger('tracker_handler')
    self.flags_handler = flags_handler
    self.points_to_track = points_to_track
    self._input_image = input_image

    self._old_gray = None
    self._p0 = None
    self.lk_params = dict(winSize=(15, 15),
                          maxLevel=2,
                          criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    self.track(self.points_to_track, self._input_image)
Example 12: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_COUNT [as alias]
def __init__(self, chn, c, e, **kwargs):
    self.chn = chn
    self.m = cv2.ml.SVM_create()
    self.m.setType(cv2.ml.SVM_EPS_SVR)
    self.m.setC(c)
    self.m.setDegree(1)
    self.m.setP(e)
    max_iter = kwargs.get('max_iter', 10000)
    self.m.setTermCriteria(
        (cv2.TERM_CRITERIA_COUNT + cv2.TERM_CRITERIA_EPS, max_iter, 1e-09))
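Example 12 only configures the regressor; with these termination criteria in place, such an EPS-SVR model would typically be fit and queried through the cv2.ml API. A minimal sketch with made-up data, where m stands in for self.m above and the kernel choice is an assumption not specified in Example 12:

import cv2
import numpy as np

m = cv2.ml.SVM_create()
m.setType(cv2.ml.SVM_EPS_SVR)
m.setKernel(cv2.ml.SVM_LINEAR)        # assumed kernel, not part of Example 12
m.setC(1.0)
m.setP(0.1)
# Stop after 10000 iterations or once the solver converges to 1e-09.
m.setTermCriteria((cv2.TERM_CRITERIA_COUNT + cv2.TERM_CRITERIA_EPS, 10000, 1e-09))

# Hypothetical training data: 100 samples with 8 features and scalar regression targets.
samples = np.random.rand(100, 8).astype(np.float32)
targets = np.random.rand(100, 1).astype(np.float32)

m.train(samples, cv2.ml.ROW_SAMPLE, targets)
_, predictions = m.predict(samples)   # predictions has shape (100, 1)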
Example 13: processImage
# Required module: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_COUNT [as alias]
def processImage(fn):
    print('processing %s... ' % fn)
    img = cv.imread(fn, 0)
    if img is None:
        print("Failed to load", fn)
        return None

    assert w == img.shape[1] and h == img.shape[0], ("size: %d x %d ... " % (img.shape[1], img.shape[0]))
    found, corners = cv.findChessboardCorners(img, pattern_size)
    if found:
        term = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1)
        cv.cornerSubPix(img, corners, (5, 5), (-1, -1), term)

    if debug_dir:
        vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
        cv.drawChessboardCorners(vis, pattern_size, corners, found)
        _path, name, _ext = splitfn(fn)
        outfile = os.path.join(debug_dir, name + '_chess.png')
        cv.imwrite(outfile, vis)

    if not found:
        print('chessboard not found')
        return None

    print(' %s... OK' % fn)
    return (corners.reshape(-1, 2), pattern_points)
Example 14: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_COUNT [as alias]
def __init__(self, num_features=kMinNumFeatureDefault,
             num_levels=3,                                   # number of pyramid levels for detector
             scale_factor=1.2,                               # detection scale factor (if it can be set, otherwise it is automatically computed)
             detector_type=FeatureDetectorTypes.FAST,
             descriptor_type=FeatureDescriptorTypes.NONE,
             match_ratio_test=kRatioTest,
             tracker_type=FeatureTrackerTypes.LK):
    super().__init__(num_features=num_features,
                     num_levels=num_levels,
                     scale_factor=scale_factor,
                     detector_type=detector_type,
                     descriptor_type=descriptor_type,
                     tracker_type=tracker_type)
    self.feature_manager = feature_manager_factory(num_features=num_features,
                                                   num_levels=num_levels,
                                                   scale_factor=scale_factor,
                                                   detector_type=detector_type,
                                                   descriptor_type=descriptor_type)
    # if num_levels < 3:
    #     Printer.green('LkFeatureTracker: forcing at least 3 levels on LK pyr optic flow')
    #     num_levels = 3
    optic_flow_num_levels = max(kLkPyrOpticFlowNumLevelsMin, num_levels)
    Printer.green('LkFeatureTracker: num levels on LK pyr optic flow: ', optic_flow_num_levels)
    # we use LK pyr optic flow for matching
    self.lk_params = dict(winSize=(21, 21),
                          maxLevel=optic_flow_num_levels,
                          criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))

# out: keypoints and empty descriptors
Example 15: test_features
# Required module: import cv2 [as alias]
# Or: from cv2 import TERM_CRITERIA_COUNT [as alias]
def test_features():
    from atx.drivers.android_minicap import AndroidDeviceMinicap
    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    # r, h, c, w = 200, 100, 200, 100
    # track_window = (c, r, w, h)
    # oldimg = cv2.imread('base1.png')
    # roi = oldimg[r:r+h, c:c+w]
    # hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # mask = cv2.inRange(hsv_roi, 0, 255)
    # roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0,180])
    # cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    # term_cirt = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    while True:
        try:
            w, h = d._screen.shape[:2]
            img = cv2.resize(d._screen, (h/2, w/2))
            cv2.imshow('preview', img)

            hist = cv2.calcHist([img], [0], None, [256], [0,256])
            plt.plot(plt.hist(hist.ravel(), 256))
            plt.show()
            # if img.shape == oldimg.shape:
            #     # hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            #     # ret, track_window = cv2.meanShift(hsv, track_window, term_cirt)
            #     # x, y, w, h = track_window
            #     cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            #     cv2.imshow('preview', img)
            # # cv2.imshow('preview', img)
            cv2.waitKey(1)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview')