This article collects typical usage examples of the Python class tst_scene_render.TestSceneRender. If you are wondering what TestSceneRender is for, how to use it, or what real calls look like, the curated class code examples below should help.
The following shows 10 code examples of the TestSceneRender class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
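All of these snippets appear to be drawn from OpenCV's Python test suite and its synthetic-video sample helpers, and they share one call pattern: construct a TestSceneRender from a background image (optionally with a foreground image plus deformation, noise and speed parameters), then repeatedly call getNextFrame() to obtain a synthetic frame and getCurrentRect() to obtain the ground-truth rectangle of the moving foreground. The following is only a minimal sketch of that skeleton; it assumes tst_scene_render.py from OpenCV's samples/python test code is importable and that the two placeholder image files exist locally.

import cv2 as cv
from tst_scene_render import TestSceneRender  # OpenCV test helper module, not a pip package

# Placeholder paths: any background image plus a smaller foreground image will do.
bg = cv.imread('graf1.png')
fg = cv.imread('box.png')

render = TestSceneRender(bg, fg, speed=0.5)

for _ in range(10):
    frame = render.getNextFrame()              # next synthetic frame, same size as the background
    x0, y0, x1, y1 = render.getCurrentRect()   # ground-truth rect of the moving foreground
    print(frame.shape, (x0, y0, x1, y1))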
Example 1: feature_homography_test
class feature_homography_test(NewOpenCVTests):

    render = None
    tracker = None
    framesCounter = 0
    frame = None

    def test_feature_homography(self):
        self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'),
            self.get_sample('samples/data/box.png'), noise = 0.5, speed = 0.5)

        self.frame = self.render.getNextFrame()
        self.tracker = PlaneTracker()
        self.tracker.clear()
        self.tracker.add_target(self.frame, self.render.getCurrentRect())

        while self.framesCounter < 100:
            self.framesCounter += 1
            tracked = self.tracker.track(self.frame)
            if len(tracked) > 0:
                tracked = tracked[0]
                self.assertGreater(intersectionRate(self.render.getCurrentRect(), np.int32(tracked.quad)), 0.6)
            else:
                self.assertEqual(0, 1, 'Tracking error')
            self.frame = self.render.getNextFrame()
Example 2: Cube
class Cube(VideoSynthBase):
    def __init__(self, **kw):
        super(Cube, self).__init__(**kw)
        self.render = TestSceneRender(cv.imread('../data/pca_test1.jpg'), deformation = True, speed = 1)

    def read(self, dst=None):
        noise = np.zeros(self.render.sceneBg.shape, np.int8)
        cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
        return True, cv.add(self.render.getNextFrame(), noise, dtype=cv.CV_8UC3)
Example 3: Book
class Book(VideoSynthBase):
    def __init__(self, **kw):
        super(Book, self).__init__(**kw)
        backGr = cv.imread('../data/graf1.png')
        fgr = cv.imread('../data/box.png')
        self.render = TestSceneRender(backGr, fgr, speed = 1)

    def read(self, dst=None):
        noise = np.zeros(self.render.sceneBg.shape, np.int8)
        cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
        return True, cv.add(self.render.getNextFrame(), noise, dtype=cv.CV_8UC3)
Example 4: test_lk_homography
def test_lk_homography(self):
    self.render = TestSceneRender(self.get_sample('samples/python2/data/graf1.png'),
        self.get_sample('samples/c/box.png'), noise = 0.1, speed = 1.0)

    frame = self.render.getNextFrame()
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    self.frame0 = frame.copy()
    self.p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params)

    isForegroundHomographyFound = False

    if self.p0 is not None:
        self.p1 = self.p0
        self.gray0 = frame_gray
        self.gray1 = frame_gray
        currRect = self.render.getCurrentRect()
        for (x, y) in self.p0[:,0]:
            if isPointInRect((x, y), currRect):
                self.numFeaturesInRectOnStart += 1

    while self.framesCounter < 200:
        self.framesCounter += 1
        frame = self.render.getNextFrame()
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if self.p0 is not None:
            p2, trace_status = checkedTrace(self.gray1, frame_gray, self.p1)

            self.p1 = p2[trace_status].copy()
            self.p0 = self.p0[trace_status].copy()
            self.gray1 = frame_gray

            if len(self.p0) < 4:
                self.p0 = None
                continue
            H, status = cv2.findHomography(self.p0, self.p1, cv2.RANSAC, 5.0)

            goodPointsInRect = 0
            goodPointsOutsideRect = 0
            for (x0, y0), (x1, y1), good in zip(self.p0[:,0], self.p1[:,0], status[:,0]):
                if good:
                    if isPointInRect((x1, y1), self.render.getCurrentRect()):
                        goodPointsInRect += 1
                    else: goodPointsOutsideRect += 1

            if goodPointsOutsideRect < goodPointsInRect:
                isForegroundHomographyFound = True
                self.assertGreater(float(goodPointsInRect) / (self.numFeaturesInRectOnStart + 1), 0.6)
        else:
            p = cv2.goodFeaturesToTrack(frame_gray, **feature_params)

    self.assertEqual(isForegroundHomographyFound, True)
Example 5: __init__
def __init__(self, **kw):
    super(Cube, self).__init__(**kw)
    self.render = TestSceneRender(cv.imread('../data/pca_test1.jpg'), deformation = True, speed = 1)
Example 6: __init__
def __init__(self, **kw):
    super(Book, self).__init__(**kw)
    backGr = cv.imread(cv.samples.findFile('graf1.png'))
    fgr = cv.imread(cv.samples.findFile('box.png'))
    self.render = TestSceneRender(backGr, fgr, speed = 1)
Example 7: prepareRender
def prepareRender(self):
    self.render = TestSceneRender(self.get_sample('samples/data/pca_test1.jpg'), deformation = True)
Example 8: camshift_test
class camshift_test(NewOpenCVTests):

    framesNum = 300
    frame = None
    selection = None
    drag_start = None
    show_backproj = False
    track_window = None
    render = None
    errors = 0

    def prepareRender(self):
        self.render = TestSceneRender(self.get_sample('samples/data/pca_test1.jpg'), deformation = True)

    def runTracker(self):
        framesCounter = 0
        self.selection = True
        # Initialize the track window from the ground-truth rectangle of the synthetic scene.
        xmin, ymin, xmax, ymax = self.render.getCurrentRect()
        self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin)

        while True:
            framesCounter += 1
            self.frame = self.render.getNextFrame()
            hsv = cv.cvtColor(self.frame, cv.COLOR_BGR2HSV)
            mask = cv.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))

            if self.selection:
                # Build the hue histogram from a region around the current object rectangle.
                x0, y0, x1, y1 = self.render.getCurrentRect() + 50
                x0 -= 100
                y0 -= 100

                hsv_roi = hsv[y0:y1, x0:x1]
                mask_roi = mask[y0:y1, x0:x1]
                hist = cv.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
                cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX)
                self.hist = hist.reshape(-1)
                self.selection = False

            if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0:
                self.selection = None
                # Back-project the hue histogram and run CamShift on the probability image.
                prob = cv.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
                prob &= mask
                term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )
                _track_box, self.track_window = cv.CamShift(prob, self.track_window, term_crit)

            # Compare the tracked window against the ground-truth rectangle.
            trackingRect = np.array(self.track_window)
            trackingRect[2] += trackingRect[0]
            trackingRect[3] += trackingRect[1]

            if intersectionRate(self.render.getCurrentRect(), trackingRect) < 0.4:
                self.errors += 1

            if framesCounter > self.framesNum:
                break

        self.assertLess(float(self.errors) / self.framesNum, 0.4)

    def test_camshift(self):
        self.prepareRender()
        self.runTracker()
Example 9: test_lk_track
def test_lk_track(self):
    self.render = TestSceneRender(self.get_sample('samples/python2/data/graf1.png'), self.get_sample('samples/c/box.png'))
    self.runTracker()
Example 10: lk_track_test
class lk_track_test(NewOpenCVTests):

    track_len = 10
    detect_interval = 5
    tracks = []
    frame_idx = 0
    render = None

    def test_lk_track(self):
        self.render = TestSceneRender(self.get_sample('samples/python2/data/graf1.png'), self.get_sample('samples/c/box.png'))
        self.runTracker()

    def runTracker(self):
        foregroundPointsNum = 0

        while True:
            frame = self.render.getNextFrame()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if len(self.tracks) > 0:
                # Forward-backward Lucas-Kanade check: keep only points that track consistently.
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1][0] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append([(x, y), self.frame_idx])
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                self.tracks = new_tracks

            if self.frame_idx % self.detect_interval == 0:
                # Count tracks whose endpoints stay inside the ground-truth rectangles.
                goodTracksCount = 0
                for tr in self.tracks:
                    oldRect = self.render.getRectInTime(self.render.timeStep * tr[0][1])
                    newRect = self.render.getRectInTime(self.render.timeStep * tr[-1][1])
                    if isPointInRect(tr[0][0], oldRect) and isPointInRect(tr[-1][0], newRect):
                        goodTracksCount += 1

                if self.frame_idx == self.detect_interval:
                    foregroundPointsNum = goodTracksCount

                fgIndex = float(foregroundPointsNum) / (foregroundPointsNum + 1)
                fgRate = float(goodTracksCount) / (len(self.tracks) + 1)

                if self.frame_idx > 0:
                    self.assertGreater(fgIndex, 0.9)
                    self.assertGreater(fgRate, 0.2)

                # Re-detect features, masking out the neighbourhood of existing track points.
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1][0]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([[(x, y), self.frame_idx]])

            self.frame_idx += 1
            self.prev_gray = frame_gray
            if self.frame_idx > 300:
                break