This article collects typical usage examples of the detector.Detector class in Python. If you are wondering what the Detector class does, how it is used, or what calling code looks like, the hand-picked examples below may help.
The 15 Detector code examples shown below are sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: DetectorUI
class DetectorUI(QtGui.QWidget):
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        QtCore.QObject.connect(self.ui.startButton, QtCore.SIGNAL("clicked()"), self.start)
        QtCore.QObject.connect(self.ui.pathButton, QtCore.SIGNAL("clicked()"), self.set_source_file_path)
        QtCore.QObject.connect(self.ui.patternCheckBox, QtCore.SIGNAL("clicked()"), self.patternClicked)
        self.detector = Detector()

    def start(self):
        if self.detector.isRunning():
            self.setWindowTitle('Stopped')
            self.detector.stop()
            self.ui.startButton.setText("Start")
        else:
            self.setWindowTitle('Running')
            self.detector.start()
            self.ui.startButton.setText("Stop")

    def set_source_file_path(self):
        start_dir = os.path.dirname(".")
        fileName = QtGui.QFileDialog.getOpenFileName(self, "Open data file", start_dir, "Data files (*.txt)")
        # TODO: set the txt file as the data source
        self.ui.pathEdit.setText(fileName)

    def patternClicked(self):
        if self.ui.patternCheckBox.isChecked():
            # output data pattern
            print "pattern enabled"
        else:
            # output data source
            print "pattern disabled"
Example 2: __init__
def __init__(self):
    super(EventProcessor, self).__init__()
    self.message_handler = MessageHandler()
    #self.gps = GPS()
    Message.init(self.message_handler.plugin.address())
    Detector.start_plugins()
    Reactor.add_plugin_events()
Example 3: __init__
def __init__(self, modelNb=3):
    """Load the TensorFlow model of VGG16-GAP trained on Caltech.

    Keyword arguments:
    modelNb -- iteration of the model to consider
    """
    dataset_path = '/home/cuda/datasets/perso_db/'
    trainset_path = dataset_path + 'train.pkl'
    testset_path = dataset_path + 'test.pkl'
    weight_path = '../caffe_layers_value.pickle'
    model_path = '../models/perso/model-' + str(modelNb)

    # Load labels
    testset = pickle.load(open(testset_path, "rb"))
    self.label_dict = testset.keys()
    n_labels = len(self.label_dict)

    # Initialize some TensorFlow variables
    batch_size = 1
    self.images_tf = tf.placeholder(tf.float32, [None, 224, 224, 3], name="images")
    self.labels_tf = tf.placeholder(tf.int64, [None], name='labels')

    detector = Detector(weight_path, n_labels)
    c1, c2, c3, c4, conv5, self.conv6, gap, self.output = detector.inference(self.images_tf)
    self.classmap = detector.get_classmap(self.labels_tf, self.conv6)

    self.sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    saver.restore(self.sess, model_path)
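The snippet above only restores the graph; a hedged inference sketch follows. Here `model` stands in for an instance of the (unnamed) class that owns this __init__, and the random array is a placeholder for a real preprocessed 224x224 RGB image.

import numpy as np

img = np.random.rand(1, 224, 224, 3).astype('float32')  # placeholder input
probs = model.sess.run(model.output, feed_dict={model.images_tf: img})
pred = probs.argmax(axis=1)
# class activation map (CAM) for the predicted label
cam = model.sess.run(model.classmap,
                     feed_dict={model.images_tf: img, model.labels_tf: pred})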
Example 4: run
def run(self):
    '''Read assignments from the Assignment_* folders and student numbers
    from each Assignment_x folder, generate a zip of every student's code
    files, and run the Detector on it.'''
    assignment_id_map = {}
    assignments = self.get_folder_names(self.path)
    database = DatabaseManager()
    for assignment in assignments:
        document_assignment_number = self.get_assignment_number(assignment)
        if document_assignment_number not in (6, 5):
            continue
        if document_assignment_number not in assignment_id_map:
            name = 'Assignment %d' % document_assignment_number
            assignment_id_map[document_assignment_number] = database.store_assignment(name, name, '2015-01-01')
        assignment_number = assignment_id_map[document_assignment_number]
        assignment_path = os.path.join(self.path, assignment)
        student_numbers = self.get_folder_names(assignment_path)
        for student_number in student_numbers:
            student_folder = os.path.join(assignment_path, student_number)
            output_file = StringIO()
            zip_file = zipfile.ZipFile(output_file, 'w')
            for file_name in self.get_file_names(student_folder):
                zip_file.write(os.path.join(student_folder, file_name), file_name)
                print('zipping: ' + file_name)
            zip_file.close()
            detector = Detector()
            detector.run(output_file, assignment_number, student_number)
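As an aside, the in-memory zip idiom used above can be isolated into a short sketch. This is Python 2 (StringIO); on Python 3 the buffer would be io.BytesIO. The file name and content below are hypothetical.

from StringIO import StringIO
import zipfile

buf = StringIO()
archive = zipfile.ZipFile(buf, 'w')
archive.writestr('solution.py', 'print "hello"')  # hypothetical student file
archive.close()
buf.seek(0)  # rewind so a consumer such as Detector.run reads from the start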
Example 5: main
def main(argv=None):  # pylint: disable=unused-argument
    assert args.ckpt > 0 or args.batch_eval
    assert args.detect or args.segment, "Either detect or segment should be True"
    if args.trunk == 'resnet50':
        net = ResNet
        depth = 50
    if args.trunk == 'resnet101':
        net = ResNet
        depth = 101
    if args.trunk == 'vgg16':
        net = VGG
        depth = 16
    net = net(config=net_config, depth=depth, training=False)

    if args.dataset == 'voc07' or args.dataset == 'voc07+12':
        loader = VOCLoader('07', 'test')
    if args.dataset == 'voc12':
        loader = VOCLoader('12', 'val', segmentation=args.segment)
    if args.dataset == 'coco':
        loader = COCOLoader(args.split)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        detector = Detector(sess, net, loader, net_config, no_gt=args.no_seg_gt)
        if args.dataset == 'coco':
            tester = COCOEval(detector, loader)
        else:
            tester = Evaluation(detector, loader, iou_thresh=args.voc_iou_thresh)
        if not args.batch_eval:
            detector.restore_from_ckpt(args.ckpt)
            tester.evaluate_network(args.ckpt)
        else:
            log.info('Evaluating %s' % args.run_name)
            ckpts_folder = CKPT_ROOT + args.run_name + '/'
            out_file = ckpts_folder + evaluation_logfile
            max_checked = get_last_eval(out_file)
            log.debug("Maximum checked ckpt is %i" % max_checked)
            with open(out_file, 'a') as f:
                start = max(args.min_ckpt, max_checked + 1)
                ckpt_files = glob(ckpts_folder + '*.data*')
                folder_has_nums = np.array(list(map(filename2num, ckpt_files)), dtype='int')
                nums_available = sorted(folder_has_nums[folder_has_nums >= start])
                nums_to_eval = [nums_available[-1]]
                for n in reversed(nums_available):
                    if nums_to_eval[-1] - n >= args.step:
                        nums_to_eval.append(n)
                nums_to_eval.reverse()

                for ckpt in nums_to_eval:
                    log.info("Evaluation of ckpt %i" % ckpt)
                    tester.reset()
                    detector.restore_from_ckpt(ckpt)
                    res = tester.evaluate_network(ckpt)
                    f.write(res)
                    f.flush()
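The helper filename2num is referenced but not shown. A plausible implementation is sketched below, assuming checkpoints follow TensorFlow's usual model.ckpt-<step>.data-* naming; the project's real helper may differ.

import re

def filename2num(path):
    # Hypothetical sketch: pull the step number out of names like
    # "model.ckpt-12000.data-00000-of-00001"; return -1 when no number is found.
    match = re.search(r'ckpt-(\d+)', path)
    return int(match.group(1)) if match else -1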
Example 6: populateUI
def populateUI(self):
    self.availableObjectsList.addItems(Detector.getDefaultAvailableObjects())
    for sourceMode in self.__sourceModes:
        self.sourceCBox.addItem(sourceMode)
    for displayMode in self.__displayModes:
        self.displayCBox.addItem(displayMode)
    for shapeMode in self.__shapeModes:
        self.shapeCBox.addItem(shapeMode)
    for fillMode in self.__fillModes:
        self.fillCBox.addItem(fillMode)
    for bgMode in self.__bgModes:
        self.bgCBox.addItem(bgMode)
    model = QtGui.QStandardItemModel(self)
    func = lambda node, parent: self.populateTree(node, parent)
    Detector.getDefaultObjectsTree().map(model, func)
    self.objectsTree.setModel(model)
Example 7: free_at_position
def free_at_position(new_sudoku, y, x):
    """
    Return all choices still available in the sudoku
    at the given position.
    """
    now_available = Solver.available[:]
    init_avail = set(now_available)
    first_step_avail = init_avail.intersection(Detector.free_in_column(new_sudoku, x))
    second_step_avail = first_step_avail.intersection(Detector.free_in_row(new_sudoku, y))
    third_step_avail = second_step_avail.intersection(Detector.free_in_squere3x3(new_sudoku, y, x))
    return list(third_step_avail)
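A hedged usage sketch follows. It assumes Solver.available holds the digits 1-9, that free_at_position is exposed as a staticmethod of Solver, and that Detector's free_in_* helpers return the sets of digits still usable in the given column, row, and 3x3 square.

grid = [[0] * 9 for _ in range(9)]  # 0 marks an empty cell
grid[0][0] = 5
choices = Solver.free_at_position(grid, 0, 1)
print(sorted(choices))  # every digit except 5, which occupies this row and square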
Example 8: simulate_basic_with_pods
def simulate_basic_with_pods(ptypy_pars_tree=None, sim_pars=None, save=False):
    """
    Basic simulation.
    """
    p = DEFAULT.copy()
    ppt = ptypy_pars_tree
    if ppt is not None:
        p.update(ppt.get('simulation'))
    if sim_pars is not None:
        p.update(sim_pars)
    P = ptypy.core.Ptycho(ppt, level=1)

    # Make a data source that is basically empty
    P.datasource = make_sim_datasource(P.modelm, p.pos_drift, p.pos_scale, p.pos_noise)

    P.modelm.new_data()
    u.parallel.barrier()
    P.print_stats()

    # Propagate and apply a psf to simulate partial coherence (if not already done with modes)
    for name, pod in P.pods.iteritems():
        if not pod.active:
            continue
        pod.diff += conv(u.abs2(pod.fw(pod.exit)), p.psf)

    # Filter storage data as a detector would.
    if p.detector is not None:
        Det = Detector(p.detector)
        save_dtype = Det.dtype
        for ID, Sdiff in P.diff.S.items():
            # Get the mask storage too, although its content will be overridden
            Smask = P.mask.S[ID]
            dat, mask = Det.filter(Sdiff.data)
            if p.frame_size is not None:
                hplanes = u.expect2(p.frame_size) - u.expect2(dat.shape[-2:])
                dat = u.crop_pad(dat, hplanes, axes=[-2, -1]).astype(dat.dtype)
                mask = u.crop_pad(mask, hplanes, axes=[-2, -1]).astype(mask.dtype)
            Sdiff.fill(dat)
            Smask.fill(mask)
    else:
        save_dtype = None

    if save:
        P.modelm.collect_diff_mask_meta(save=save, dtype=save_dtype)
    u.parallel.barrier()
    return P
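A hedged call sketch: pos_noise is one of the simulation keys read from p above, so passing it through sim_pars should override the default before the Ptycho object is built (an assumption about the DEFAULT tree, not confirmed by the source).

P = simulate_basic_with_pods(ptypy_pars_tree=None,
                             sim_pars={'pos_noise': 0.0},  # assumed key; disables positional noise
                             save=False)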
Example 9: init_detector
def init_detector(self):
    assert args.detect or args.segment, "Either detect or segment should be True"
    assert args.ckpt > 0, "Specify the checkpoint number"
    net = ResNet(config=net_config, depth=50, training=False)
    self.loader = Loader(opj(EVAL_DIR, 'demodemo'))
    self.detector = Detector(self.sess, net, self.loader, net_config, no_gt=args.no_seg_gt,
                             folder=opj(self.loader.folder, 'output'))
    self.detector.restore_from_ckpt(args.ckpt)
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_Form()
self.ui.setupUi(self)
QtCore.QObject.connect(self.ui.startButton,QtCore.SIGNAL("clicked()"), self.start)
QtCore.QObject.connect(self.ui.pathButton,QtCore.SIGNAL("clicked()"), self.set_source_file_path)
QtCore.QObject.connect(self.ui.patternCheckBox,QtCore.SIGNAL("clicked()"), self.patternClicked)
self.detector = Detector()
Example 11: is_person
def is_person(image):
    det = Detector(image)
    faces = len(det.face())
    print "FACE: ", det.drawColors[(det.drawn - 1) % len(det.drawColors)], faces
    uppers = len(det.upper_body())
    print "UPPR: ", det.drawColors[(det.drawn - 1) % len(det.drawColors)], uppers
    fulls = len(det.full_body())
    print "FULL: ", det.drawColors[(det.drawn - 1) % len(det.drawColors)], fulls
    peds = len(det.pedestrian())
    print "PEDS: ", det.drawColors[(det.drawn - 1) % len(det.drawColors)], peds
    det.draw()
    det.overlay()
    return faces + uppers + fulls + peds
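A hedged usage sketch, assuming the wrapped Detector accepts a BGR image as loaded by OpenCV; the file name is hypothetical.

import cv2

image = cv2.imread('people.jpg')  # hypothetical input image
if is_person(image) > 0:
    print "at least one person-like detection"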
Example 12: init_components
def init_components(self, **kwargs):
    print('Setting up spectrometer')
    source = kwargs.get('source', 'blue')
    detector = kwargs.get('detector', 0)
    assert isinstance(source, str)
    assert isinstance(detector, int)
    self.source = Source(source)
    self.detector = Detector(detector)
Example 13: main
def main(argv=None):  # pylint: disable=unused-argument
    assert args.detect or args.segment, "Either detect or segment should be True"
    assert args.ckpt > 0, "Specify the checkpoint number"
    net = ResNet(config=net_config, depth=50, training=False)
    loader = Loader(osp.join(EVAL_DIR, 'demodemo'))
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        detector = Detector(sess, net, loader, net_config, no_gt=args.no_seg_gt,
                            folder=osp.join(loader.folder, 'output'))
        detector.restore_from_ckpt(args.ckpt)
        for name in loader.get_filenames():
            image = loader.load_image(name)
            h, w = image.shape[:2]
            print('Processing {}'.format(name + loader.data_format))
            detector.feed_forward(img=image, name=name, w=w, h=h, draw=True,
                                  seg_gt=None, gt_bboxes=None, gt_cats=None)
    print('Done')
Example 14: __init__
def __init__(self):
    """
    Configuration
    """
    # Camera settings
    self.FRAME_WIDTH = 341
    self.FRAME_HEIGHT = 256
    self.flip_camera = True  # Mirror image
    self.camera = cv2.VideoCapture(1)

    # ...you can also use a test video for input
    #video = "/Users/matthiasendler/Code/snippets/python/tracker/final/assets/test_video/10.mov"
    #self.camera = cv2.VideoCapture(video)
    #self.skip_input(400)  # Skip to an interesting part of the video

    if not self.camera.isOpened():
        print "couldn't load webcam"
        return
    #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, self.FRAME_WIDTH)
    #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, self.FRAME_HEIGHT)

    self.filters_dir = "filters/"  # Filter settings in trackbar
    self.filters_file = "filters_default"

    # Load filter settings
    current_config = self.filters_dir + self.filters_file
    self.filters = Filters(current_config)

    # No actions will be triggered in test mode
    # (can be used to adjust settings at runtime)
    self.test_mode = False

    # Create a hand detector.
    # In fact, this is a wrapper for many detectors
    # to increase detection confidence.
    self.detector = Detector(self.filters.config)

    # Knowledge base for all detectors
    self.kb = KB()

    # Create gesture recognizer.
    # A gesture consists of a motion and a hand state.
    self.gesture = Gesture()

    # The action module executes keyboard and mouse commands
    self.action = Action()

    # Show output of detectors
    self.output = Output()

    self.run()
Example 15: Cropper
class Cropper(object):
    """Cropper"""

    def __init__(self):
        super(Cropper, self).__init__()
        self.detector = Detector()

    @staticmethod
    def _bounding_rect(faces):
        top, left = sys.maxint, sys.maxint
        bottom, right = -sys.maxint, -sys.maxint
        for (x, y, w, h) in faces:
            if x < left:
                left = x
            if x + w > right:
                right = x + w
            if y < top:
                top = y
            if y + h > bottom:
                bottom = y + h
        return top, left, bottom, right

    def crop(self, img, target_width, target_height):
        original_height, original_width = img.shape[:2]
        faces = self.detector.detect_faces(img)
        if len(faces) == 0:  # no detected faces
            target_center_x = original_width / 2
            target_center_y = original_height / 2
        else:
            top, left, bottom, right = self._bounding_rect(faces)
            target_center_x = (left + right) / 2
            target_center_y = (top + bottom) / 2
        target_left = target_center_x - target_width / 2
        target_right = target_left + target_width
        target_top = target_center_y - target_height / 2
        target_bottom = target_top + target_height
        if target_top < 0:
            delta = abs(target_top)
            target_top += delta
            target_bottom += delta
        if target_bottom > original_height:
            target_bottom = original_height
        if target_left < 0:
            delta = abs(target_left)
            target_left += delta
            target_right += delta
        if target_right > original_width:
            target_right = original_width
        return img[target_top:target_bottom, target_left:target_right]
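A hedged usage sketch: it assumes OpenCV-style BGR input and that detector.Detector exposes the detect_faces method used in crop; the file names are hypothetical.

import cv2

cropper = Cropper()
img = cv2.imread('portrait.jpg')     # hypothetical input image
thumb = cropper.crop(img, 200, 200)  # 200x200 crop centred on any detected faces
cv2.imwrite('thumb.jpg', thumb)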