

Python detector.Detector Code Examples

This article collects typical usage examples of detector.Detector found in open-source Python code. If you are wondering what detector.Detector does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from the detector module that provides it.


Eight code examples of detector.Detector are shown below, sorted by popularity by default.
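
As a quick orientation before the examples, the sketch below shows the two import styles referenced in the comment header of each example (import detector versus from detector import Detector). The constructor arguments are hypothetical placeholders; every project listed below defines its own Detector class with its own parameters.

# Minimal sketch of the two import idioms used throughout the examples below.
# The constructor arguments are illustrative only, not a real shared signature.

# Style 1: import the module and qualify the class name
import detector
det = detector.Detector()             # arguments depend on the concrete project

# Style 2: import the class directly
from detector import Detector
det = Detector(mode="Searching")      # e.g. the osspolice examples pass a mode string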

Example 1: extract_componnets

# Required module: import detector [as alias]
# Or: from detector import Detector [as alias]
def extract_componnets(main, input_path):
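    # Load previously extracted APK components from a cached result file if one
    # exists under comp_sig_load_dirs; otherwise run a Detector in "ApkExtract"
    # mode to re-extract them (skipping the redis store step).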
    import os
    from os.path import join, basename  # used below; this excerpt omits the original module-level imports
    from extract_apk import COMPONENTS_SUFFIX, load_extract_result, extract_apk
    # expected components file
    expected_outname_exists = False
    expected_outname = None
    if main.comp_sig_load_dirs:
        for d in main.comp_sig_load_dirs:
            expected_outname = join(d, basename(input_path) + COMPONENTS_SUFFIX)
            if os.path.exists(expected_outname):
                expected_outname_exists = True
                break
    if expected_outname_exists:
        extracted = load_extract_result(proto_path=expected_outname, binary=False)
    else:
        from detector import Detector
        extract_main = Detector(mode="ApkExtract")
        if main.comp_sig_load_dirs:
            extract_main.RESULT_DIR = main.comp_sig_load_dirs[0]
        # load the extracted components, but skip the step to store them in redis.
        extracted = extract_apk(main=extract_main, input_path=input_path, load_result=True, store_redis=False)
    return extracted 
Developer ID: osssanitizer, Project: osspolice, Lines of code: 23, Source file: searching_apk.py

Example 2: main

# Required module: import detector [as alias]
# Or: from detector import Detector [as alias]
def main(argv=None):  # pylint: disable=unused-argument
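    # Restore a trained BlitzNet checkpoint and run it on the demo images,
    # drawing and saving the results to the loader's 'output' folder.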
    assert args.detect or args.segment, "Either detect or segment should be True"
    assert args.ckpt > 0, "Specify the number of checkpoint"
    net = ResNet(config=net_config, depth=50, training=False)
    loader = Loader(osp.join(EVAL_DIR, 'demodemo'))


    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        detector = Detector(sess, net, loader, net_config, no_gt=args.no_seg_gt,
                            folder=osp.join(loader.folder, 'output'))
        detector.restore_from_ckpt(args.ckpt)
        for name in loader.get_filenames():
            image = loader.load_image(name)
            h, w = image.shape[:2]
            print('Processing {}'.format(name + loader.data_format))
            detector.feed_forward(img=image, name=name, w=w, h=h, draw=True,
                                  seg_gt=None, gt_bboxes=None, gt_cats=None)
    print('Done') 
Developer ID: dvornikita, Project: blitznet, Lines of code: 21, Source file: demo.py

Example 3: create_network

# Required module: import detector [as alias]
# Or: from detector import Detector [as alias]
def create_network(self,state_dim,action_dim):
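        # Actor network: a Detector extracts per-object features (Theta), a Program
        # maps the program order to a soft selection p over the 5 objects,
        # Message_passing refines the per-object states, and the p-weighted state h
        # feeds a small tanh action head.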
        layer1_size = LAYER1_SIZE
        layer2_size = LAYER2_SIZE

        state_input = tf.placeholder("float",[None,state_dim])
        program_order = tf.placeholder("float",[None,4]);
        self.program_order = program_order;
        #detector
        self.detector=Detector(self.sess,state_dim,5,15,state_input,"_action");
        Theta=self.detector.Theta;
        detector_params=self.detector.net;
        #program
        self.program=Program(self.sess,state_dim,5,15,Theta,program_order,"_action");
        p=self.program.p;
        #message_passing
        self.message_passing=Message_passing(self.sess,state_dim,5,15,p,state_input,150,64,64,"_action");
        state_input2 = self.message_passing.state_output;
        message_passing_params = self.message_passing.net;
        #get h
        state_input2 = tf.reshape(state_input2,[-1,5,150]);
        state_input2 = tf.unstack(state_input2,5,1);
        p=tf.unstack(p,5,1);
        h=0;
        for i in range(5):
          h+=tf.stack([p[i]]*150,1)*state_input2[i];

        #action net
        W1 = self.variable([150,action_dim],150)
        b1 = self.variable([action_dim],150)
        action_output=tf.tanh(tf.matmul(tf.tanh(h),W1)+b1);
        params = detector_params+message_passing_params+[W1,b1];

        # NOTE: is_training is never defined in this snippet; a boolean placeholder
        # (assumed to be a batch-norm style training flag) is added here so the
        # returned tuple is valid.
        is_training = tf.placeholder(tf.bool)

        return state_input,action_output,params,is_training
Developer ID: jsikyoon, Project: programmable-agents_tensorflow, Lines of code: 35, Source file: actor_network.py

Example 4: create_q_network

# Required module: import detector [as alias]
# Or: from detector import Detector [as alias]
def create_q_network(self,state_dim,action_dim):
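        # Critic network: the same Detector / Program / Message_passing pipeline as
        # the actor, with the action injected through W1 before a linear Q-value head.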
        # the layer size could be changed
        layer1_size = LAYER1_SIZE
        layer2_size = LAYER2_SIZE

        state_input = tf.placeholder("float",[None,state_dim])
        program_order = tf.placeholder("float",[None,4]);
        self.program_order = program_order;
        #detector
        self.detector=Detector(self.sess,state_dim,5,15,state_input,"_critic");
        Theta=self.detector.Theta;
        detector_params=self.detector.net;
        #program
        self.program=Program(self.sess,state_dim,5,15,Theta,program_order,"_critic");
        p=self.program.p;
        #message_passing
        self.message_passing=Message_passing(self.sess,state_dim,5,15,p,state_input,150,64,64,"_critic");
        state_input2 = self.message_passing.state_output;
        message_passing_params = self.message_passing.net;
        #get h
        state_input2 = tf.reshape(state_input2,[-1,5,150]);
        state_input2 = tf.unstack(state_input2,5,1);
        p=tf.unstack(p,5,1);
        h=0;
        for i in range(5):
          h+=tf.stack([p[i]]*150,1)*state_input2[i];
        action_input = tf.placeholder("float",[None,action_dim])

        W1 = self.variable([action_dim,150],action_dim)
        b1 = self.variable([150],action_dim)
        W2 = tf.Variable(tf.random_uniform([150,1],-3e-3,3e-3))
        b2 = tf.Variable(tf.random_uniform([1],-3e-3,3e-3))
        q_value_output = tf.matmul(tf.tanh(h+tf.matmul(action_input,W1)+b1),W2)+b2;
        params = detector_params+message_passing_params+[W1,b1,W2,b2];
        
        return state_input,action_input,q_value_output,params 
Developer ID: jsikyoon, Project: programmable-agents_tensorflow, Lines of code: 38, Source file: critic_network.py

Example 5: run_demo

# Required module: import detector [as alias]
# Or: from detector import Detector [as alias]
def run_demo(args):
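    # Detect people with the object-detection model, estimate a pose for every
    # detected box with the single-person HPE model, then draw boxes, keypoints
    # and an FPS summary on each frame.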
    ie = IECore()
    detector_person = Detector(ie, path_to_model_xml=args.model_od,
                              device=args.device,
                              label_class=args.person_label)

    single_human_pose_estimator = HumanPoseEstimator(ie, path_to_model_xml=args.model_hpe,
                                                  device=args.device)
    if args.input != '':
        img = cv2.imread(args.input[0], cv2.IMREAD_COLOR)
        frames_reader, delay = (VideoReader(args.input), 1) if img is None else (ImageReader(args.input), 0)
    else:
        raise ValueError('--input has to be set')

    for frame in frames_reader:
        bboxes = detector_person.detect(frame)
        human_poses = [single_human_pose_estimator.estimate(frame, bbox) for bbox in bboxes]

        colors = [(0, 0, 255),
                  (255, 0, 0), (0, 255, 0), (255, 0, 0), (0, 255, 0),
                  (255, 0, 0), (0, 255, 0), (255, 0, 0), (0, 255, 0),
                  (255, 0, 0), (0, 255, 0), (255, 0, 0), (0, 255, 0),
                  (255, 0, 0), (0, 255, 0), (255, 0, 0), (0, 255, 0)]

        for pose, bbox in zip(human_poses, bboxes):
            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox[3]), (255, 0, 0), 2)
            for id_kpt, kpt in enumerate(pose):
                cv2.circle(frame, (int(kpt[0]), int(kpt[1])), 3, colors[id_kpt], -1)

        cv2.putText(frame, 'summary: {:.1f} FPS (estimation: {:.1f} FPS / detection: {:.1f} FPS)'.format(
            float(1 / (detector_person.infer_time + single_human_pose_estimator.infer_time * len(human_poses))),
            float(1 / single_human_pose_estimator.infer_time),
            float(1 / detector_person.infer_time)), (5, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 200))
        if args.no_show:
            continue
        cv2.imshow('Human Pose Estimation Demo', frame)
        key = cv2.waitKey(delay)
        if key == 27:
            return 
Developer ID: opencv, Project: open_model_zoo, Lines of code: 41, Source file: single_human_pose_estimation_demo.py

Example 6: main

# Required module: import detector [as alias]
# Or: from detector import Detector [as alias]
def main():
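    # Run the CenterNet detector on an image or video, draw labelled boxes with
    # per-class colors and confidence scores, and overlay an FPS summary.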
    args = build_argparser().parse_args()

    ie = IECore()
    detector = Detector(ie, args.model, args.prob_threshold, args.device)

    img = cv2.imread(args.input[0], cv2.IMREAD_COLOR)
    frames_reader, delay = (VideoReader(args.input), 1) if img is None else (ImageReader(args.input), 0)

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    for frame in frames_reader:
        detections = detector.detect(frame)
        for det in detections:
            xmin, ymin, xmax, ymax = det[:4].astype(int)  # np.int was removed in NumPy 1.24+; the builtin int is equivalent here
            xmin = max(0, xmin)
            ymin = max(0, ymin)
            xmax = min(frame.shape[1], xmax)
            ymax = min(frame.shape[0], ymax)
            class_id = det[5]
            det_label = labels_map[int(class_id)] if labels_map else str(int(class_id))
            color = (min(class_id * 12.5, 255), min(class_id * 7, 255), min(class_id * 3, 255))
            cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
            cv2.putText(frame, det_label + ' ' + str(round(det[4] * 100, 1)) + ' %', (xmin, ymin - 7),
                         cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)

        cv2.putText(frame, 'summary: {:.1f} FPS'.format(
            float(1 / (detector.infer_time * len(detections)))), (5, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 200))
        if args.no_show:
            continue
        cv2.imshow('CenterNet Detection Demo', frame)
        key = cv2.waitKey(delay)
        if key == 27:
            return 
Developer ID: opencv, Project: open_model_zoo, Lines of code: 40, Source file: object_detection_demo_centernet.py

Example 7: search_apk

# Required module: import detector [as alias]
# Or: from detector import Detector [as alias]
def search_apk(main, input_path):
    global logger, stats_logger
    logger = main.logger
    stats_logger = main.stats_logger

    search_results = {}

    # For native search, get the native components within this app and search them one by one
    logger.info("searching the native part of %s now!", input_path)
    so_paths = extract_componnets(main, input_path)["so"]
    logger.info("found %d native libraries!", len(so_paths))
    if len(so_paths):
        from searching import search_library
        for lib_path in so_paths:
            logger.info("searching the native library %s of %s now!", lib_path, input_path)
            if main.MODE == "Celery":
                native_main = main
            else:
                from detector import Detector
                native_main = Detector(mode="Searching")
            native_result = search_library(main=native_main, lib_path=lib_path)
            search_results[lib_path] = native_result

    # For java search, directly invoke search java worker
    logger.info("searching the java part of %s now!", input_path)
    from searching_java import search_classes
    if main.MODE == "Celery":
        java_main = main
    else:
        from detector import Detector
        java_main = Detector(mode="JavaSearching")
    java_result = search_classes(main=java_main, input_path=input_path, input_type='apk')
    search_results[input_path] = java_result

    # Map the apk to so files
    main.rrc.handle().hset(input_path, 'so_paths', so_paths)
    logger.info("Finished querying app: %s, and the results are: %s", input_path, search_results)


###########################################################
# Searcher
########################################################### 
Developer ID: osssanitizer, Project: osspolice, Lines of code: 44, Source file: searching_apk.py

Example 8: main

# Required module: import detector [as alias]
# Or: from detector import Detector [as alias]
def main(argv=None):  # pylint: disable=unused-argument
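    # Evaluate BlitzNet on VOC or COCO: either a single checkpoint (args.ckpt) or,
    # in batch mode, every checkpoint newer than the last one recorded in the
    # evaluation log, appending each result to that log file.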
    assert args.ckpt > 0 or args.batch_eval
    assert args.detect or args.segment, "Either detect or segment should be True"
    if args.trunk == 'resnet50':
        net = ResNet
        depth = 50
    if args.trunk == 'resnet101':
        net = ResNet
        depth = 101
    if args.trunk == 'vgg16':
        net = VGG
        depth = 16

    net = net(config=net_config, depth=depth, training=False)

    if args.dataset == 'voc07' or args.dataset == 'voc07+12':
        loader = VOCLoader('07', 'test')
    if args.dataset == 'voc12':
        loader = VOCLoader('12', 'val', segmentation=args.segment)
    if args.dataset == 'coco':
        loader = COCOLoader(args.split)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        detector = Detector(sess, net, loader, net_config, no_gt=args.no_seg_gt)
        if args.dataset == 'coco':
            tester = COCOEval(detector, loader)
        else:
            tester = Evaluation(detector, loader, iou_thresh=args.voc_iou_thresh)
        if not args.batch_eval:
            detector.restore_from_ckpt(args.ckpt)
            tester.evaluate_network(args.ckpt)
        else:
            log.info('Evaluating %s' % args.run_name)
            ckpts_folder = CKPT_ROOT + args.run_name + '/'
            out_file = ckpts_folder + evaluation_logfile

            max_checked = get_last_eval(out_file)
            log.debug("Maximum checked ckpt is %i" % max_checked)
            with open(out_file, 'a') as f:
                start = max(args.min_ckpt, max_checked+1)
                ckpt_files = glob(ckpts_folder + '*.data*')
                folder_has_nums = np.array(list((map(filename2num, ckpt_files))), dtype='int')
                nums_available = sorted(folder_has_nums[folder_has_nums >= start])
                nums_to_eval = [nums_available[-1]]
                for n in reversed(nums_available):
                    if nums_to_eval[-1] - n >= args.step:
                        nums_to_eval.append(n)
                nums_to_eval.reverse()

                for ckpt in nums_to_eval:
                    log.info("Evaluation of ckpt %i" % ckpt)
                    tester.reset()
                    detector.restore_from_ckpt(ckpt)
                    res = tester.evaluate_network(ckpt)
                    f.write(res)
                    f.flush() 
Developer ID: dvornikita, Project: blitznet, Lines of code: 59, Source file: test.py


Note: The detector.Detector examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; for distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.