

Python predict.predict Method Code Examples

This article compiles typical code examples of the Python predict.predict method. If you are wondering how to use predict.predict in practice, how to call it, or what real-world usage looks like, the hand-picked method examples below should help. You can also explore further usage examples of the predict module that the method belongs to.


A total of 13 code examples of the predict.predict method are shown below, sorted by popularity by default.
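Note that predict.predict is not a standard-library API: each project below ships its own predict.py module, so the signature differs from example to example (a parsed-args object, a file path, a model plus an image, and so on). Purely for orientation, a minimal, hypothetical predict.py could look like the sketch below; the parameter names model and samples and the Keras-style .predict() call are illustrative assumptions, not the API of any project listed here.

import numpy as np

def predict(model, samples):
    # Hypothetical sketch only: run a trained model on a batch of samples
    # and return the most likely class index for each one.
    probabilities = model.predict(np.asarray(samples))  # assumes a Keras-style model object
    return np.argmax(probabilities, axis=1)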

Example 1: main

# Required import: import predict [as alias]
# Or: from predict import predict [as alias]
def main(args):

    model_storage_type = args.model_storage_type
    if model_storage_type in ("local", "oss"):
        print("The storage type is " + model_storage_type)
    else:
        raise Exception("Only supports storage types like local and OSS")

    if args.job_type == "Predict":
        logging.info("starting the predict job")
        predict(args)

    elif args.job_type == "Train":
        logging.info("starting the train job")
        model = train(args)

        if model is not None:
            logging.info("finish the model training, and start to dump model ")
            model_path = args.model_path
            dump_model(model, model_storage_type, model_path, args)

    elif args.job_type == "All":
        logging.info("starting the train and predict job")

    logging.info("Finish distributed XGBoost job") 
Developer: kubeflow, Project: xgboost-operator, Lines of code: 27, Source: main.py

Example 2: predict

# Required import: import predict [as alias]
# Or: from predict import predict [as alias]
def predict(input_file):

    config = {"pixel_per_second": 50, "input_shape": [129, 500, 1], "num_classes": 4}
    data_generator = SpectrogramGenerator(input_file, config, shuffle=False, run_only_once=True).get_generator()
    data = [np.divide(image, 255.0) for image in data_generator]
    data = np.stack(data)

    # Run the trained model on every spectrogram segment
    probabilities = model.predict(data)
    probabilities = probabilities[3:-5] # ignore first 30 sec and last 50 sec

    classes = np.argmax(probabilities, axis=1)
    average_prob = np.mean(probabilities, axis=0)
    average_class = np.argmax(average_prob)

    print(classes, class_labels[average_class], average_prob)
    return average_class 
Developer: HPI-DeepLearning, Project: crnn-lid, Lines of code: 19, Source: songs.py
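Note: the excerpt above reads module-level model and class_labels objects (and a SpectrogramGenerator class) that are defined elsewhere in the crnn-lid project and are not shown on this page. A minimal, hypothetical setup for those globals might look like the lines below; the label list and the model path are assumptions for illustration, not values taken from the project.

from keras.models import load_model

# Hypothetical globals for the excerpt above; the real project loads its own
# trained network and label list elsewhere.
class_labels = ["English", "German", "French", "Spanish"]   # assumed label order
model = load_model("trained_model.h5")                      # assumed file name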

Example 3: main

# Required import: import predict [as alias]
# Or: from predict import predict [as alias]
def main(_):
    config = get_configs()

    # Check if Uncertainty Quantification mode
    if config.UQ:
        assert (config.UQ_model_type in ['MVE', 'PIE'])
        # Check to see if we are in training or testing mode
        if config.train is True:
            train_model_uq(config)
        else:
            predict_uq(config)

    else:
        # Check to see if we are in training or testing mode
        if config.train is True:
            train_model(config)
        else:
            predict(config) 
Developer: euclidjda, Project: deep-quant, Lines of code: 20, Source: deep_quant.py

Example 4: test

# Required import: import predict [as alias]
# Or: from predict import predict [as alias]
def test(i, predict):
    model.eval()
    t = pre = groud = 0
    inf = open("data/dev_data.json", encoding="utf8")
    for line in inf:
        line = json.loads(line)
        text = line["text"]
        g_triples = set()
        for trip in line["spo_list"]:
            g_triples.add((trip["subject"], trip["predicate"], trip["object"]))

        p_triples = predict.predict(text)
        pre += len(p_triples)
        groud += len(g_triples)
        t += len(p_triples.intersection(g_triples))

    print(
        f"test epoch {i+1}/{args.epochs} precision: {t/(pre+0.001):.4f} recall: {t/groud:.4f} f1: {2*t/(pre+groud):.4f}")
    return 2*t/(pre+groud) 
Developer: ne7ermore, Project: torch-light, Lines of code: 21, Source: train.py

Example 5: eval

# Required import: import predict [as alias]
# Or: from predict import predict [as alias]
def eval(root_dir):
    languages = get_immediate_subdirectories(root_dir)

    # Predict the language of every mp3 file in each language subdirectory
    for lang in languages:
        print(lang)
        files = list(recursive_glob(os.path.join(root_dir, lang), "*.mp3"))
        classes = []

        for file in files:
            print(file)
            average_class = predict(file)
            classes.append(average_class)

        y_true = np.full((len(classes)), LABELS[lang])

        print(lang)
        print(accuracy_score(y_true, classes))
        print(classification_report(y_true, classes)) 
Developer: HPI-DeepLearning, Project: crnn-lid, Lines of code: 21, Source: songs.py

Example 6: main

# Required import: import predict [as alias]
# Or: from predict import predict [as alias]
def main():
    global image
    cv2.namedWindow("Input")
    cv2.setMouseCallback("Input", click)
    output = np.ones((512, 512, 1))
    font = cv2.FONT_HERSHEY_SIMPLEX
    bottomLeftCornerOfText = (1, 511)
    fontScale = 23
    fontColor = (0, 0, 0)
    lineType = 2
    while True:
        cv2.imshow("Input", image)
        cv2.imshow("Output", output)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("f"):
            cv2.destroyAllWindows()
            break
        if key == ord("r"):
            image = np.ones((640, 640, 1))
        if key == ord("p"):
            clone = image.copy()
            clone = cv2.resize(clone, (32,32))
            final = np.zeros((32, 32, 1))
            for x in range(len(clone)):
                for y in range(len(clone[x])):
                    final[x][y][0] = clone[x][y]
            pred = p.predict(final)
            print("Predicted " , pred)
            output = np.ones((512, 512, 1))
            cv2.putText(output, pred, (10, 500), font, fontScale, fontColor, 10,  2) 
Developer: frereit, Project: TensorflowHandwritingRecognition, Lines of code: 32, Source: demo.py

Example 7: get_tasks

# Required import: import predict [as alias]
# Or: from predict import predict [as alias]
def get_tasks():
    #get url from form
    # url = request.form['url']
    url = request.files['url']

    #sends url for prediction
    sender = predict.predict(url)

    #get values from prediction
    rec = sender.predict_only()

    # #list of out values
    # outputlist=[rec]

    # #for multiple json apis
    # tasks = []

    # tasks1 = [
    #     {
    #         'value': outputlist[0],

    #     },

    # ]
    # tasks.append(tasks1)
    # return jsonify({'tasks': tasks})
    return jsonify({'cash': rec}) 
Developer: devSessions, Project: crvi, Lines of code: 29, Source: api.py

Example 8: get_prediction

# Required import: import predict [as alias]
# Or: from predict import predict [as alias]
def get_prediction(file_path):

    LABEL_MAP = {
        0 : "English",
        1 : "German",
        2 : "French",
        3 : "Spanish"
    }

    # TODO remove this for production
    # predictions = [[0.3, 0.7]]
    predictions = predict(file_path, app.config["PROTOTXT"], app.config["MODEL"], app.config["UPLOAD_FOLDER"])
    predictions = np.mean(predictions, axis=0).tolist()

    print(predictions)

    pred_with_label = {LABEL_MAP[index] : prob for index, prob in enumerate(predictions)}

    file_path = file_path + "?cachebuster=%s" % time.time()
    result = {
        "audio" : {
            "url" : "%s" % file_path,
        },
        "predictions" : pred_with_label
    }

    return result 
Developer: twerkmeister, Project: iLID, Lines of code: 29, Source: server.py

Example 9: predict_emotion

# Required import: import predict [as alias]
# Or: from predict import predict [as alias]
def predict_emotion(self, image):
        image.resize([NETWORK.input_size, NETWORK.input_size], refcheck=False)
        emotion, confidence = predict(image, self.model, self.shape_predictor)
        return emotion, confidence 
Developer: amineHorseman, Project: facial-expression-recognition-using-cnn, Lines of code: 6, Source: predict-from-video.py

Example 10: main

# Required import: import predict [as alias]
# Or: from predict import predict [as alias]
def main(opt):
    # load vocab
    word2idx, idx2word, vocab = load_vocab(opt)
    # load data
    # read tokenized text file and convert them to 2d list of words
    src_file = opt.src_file
    #trg_file = opt.trg_file
    #tokenized_train_pairs = read_src_and_trg_files(src_file, trg_file, is_train=False, remove_eos=opt.remove_title_eos)  # 2d list of word
    if opt.title_guided:
        tokenized_src, tokenized_title = read_tokenized_src_file(src_file, remove_eos=opt.remove_title_eos, title_guided=True)
    else:
        tokenized_src = read_tokenized_src_file(src_file, remove_eos=opt.remove_title_eos, title_guided=False)
        tokenized_title = None
    # convert the 2d list of words to a list of dictionary, with keys 'src', 'src_oov', 'trg', 'trg_copy', 'src_str', 'trg_str', 'oov_dict', 'oov_list'
    # since we don't need the targets during testing, 'trg' and 'trg_copy' are some dummy variables
    #test_one2many = build_dataset(tokenized_train_pairs, word2idx, idx2word, opt, mode="one2many", include_original=True)
    test_one2many = build_interactive_predict_dataset(tokenized_src, word2idx, idx2word, opt, tokenized_title)
    # build the data loader
    test_one2many_dataset = KeyphraseDataset(test_one2many, word2idx=word2idx, idx2word=idx2word,
                                             type='one2many', delimiter_type=opt.delimiter_type, load_train=False, remove_src_eos=opt.remove_src_eos, title_guided=opt.title_guided)
    test_loader = DataLoader(dataset=test_one2many_dataset,
                             collate_fn=test_one2many_dataset.collate_fn_one2many,
                             num_workers=opt.batch_workers, batch_size=opt.batch_size, pin_memory=True,
                             shuffle=False)
    # init the pretrained model
    model = predict.init_pretrained_model(opt)

    # Print out predict path
    print("Prediction path: %s" % opt.pred_path)

    # predict the keyphrases of the src file and output it to opt.pred_path/predictions.txt
    predict.predict(test_loader, model, opt) 
Developer: kenchan0226, Project: keyphrase-generation-rl, Lines of code: 34, Source: interactive_predict.py

Example 11: main

# Required import: import predict [as alias]
# Or: from predict import predict [as alias]
def main():
    # Get Model:
    model_file = open('Data/Model/model.json', 'r')
    model = model_file.read()
    model_file.close()
    model = model_from_json(model)
    model.load_weights("Data/Model/weights.h5")

    print('AI start now!')

    while 1:
        # Get screenshot:
        screen = ImageGrab.grab()
        # Image to numpy array:
        screen = np.array(screen)
        # 4 channel(PNG) to 3 channel(JPG)
        Y = predict(model, screen)
        if Y == [0,0,0,0]:
            # Not action
            continue
        elif Y[0] == -1 and Y[1] == -1:
            # Only keyboard action.
            key = get_key(Y[3])
            if Y[2] == 1:
                # Press:
                press(key)
            else:
                # Release:
                release(key)
        elif Y[2] == 0 and Y[3] == 0:
            # Only mouse action.
            click(Y[0], Y[1])
        else:
            # Mouse and keyboard action.
            # Mouse:
            click(Y[0], Y[1])
            # Keyboard:
            key = get_key(Y[3])
            if Y[2] == 1:
                # Press:
                press(key)
            else:
                # Release:
                release(key) 
Developer: ardamavi, Project: Game-Bot, Lines of code: 46, Source: ai.py

Example 12: process_opt

# Required import: import predict [as alias]
# Or: from predict import predict [as alias]
def process_opt(opt):
    if opt.seed > 0:
        torch.manual_seed(opt.seed)

    if torch.cuda.is_available():
        if not opt.gpuid:
            opt.gpuid = 0
        opt.device = torch.device("cuda:%d" % opt.gpuid)
    else:
        opt.device = torch.device("cpu")
        opt.gpuid = -1
        print("CUDA is not available, fall back to CPU.")

    opt.exp = 'predict.' + opt.exp
    if opt.one2many:
        opt.exp += '.one2many'

    if opt.one2many_mode == 1:
        opt.exp += '.cat'

    if opt.copy_attention:
        opt.exp += '.copy'

    if opt.coverage_attn:
        opt.exp += '.coverage'

    if opt.review_attn:
        opt.exp += '.review'

    if opt.orthogonal_loss:
        opt.exp += '.orthogonal'

    if opt.use_target_encoder:
        opt.exp += '.target_encode'

    if hasattr(opt, 'bidirectional') and opt.bidirectional:
        opt.exp += '.bi-directional'
    else:
        opt.exp += '.uni-directional'

    # fill time into the name
    if opt.pred_path.find('%s') > 0:
        opt.pred_path = opt.pred_path % (opt.exp, opt.timemark)

    if not os.path.exists(opt.pred_path):
        os.makedirs(opt.pred_path)

    if not opt.one2many and opt.one2many_mode > 0:
        raise ValueError("You cannot choose one2many mode without the -one2many options.")

    if opt.one2many and opt.one2many_mode == 0:
        raise ValueError("If you choose one2many, you must specify the one2many mode.")

    #if opt.greedy and not opt.one2many:
    #    raise ValueError("Greedy sampling can only be used in one2many mode.")
    return opt 
Developer: kenchan0226, Project: keyphrase-generation-rl, Lines of code: 58, Source: interactive_predict.py

Example 13: main

# Required import: import predict [as alias]
# Or: from predict import predict [as alias]
def main():
    # Getting model:
    model_file = open('Data/Model/model.json', 'r')
    model = model_file.read()
    model_file.close()
    model = model_from_json(model)
    # Getting weights
    model.load_weights("Data/Model/weights.h5")
    
    print('Press "ESC" button for exit.')

    # Get image from camera, get predict and say it with another process, repeat.
    cap = cv2.VideoCapture(0)
    old_char = ''
    while 1:
        ret, img = cap.read()
        
        # Cropping image:
        img_height, img_width = img.shape[:2]
        side_width = int((img_width-img_height)/2)
        img = img[0:img_height, side_width:side_width+img_height]
        
        # Show window:
        cv2.imshow('VSL', cv2.flip(img,1)) # cv2.flip(img,1) : Flip(mirror effect) for easy handling.
        
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = imresize(img, (img_size, img_size, channel_size))
        img = 1-np.array(img).astype('float32')/255.
        img = img.reshape(1, img_size, img_size, channel_size)
        
        Y_string, Y_possibility = predict(model, img)
        
        if Y_possibility < 0.4: # For secondary vocalization
            old_char = ''
        
        if(platform.system() == 'Darwin') and old_char != Y_string and Y_possibility > 0.6:
            print(Y_string, Y_possibility)
            arg = 'say {0}'.format(Y_string)
            # Say predict with multiprocessing
            Process(target=os.system, args=(arg,)).start()
            old_char = Y_string
        if cv2.waitKey(200) == 27: # Decimal 27 = Esc
            break
    cap.release()
    cv2.destroyAllWindows() 
Developer: ardamavi, Project: Vocalize-Sign-Language, Lines of code: 47, Source: live.py


Note: The predict.predict method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Refer to the corresponding project's license before distributing or using the code. Do not reproduce without permission.