

Python Helper.unserialize Method Code Examples

This article collects typical usage examples of the Helper.unserialize method in Python. If you are wondering what Helper.unserialize does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the enclosing Helper module.


Ten code examples of the Helper.unserialize method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
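Helper is a project-specific utility module from the Wheat-Count repository, so its implementation is not reproduced on this page. As a rough mental model, here is a minimal pickle-based sketch of what a matching serialize/unserialize pair could look like; the names and the return-None-when-missing behaviour are assumptions inferred from the examples below, not the project's actual code:

import os
import pickle

def serialize(filename, obj):
    # Hypothetical sketch: persist any picklable object to disk.
    with open(filename, 'wb') as f:
        pickle.dump(obj, f)

def unserialize(filename):
    # Hypothetical sketch: load the stored object, returning None when the
    # file is absent (several examples below rely on a None result).
    if filename is None or not os.path.exists(filename):
        return None
    with open(filename, 'rb') as f:
        return pickle.load(f)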

Example 1: roc

# Required import: import Helper [as alias]
# Or: from Helper import unserialize [as alias]
def roc():
    '''
    Plots an ROC curve illustrating the performance of the classifier used in
    grain detection.
    '''
    n_classes = 2
    clf = get_model()
    (train_data, train_targets, test_data, test_targets) = Helper.unserialize("../Datasets/new_data_glcm_d1_a4_75_25.data")
    y_score = clf.decision_function(test_data)

    fpr, tpr, thresholds = metrics.roc_curve(test_targets, y_score)

    # UnivariateSpline needs strictly increasing x, so keep only the first
    # occurrence of each fpr value before fitting the smoothing spline.
    fpr_u, idx = np.unique(fpr, return_index=True)
    xnew = np.linspace(fpr.min(), fpr.max(), 300)
    spl = UnivariateSpline(fpr_u, tpr[idx])

    plt.figure()
    plt.plot(fpr, tpr, label='Exact ROC curve')
    plt.plot(xnew, spl(xnew), label='Smoothed ROC curve', color='red', linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('1 - Specificity (False Positive Rate)')
    plt.ylabel('Sensitivity (True Positive Rate)')
    plt.title('Receiver Operating Characteristic curve')
    plt.legend(loc="lower right")
    plt.show()
Developer: oduwa, Project: Wheat-Count, Lines of code: 27, Source file: MLP.py
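If a single-number summary of the curve is also wanted, the same fpr and tpr arrays can be passed to sklearn's metrics.auc. This snippet is a suggested add-on, not part of the original example:

roc_auc = metrics.auc(fpr, tpr)
print('Area under ROC curve: %0.3f' % roc_auc)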

Example 2: experiment_with_parameters

# Required import: import Helper [as alias]
# Or: from Helper import unserialize [as alias]
def experiment_with_parameters(ser_filename,
                               batch_sizes=[64],
                               learning_rates=[0.05],
                               optimizers=['Ftrl', 'RMSProp', 'Adam', 'Adagrad', 'SGD'],
                               class_weights=[[0.4,0.6], [0.6,0.4]]):
    '''
    Calculate and print accuracies for different combinations of hyper-parameters.
    '''
    # Load dataset
    train_data, train_targets, test_data, expected = Helper.unserialize(ser_filename)

    # Build Classifier
    for b_size in batch_sizes:
        for l_rate in learning_rates:
            for optimizer in optimizers:
                for class_weight in class_weights:
                    classifier = skflow.TensorFlowEstimator(model_fn=multilayer_conv_model, n_classes=2,
                                                            steps=500, learning_rate=l_rate, batch_size=b_size,
                                                            optimizer=optimizer, class_weight=class_weight)
                    classifier.fit(train_data, train_targets)

                    # Assess
                    predictions = classifier.predict(test_data)
                    accuracy = metrics.accuracy_score(expected, predictions)
                    confusion_matrix = metrics.confusion_matrix(expected, predictions)
                    print('Accuracy for batch_size %.2d learn_rate %.3f optimizer %s: %f' % (b_size, l_rate, optimizer, accuracy))
                    print("Confusion matrix:\n%s" % confusion_matrix)
Developer: oduwa, Project: Wheat-Count, Lines of code: 29, Source file: CNN.py
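As a style note, the four nested loops above can be flattened with itertools.product, which keeps the whole hyper-parameter grid visible in one place. A sketch of the same sweep, reusing the names from the example:

import itertools

for b_size, l_rate, optimizer, class_weight in itertools.product(
        batch_sizes, learning_rates, optimizers, class_weights):
    classifier = skflow.TensorFlowEstimator(model_fn=multilayer_conv_model, n_classes=2,
                                            steps=500, learning_rate=l_rate, batch_size=b_size,
                                            optimizer=optimizer, class_weight=class_weight)
    # ... fit and assess exactly as in the example body above.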

Example 3: get_model

# Required import: import Helper [as alias]
# Or: from Helper import unserialize [as alias]
def get_model(filename=CLASSIFIER_FILE):
    ''' Get CNN classifier object from file or create one if none exists on file.'''
    if(filename == None):
        # Load dataset
        train_data, train_targets, test_data, expected = Helper.unserialize("../Datasets/raw_new_80.data")
        train_data2, train_targets2, test_data2, expected2 = Helper.unserialize("../Datasets/raw.data")

        train_data = np.concatenate((train_data, train_data2), axis=0)
        train_targets = np.concatenate((train_targets, train_targets2), axis=0)
        test_data = np.concatenate((test_data, test_data2), axis=0)
        expected = np.concatenate((expected, expected2), axis=0)
        print(train_data.shape)

        raw_train_data = np.zeros((train_data.shape[0], 20, 20))
        i = 0
        for item in train_data:
            raw_train_data[i] = item.reshape((20,20))
            #Display.show_image(raw_train_data[i])
            i = i+1

        raw_test_data = np.zeros((test_data.shape[0], 20, 20))
        i = 0
        for item in test_data:
            raw_test_data[i] = item.reshape((20,20))
            #Display.show_image(raw_test_data[i])
            i = i+1


        # Build Classifier
        # classifier = skflow.TensorFlowEstimator(model_fn=multilayer_conv_model, n_classes=2,
        #                                         steps=500, learning_rate=0.05, batch_size=128)
        classifier = skflow.TensorFlowEstimator(model_fn=conv_model, n_classes=2,
                                                steps=500, learning_rate=0.05, batch_size=128,
                                                optimizer='Ftrl')
        classifier.fit(raw_train_data, train_targets)

        # Assess built classifier
        predictions = classifier.predict(raw_test_data)
        accuracy = metrics.accuracy_score(expected, predictions)
        confusion_matrix = metrics.confusion_matrix(expected, predictions)
        print("Confusion matrix:\n%s" % confusion_matrix)
        print('Accuracy: %f' % accuracy)

        return classifier
    else:
        serialized_classifier = Helper.unserialize(filename)
        return serialized_classifier
Developer: oduwa, Project: Wheat-Count, Lines of code: 49, Source file: CNN.py
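The two element-wise reshape loops in this example can be collapsed into single vectorized calls; NumPy reshapes the whole batch at once, given the example's assumption that every row holds 20*20 = 400 pixels:

# Reshape each flat 400-element row into a 20x20 sub-image in one call.
raw_train_data = train_data.reshape(-1, 20, 20)
raw_test_data = test_data.reshape(-1, 20, 20)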

Example 4: get_model

# Required import: import Helper [as alias]
# Or: from Helper import unserialize [as alias]
def get_model(filename=MLP_FILE):
    ''' Fetch MLP classifier object from file'''
    classifier = Helper.unserialize(filename)

    if(classifier == None):
        classifier = build_model('glcm', dataset_file='../Datasets/old_data.data', iters=2)
        Helper.serialize(filename, classifier)

    return classifier
Developer: oduwa, Project: Wheat-Count, Lines of code: 11, Source file: MLP.py
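The module-level constants used by the two get_model variants (CLASSIFIER_FILE and MLP_FILE) are not shown in the snippets; they are presumably plain file paths for the serialised models, along the lines of the hypothetical values below:

# Hypothetical paths; the actual values live elsewhere in the project.
CLASSIFIER_FILE = "../Models/cnn.classifier"
MLP_FILE = "../Models/mlp.classifier"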

Example 5: run_with_mlp

# Required import: import Helper [as alias]
# Or: from Helper import unserialize [as alias]
def run_with_mlp(image_filename="../Wheat_Images/004.jpg", ser_filename=None):
    '''
    Estimates the number of grains in a given image using a
    Multilayer Perceptron neural network.

    Args:
        image_filename: The path to the image from which a grain count
            is to be obtained.

        ser_filename: Path to a serialised list of sub-images already extracted
            from the image from which a grain count is to be obtained.

    Returns:
        count: An estimate of the number of grains in the provided image.
    '''
    global img_data

    # Chop the image up into sub-images and serialise them, or just load the
    # serialised data if it already exists.
    if(ser_filename == None and image_filename == "../Wheat_Images/004.jpg"):
        ser_filename = "../Wheat_Images/xxx_004.data"
    if(Helper.unserialize(ser_filename) == None):
        img = img_as_ubyte(io.imread(image_filename))
        roi_img = spectral_roi.extract_roi(img, [1])
        Helper.block_proc(roi_img, (20,20), blockfunc)
        #Helper.serialize(ser_filename, img_data)
    else:
        img_data = Helper.unserialize(ser_filename)

    # classify
    #MLP.build_model('glcm', iters=30, glcm_isMultidirectional=True)
    r = MLP.classify(img_data, featureRepresentation='glcm', shouldSaveResult=True)

    # Count number of '1s' in the result and return
    count = r.tolist().count(1)
    print("COUNT: {}".format(count))
    return count
Developer: oduwa, Project: Wheat-Count, Lines of code: 39, Source file: PicNumero.py
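Counting the positive predictions does not need the round trip through a Python list; NumPy can count directly on the result array r:

count = int(np.count_nonzero(r == 1))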

Example 6: train

# Required import: import Helper [as alias]
# Or: from Helper import unserialize [as alias]
def train():
    '''
    Builds linear regression from wheat images using GLCM properties.

    Returns:
        linear regression model
    '''
    # Return the serialised model if one already exists on file; otherwise
    # build it from the first twelve wheat images. (The original snippet left
    # numberOfImages undefined whenever a serialised model was found.)
    regression_model = Helper.unserialize(LIN_REGRESSION_MODEL_NAME)
    if(regression_model != None):
        return regression_model
    numberOfImages = 12

    # TODO: AUTOMATICALLY GET NUMBER OF IMAGES
    # Get the number of images. Remember to divide by 2, since for every
    # relevant image there is also a comparison image.
    # if ".DS_Store" in os.listdir("Wheat_ROIs"):
    #     numberOfImages = (len(os.listdir("Wheat_ROIs")) - 1)/2;
    # else:
    #     numberOfImages = len(os.listdir("Wheat_ROIs"))/2;

    featureList = np.zeros((numberOfImages, FEATURE_SIZE))

    # For each ROI image in folder
    for i in range(1, numberOfImages+1):
        # Load image
        filename = "../Wheat_Images/{:03d}.jpg".format(i);
        img = misc.imread(filename);
        img_gray = img_as_ubyte(rgb2gray(img));

        glcm = greycomatrix(img_gray, [5], [0], 256, symmetric=True, normed=True)
        dissimilarity = greycoprops(glcm, 'dissimilarity')[0, 0]
        correlation = greycoprops(glcm, 'correlation')[0, 0]
        homogeneity = greycoprops(glcm, 'homogeneity')[0, 0]
        energy = greycoprops(glcm, 'energy')[0, 0]
        feature = np.array([dissimilarity, correlation, homogeneity, energy])
        featureList[i-1] = feature
        #print("{} = {}A + {}B + {}C + {}D".format(filename, dissimilarity, correlation, homogeneity, energy))
        #print(feature)

    # Build regression model
    regression_model = linear_model.LinearRegression()
    regression_model.fit(featureList, COUNTS[:numberOfImages])
    Helper.serialize(LIN_REGRESSION_MODEL_NAME, regression_model)
    print("COEFF: {}\nINTERCEPT: {}".format(regression_model.coef_, regression_model.intercept_))
    print("SCORE: {}".format(regression_model.score(featureList, COUNTS[:numberOfImages])))
    return regression_model
Developer: oduwa, Project: Wheat-Count, Lines of code: 46, Source file: glcm.py
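For reference, the four GLCM properties above are computed for a single distance and a single angle (0 degrees). skimage's greycomatrix also accepts several angles at once, which is presumably how the 16-feature multidirectional variant referenced in Example 10 is built. A sketch under that assumption (this is not the project's actual Helper.get_textural_features):

import numpy as np
from skimage.feature import greycomatrix, greycoprops

def textural_features_multidirectional(img_gray, distance=1):
    # 4 properties x 4 angles = 16 features.
    angles = [0, np.pi/4, np.pi/2, 3*np.pi/4]
    glcm = greycomatrix(img_gray, [distance], angles, 256, symmetric=True, normed=True)
    props = ['dissimilarity', 'correlation', 'homogeneity', 'energy']
    # greycoprops returns an array of shape (n_distances, n_angles) per property.
    return np.concatenate([greycoprops(glcm, p).ravel() for p in props])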

Example 7: run_with_dataset

# Required import: import Helper [as alias]
# Or: from Helper import unserialize [as alias]
def run_with_dataset(ser_filename):
    '''
    Apply a CNN on a dataset and print test accuracies.
    That is, train it on training data and test it on test data.
    '''
    # Load dataset
    train_data, train_targets, test_data, expected = Helper.unserialize(ser_filename)

    # Build Classifier
    classifier = skflow.TensorFlowEstimator(model_fn=multilayer_conv_model, n_classes=2,
                                            steps=500, learning_rate=0.05, batch_size=128)
    classifier.fit(train_data, train_targets)

    # Assess
    predictions = classifier.predict(test_data)
    accuracy = metrics.accuracy_score(expected, predictions)
    confusion_matrix = metrics.confusion_matrix(expected, predictions)
    print("Confusion matrix:\n%s" % confusion_matrix)
    print('Accuracy: %f' % (accuracy))
Developer: oduwa, Project: Wheat-Count, Lines of code: 21, Source file: CNN.py

Example 8: main

# Required import: import Helper [as alias]
# Or: from Helper import unserialize [as alias]
def main(featureRepresentation='image'):
    # Load train data
    train_filenames = []
    for filename in os.listdir("../train/positive"):
        if(filename != ".DS_Store"): train_filenames.append("../train/positive/" + filename)
    train_targets = [1]*(len(os.listdir("../train/positive"))-1)

    for filename in os.listdir("../train/negative"):
        if(filename != ".DS_Store"): train_filenames.append("../train/negative/" + filename)
    train_targets = train_targets + [0]*(len(os.listdir("../train/negative"))-1)

    n_train_samples = len(train_filenames)
    if(featureRepresentation == 'glcm'):
        sample_size = 4
    else:
        sample_size = 20*20
    train_data = np.zeros((n_train_samples, sample_size))
    i = 0
    for filename in train_filenames:
        img = io.imread(filename)
        if(featureRepresentation == 'image'):
            train_data[i] = img.flatten()
        elif(featureRepresentation == 'pca'):
            train_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
        elif(featureRepresentation == 'glcm'):
            train_data[i] = get_textural_features(img)
        i = i + 1

    # Apply pca to compute reduced representation in case needed
    #train_data_reduced = decomposition.PCA(n_components=8).fit_transform(train_data)


    # Load test data
    test_filenames = []
    expected = []
    for filename in os.listdir("test"):
        if(filename != ".DS_Store"):
            test_filenames.append("../test/" + filename)
            expected.append(int(filename.split('_')[1].split('.')[0]))

    n_test_samples = len(test_filenames)
    test_data = np.zeros((n_test_samples, sample_size))
    i = 0
    for filename in test_filenames:
        img = io.imread(filename)
        if(featureRepresentation == 'image'):
            test_data[i] = img.flatten()
        elif(featureRepresentation == 'pca'):
            test_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
        elif(featureRepresentation == 'glcm'):
            test_data[i] = get_textural_features(img)
        i = i + 1

    # Apply pca to compute reduced representation in case needed
    #test_data_reduced = decomposition.PCA(n_components=8).fit_transform(test_data)



    # Create a classifier: a support vector classifier
    # param_grid = {'C': [1e0, 5e0, 1e1, 5e1, 1e2, 5e2, 1e3, 5e3, 1e4, 5e4, 1e5], 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.05, 0.01, 0.5, 0.1], 'kernel': ['rbf', 'poly'] }
    # clf = grid_search.GridSearchCV(svm.SVC(kernel='rbf', class_weight='balanced'), param_grid)
    # clf.fit(train_data, train_targets)
    # print(clf.best_estimator_)
    # classifier = clf.best_estimator_
    #classifier = svm.SVC()
    classifier = MLPClassifier()
    param_grid = {"algorithm":["l-bfgs", "sgd", "adam"], "activation":["logistic", "relu", "tanh"], "hidden_layer_sizes":[(5,2), (5), (100), (150), (200)] }
    clf = grid_search.GridSearchCV(MLPClassifier(), param_grid)
    clf.fit(train_data, train_targets)
    print(clf)
    classifier = clf

    # Get previous model and assess
    serialized_classifier = Helper.unserialize(MLP_FILE)
    predictions = serialized_classifier.predict(test_data)
    confusion_matrix = metrics.confusion_matrix(expected, predictions)
    print("Old Confusion matrix:\n%s" % confusion_matrix)
    serialized_n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]
    predictions = classifier.predict(test_data)
    confusion_matrix = metrics.confusion_matrix(expected, predictions)
    print("New Confusion matrix:\n%s" % confusion_matrix)
    n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]
    if(n_correct > serialized_n_correct):
        Helper.serialize(MLP_FILE, classifier)
        print("SAVED MODEL")
Developer: oduwa, Project: Wheat-Count, Lines of code: 87, Source file: build_classifier.py
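Summing confusion_matrix[0][0] and confusion_matrix[1][1] only works for two classes; np.trace gives the number of correct predictions for a square confusion matrix of any size:

n_correct = int(np.trace(confusion_matrix))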

Example 9: generate_model

# Required import: import Helper [as alias]
# Or: from Helper import unserialize [as alias]
def generate_model(featureRepresentation='image', iters=10):
    # Load train data
    train_filenames = []
    for filename in os.listdir("../train/positive"):
        if(filename != ".DS_Store"): train_filenames.append("../train/positive/" + filename)
    train_targets = [1]*(len(os.listdir("../train/positive"))-1)

    for filename in os.listdir("../train/negative"):
        if(filename != ".DS_Store"): train_filenames.append("../train/negative/" + filename)
    train_targets = train_targets + [0]*(len(os.listdir("../train/negative"))-1)

    n_train_samples = len(train_filenames)
    if(featureRepresentation == 'glcm'):
        sample_size = 4
    else:
        sample_size = 20*20
    train_data = np.zeros((n_train_samples, sample_size))
    i = 0
    for filename in train_filenames:
        img = io.imread(filename)
        if(featureRepresentation == 'image'):
            train_data[i] = img.flatten()
        elif(featureRepresentation == 'pca'):
            train_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
        elif(featureRepresentation == 'glcm'):
            train_data[i] = get_textural_features(img)
        i = i + 1


    # Load test data
    test_filenames = []
    expected = []
    for filename in os.listdir("test"):
        if(filename != ".DS_Store"):
            test_filenames.append("../test/" + filename)
            expected.append(int(filename.split('_')[1].split('.')[0]))

    n_test_samples = len(test_filenames)
    test_data = np.zeros((n_test_samples, sample_size))
    i = 0
    for filename in test_filenames:
        img = io.imread(filename)
        if(featureRepresentation == 'image'):
            test_data[i] = img.flatten()
        elif(featureRepresentation == 'pca'):
            test_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
        elif(featureRepresentation == 'glcm'):
            test_data[i] = get_textural_features(img)
        i = i + 1



    # Perform build iterations
    for i in tqdm.tqdm(range(0, iters)):
        # Build Classifier
        param_grid = {"algorithm":["l-bfgs", "sgd", "adam"], "activation":["logistic", "relu", "tanh"], "hidden_layer_sizes":[(5,2), (5), (100), (150), (200)] }
        classifier = grid_search.GridSearchCV(MLPClassifier(), param_grid)
        classifier.fit(train_data, train_targets)

        # Get previous classifier and assess
        serialized_classifier = Helper.unserialize(MLP_FILE)
        if(serialized_classifier):
            predictions = serialized_classifier.predict(test_data)
            confusion_matrix = metrics.confusion_matrix(expected, predictions)
            serialized_n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]
            predictions = classifier.predict(test_data)
            confusion_matrix = metrics.confusion_matrix(expected, predictions)
            n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]
            if(n_correct > serialized_n_correct):
                Helper.serialize(MLP_FILE, classifier)
        else:
            Helper.serialize(MLP_FILE, classifier)

    # Display final model performance
    serialized_classifier = Helper.unserialize(MLP_FILE)
    predictions = serialized_classifier.predict(test_data)
    confusion_matrix = metrics.confusion_matrix(expected, predictions)
    print("Old Confusion matrix:\n%s" % confusion_matrix)
Developer: oduwa, Project: Wheat-Count, Lines of code: 80, Source file: build_classifier.py

Example 10: build_model

# Required import: import Helper [as alias]
# Or: from Helper import unserialize [as alias]
def build_model(featureRepresentation='image', dataset_file=None, iters=10, glcm_distance=1, glcm_isMultidirectional=False):
    '''
    Creates, trains and serialises an MLP classifier.

    Args:
        featureRepresentation: Type of features to be used in classification.
            Can take one of the values 'image', 'pca' or 'glcm'.

        dataset_file: filename of serialized data set upon which to build the
            MLP. If none, default dataset is used.

        iters: Number of training iterations.

        glcm_distance: Distance between pixels for co-occurrence. Only used if
            featureRepresentation=glcm.

        glcm_isMultidirectional: Controls whether co-occurrence should also be
            calculated in the other directions (i.e. 45, 90 and 135 degrees).
            Only used if featureRepresentation=glcm.
    '''

    if(dataset_file == None):
        # Load train data
        train_filenames = []
        for filename in os.listdir("../train/positive"):
            if(filename != ".DS_Store"): train_filenames.append("../train/positive/" + filename)
        train_targets = [1]*(len(os.listdir("../train/positive"))-1)

        for filename in os.listdir("../train/negative"):
            if(filename != ".DS_Store"): train_filenames.append("../train/negative/" + filename)
        train_targets = train_targets + [0]*(len(os.listdir("../train/negative"))-1)

        n_train_samples = len(train_filenames)
        if(featureRepresentation == 'glcm'):
            if(glcm_isMultidirectional):
                sample_size = 16
            else:
                sample_size = 4
        else:
            sample_size = 20*20
        train_data = np.zeros((n_train_samples, sample_size))
        i = 0
        for filename in train_filenames:
            img = io.imread(filename)
            if(featureRepresentation == 'image'):
                train_data[i] = img.flatten()
            elif(featureRepresentation == 'pca'):
                train_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
            elif(featureRepresentation == 'glcm'):
                train_data[i] = Helper.get_textural_features(img, glcm_distance, glcm_isMultidirectional)
            i = i + 1


        # Load test data
        test_filenames = []
        expected = []
        for filename in os.listdir("test"):
            if(filename != ".DS_Store"):
                test_filenames.append("../test/" + filename)
                expected.append(int(filename.split('_')[1].split('.')[0]))

        n_test_samples = len(test_filenames)
        test_data = np.zeros((n_test_samples, sample_size))
        i = 0
        for filename in test_filenames:
            img = io.imread(filename)
            if(featureRepresentation == 'image'):
                test_data[i] = img.flatten()
            elif(featureRepresentation == 'pca'):
                test_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
            elif(featureRepresentation == 'glcm'):
                test_data[i] = Helper.get_textural_features(img, glcm_distance, glcm_isMultidirectional)
            i = i + 1
    else:
        train_data, train_targets, test_data, expected = Helper.unserialize(dataset_file)

    # Perform build iterations
    for i in tqdm.tqdm(range(0, iters)):
        # Build Classifier
        param_grid = {"algorithm":["l-bfgs", "sgd", "adam"], "activation":["logistic", "relu", "tanh"], "hidden_layer_sizes":[(5,2), (5), (100), (150), (200)] }
        classifier = grid_search.GridSearchCV(MLPClassifier(), param_grid)
        classifier.fit(train_data, train_targets)

        # Get previous classifier and assess
        serialized_classifier = Helper.unserialize(MLP_FILE)
        if(serialized_classifier):
            predictions = serialized_classifier.predict(test_data)
            confusion_matrix = metrics.confusion_matrix(expected, predictions)
            serialized_n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]
            predictions = classifier.predict(test_data)
            confusion_matrix = metrics.confusion_matrix(expected, predictions)
            n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]
            if(n_correct > serialized_n_correct):
                Helper.serialize(MLP_FILE, classifier)
        else:
            Helper.serialize(MLP_FILE, classifier)

    # Display final model performance
    serialized_classifier = Helper.unserialize(MLP_FILE)
    predictions = serialized_classifier.predict(test_data)
#.........some of the code is omitted here.........
Developer: oduwa, Project: Wheat-Count, Lines of code: 103, Source file: MLP.py
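Tying Examples 4, 5 and 10 together, a typical workflow first builds (or lazily loads) the serialised MLP and then estimates the grain count for an image. A usage sketch, with paths following the conventions seen above:

# Train a GLCM-based MLP; build_model serialises the best model to MLP_FILE.
build_model('glcm', dataset_file='../Datasets/old_data.data', iters=2, glcm_isMultidirectional=True)

# Estimate the number of grains in an image with the trained MLP.
count = run_with_mlp("../Wheat_Images/004.jpg")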


Note: The Helper.unserialize examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.