

Python Helper.serialize Method Code Examples

This article collects typical usage examples of the Python method Helper.serialize, drawn from open-source projects. If you are unsure what Helper.serialize does, how to call it, or want to see it used in context, the curated examples below should help. You can also explore further usage examples from the Helper module.


The following shows 8 code examples of the Helper.serialize method, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code examples.
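The Helper module itself is not reproduced in this article. Judging from its call sites below, serialize(filename, obj) persists an object to disk while unserialize(filename) loads it back, returning None when the file does not yet exist. A minimal pickle-based sketch consistent with that usage might look as follows; this is an assumption for illustration, not the Wheat-Count project's actual code:

# Hypothetical sketch of Helper's (de)serialization functions, inferred
# from how they are called in the examples below.
import os
import pickle

def serialize(filename, obj):
    # Persist any picklable object (classifier, dataset tuple, ...) to disk.
    with open(filename, 'wb') as f:
        pickle.dump(obj, f)

def unserialize(filename):
    # Return the previously pickled object, or None if the file is missing.
    if not os.path.isfile(filename):
        return None
    with open(filename, 'rb') as f:
        return pickle.load(f)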

Example 1: get_model

# Required module import: import Helper [as alias]
# Or: from Helper import serialize [as alias]
def get_model(filename=MLP_FILE):
    '''Fetch the MLP classifier object from file, training and serializing a new one if none exists.'''
    classifier = Helper.unserialize(filename)

    if classifier is None:
        classifier = build_model('glcm', dataset_file='../Datasets/old_data.data', iters=2)
        Helper.serialize(filename, classifier)

    return classifier
Developer ID: oduwa, Project: Wheat-Count, Lines of code: 11, Source file: MLP.py
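get_model implements a simple cache-or-train pattern: the first call builds the MLP and serializes it to MLP_FILE, and later calls just reload it. A minimal usage sketch, assuming a feature matrix test_data prepared as in Example 4:

classifier = get_model()                      # trains and serializes on the first run
classifier = get_model()                      # subsequent runs reload MLP_FILE from disk
predictions = classifier.predict(test_data)   # test_data: assumed feature matrix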

Example 2: train

# Required module import: import Helper [as alias]
# Or: from Helper import serialize [as alias]
def train():
    '''
    Builds a linear regression model from wheat images using GLCM properties.

    Returns:
        linear regression model
    '''
    # Reuse a previously serialized model if one exists.
    regression_model = Helper.unserialize(LIN_REGRESSION_MODEL_NAME)
    if regression_model is not None:
        return regression_model

    numberOfImages = 12

    # TODO: AUTOMATICALLY GET NUMBER OF IMAGES
    # Get number of images. Remember to divide by 2, as for every relevant image
    # there is also a comparison image.
    # if ".DS_Store" in os.listdir("Wheat_ROIs"):
    #     numberOfImages = (len(os.listdir("Wheat_ROIs")) - 1)/2
    # else:
    #     numberOfImages = len(os.listdir("Wheat_ROIs"))/2

    featureList = np.zeros((numberOfImages, FEATURE_SIZE))

    # For each ROI image in folder
    for i in range(1, numberOfImages+1):
        # Load image
        filename = "../Wheat_Images/{:03d}.jpg".format(i)
        img = misc.imread(filename)
        img_gray = img_as_ubyte(rgb2gray(img))

        glcm = greycomatrix(img_gray, [5], [0], 256, symmetric=True, normed=True)
        dissimilarity = greycoprops(glcm, 'dissimilarity')[0, 0]
        correlation = greycoprops(glcm, 'correlation')[0, 0]
        homogeneity = greycoprops(glcm, 'homogeneity')[0, 0]
        energy = greycoprops(glcm, 'energy')[0, 0]
        feature = np.array([dissimilarity, correlation, homogeneity, energy])
        featureList[i-1] = feature
        #print("{} = {}A + {}B + {}C + {}D".format(filename, dissimilarity, correlation, homogeneity, energy))
        #print(feature)

    # Build regression model
    regression_model = linear_model.LinearRegression()
    regression_model.fit(featureList, COUNTS[:numberOfImages])
    Helper.serialize(LIN_REGRESSION_MODEL_NAME, regression_model)
    print("COEFF: {}\nINTERCEPT: {}".format(regression_model.coef_, regression_model.intercept_))
    print("SCORE: {}".format(regression_model.score(featureList, COUNTS[:numberOfImages])))
    return regression_model
Developer ID: oduwa, Project: Wheat-Count, Lines of code: 46, Source file: glcm.py
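Once train() has run, the serialized regression model can be used to estimate a grain count for a new image. A hedged sketch of that inference step, assuming the same scipy/skimage versions and GLCM feature extraction as in train() above (predict_count is a hypothetical helper, not part of the original project):

import numpy as np
from scipy import misc
from skimage import img_as_ubyte
from skimage.color import rgb2gray
from skimage.feature import greycomatrix, greycoprops

def predict_count(filename, regression_model):
    # Extract the same four GLCM properties used during training.
    img_gray = img_as_ubyte(rgb2gray(misc.imread(filename)))
    glcm = greycomatrix(img_gray, [5], [0], 256, symmetric=True, normed=True)
    feature = np.array([greycoprops(glcm, prop)[0, 0]
                        for prop in ('dissimilarity', 'correlation',
                                     'homogeneity', 'energy')])
    # scikit-learn expects a 2D array of shape (n_samples, n_features).
    return regression_model.predict(feature.reshape(1, -1))[0]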

Example 3: run

# Required module import: import Helper [as alias]
# Or: from Helper import serialize [as alias]
def run(featureRepresentation='image', glcm_distance=1, glcm_isMultidirectional=False):
    '''
    Apply a CNN to the grain_images dataset and print its test accuracy.
    That is, train it on the training data and evaluate it on the test data.
    '''
    train_data, train_targets, test_data, expected = Helper.extract_features_from_new_data(featureRepresentation, glcm_distance, glcm_isMultidirectional, train_size=0.5)
    Helper.serialize("../Datasets/grain_glcm_d1_a4_2_new.data", (train_data, train_targets, test_data, expected))

    # Build Classifier
    classifier = skflow.TensorFlowEstimator(model_fn=multilayer_conv_model, n_classes=2,
                                            steps=500, learning_rate=0.05, batch_size=128)
    classifier.fit(train_data, train_targets)

    # Assess
    predictions = classifier.predict(test_data)
    accuracy = metrics.accuracy_score(expected, predictions)
    confusion_matrix = metrics.confusion_matrix(expected, predictions)
    print("Confusion matrix:\n%s" % confusion_matrix)
    print('Accuracy: %f' % accuracy)
Developer ID: oduwa, Project: Wheat-Count, Lines of code: 21, Source file: CNN.py
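The tuple written at the top of run() can be reloaded later without repeating feature extraction. A short sketch of that round trip; the unpacking order must match the order passed to Helper.serialize above:

train_data, train_targets, test_data, expected = Helper.unserialize(
    "../Datasets/grain_glcm_d1_a4_2_new.data")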

Example 4: main

# Required module import: import Helper [as alias]
# Or: from Helper import serialize [as alias]
def main(featureRepresentation='image'):
    # Load train data: positives are labelled 1, negatives 0.
    # macOS .DS_Store entries are skipped.
    train_filenames = []
    for filename in os.listdir("../train/positive"):
        if filename != ".DS_Store":
            train_filenames.append("../train/positive/" + filename)
    train_targets = [1]*len(train_filenames)

    for filename in os.listdir("../train/negative"):
        if filename != ".DS_Store":
            train_filenames.append("../train/negative/" + filename)
    train_targets += [0]*(len(train_filenames) - len(train_targets))

    n_train_samples = len(train_filenames)
    if featureRepresentation == 'glcm':
        sample_size = 4
    else:
        sample_size = 20*20
    train_data = np.zeros((n_train_samples, sample_size))
    i = 0
    for filename in train_filenames:
        img = io.imread(filename)
        if featureRepresentation == 'image':
            train_data[i] = img.flatten()
        elif featureRepresentation == 'pca':
            train_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
        elif featureRepresentation == 'glcm':
            train_data[i] = get_textural_features(img)
        i = i + 1

    # Apply pca to compute reduced representation in case needed
    #train_data_reduced = decomposition.PCA(n_components=8).fit_transform(train_data)


    # Load test data. The expected label is parsed from each filename.
    test_filenames = []
    expected = []
    for filename in os.listdir("../test"):
        if filename != ".DS_Store":
            test_filenames.append("../test/" + filename)
            expected.append(int(filename.split('_')[1].split('.')[0]))

    n_test_samples = len(test_filenames)
    test_data = np.zeros((n_test_samples, sample_size))
    i = 0
    for filename in test_filenames:
        img = io.imread(filename)
        if featureRepresentation == 'image':
            test_data[i] = img.flatten()
        elif featureRepresentation == 'pca':
            test_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
        elif featureRepresentation == 'glcm':
            test_data[i] = get_textural_features(img)
        i = i + 1

    # Apply pca to compute reduced representation in case needed
    #test_data_reduced = decomposition.PCA(n_components=8).fit_transform(test_data)



    # Create a classifier: a support vector classifier
    # param_grid = {'C': [1e0, 5e0, 1e1, 5e1, 1e2, 5e2, 1e3, 5e3, 1e4, 5e4, 1e5], 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.05, 0.01, 0.5, 0.1], 'kernel': ['rbf', 'poly'] }
    # clf = grid_search.GridSearchCV(svm.SVC(kernel='rbf', class_weight='balanced'), param_grid)
    # clf.fit(train_data, train_targets)
    # print(clf.best_estimator_)
    # classifier = clf.best_estimator_
    #classifier = svm.SVC()
    param_grid = {"algorithm": ["l-bfgs", "sgd", "adam"],
                  "activation": ["logistic", "relu", "tanh"],
                  "hidden_layer_sizes": [(5, 2), (5,), (100,), (150,), (200,)]}
    clf = grid_search.GridSearchCV(MLPClassifier(), param_grid)
    clf.fit(train_data, train_targets)
    print(clf)
    classifier = clf

    # Get previous model and assess
    serialized_classifier = Helper.unserialize(MLP_FILE)
    predictions = serialized_classifier.predict(test_data)
    confusion_matrix = metrics.confusion_matrix(expected, predictions)
    print("Old Confusion matrix:\n%s" % confusion_matrix)
    serialized_n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]
    predictions = classifier.predict(test_data)
    confusion_matrix = metrics.confusion_matrix(expected, predictions)
    print("New Confusion matrix:\n%s" % confusion_matrix)
    n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]
    if n_correct > serialized_n_correct:
        Helper.serialize(MLP_FILE, classifier)
        print("SAVED MODEL")
Developer ID: oduwa, Project: Wheat-Count, Lines of code: 87, Source file: build_classifier.py
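The compare-and-replace step at the end of main() recurs in Examples 5 and 7; note that main(), unlike those examples, assumes MLP_FILE already exists. A hedged sketch of the logic factored into a reusable helper (keep_if_better is hypothetical and not in the original project; summing the confusion-matrix diagonal is replaced by the equivalent accuracy_score call with normalize=False):

from sklearn import metrics

def keep_if_better(classifier, test_data, expected, model_file=MLP_FILE):
    # Serialize classifier only if it beats the previously saved model on
    # raw correct-prediction count; also guards against a missing file.
    saved = Helper.unserialize(model_file)
    if saved is not None:
        saved_correct = metrics.accuracy_score(expected, saved.predict(test_data),
                                               normalize=False)
        new_correct = metrics.accuracy_score(expected, classifier.predict(test_data),
                                             normalize=False)
        if new_correct <= saved_correct:
            return False
    Helper.serialize(model_file, classifier)
    return True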

Example 5: generate_model

# Required module import: import Helper [as alias]
# Or: from Helper import serialize [as alias]
def generate_model(featureRepresentation='image', iters=10):
    # Load train data: positives are labelled 1, negatives 0.
    # macOS .DS_Store entries are skipped.
    train_filenames = []
    for filename in os.listdir("../train/positive"):
        if filename != ".DS_Store":
            train_filenames.append("../train/positive/" + filename)
    train_targets = [1]*len(train_filenames)

    for filename in os.listdir("../train/negative"):
        if filename != ".DS_Store":
            train_filenames.append("../train/negative/" + filename)
    train_targets += [0]*(len(train_filenames) - len(train_targets))

    n_train_samples = len(train_filenames)
    if featureRepresentation == 'glcm':
        sample_size = 4
    else:
        sample_size = 20*20
    train_data = np.zeros((n_train_samples, sample_size))
    i = 0
    for filename in train_filenames:
        img = io.imread(filename)
        if featureRepresentation == 'image':
            train_data[i] = img.flatten()
        elif featureRepresentation == 'pca':
            train_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
        elif featureRepresentation == 'glcm':
            train_data[i] = get_textural_features(img)
        i = i + 1


    # Load test data. The expected label is parsed from each filename.
    test_filenames = []
    expected = []
    for filename in os.listdir("../test"):
        if filename != ".DS_Store":
            test_filenames.append("../test/" + filename)
            expected.append(int(filename.split('_')[1].split('.')[0]))

    n_test_samples = len(test_filenames)
    test_data = np.zeros((n_test_samples, sample_size))
    i = 0
    for filename in test_filenames:
        img = io.imread(filename)
        if featureRepresentation == 'image':
            test_data[i] = img.flatten()
        elif featureRepresentation == 'pca':
            test_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
        elif featureRepresentation == 'glcm':
            test_data[i] = get_textural_features(img)
        i = i + 1



    # Perform build iterations
    for i in tqdm.tqdm(range(0, iters)):
        # Build Classifier
        param_grid = {"algorithm": ["l-bfgs", "sgd", "adam"],
                      "activation": ["logistic", "relu", "tanh"],
                      "hidden_layer_sizes": [(5, 2), (5,), (100,), (150,), (200,)]}
        classifier = grid_search.GridSearchCV(MLPClassifier(), param_grid)
        classifier.fit(train_data, train_targets)

        # Get previous classifier and assess
        serialized_classifier = Helper.unserialize(MLP_FILE)
        if serialized_classifier is not None:
            predictions = serialized_classifier.predict(test_data)
            confusion_matrix = metrics.confusion_matrix(expected, predictions)
            serialized_n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]
            predictions = classifier.predict(test_data)
            confusion_matrix = metrics.confusion_matrix(expected, predictions)
            n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]
            if n_correct > serialized_n_correct:
                Helper.serialize(MLP_FILE, classifier)
        else:
            Helper.serialize(MLP_FILE, classifier)

    # Display final model performance
    serialized_classifier = Helper.unserialize(MLP_FILE)
    predictions = serialized_classifier.predict(test_data)
    confusion_matrix = metrics.confusion_matrix(expected, predictions)
    print("Old Confusion matrix:\n%s" % confusion_matrix)
Developer ID: oduwa, Project: Wheat-Count, Lines of code: 80, Source file: build_classifier.py

Example 6: main

# Required module import: import Helper [as alias]
# Or: from Helper import serialize [as alias]
def main():
    #dataset = extract_features_from_old_data(featureRepresentation='glcm', glcm_distance=1, glcm_isMultidirectional=True)
    #Helper.serialize("../Datasets/old_data.data", dataset)
    dataset = Helper.extract_features_from_new_data(featureRepresentation='glcm', glcm_distance=1, glcm_isMultidirectional=True, train_size=0.75)
    Helper.serialize("../Datasets/new_data_glcm_d1_a4_75_25.data", dataset)
    build_model('glcm', dataset_file="../Datasets/new_data_glcm_d1_a4_75_25.data", iters=4, glcm_isMultidirectional=True)
Developer ID: oduwa, Project: Wheat-Count, Lines of code: 8, Source file: MLP.py

Example 7: build_model

# Required module import: import Helper [as alias]
# Or: from Helper import serialize [as alias]
def build_model(featureRepresentation='image', dataset_file=None, iters=10, glcm_distance=1, glcm_isMultidirectional=False):
    '''
    Creates, trains and serializes an MLP classifier.

    Args:
        featureRepresentation: Type of features to be used in classification.
            Can take one of the values 'image', 'pca' or 'glcm'.

        dataset_file: Filename of a serialized dataset upon which to build the
            MLP. If None, the default dataset is used.

        iters: Number of training iterations.

        glcm_distance: Distance between pixels for co-occurrence. Only used if
            featureRepresentation='glcm'.

        glcm_isMultidirectional: Controls whether co-occurrence should also be
            calculated in other directions (i.e. 45, 90 and 135 degrees).
            Only used if featureRepresentation='glcm'.
    '''

    if dataset_file is None:
        # Load train data: positives are labelled 1, negatives 0.
        # macOS .DS_Store entries are skipped.
        train_filenames = []
        for filename in os.listdir("../train/positive"):
            if filename != ".DS_Store":
                train_filenames.append("../train/positive/" + filename)
        train_targets = [1]*len(train_filenames)

        for filename in os.listdir("../train/negative"):
            if filename != ".DS_Store":
                train_filenames.append("../train/negative/" + filename)
        train_targets += [0]*(len(train_filenames) - len(train_targets))

        n_train_samples = len(train_filenames)
        if featureRepresentation == 'glcm':
            if glcm_isMultidirectional:
                sample_size = 16
            else:
                sample_size = 4
        else:
            sample_size = 20*20
        train_data = np.zeros((n_train_samples, sample_size))
        i = 0
        for filename in train_filenames:
            img = io.imread(filename)
            if featureRepresentation == 'image':
                train_data[i] = img.flatten()
            elif featureRepresentation == 'pca':
                train_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
            elif featureRepresentation == 'glcm':
                train_data[i] = Helper.get_textural_features(img, glcm_distance, glcm_isMultidirectional)
            i = i + 1


        # Load test data. The expected label is parsed from each filename.
        test_filenames = []
        expected = []
        for filename in os.listdir("../test"):
            if filename != ".DS_Store":
                test_filenames.append("../test/" + filename)
                expected.append(int(filename.split('_')[1].split('.')[0]))

        n_test_samples = len(test_filenames)
        test_data = np.zeros((n_test_samples, sample_size))
        i = 0
        for filename in test_filenames:
            img = io.imread(filename)
            if featureRepresentation == 'image':
                test_data[i] = img.flatten()
            elif featureRepresentation == 'pca':
                test_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
            elif featureRepresentation == 'glcm':
                test_data[i] = Helper.get_textural_features(img, glcm_distance, glcm_isMultidirectional)
            i = i + 1
    else:
        train_data, train_targets, test_data, expected = Helper.unserialize(dataset_file)

    # Perform build iterations
    for i in tqdm.tqdm(range(0, iters)):
        # Build Classifier
        param_grid = {"algorithm": ["l-bfgs", "sgd", "adam"],
                      "activation": ["logistic", "relu", "tanh"],
                      "hidden_layer_sizes": [(5, 2), (5,), (100,), (150,), (200,)]}
        classifier = grid_search.GridSearchCV(MLPClassifier(), param_grid)
        classifier.fit(train_data, train_targets)

        # Get previous classifier and assess
        serialized_classifier = Helper.unserialize(MLP_FILE)
        if serialized_classifier is not None:
            predictions = serialized_classifier.predict(test_data)
            confusion_matrix = metrics.confusion_matrix(expected, predictions)
            serialized_n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]
            predictions = classifier.predict(test_data)
            confusion_matrix = metrics.confusion_matrix(expected, predictions)
            n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]
            if n_correct > serialized_n_correct:
                Helper.serialize(MLP_FILE, classifier)
        else:
            Helper.serialize(MLP_FILE, classifier)

    # Display final model performance
    serialized_classifier = Helper.unserialize(MLP_FILE)
    predictions = serialized_classifier.predict(test_data)
#.........remainder of code omitted.........
Developer ID: oduwa, Project: Wheat-Count, Lines of code: 103, Source file: MLP.py

Example 8: main

# Required module import: import Helper [as alias]
# Or: from Helper import serialize [as alias]
def main():
    dataset = Helper.extract_features_from_old_data(featureRepresentation='glcm', glcm_distance=1, glcm_isMultidirectional=True)
    Helper.serialize("../Datasets/old_data.data", dataset)
Developer ID: oduwa, Project: Wheat-Count, Lines of code: 5, Source file: SVM.py


Note: The Helper.serialize method examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please refer to each project's License before distributing or using the code; do not reproduce without permission.