本文整理汇总了Python中pyspark.mllib.tree.DecisionTreeModel.load方法的典型用法代码示例。如果您正苦于以下问题:Python DecisionTreeModel.load方法的具体用法?Python DecisionTreeModel.load怎么用?Python DecisionTreeModel.load使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pyspark.mllib.tree.DecisionTreeModel
的用法示例。
在下文中一共展示了DecisionTreeModel.load方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: evaluate_model
# 需要导入模块: from pyspark.mllib.tree import DecisionTreeModel [as 别名]
# 或者: from pyspark.mllib.tree.DecisionTreeModel import load [as 别名]
def evaluate_model(type):
    """Load a previously trained model of the requested kind.

    :param type: one of 'logistic', 'tree' or 'rf'.
                 (NOTE: the name shadows the `type` builtin; kept for
                 caller compatibility.)
    :return: the loaded MLlib model.
    :raises ValueError: if `type` is not a recognised model kind.
    """
    if type == 'logistic':
        model = LogisticRegressionModel.load(sc, "logit_model.model")
    elif type == 'tree':
        model = DecisionTreeModel.load(sc, "dt_model.model")
    elif type == 'rf':
        model = RandomForestModel.load(sc, "rf_model.model")
    else:
        # Original code silently did nothing for unknown kinds; fail fast.
        raise ValueError("unknown model type: %r" % (type,))
    # BUG FIX: the original loaded the model and then discarded it
    # (the function implicitly returned None).
    return model
示例2: loadModel
# 需要导入模块: from pyspark.mllib.tree import DecisionTreeModel [as 别名]
# 或者: from pyspark.mllib.tree.DecisionTreeModel import load [as 别名]
def loadModel():
    """Restore the persisted k-means cluster model and the decision-tree
    classification model from their configured paths.

    :return: (cluster model, classification model) tuple.
    """
    kmeans = KMeansModel.load(sc, pv.clusterModelPath)
    dtree = DecisionTreeModel.load(sc, pv.classificationModelPath)

    if pv.outputDebugMsg:
        Utils.logMessage("\nLoad cluster & classification model finished")

    return kmeans, dtree
示例3: saveModel
# 需要导入模块: from pyspark.mllib.tree import DecisionTreeModel [as 别名]
# 或者: from pyspark.mllib.tree.DecisionTreeModel import load [as 别名]
def saveModel(self):
    """Persist the trained tree model, reload it from disk, and re-run the
    evaluation to confirm the round-trip preserved it."""
    # Write the model out under the fixed path "trained".
    self.tree_model.save(self.sc, "trained")
    # Replace the in-memory model with the freshly loaded copy.
    self.tree_model = DecisionTreeModel.load(self.sc, "trained")
    # Evaluate again so any serialization problem shows up immediately.
    self.evaluate()
示例4: main
# 需要导入模块: from pyspark.mllib.tree import DecisionTreeModel [as 别名]
# 或者: from pyspark.mllib.tree.DecisionTreeModel import load [as 别名]
def main(sc, filename):
'''
The driver for the spark scoring application, it generates predictions for
a given file of features and target variables
'''
rawDataRdd = sc.textFile(filename)
print "Data Size: {}".format(rawDataRdd.count())
labeledPointsRdd = rawDataRdd.map(parse_lines)
#load models
logit_model = LogisticRegressionModel.load(sc, "logit_model.model")
dt_model = DecisionTreeModel.load(sc, "dt_model.model")
rf_model = RandomForestModel.load(sc, "rf_model.model")
#logistic predictions
labels_and_preds = labeledPointsRdd.map(lambda p: (float(logit_model.predict(p.features)), p.label ))
labels_and_preds_collected = labels_and_preds.collect()
print "\n"
print "Predictions: Logistic Regression"
y_true = []
y_pred = []
for row in labels_and_preds_collected:
y_true.append(row[1])
y_pred.append(row[0])
# print "predicted: {0} - actual: {1}\n".format(row[0], row[1])
accuracy = labels_and_preds.filter(lambda (v,p): v == p).count() / float(labeledPointsRdd.count())
print_box()
print "Prediction Accuracy (Logistic): {}".format(round(accuracy, 4))
print_box()
print "\n"
#decision tree predictions
predictions = dt_model.predict(labeledPointsRdd.map(lambda p: p.features))
labels_and_preds_dt = labeledPointsRdd.map(lambda p: p.label).zip(predictions)
labels_and_preds_dt_collected = labels_and_preds.collect()
accuracy_dt = labels_and_preds_dt.filter(lambda (v, p): v == p).count() / float(labeledPointsRdd.count())
print_box()
print "Prediction Accuracy (Decision Tree): {}".format(round(accuracy_dt, 4))
print_box()
print "\n"
#random forest predictions
predictions_rf = rf_model.predict(labeledPointsRdd.map(lambda p: p.features))
labels_and_preds_rf = labeledPointsRdd.map(lambda p: p.label).zip(predictions_rf)
accuracy_rf = labels_and_preds_rf.filter(lambda (v, p): v == p).count() / float(labeledPointsRdd.count())
print_box()
print "Prediction Accuracy (Random Forest): {}".format(round(accuracy_rf, 4))
print_box()
示例5: test
# 需要导入模块: from pyspark.mllib.tree import DecisionTreeModel [as 别名]
# 或者: from pyspark.mllib.tree.DecisionTreeModel import load [as 别名]
def test(sc):
files = ["sounds/flushing/20150227_193109-flushing-04.wav",
"sounds/bike/20150227_193806-bici-14.wav",
"sounds/blender/20150227_193606-licuadora-14.wav"
]
rfmodel = RandomForestModel.load(sc, RF_PATH)
dtmodel = DecisionTreeModel.load(sc, DT_PATH)
print dtmodel.toDebugString()
for f in files:
vec = audio.showFeatures(f)
testfeatures = Vectors.dense([float(x) for x in vec.split(' ')])
print(vec)
pred = dtmodel.predict(testfeatures)
print("DT Prediction is " + str(pred), classes[int(pred)])
pred = rfmodel.predict(testfeatures)
print("RF Prediction is " + str(pred), classes[int(pred)])
示例6: init_spark_context
# 需要导入模块: from pyspark.mllib.tree import DecisionTreeModel [as 别名]
# 或者: from pyspark.mllib.tree.DecisionTreeModel import load [as 别名]
def init_spark_context():
    """Create the Spark context, load the global prediction model, and ship
    the converter pickle files to every worker.

    :return: the initialised SparkContext.
    """
    global predictionModel

    conf = SparkConf().setAppName("movie_recommendation-server")
    # IMPORTANT: pass additional Python modules to each worker.
    sc = SparkContext(conf=conf, pyFiles=['webapp.py', 'service_func.py'])

    # Absolute path in HDFS.
    # To run locally, remove the first slash '/' i.e my_model1, not /my_model1.
    predictionModel = DecisionTreeModel.load(sc, '/my_model1')

    # Distribute the converter pickles to the workers.
    for pickle_name in ('6.p', '7.p', '8.p', '10.p', '12.p', '36.p'):
        sc.addFile('conv/' + pickle_name)

    return sc
示例7: LabeledPoint
# 需要导入模块: from pyspark.mllib.tree import DecisionTreeModel [as 别名]
# 或者: from pyspark.mllib.tree.DecisionTreeModel import load [as 别名]
nonLable = clean_line_split[1:]
return LabeledPoint (label, nonLable)
parsedData = raw_data.map (parsePoint)
#divide training and test data by 70-30 rule
(training, test) = parsedData.randomSplit([0.7, 0.3])
#start timer at this point
startTime = datetime.now()
#build the model
#empty categoricalFeaturesInfo indicates all features are continuous.
model = DecisionTree.trainRegressor (training, categoricalFeaturesInfo={},
impurity='variance', maxDepth=5, maxBins=32)
#evaluate model on test instances and compute test error
predictions = model.predict (test.map (lambda x: x.features))
labelsAndPredictions = test.map (lambda lp: lp.label).zip (predictions)
testMSE = labelsAndPredictions.map (lambda (v, p): (v - p) * (v - p)).sum() /\
float(testData.count())
print ('Time consumed = '), (datetime.now() - startTime)
print ('Test Mean Squared Error = ' + str (testMSE))
print ('Learned regression tree model:')
print (model.toDebugString())
#save and load model
model.save (sc, "DTR-Wide-2008")
sameModel = DecisionTreeModel.load (sc, "DTR-Wide-2008")
sc.stop ()
示例8: LabeledPoint
# 需要导入模块: from pyspark.mllib.tree import DecisionTreeModel [as 别名]
# 或者: from pyspark.mllib.tree.DecisionTreeModel import load [as 别名]
# NOTE(review): the first four lines below are the tail of a parsePoint()
# definition whose `def` line lies outside this excerpt; indentation was
# lost during extraction.
#Cancelled becomes the 6th column now, and total columns in the data = 6
label = clean_line_split[5]
nonLable = clean_line_split[0:5]
return LabeledPoint (label, nonLable)
# Build an RDD of LabeledPoints from the raw text lines.
parsedData = raw_data.map (parsePoint)
#divide training and test data by 70-30 rule
(training, test) = parsedData.randomSplit([0.7, 0.3])
# Cache the training split: tree construction traverses it repeatedly.
training.cache ()
#start timer at this point
startTime = datetime.now()
#build the model
# Empty categoricalFeaturesInfo treats every feature as continuous.
model = DecisionTree.trainClassifier(training, numClasses=2, categoricalFeaturesInfo={},
impurity='gini', maxDepth=5, maxBins=32)
#evaluate model on test instances and compute test error
predictions = model.predict (test.map (lambda x: x.features))
labelsAndPredictions = test.map (lambda lp: lp.label).zip (predictions)
# Misclassification rate over the held-out 30%.
testErr = labelsAndPredictions.filter (lambda (v, p): v != p).count() / float(test.count())
print ('Time consumed = '), (datetime.now() - startTime)
print ('Test Error = ' + str (testErr))
print ('Learned classification tree model:')
print (model.toDebugString())
#save and load model: round-trip the model through the given path.
model.save(sc, "DT-Class-N-95-08")
sameModel = DecisionTreeModel.load(sc, "DT-Class-N-95-08")
sc.stop ()
示例9: SparkContext
# 需要导入模块: from pyspark.mllib.tree import DecisionTreeModel [as 别名]
# 或者: from pyspark.mllib.tree.DecisionTreeModel import load [as 别名]
# $example off$
# NOTE(review): everything below belongs inside the `if __name__` guard;
# the body's indentation was lost during extraction and is kept as-is here.
if __name__ == "__main__":
sc = SparkContext(appName="PythonDecisionTreeRegressionExample")
# $example on$
# Load and parse the data file into an RDD of LabeledPoint.
data = MLUtils.loadLibSVMFile(sc, 'data/mllib/sample_libsvm_data.txt')
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a DecisionTree model.
# Empty categoricalFeaturesInfo indicates all features are continuous.
model = DecisionTree.trainRegressor(trainingData, categoricalFeaturesInfo={},
impurity='variance', maxDepth=5, maxBins=32)
# Evaluate model on test instances and compute test error
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
# Mean squared error of the regressor over the held-out split.
testMSE = labelsAndPredictions.map(lambda (v, p): (v - p) * (v - p)).sum() /\
float(testData.count())
print('Test Mean Squared Error = ' + str(testMSE))
print('Learned regression tree model:')
print(model.toDebugString())
# Save and load model (round-trip sanity check).
model.save(sc, "target/tmp/myDecisionTreeRegressionModel")
sameModel = DecisionTreeModel.load(sc, "target/tmp/myDecisionTreeRegressionModel")
# $example off$
示例10: sets
# 需要导入模块: from pyspark.mllib.tree import DecisionTreeModel [as 别名]
# 或者: from pyspark.mllib.tree.DecisionTreeModel import load [as 别名]
# Train a binary decision-tree classifier on LibSVM-format data, report the
# test error, print the learned tree, then round-trip the model through disk.
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.tree import DecisionTree, DecisionTreeModel
from pyspark.mllib.util import MLUtils

# Load and parse the data file into an RDD of LabeledPoint.
data = MLUtils.loadLibSVMFile(sc, 'file')
# Hold out 30% of the points for evaluation.
trainingData, testData = data.randomSplit([0.7, 0.3])

# An empty categoricalFeaturesInfo dict marks every feature as continuous.
model = DecisionTree.trainClassifier(
    trainingData,
    numClasses=2,
    categoricalFeaturesInfo={},
    impurity='entropy',
    maxDepth=5,
    maxBins=32,
)

# Pair each true label with the model's prediction for the same point and
# compute the misclassification rate.
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
testErr = (labelsAndPredictions.filter(lambda vp: vp[0] != vp[1]).count()
           / float(testData.count()))
print('Test Error = ' + str(testErr))
print('Learned classification tree model:')
print(model.toDebugString())

# Save and load model
model.save(sc, "myModelPath")
sameModel = DecisionTreeModel.load(sc, "myModelPath")
示例11: SparkContext
# 需要导入模块: from pyspark.mllib.tree import DecisionTreeModel [as 别名]
# 或者: from pyspark.mllib.tree.DecisionTreeModel import load [as 别名]
# NOTE(review): the two leading lines are the continuation of a
# `conf = (SparkConf()...` expression whose start lies outside this excerpt.
.setMaster(master)
.setAppName(app_name))
sc = SparkContext(conf=conf)
# Read the input file and parse each line into a labeled point.
lines = sc.textFile(input)
parsedData = lines.map(parseLine)
# 50/50 random split between training and evaluation data.
(trainingData, testData) = parsedData.randomSplit([0.5, 0.5])
# Train a DecisionTree model.
# Empty categoricalFeaturesInfo indicates all features are continuous.
model = DecisionTree.trainClassifier(trainingData, numClasses=2, categoricalFeaturesInfo={},
impurity='gini', maxDepth=5, maxBins=32)
# Evaluate model on test instances and compute test error
predictions = model.predict(testData.map(lambda x: x.features))
# Debug: print every prediction on the workers via my_print.
predictions.foreach(my_print)
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
labelsAndPredictions.foreach(my_print)
# Misclassification rate over the held-out split.
testErr = labelsAndPredictions.filter(lambda (v, p): v != p).count() / float(testData.count())
print('Test Error = ' + str(testErr))
print('Learned classification tree model:')
print(model.toDebugString())
# Save and load model (round-trip through the output path).
model.save(sc, output)
sameModel = DecisionTreeModel.load(sc, output)
sc.stop()
示例12: float
# 需要导入模块: from pyspark.mllib.tree import DecisionTreeModel [as 别名]
# 或者: from pyspark.mllib.tree.DecisionTreeModel import load [as 别名]
# Banner output marking the start of the scoring phase.
print "######################################################\n"
print "######################################################\n"
print "######### Start!!! #######\n"
print "######################################################\n"
print "######################################################\n"
print "\n\n\n"
#stop_rdd = rdd_tweets.coalesce(1)
#stop_rdd.saveAsTextFile(output_path)
print "****************************************************\n"
print "Here is the last step\n"
print "****************************************************\n"
# Load the pre-trained decision tree used for scoring.
# (NOTE(review): `sc`, `binladen_model_path`, `rdd_labelFeatures` and
# `log_write` are defined earlier in the full script, outside this excerpt.)
binladen_model = DecisionTreeModel.load(sc, binladen_model_path)
#
#training_data = MLUtils.loadLibSVMFile(sc, training_path)
test_data = rdd_labelFeatures
# Evaluate model on test instances and compute test error
predictions = binladen_model.predict(test_data.map(lambda x: x.features))
# Pair true labels with predictions and compute the misclassification rate.
labelsAndPredictions = test_data.map(lambda lp: lp.label).zip(predictions)
testErr = labelsAndPredictions.filter(lambda (v, p): v!=p).count() / float(test_data.count())
tmp_str = 'Test Error = ' + str(testErr)
print(tmp_str)
# Also persist the error figure to the run log.
log_write(tmp_str)
print "\n\n"
#featuresAndPredictions = test_data.flatMap(lambda words: resplit_only_feature(words))\
#        .zip(predictions)
示例13: LabeledPoint
# 需要导入模块: from pyspark.mllib.tree import DecisionTreeModel [as 别名]
# 或者: from pyspark.mllib.tree.DecisionTreeModel import load [as 别名]
# NOTE(review): the first four lines below are the tail of a parsePoint()
# definition whose `def` line lies outside this excerpt; indentation was
# lost during extraction.
#Cancelled becomes the 9th column now, and total columns in the data = 9
label = clean_line_split[8]
nonLable = clean_line_split[0:8]
return LabeledPoint (label, nonLable)
# Build an RDD of LabeledPoints from the raw text lines.
parsedData = raw_data.map (parsePoint)
#divide training and test data by 70-30 rule
(training, test) = parsedData.randomSplit([0.7, 0.3])
# Cache the training split: tree construction traverses it repeatedly.
training.cache ()
#start timer at this point
startTime = datetime.now()
#build the model
# Empty categoricalFeaturesInfo treats every feature as continuous.
model = DecisionTree.trainClassifier(training, numClasses=2, categoricalFeaturesInfo={},
impurity='gini', maxDepth=5, maxBins=32)
#evaluate model on test instances and compute test error
predictions = model.predict (test.map (lambda x: x.features))
labelsAndPredictions = test.map (lambda lp: lp.label).zip (predictions)
# Misclassification rate over the held-out 30%.
testErr = labelsAndPredictions.filter (lambda (v, p): v != p).count() / float(test.count())
print ('Time consumed = '), (datetime.now() - startTime)
print ('Test Error = ' + str (testErr))
print ('Learned classification tree model:')
print (model.toDebugString())
#save and load model: round-trip the model through the given path.
model.save(sc, "DT-Class-W-00-08")
sameModel = DecisionTreeModel.load(sc, "DT-Class-W-00-08")
sc.stop ()
示例14: getModel
# 需要导入模块: from pyspark.mllib.tree import DecisionTreeModel [as 别名]
# 或者: from pyspark.mllib.tree.DecisionTreeModel import load [as 别名]
def getModel(self, path):
    """Load a persisted model of the configured type from `path`.

    :param path: filesystem/HDFS path the model was saved under.
    :return: the loaded NaiveBayesModel or DecisionTreeModel.
    :raises ValueError: if ``self.type`` names an unsupported model kind.
    """
    if self.type == 'NaiveBayes':
        return NaiveBayesModel.load(self.sc, path)
    elif self.type == 'DecisionTree':
        return DecisionTreeModel.load(self.sc, path)
    # Previously an unrecognised type fell through and silently returned
    # None, which only surfaces later at the call site; fail fast instead.
    raise ValueError("unsupported model type: %r" % (self.type,))
示例15: SparkContext
# 需要导入模块: from pyspark.mllib.tree import DecisionTreeModel [as 别名]
# 或者: from pyspark.mllib.tree.DecisionTreeModel import load [as 别名]
# NOTE(review): the two leading lines are the continuation of a
# `conf = (SparkConf()...` expression whose start lies outside this excerpt.
.setAppName("Mlib")
.set("spark.executor.memory", "1g"))
sc = SparkContext(conf = conf)
# Demo of the MLlib vector types: dense as numpy array / plain list, sparse
# via Vectors.sparse and a scipy CSC matrix.
dv1 =np.array([1.0,0.0,3.0])
dv2= [1.0,0.0,3.0]
sv1 = Vectors.sparse(3,[0,2],[1.0,3.0])
sv2 = sps.csc_matrix((np.array([1.0,3.0]),np.array([0,2]),np.array([0,2])),shape=(3,1))
print sv2
# Load LibSVM data and hold out 30% for evaluation.
data = MLUtils.loadLibSVMFile(sc, 'sample_libsvm_data.txt')
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Empty categoricalFeaturesInfo treats every feature as continuous.
model = DecisionTree.trainClassifier(trainingData, numClasses=2, categoricalFeaturesInfo={},
impurity='gini', maxDepth=5, maxBins=32)
# Evaluate model on test instances and compute test error
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
# Misclassification rate over the held-out split.
testErr = labelsAndPredictions.filter(lambda (v, p): v != p).count() / float(testData.count())
print('Test Error = ' + str(testErr))
print('Learned classification tree model:')
print(model.toDebugString())
# Save and load model (round-trip sanity check).
model.save(sc, "model_data")
sameModel = DecisionTreeModel.load(sc, "model_data")