本文整理汇总了Python中pyspark.SparkContext.cancelAllJobs方法的典型用法代码示例。如果您正苦于以下问题:Python SparkContext.cancelAllJobs方法的具体用法?Python SparkContext.cancelAllJobs怎么用?Python SparkContext.cancelAllJobs使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pyspark.SparkContext
的用法示例。
在下文中一共展示了SparkContext.cancelAllJobs方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _load_config
# 需要导入模块: from pyspark import SparkContext [as 别名]
# 或者: from pyspark.SparkContext import cancelAllJobs [as 别名]
#.........这里部分代码省略.........
kwargs = self.CONFIG_DATA[KEYWORD_ARGS][self._trainer.__name__]
return self._trainer.train(dataRDD, *args, **kwargs)
def get_regressor_metrics(self, *args):
    """Train the current regressor on a 70/30 random split and record metrics.

    The labeled-point RDD is split into a training part (70%) and a held-out
    part (30%); the trainer is fit on the training part with the keyword args
    from CONFIG_DATA, and regression metrics computed on the held-out
    predictions are appended to ``self._metrics_results`` as a formatted
    report string.  Returns None in every case (also when no trainer or no
    data is available).
    """
    if not self._trainer:
        return None
    labeledRDD = self.gen_labeled_pointsRDD()
    if not labeledRDD:
        return None
    total_count = labeledRDD.count()
    split_fraction = 0.7
    trainRDD, holdoutRDD = labeledRDD.randomSplit(
        [split_fraction, 1 - split_fraction])
    # Fraction of the data that actually landed in the held-out split
    # (randomSplit is approximate, so this is measured, not assumed).
    holdout_ratio = holdoutRDD.count() / float(total_count)
    train_kwargs = self.CONFIG_DATA[KEYWORD_ARGS][self._trainer.__name__]
    model = self._trainer.train(trainRDD, *args, **train_kwargs)
    # Pair (prediction, observation) for every held-out labeled point.
    scoredRDD = holdoutRDD.map(
        lambda lp: (float(model.predict(lp.features)), lp.label)).cache()
    trainRDD.unpersist()
    metrics = RegressionMetrics(scoredRDD)
    scoredRDD.unpersist()
    self._metrics_results.append("""
THE METRICS FOR YOUR '{0}' MODEL IS AS FOLLOWS:
keyword_args: {6}
reduction_expr: {7}
K-faultRatio: {8:2.2f}%
explainedVariance: {1}
meanAbsoluteError: {2}
meanSquaredError: {3}
r2: {4}
rootMeanSquaredE: {5}
""".format(self._trainer.__name__,
           metrics.explainedVariance,
           metrics.meanAbsoluteError,
           metrics.meanSquaredError,
           metrics.r2,
           metrics.rootMeanSquaredError,
           self.CONFIG_DATA[KEYWORD_ARGS][self._trainer.__name__],
           self.CONFIG_DATA[LABEL_REDUCTION_EXPR][self._labelidx],
           holdout_ratio * 100.0))
def __del__(self):
    """Tear down resources on destruction.

    Cancels and stops the SparkContext (if any), drops the ``sp`` attribute,
    closes the collated file when it is still open, and finally prints every
    accumulated metrics report.
    """
    spark_ctx = self.sc
    if spark_ctx:
        spark_ctx.cancelAllJobs()
        spark_ctx.stop()
    if self.sp:
        del self.sp
    collated = self.collated_file
    if collated and not collated.closed:
        collated.close()
    if len(self._metrics_results):
        for report in self._metrics_results:
            print(report)
def update_labelidx(self):
    """Advance the label-reduction-expression cursor by one.

    Returns True while the new index still points inside
    CONFIG_DATA[LABEL_REDUCTION_EXPR]; returns False when no trainer is set
    or when the expressions are exhausted.  Note that the index is
    incremented even on the exhausted path, matching the original contract.
    """
    if not self._trainer:
        return False
    self._labelidx += 1
    expr_count = len(self.CONFIG_DATA[LABEL_REDUCTION_EXPR])
    return self._labelidx < expr_count
def update_traineridx(self):
    """Advance to the next keyword-argument override for the current trainer.

    On success the override at the new index is merged into the trainer's
    active keyword args (CONFIG_DATA[KEYWORD_ARGS]) and True is returned.
    Returns False when no trainer is set or the overrides are exhausted;
    the cursor is still incremented on the exhausted path.
    """
    if not self._trainer:
        return False
    self._traineridx += 1
    trainer_name = self._trainer.__name__
    overrides = self.CONFIG_DATA[UPDATE_KEYWORD_ARGS][trainer_name]
    if self._traineridx >= len(overrides):
        return False
    self.CONFIG_DATA[KEYWORD_ARGS][trainer_name]\
        .update(overrides[self._traineridx])
    return True
def set_trainer(self, new_trainer):
    """Install *new_trainer* as the active trainer and reset both cursors.

    An object without a ``train`` attribute is ignored silently and the
    current state is left untouched.
    """
    if not hasattr(new_trainer, "train"):
        return
    self._trainer = new_trainer
    self._traineridx = -1
    self._labelidx = -1
def reset_traineridx(self):
    """Rewind the trainer keyword-argument cursor to its initial position."""
    self._traineridx = -1