This article collects typical usage examples of the SQLContext.jsonRDD method from Python's pyspark.sql module. If you have been wondering what SQLContext.jsonRDD does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also read more about the containing class, pyspark.sql.SQLContext.
Four code examples of SQLContext.jsonRDD are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
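Before the examples, a quick orientation: jsonRDD takes an RDD of JSON strings, infers a schema by sampling the records, and returns a SchemaRDD (called a DataFrame from Spark 1.3 onward). Below is a minimal self-contained sketch, assuming Spark 1.1+; the sample records and the "people" table name are made up for illustration.

from pyspark import SparkContext
from pyspark.sql import SQLContext

sc = SparkContext("local[2]", "jsonRDD_demo")
sqlContext = SQLContext(sc)

# An RDD of JSON strings; these sample records are illustrative only.
jsonStrings = sc.parallelize(['{"name": "alice", "age": 30}',
                              '{"name": "bob", "age": 25}'])

# jsonRDD samples the records to infer a schema, then parses each string.
people = sqlContext.jsonRDD(jsonStrings)
people.printSchema()
people.registerTempTable("people")
print(sqlContext.sql("select name from people where age > 26").collect())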
Example 1: TestSQL
# Required import: from pyspark.sql import SQLContext [as alias]
# Or: from pyspark.sql.SQLContext import jsonRDD [as alias]
class TestSQL(PySparkTestCase):

    def setUp(self):
        PySparkTestCase.setUp(self)
        self.sqlCtx = SQLContext(self.sc)

    def test_basic_functions(self):
        rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
        srdd = self.sqlCtx.jsonRDD(rdd)
        srdd.count()
        srdd.collect()
        srdd.schemaString()
        srdd.schema()

        # cache and checkpoint
        self.assertFalse(srdd.is_cached)
        srdd.persist(StorageLevel.MEMORY_ONLY_SER)
        srdd.unpersist()
        srdd.cache()
        self.assertTrue(srdd.is_cached)
        self.assertFalse(srdd.isCheckpointed())
        self.assertEqual(None, srdd.getCheckpointFile())

        srdd = srdd.coalesce(2, True)
        srdd = srdd.repartition(3)
        srdd = srdd.distinct()
        srdd.intersection(srdd)
        self.assertEqual(2, srdd.count())

        srdd.registerTempTable("temp")
        srdd = self.sqlCtx.sql("select foo from temp")
        srdd.count()
        srdd.collect()
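Note that this test exercises the old SchemaRDD API of Spark 1.0/1.1 (schemaString(), RDD-style methods directly on the query result). jsonRDD itself was deprecated in Spark 1.4 and removed in 2.0; a rough modern equivalent, sketched below for Spark 2.x, goes through a SparkSession:

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("json_demo").getOrCreate()
rdd = spark.sparkContext.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = spark.read.json(rdd)                 # replaces sqlContext.jsonRDD(rdd)
df.createOrReplaceTempView("temp")        # replaces registerTempTable("temp")
spark.sql("select foo from temp").show()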
Example 2: writeLumbarReadings
# Required import: from pyspark.sql import SQLContext [as alias]
# Or: from pyspark.sql.SQLContext import jsonRDD [as alias]
def writeLumbarReadings(time, rdd):
    try:
        # Convert each RDD from the readings DStream to a DataFrame and write it out.
        # MySQLConnection, VectorAssembler, and loadedModel are defined elsewhere in the original script.
        connectionProperties = MySQLConnection.getDBConnectionProps('/home/erik/mysql_credentials.txt')
        sqlContext = SQLContext(rdd.context)
        if not rdd.isEmpty():
            lumbarReadings = sqlContext.jsonRDD(rdd)
            lumbarReadingsIntermediate = lumbarReadings.selectExpr(
                "readingID", "readingTime", "deviceID", "metricTypeID", "uomID",
                "actual.y AS actualYaw", "actual.p AS actualPitch", "actual.r AS actualRoll",
                "setPoints.y AS setPointYaw", "setPoints.p AS setPointPitch", "setPoints.r AS setPointRoll")

            # Input columns must be in the same order as what was used to train the model.
            # Testing using only pitch since the model has a limited dataset.
            assembler = VectorAssembler(inputCols=["actualPitch"], outputCol="features")
            lumbarReadingsIntermediate = assembler.transform(lumbarReadingsIntermediate)

            predictions = loadedModel.predict(lumbarReadingsIntermediate.map(lambda x: x.features))
            predictionsDF = lumbarReadingsIntermediate.map(lambda x: x.readingID).zip(predictions).toDF(["readingID", "positionID"])

            combinedDF = lumbarReadingsIntermediate.join(predictionsDF, lumbarReadingsIntermediate.readingID == predictionsDF.readingID).drop(predictionsDF.readingID)
            combinedDF = combinedDF.drop("features")

            combinedDF.show()
            combinedDF.write.jdbc("jdbc:mysql://localhost/biosensor", "SensorReadings", properties=connectionProperties)
    except:
        # Note: the bare except silently swallows any error in this batch.
        pass
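One caveat with calling jsonRDD inside a streaming handler like this is that the schema is re-inferred by sampling on every micro-batch. jsonRDD also accepts an explicit schema that skips that inference pass; a minimal sketch, assuming Spark 1.x, with a deliberately partial schema for illustration:

from pyspark.sql.types import StructType, StructField, StringType, DoubleType

# Partial, illustrative schema; the real readings carry more fields.
readingSchema = StructType([
    StructField("readingID", StringType(), True),
    StructField("actual", StructType([
        StructField("y", DoubleType(), True),
        StructField("p", DoubleType(), True),
        StructField("r", DoubleType(), True),
    ]), True),
])

# Passing a schema avoids re-sampling the data on every batch.
lumbarReadings = sqlContext.jsonRDD(rdd, readingSchema)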
Example 3: writeLumbarTrainingReadings
# Required import: from pyspark.sql import SQLContext [as alias]
# Or: from pyspark.sql.SQLContext import jsonRDD [as alias]
def writeLumbarTrainingReadings(time, rddTraining):
    try:
        # Convert each RDD from the training DStream to a DataFrame and write it out.
        # MySQLConnection is defined elsewhere in the original script.
        connectionProperties = MySQLConnection.getDBConnectionProps('/home/erik/mysql_credentials.txt')
        sqlContext = SQLContext(rddTraining.context)
        if not rddTraining.isEmpty():
            lumbarTrainingReading = sqlContext.jsonRDD(rddTraining)
            lumbarTrainingReadingFinal = lumbarTrainingReading.selectExpr(
                "deviceID", "metricTypeID", "uomID", "positionID",
                "actual.y AS actualYaw", "actual.p AS actualPitch", "actual.r AS actualRoll",
                "setPoints.y AS setPointYaw", "setPoints.p AS setPointPitch", "setPoints.r AS setPointRoll")
            lumbarTrainingReadingFinal.write.jdbc("jdbc:mysql://localhost/biosensor", "SensorTrainingReadings", properties=connectionProperties)
    except:
        # As above, errors in this batch are silently discarded.
        pass
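A note on the write.jdbc calls in both handlers: the writer's default save mode ("error") raises once the target table already exists, so a long-running streaming job would typically pass mode="append" (available since the DataFrame writer API arrived in Spark 1.4):

# Append each micro-batch rather than failing once the table exists.
lumbarTrainingReadingFinal.write.jdbc(
    "jdbc:mysql://localhost/biosensor",
    "SensorTrainingReadings",
    mode="append",
    properties=connectionProperties)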
Example 4: SparkContext
# Required import: from pyspark.sql import SQLContext [as alias]
# Or: from pyspark.sql.SQLContext import jsonRDD [as alias]
sys.exit(-1)  # excerpt begins mid-script: the omitted check above exits when no data directory is given
#
# get the data directory
#
data_dir = sys.argv[1]
#
# Specify local mode and two cores
#
sc = SparkContext("local[2]", "pubnub_devices")
devicesRDD = sc.textFile(data_dir)
#
# create a DataFrame
#
sqlContext = SQLContext(sc)
df = sqlContext.jsonRDD(devicesRDD)
#
# show the inferred schema and a sample of rows
#
df.printSchema()
df.show()
#
# register a temp table and issue some SQL-like queries against our device information
#
df.registerTempTable("deviceTables")
#
# QUERY 1: get all devices with temp > 20 and humidity < 50
#
results = sqlContext.sql("select device_id, device_name, humidity, temp from deviceTables where temp > 20 and humidity < 50")
#
# the result of a SQL query is another DataFrame
# iterate over the results
#
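The excerpt ends here. A minimal continuation, assuming the result set fits on the driver, would collect the rows and loop over them:

# Bring the query result back to the driver and iterate over the Row objects.
for row in results.collect():
    print(row.device_id, row.device_name, row.temp, row.humidity)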