This article collects typical usage examples of the pyspark.sql.SQLContext class in Python. If you have been wondering what SQLContext is for, how to use it, or what real-world calls look like, the curated class examples below should help.
There are 15 code examples of the SQLContext class shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
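Before diving in, here is a minimal end-to-end sketch of the pattern most of the examples share: wrap a SparkContext in an SQLContext, build a DataFrame, register it as a table, and query it with SQL. The data and names below are made up, and the Spark 1.x-era API is assumed.

from pyspark import SparkContext
from pyspark.sql import SQLContext

sc = SparkContext("local[2]", "SQLContextDemo")
sqlContext = SQLContext(sc)

# Build a DataFrame from local tuples with explicit column names.
df = sqlContext.createDataFrame([("Alice", 1), ("Bob", 2)], ["name", "age"])
df.registerTempTable("people")

# Query the registered table with ordinary SQL.
sqlContext.sql("SELECT name FROM people WHERE age > 1").show()
sc.stop()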
Example 1: Spark_MapReduce_Parents
def Spark_MapReduce_Parents(keyword, tokensofprevlevel, graphcache):
    # key the graph cache on an MD5 digest of the keyword
    md5hashparents = hashlib.md5(keyword).hexdigest()
    md5hashparents = md5hashparents + "$parents"
    # persist the keyword so Spark worker processes can pick it up
    picklef_keyword = open("RecursiveGlossOverlap_MapReduce_Parents_Persisted.txt", "w")
    asfer_pickle_string_dump(keyword, picklef_keyword)
    picklef_keyword.close()
    cachevalue = graphcache.get(md5hashparents)
    if cachevalue:
        print "Spark_MapReduce_Parents(): hash = ", md5hashparents, "; returning from cache"
        return cachevalue
    else:
        spcon = SparkContext("local[2]", "Spark_MapReduce_Parents")
        paralleldata = spcon.parallelize(tokensofprevlevel).cache()
        k = paralleldata.map(mapFunction_Parents).reduceByKey(reduceFunction_Parents)
        sqlContext = SQLContext(spcon)
        parents_schema = sqlContext.createDataFrame(k.collect())
        parents_schema.registerTempTable("Interview_RecursiveGlossOverlap_Parents")
        query_results = sqlContext.sql("SELECT * FROM Interview_RecursiveGlossOverlap_Parents")
        dict_query_results = dict(query_results.collect())
        graphcache.set(md5hashparents, dict_query_results[1])
        spcon.stop()
        print "graphcache_mapreduce_parents updated:", graphcache
        return dict_query_results[1]
Author: shrinivaasanka, Project: asfer-github-code, Lines: 35, Source file: InterviewAlgorithmWithIntrinisicMerit_SparkMapReducer.py
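Two notes on the pattern above: dict(query_results.collect()) works because each Row behaves like a tuple, so two-column rows become key/value pairs, and the [1] lookup assumes a row keyed by 1 is present. Also, registerTempTable, which recurs throughout these examples, was deprecated in Spark 2.0 in favor of createOrReplaceTempView; the register-then-sql() pattern itself is unchanged.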
Example 2: test_save_load
def test_save_load(self):
    temp_path = tempfile.mkdtemp()
    sqlContext = SQLContext(self.sc)
    dataset = sqlContext.createDataFrame(
        [(Vectors.dense([0.0]), 0.0),
         (Vectors.dense([0.4]), 1.0),
         (Vectors.dense([0.5]), 0.0),
         (Vectors.dense([0.6]), 1.0),
         (Vectors.dense([1.0]), 1.0)] * 10,
        ["features", "label"])
    lr = LogisticRegression()
    grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
    evaluator = BinaryClassificationEvaluator()
    cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
    cvModel = cv.fit(dataset)
    # save and reload the estimator, then check the loaded copy matches
    cvPath = temp_path + "/cv"
    cv.save(cvPath)
    loadedCV = CrossValidator.load(cvPath)
    self.assertEqual(loadedCV.getEstimator().uid, cv.getEstimator().uid)
    self.assertEqual(loadedCV.getEvaluator().uid, cv.getEvaluator().uid)
    self.assertEqual(loadedCV.getEstimatorParamMaps(), cv.getEstimatorParamMaps())
    # save and reload the fitted model
    cvModelPath = temp_path + "/cvModel"
    cvModel.save(cvModelPath)
    loadedModel = CrossValidatorModel.load(cvModelPath)
    self.assertEqual(loadedModel.bestModel.uid, cvModel.bestModel.uid)
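One housekeeping difference worth noting: this test never deletes temp_path, whereas Example 3 below wraps the same save/load pattern in try/finally with rmtree so the temporary directory is always cleaned up.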
Example 3: test_nested_pipeline_persistence
def test_nested_pipeline_persistence(self):
    """
    Pipeline[HashingTF, Pipeline[PCA]]
    """
    sqlContext = SQLContext(self.sc)
    temp_path = tempfile.mkdtemp()
    try:
        df = sqlContext.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
        tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
        pca = PCA(k=2, inputCol="features", outputCol="pca_features")
        p0 = Pipeline(stages=[pca])
        pl = Pipeline(stages=[tf, p0])
        model = pl.fit(df)
        pipeline_path = temp_path + "/pipeline"
        pl.save(pipeline_path)
        loaded_pipeline = Pipeline.load(pipeline_path)
        self._compare_pipelines(pl, loaded_pipeline)
        model_path = temp_path + "/pipeline-model"
        model.save(model_path)
        loaded_model = PipelineModel.load(model_path)
        self._compare_pipelines(model, loaded_model)
    finally:
        try:
            rmtree(temp_path)
        except OSError:
            pass
Example 4: TestSQL
class TestSQL(PySparkTestCase):

    def setUp(self):
        PySparkTestCase.setUp(self)
        self.sqlCtx = SQLContext(self.sc)

    def test_basic_functions(self):
        rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
        srdd = self.sqlCtx.jsonRDD(rdd)
        srdd.count()
        srdd.collect()
        srdd.schemaString()
        srdd.schema()

        # cache and checkpoint
        self.assertFalse(srdd.is_cached)
        srdd.persist(StorageLevel.MEMORY_ONLY_SER)
        srdd.unpersist()
        srdd.cache()
        self.assertTrue(srdd.is_cached)
        self.assertFalse(srdd.isCheckpointed())
        self.assertEqual(None, srdd.getCheckpointFile())

        srdd = srdd.coalesce(2, True)
        srdd = srdd.repartition(3)
        srdd = srdd.distinct()
        srdd.intersection(srdd)
        self.assertEqual(2, srdd.count())

        srdd.registerTempTable("temp")
        srdd = self.sqlCtx.sql("select foo from temp")
        srdd.count()
        srdd.collect()
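jsonRDD, schemaString, and the other SchemaRDD methods exercised here are Spark 1.x APIs that were removed in Spark 2.0. As a rough sketch of the equivalent JSON-loading step on later releases (assuming an existing sc and sqlContext):

rdd = sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = sqlContext.read.json(rdd)  # replaces jsonRDD
df.printSchema()                # replaces schemaString()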
Example 5: main
def main():
    log = logging.getLogger(prog)
    log.setLevel(logging.INFO)
    # bit hackish and hard to keep aligned with docstring changes, not using this
    # usage = '\r\b\r\b\r' + __doc__ + "usage: %prog -j file.json -p directory.parquet"
    # parser = OptionParser(usage=usage, version='%prog ' + __version__)
    parser = OptionParser(version='%prog ' + __version__)
    parser.add_option('-j', '--json', dest='jsonFile', help='JSON input file/dir', metavar='<file/dir>')
    parser.add_option('-p', '--parquetDir', dest='parquetDir', help='Parquet output dir', metavar='<dir>')
    (options, args) = parser.parse_args()
    jsonFile = options.jsonFile
    parquetDir = options.parquetDir
    if args or not jsonFile or not parquetDir:
        usage(parser)

    conf = SparkConf().setAppName('HS PySpark JSON => Parquet')
    sc = SparkContext(conf=conf)
    sqlContext = SQLContext(sc)
    spark_version = sc.version
    log.info('Spark version detected as %s' % spark_version)
    if not isVersionLax(spark_version):
        die("Spark version couldn't be determined. " + support_msg('pytools'))
    if isMinVersion(spark_version, 1.4):
        json = sqlContext.read.json(jsonFile)
        json.write.parquet(parquetDir)
    else:
        log.warn('running legacy code for Spark <= 1.3')
        json = sqlContext.jsonFile(jsonFile)
        json.saveAsParquetFile(parquetDir)
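The version gate is the point of this example: sqlContext.read.json and DataFrame.write.parquet only appeared in Spark 1.4, while the older jsonFile and saveAsParquetFile calls were deprecated in 1.4 and removed in Spark 2.0, so both code paths are needed to span old and new clusters.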
Example 6: main
def main(argv):
    Conf = (SparkConf().setAppName("recommendation"))
    sc = SparkContext(conf=Conf)
    sqlContext = SQLContext(sc)
    dirPath = "hdfs://ec2-52-71-113-80.compute-1.amazonaws.com:9000/reddit/recommend/data/sr_userCount.parquet"
    rawDF = sqlContext.read.parquet(dirPath).persist(StorageLevel.MEMORY_AND_DISK_SER)
    # argv[1] is the dump of training data in hdfs
    # argv[2] is the user preferences

    # user hash lookup stored into Cassandra
    user_hash = rawDF.map(lambda (a, b, c): (a, hashFunction(a)))
    distinctUser = user_hash.distinct()
    userHashDF = sqlContext.createDataFrame(distinctUser, ["user", "hash"])
    userHashDF.write.format("org.apache.spark.sql.cassandra").options(table="userhash", keyspace=keyspace).save(mode="append")

    # product hash lookup stored into Cassandra
    product_hash = rawDF.map(lambda (a, b, c): (b, hashFunction(b)))
    distinctProduct = product_hash.distinct()
    productHashDF = sqlContext.createDataFrame(distinctProduct, ["product", "hash"])
    productHashDF.write.format("org.apache.spark.sql.cassandra").options(table="producthash", keyspace=keyspace).save(mode="append")

    # Ratings for training: ALS requires a Java int hash of each string ID,
    # so user/product names are hashed and stored as Rating objects
    # for the algorithm to consume
    ratings = rawDF.map(lambda (a, b, c): Rating(hashFunction(a), hashFunction(b), float(c)))
    model = ALS.trainImplicit(ratings, 10, 10, alpha=0.01, seed=5)
    model.save(sc, "hdfs://ec2-52-71-113-80.compute-1.amazonaws.com:9000/reddit/recommend/model")
    sc.stop()
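hashFunction and keyspace are defined elsewhere in this project. Since MLlib's ALS requires integer IDs, any stand-in for hashFunction has to fold a string into Java's signed 32-bit int range; a minimal hypothetical version might look like this:

def hashFunction(s):
    # Hypothetical stand-in: fold the string into a non-negative
    # 31-bit value so it fits a Java int, as ALS requires.
    h = 0
    for ch in s:
        h = (h * 31 + ord(ch)) & 0x7FFFFFFF
    return h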
Example 7: __init__
def __init__(self, predictionAndLabels):
    sc = predictionAndLabels.ctx
    sql_ctx = SQLContext(sc)
    df = sql_ctx.createDataFrame(predictionAndLabels,
                                 schema=sql_ctx._inferSchema(predictionAndLabels))
    java_model = callMLlibFunc("newRankingMetrics", df._jdf)
    super(RankingMetrics, self).__init__(java_model)
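This constructor comes from PySpark's own pyspark.mllib.evaluation module; RankingMetrics funnels the prediction/label RDD through a DataFrame so the JVM side can consume it. Typical calling code looks like the following sketch (the IDs are made up):

from pyspark.mllib.evaluation import RankingMetrics

# Each element pairs predicted item IDs (in rank order) with the ground truth.
predictionAndLabels = sc.parallelize([
    ([1, 6, 2, 7, 8], [1, 2, 3]),
    ([4, 1, 5, 6], [4, 5]),
])
metrics = RankingMetrics(predictionAndLabels)
print(metrics.precisionAt(2))
print(metrics.meanAveragePrecision)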
Example 8: main
def main(n_part, hdfs_path):
    print "********************\n*"
    print "* Start main\n*"
    print "********************"
    conf = SparkConf().setAppName("Benchmark Spark SQL")
    sc = SparkContext(conf=conf)
    sqlContext = SQLContext(sc)
    rowsRDD = sc.textFile(hdfs_path).repartition(n_part).map(lambda x: recordToRows(x)).cache()
    df = sqlContext.createDataFrame(rowsRDD).cache()
    df.count()
    df.registerTempTable("msd_table")
    print "********************\n*"
    print "* Start queries\n*"
    print "********************"
    [ave_t1, std1, dt1, n1] = time_querry("SELECT * FROM msd_table WHERE msd_table.artist_name = 'Taylor Swift'", sqlContext)
    [ave_t2, std2, dt2, n2] = time_querry("SELECT COUNT(*) FROM msd_table WHERE msd_table.artist_name = 'Taylor Swift'", sqlContext, method=1)
    [ave_t3, std3, dt3, n3] = time_querry("SELECT * FROM msd_table WHERE msd_table.artist_hotness > 0.75", sqlContext)
    [ave_t4, std4, dt4, n4] = time_querry("SELECT COUNT(*) FROM msd_table WHERE msd_table.artist_hotness > 0.75", sqlContext, method=1)
    if n1 != n2:
        print "\t!!!!Error, counts disagree for the number of T.S. songs!"
    if n3 != n4:
        print "\t!!!!Error, counts disagree for the number of high paced songs!"
    print "********************\n*"
    print "* Results"
    print "\t".join(map(lambda x: str(x), [ave_t1, std1, dt1, ave_t2, std2, dt2, ave_t3, std3, dt3, ave_t4, std4, dt4]))
    print "********************"
Example 9: RunRandomForest
def RunRandomForest(tf, ctx):
    sqlContext = SQLContext(ctx)
    rdd = tf.map(parseForRandomForest)
    # The schema is a list of column names.
    schema = ['genre', 'track_id', 'features']
    # Apply the schema to the RDD.
    songDF = sqlContext.createDataFrame(rdd, schema)
    # Register the DataFrame as a table.
    songDF.registerTempTable("genclass")
    labelIndexer = StringIndexer().setInputCol("genre").setOutputCol("indexedLabel").fit(songDF)

    trainingData, testData = songDF.randomSplit([0.8, 0.2])

    labelConverter = IndexToString().setInputCol("prediction").setOutputCol("predictedLabel").setLabels(labelIndexer.labels)
    rfc = RandomForestClassifier().setMaxDepth(10).setNumTrees(2).setLabelCol("indexedLabel").setFeaturesCol("features")
    # alternative classifiers that were tried:
    # rfc = SVMModel([.5, 10, 20], 5)
    # rfc = LogisticRegression(maxIter=10, regParam=0.01).setLabelCol("indexedLabel").setFeaturesCol("features")

    pipeline = Pipeline(stages=[labelIndexer, rfc, labelConverter])
    model = pipeline.fit(trainingData)
    predictions = model.transform(testData)
    predictions.show()

    evaluator = MulticlassClassificationEvaluator().setLabelCol("indexedLabel").setPredictionCol("prediction").setMetricName("precision")
    accuracy = evaluator.evaluate(predictions)
    print 'Accuracy of RandomForest = ', accuracy * 100
    print "Test Error = ", (1.0 - accuracy) * 100
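A version caveat for the evaluator above: the "precision" metric name was deprecated in Spark 2.0 and later removed from MulticlassClassificationEvaluator; on current releases the equivalent setting is "accuracy", which matches what the surrounding code already calls the result.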
Example 10: mock_data
def mock_data(self):
    """Mock data to imitate read from database."""
    sqlContext = SQLContext(self.sc)
    mock_data_rdd = self.sc.parallelize([("A", 1, 1), ("B", 1, 0), ("C", 0, 2), ("D", 2, 4), ("E", 3, 5)])
    schema = ["id", "x", "y"]
    mock_data_df = sqlContext.createDataFrame(mock_data_rdd, schema)
    return mock_data_df
Example 11: log_mapreducer
def log_mapreducer(logfilename, pattern, filt="None"):
    spcon = SparkContext()
    if filt == "None":
        input = open(logfilename, 'r')
        paralleldata = spcon.parallelize(input.readlines())
        patternlines = paralleldata.filter(lambda patternline: pattern in patternline)
        print "pattern lines", patternlines.collect()
        matches = patternlines.map(mapFunction).reduceByKey(reduceFunction)
    else:
        input = spcon.textFile(logfilename)
        matches = input.flatMap(lambda line: line.split()).filter(lambda line: filt in line).map(mapFunction).reduceByKey(reduceFunction)
    matches_collected = matches.collect()
    print "matches_collected:", matches_collected
    if len(matches_collected) > 0:
        sqlContext = SQLContext(spcon)
        bytes_stream_schema = sqlContext.createDataFrame(matches_collected)
        bytes_stream_schema.registerTempTable("USBWWAN_bytes_stream")
        query_results = sqlContext.sql("SELECT * FROM USBWWAN_bytes_stream")
        dict_query_results = dict(query_results.collect())
        print "----------------------------------------------------------------------------------"
        print "log_mapreducer(): pattern [", pattern, "] in [", logfilename, "] for filter [", filt, "]"
        print "----------------------------------------------------------------------------------"
        dict_matches = dict(matches_collected)
        sorted_dict_matches = sorted(dict_matches.items(), key=operator.itemgetter(1), reverse=True)
        print "pattern matching lines:", sorted_dict_matches
        print "----------------------------------------------------------------------------------"
        print "SparkSQL DataFrame query results:"
        print "----------------------------------------------------------------------------------"
        pprint.pprint(dict_query_results)
        print "----------------------------------------------------------------------------------"
        print "Cardinality of Stream Dataset:"
        print "----------------------------------------------------------------------------------"
        print len(dict_query_results)
        spcon.stop()
        return sorted_dict_matches
Example 12: main
def main(sc):
    sql_context = SQLContext(sc)
    all_data = get_all_data()

    # Input data: each row is a bag of words from a sentence or document.
    training_data = [(id_gen.next(), text.split(" ")) for text in all_data]
    documentdf = sql_context.createDataFrame(training_data, ["id", "text"])
    remover = StopWordsRemover(inputCol="text", outputCol="text_filtered")
    cleaned_document = remover.transform(documentdf)

    # Learn a mapping from words to vectors.
    word2vec = Word2Vec(vectorSize=len(training_data),
                        inputCol="text_filtered",
                        outputCol="result")
    model = word2vec.fit(cleaned_document)
    matrix = column_similarities(model.transform(cleaned_document))

    # Use the size of the target data to keep only similarity products of
    # target data against the rest, avoiding products of target data with itself.
    values = matrix.entries.filter(
        lambda x: x.j >= TARGET_DATA_SIZE and x.i < TARGET_DATA_SIZE).sortBy(
        keyfunc=lambda x: x.value, ascending=False).map(
        lambda x: x.j).distinct().take(100)

    training_data_index = dict(training_data)
    for position, item in enumerate(values):
        line = " ".join(training_data_index[int(item)])
        print('%d -> %s' % (position, line.encode('utf-8')))
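A design note on the snippet above: Word2Vec's vectorSize is the dimensionality of the learned word vectors (PySpark's default is 100), not a function of corpus size, so tying it to len(training_data) as done here is a project-specific choice rather than an API requirement.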
Example 13: main
def main(argv):
    Conf = (SparkConf().setAppName("SimpleGraph"))
    sc = SparkContext(conf=Conf)
    sqlContext = SQLContext(sc)
    dirPath = "hdfs://ec2-52-71-113-80.compute-1.amazonaws.com:9000/reddit/data/" + argv[1] + ".parquet"
    rawDF = sqlContext.read.parquet(dirPath).registerTempTable("comments")

    # SQL self join: pair each comment with the comment it replies to,
    # linking users who have interacted with one another
    df = sqlContext.sql("""
        SELECT t1.subreddit as Subreddit,
               t1.id as OrigId , t2.id as RespId,
               t1.author AS OrigAuth, t2.author AS RespAuth,
               t1.score AS OrigScore, t2.score AS RespScore,
               t1.ups AS OrigUps, t2.ups AS RespUps,
               t1.downs AS OrigDowns, t2.downs AS RespDowns,
               t1.controversiality AS OrigControv, t2.controversiality AS RespControv
        FROM comments t1 INNER JOIN comments t2
          ON CONCAT("t1_", t1.id) = t2.parent_id
        WHERE t1.author != '[deleted]' AND t2.author != '[deleted]'
    """)

    # Write to Parquet: it compresses the data and is fast to read back.
    df.write.parquet("hdfs://ec2-52-71-113-80.compute-1.amazonaws.com:9000/reddit/data/" + argv[1] + "-selfjoin.parquet")
Example 14: main
def main(dataFile, outputPath):
    conf = SparkConf().setAppName("S3 Example").set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    sc = SparkContext(conf=conf)
    sqlContext = SQLContext(sc)
    raw_text = sc.textFile(dataFile).persist(StorageLevel.MEMORY_AND_DISK)
    csv_data = raw_text.map(lambda l: l.split(","))
    row_data = csv_data.map(lambda p: dataIO.dataStruc(p))
    interaction_df = sqlContext.createDataFrame(row_data)
    # features.save_hdfs_parquet(interaction_df, outputPath)
    dataIO.save_hdfs_parquet(interaction_df, outputPath)
    interaction_df.registerTempTable("interactions")
    tcp_interactions = sqlContext.sql("""
        SELECT duration, dst_bytes, protocol_type FROM interactions
        WHERE protocol_type = 'tcp' AND duration > 1000 AND dst_bytes = 0
    """)
    tcp_interactions.show()
    features.print_tcp_interactions(tcp_interactions)
    dataIO.print_from_dataio()
    features.print_from_feature()
    sc.stop()
Example 15: main
def main(sc):
    sqlContext = SQLContext(sc)
    tasteProfileRdd = sc.textFile("userTaste/*")
    songRdd = sc.textFile("songsDict/*")

    # Load the text files and convert each line to a Row.
    tasteProfile = tasteProfileRdd.filter(lambda l: len(l) > 0)
    parsedSplits = tasteProfile.map(lambda l: l.split('\t'))
    userTaste = parsedSplits.map(lambda p: Row(userId=p[0], songId=p[1], playCount=p[2]))

    individualSong = songRdd.map(lambda l: l.split('|'))
    songData = individualSong.map(lambda s: Row(songId=s[0], featureSet=s[1]))

    # Infer the schemas and register the DataFrames as tables.
    schemaUserTaste = sqlContext.inferSchema(userTaste)
    schemaUserTaste.registerTempTable("userTaste")
    schemaSongData = sqlContext.inferSchema(songData)
    schemaSongData.registerTempTable("songData")

    test2 = sqlContext.sql("select * from songData limit 5")
    songIds = test2.map(lambda p: "songIds: " + p.songId)
    # test1 = sqlContext.sql("SELECT distinct * FROM userTaste limit 5")
    # songIds = test1.map(lambda p: "songIds: " + p.songId)
    for i in songIds.collect():
        print i
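SQLContext.inferSchema, used above, dates from the Spark 1.0-1.2 SchemaRDD era and was deprecated in Spark 1.3, when createDataFrame took over both schema inference and DataFrame construction. A sketch of the same step on 1.3+:

# Equivalent on Spark 1.3+: createDataFrame infers the schema from the Row objects.
schemaUserTaste = sqlContext.createDataFrame(userTaste)
schemaUserTaste.registerTempTable("userTaste")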