This article collects typical usage examples of the Python class pyspark.ml.feature.HashingTF. If you are wondering what HashingTF is for, how to use it, or what it looks like in real code, the curated class examples below should help.
The following shows 15 code examples of the HashingTF class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
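Before the examples, a minimal self-contained sketch of the core behavior, assuming a local SparkSession (this snippet is an orientation aid, not one of the collected examples): HashingTF hashes each token into a fixed-size feature space and records per-bucket term counts.

from pyspark.sql import SparkSession
from pyspark.ml.feature import HashingTF

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([(0, ["spark", "hashing", "tf", "tf"])], ["id", "words"])
htf = HashingTF(inputCol="words", outputCol="features", numFeatures=32)
htf.transform(df).show(truncate=False)  # "tf" hashes to the same bucket twice, so that bucket holds 2.0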
Example 1: fit_kmeans

from pyspark.ml import Pipeline
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import Tokenizer, StopWordsRemover, HashingTF, IDF, Normalizer

def fit_kmeans(spark, products_df):
    step = 0
    step += 1
    tokenizer = Tokenizer(inputCol="title", outputCol=str(step) + "_tokenizer")
    step += 1
    stopwords = StopWordsRemover(inputCol=tokenizer.getOutputCol(), outputCol=str(step) + "_stopwords")
    step += 1
    tf = HashingTF(inputCol=stopwords.getOutputCol(), outputCol=str(step) + "_tf", numFeatures=16)
    step += 1
    idf = IDF(inputCol=tf.getOutputCol(), outputCol=str(step) + "_idf")
    step += 1
    normalizer = Normalizer(inputCol=idf.getOutputCol(), outputCol=str(step) + "_normalizer")
    step += 1
    kmeans = KMeans(featuresCol=normalizer.getOutputCol(), predictionCol=str(step) + "_kmeans", k=2, seed=20)
    kmeans_pipeline = Pipeline(stages=[tokenizer, stopwords, tf, idf, normalizer, kmeans])
    model = kmeans_pipeline.fit(products_df)
    words_prediction = model.transform(products_df)
    model.save("./kmeans")  # the whole fitted pipeline is saved to a folder
    return model, words_prediction
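A brief usage sketch: the folder written by model.save above can later be reloaded with PipelineModel.load and applied to new data.

from pyspark.ml import PipelineModel

reloaded = PipelineModel.load("./kmeans")
predictions = reloaded.transform(products_df)  # same output schema as words_prediction above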
Example 2: train_lg

from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.feature import HashingTF, IDF
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder

def train_lg(training_data, collection):
    # Configure an ML pipeline consisting of three stages: hashingTF, idf, and lr.
    hashingTF = HashingTF(inputCol="filtered", outputCol="TF_features")
    idf = IDF(inputCol=hashingTF.getOutputCol(), outputCol="features")
    lr = LogisticRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8)
    # All stages go into a single unfitted pipeline: pre-fitting the hashingTF/idf
    # stages separately would freeze them, so the numFeatures grid below would
    # have no effect during cross-validation.
    pipeline = Pipeline(stages=[hashingTF, idf, lr])
    paramGrid = ParamGridBuilder() \
        .addGrid(hashingTF.numFeatures, [10, 100, 1000, 10000]) \
        .addGrid(lr.regParam, [0.1, 0.01]) \
        .build()
    crossval = CrossValidator(estimator=pipeline,
                              estimatorParamMaps=paramGrid,
                              evaluator=BinaryClassificationEvaluator(),
                              numFolds=5)
    # Run cross-validation and choose the best set of parameters.
    cvModel = crossval.fit(training_data)
    # model_path = os.path.join(models_dir, time.strftime("%Y%m%d-%H%M%S") + '_'
    #                           + collection["Id"] + '_'
    #                           + collection["name"])
    # cvModel.save(sc, model_path)
    return cvModel
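A usage sketch (an addition, not from the original source): the returned CrossValidatorModel exposes the winning pipeline and the averaged metric for each point in the grid.

cvModel = train_lg(training_data, collection)
print(cvModel.avgMetrics)          # one averaged AUC value per ParamMap in paramGrid
best_pipeline = cvModel.bestModel  # the PipelineModel refit on all of training_data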
Example 3: tf_idf_feature

from pyspark.ml.feature import HashingTF, IDF

def tf_idf_feature(wordsData):
    hashingTF = HashingTF(inputCol="filtered", outputCol="rawFeatures", numFeatures=20)
    featurizedData = hashingTF.transform(wordsData)
    idf = IDF(inputCol="rawFeatures", outputCol="features")
    idfModel = idf.fit(featurizedData)
    rescaledData = idfModel.transform(featurizedData)
    for features_label in rescaledData.select("features", "id").take(3):
        print(features_label)
Example 4: textPredict

from django.shortcuts import render
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.feature import Tokenizer, HashingTF, IDF, StringIndexer, VectorIndexer

def textPredict(request):
    """6. Text clustering and popularity prediction."""
    label = request.POST['label']
    title = request.POST['title']
    conf = SparkConf().setAppName('textPredict').setMaster('spark://HP-Pavilion:7077')
    sc = SparkContext(conf=conf)
    sqlContext = SQLContext(sc)
    # Process the dataset and generate feature vectors.
    dfTitles = sqlContext.read.parquet('data/roll_news_sina_com_cn.parquet')
    print(dfTitles.dtypes)
    tokenizer = Tokenizer(inputCol="title", outputCol="words")
    wordsData = tokenizer.transform(dfTitles)
    hashingTF = HashingTF(inputCol="words", outputCol="rawFeatures", numFeatures=20)
    featurizedData = hashingTF.transform(wordsData)
    idf = IDF(inputCol="rawFeatures", outputCol="features")
    idfModel = idf.fit(featurizedData)
    rescaledData = idfModel.transform(featurizedData)
    rescaledData.show()
    for features_label in rescaledData.select("features", "rawFeatures").take(3):
        print(features_label)
    # Train the decision-tree model.
    labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(rescaledData)
    featureIndexer = \
        VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(rescaledData)
    (trainingData, testData) = rescaledData.randomSplit([0.7, 0.3])
    dt = DecisionTreeClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures")
    pipeline = Pipeline(stages=[labelIndexer, featureIndexer, dt])
    model = pipeline.fit(trainingData)
    # Test the model.
    predictions = model.transform(testData)
    predictions.show()
    predictions.select("prediction", "indexedLabel", "features").show(5)
    # Test with user data: a single news item.
    sentenceData = sqlContext.createDataFrame([
        (label, title),
    ], ['label', "title"])
    tokenizer = Tokenizer(inputCol="title", outputCol="words")
    wordsData = tokenizer.transform(sentenceData)
    hashingTF = HashingTF(inputCol="words", outputCol="rawFeatures", numFeatures=20)
    featurizedData = hashingTF.transform(wordsData)
    rescaledData = idfModel.transform(featurizedData)
    myprediction = model.transform(rescaledData)
    print("==================================================")
    myprediction.show()
    resultList = convertDfToList(myprediction)  # convertDfToList is a project helper defined elsewhere
    # Evaluate the model.
    # "precision" was a valid metric name on Spark 1.x; Spark 2.0+ renamed it to "accuracy".
    evaluator = MulticlassClassificationEvaluator(
        labelCol="indexedLabel", predictionCol="prediction", metricName="precision")
    accuracy = evaluator.evaluate(predictions)
    print("Test Error = %g " % (1.0 - accuracy))
    treeModel = model.stages[2]
    print(treeModel)
    sc.stop()
    # NOTE: Django's render() also expects a template name as its second argument.
    return render(request, {'resultList': resultList})
Example 5: extract_tf_features

from pyspark.ml.feature import HashingTF

def extract_tf_features(p_df, input_col, output_col):
    """
    Extracts TF features.
    :param p_df: A DataFrame.
    :param input_col: Name of the input column.
    :param output_col: Name of the output column.
    :return: A DataFrame with a 3000-dimensional term-frequency column added.
    """
    hashingTF = HashingTF(inputCol=input_col, outputCol=output_col, numFeatures=3000)
    return hashingTF.transform(p_df)
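A hypothetical usage example (the words_df name and its "words" column are assumptions, not from the original source):

tf_df = extract_tf_features(words_df, "words", "tf_features")
tf_df.select("tf_features").show(3, truncate=False)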
Example 6: term_frequency

from pyspark.ml.feature import HashingTF

def term_frequency(df, column):
    """
    Compute the term frequency of the tokens contained in a column.
    Transformation: array<string> --> vector
    """
    tf = HashingTF(inputCol=column, outputCol='_' + column)
    df = tf.transform(df)
    df = replace(df, column, '_' + column)  # replace() is an external helper; see the sketch below
    return df
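The replace() helper is not shown in this example. A plausible minimal version (a sketch under that assumption, not the original helper) drops the old token column and renames the hashed column back to the original name:

def replace(df, column, new_column):
    # Drop the original column, then rename the new vector column over it.
    return df.drop(column).withColumnRenamed(new_column, column)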
Example 7: tfidf

from pyspark.ml.feature import HashingTF, IDF

def tfidf(dataframe, in_col1, out_col1, in_col2, out_col2, n):
    # The fitted IDF model is exposed as a module-level global so that other
    # code (cf. predictLabel in Example 10) can reuse it at prediction time.
    global idfModel
    hashingTF = HashingTF(inputCol=in_col1, outputCol=out_col1, numFeatures=n)
    featurizedData = hashingTF.transform(dataframe)
    idf = IDF(inputCol=in_col2, outputCol=out_col2)
    idfModel = idf.fit(featurizedData)
    dataframe = idfModel.transform(featurizedData)
    return dataframe
Example 8: run_tf_idf_spark_ml

from pyspark.ml.feature import Tokenizer, HashingTF, IDF

def run_tf_idf_spark_ml(df, numFeatures=1 << 20):
    # 1 << 20 = 2**20 hash buckets; a larger space reduces hash collisions at the cost of memory.
    tokenizer = Tokenizer(inputCol="body", outputCol="words")
    wordsData = tokenizer.transform(df)
    hashingTF = HashingTF(inputCol="words", outputCol="rawFeatures", numFeatures=numFeatures)
    featurizedData = hashingTF.transform(wordsData)
    idf = IDF(inputCol="rawFeatures", outputCol="features")
    idfModel = idf.fit(featurizedData)
    return idfModel.transform(featurizedData)
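A brief usage sketch (df is assumed to carry a string column named "body", matching the tokenizer above):

result = run_tf_idf_spark_ml(df, numFeatures=1 << 18)  # a smaller space trades collisions for memory
result.select("features").show(3, truncate=False)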
Example 9: test_apply_binary_term_freqs

from pyspark.ml.feature import HashingTF
from pyspark.ml.linalg import Vectors

def test_apply_binary_term_freqs(self):
    # A unittest-style method: self provides a SparkSession and assertion helpers.
    df = self.spark.createDataFrame([(0, ["a", "a", "b", "c", "c", "c"])], ["id", "words"])
    n = 10
    hashingTF = HashingTF()
    hashingTF.setInputCol("words").setOutputCol("features").setNumFeatures(n).setBinary(True)
    output = hashingTF.transform(df)
    features = output.select("features").first().features.toArray()
    expected = Vectors.dense([1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).toArray()
    for i in range(0, n):
        self.assertAlmostEqual(features[i], expected[i], 14, "Error at " + str(i) +
                               ": expected " + str(expected[i]) + ", got " + str(features[i]))
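With setBinary(True), each distinct token contributes at most 1.0 per bucket. For contrast, a short sketch (an addition reusing the test's df and hashingTF, not part of the original test) with binary mode switched off, so buckets hold raw counts:

hashingTF.setBinary(False)
counts = hashingTF.transform(df).select("features").first().features.toArray()
# the bucket that "c" hashes to now holds 3.0 instead of 1.0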
Example 10: predictLabel

from pyspark.ml.feature import Tokenizer, HashingTF

def predictLabel(label, title, model):
    """Predict the label of a news item."""
    # Relies on module-level globals: sqlContext and a previously fitted
    # idfModel (cf. the tfidf function in Example 7).
    sentenceData = sqlContext.createDataFrame([
        (label, title),
    ], ['label', "title"])
    tokenizer = Tokenizer(inputCol="title", outputCol="words")
    wordsData = tokenizer.transform(sentenceData)
    hashingTF = HashingTF(inputCol="words", outputCol="rawFeatures", numFeatures=20)
    featurizedData = hashingTF.transform(wordsData)
    rescaledData = idfModel.transform(featurizedData)
    myprediction = model.transform(rescaledData)
    return myprediction
Example 11: create_features

from pyspark.sql import Row
from pyspark.ml.feature import Tokenizer, HashingTF, IDF

def create_features(raw_data):
    # Create a DataFrame (sqlContext is assumed to exist at module level)
    data_df = sqlContext.createDataFrame(raw_data.map(lambda r: Row(appid=r[0], price=r[1], sentence=r[2])))
    # Split each sentence into words
    tokenizer = Tokenizer(inputCol='sentence', outputCol='words')
    words_df = tokenizer.transform(data_df)
    # Calculate term frequency
    hashingTF = HashingTF(inputCol='words', outputCol='rawFeatures', numFeatures=5)
    featurized_df = hashingTF.transform(words_df)
    # Calculate inverse document frequency
    idf = IDF(inputCol='rawFeatures', outputCol='features')
    idfModel = idf.fit(featurized_df)
    return idfModel.transform(featurized_df)
Example 12: tf_feature_vectorizer

from pyspark.ml.feature import HashingTF, IDF

def tf_feature_vectorizer(df, no_of_features, ip_col):
    output_raw_col = ip_col + "raw_features"
    output_col = ip_col + "features"
    hashingTF = HashingTF(inputCol=ip_col, outputCol=output_raw_col, numFeatures=no_of_features)
    featurizedData = hashingTF.transform(df)
    idf = IDF(inputCol=output_raw_col, outputCol=output_col)
    idfModel = idf.fit(featurizedData)
    rescaled_data = idfModel.transform(featurizedData)
    rescaled_data.show(5)
    print(rescaled_data.count())
    return rescaled_data
Example 13: makeTFIDF

from pyspark.ml.feature import HashingTF, IDF

def makeTFIDF(sc, spark, reviews):
    # A CountVectorizer would give exact counts over a real vocabulary:
    # cv = CountVectorizer(inputCol='words_clean', outputCol='tf')
    # cvModel = cv.fit(reviews)
    # reviews = cvModel.transform(reviews)
    # HashingTF is used instead for fewer dimensions:
    hashingtf = HashingTF(inputCol='words_clean', outputCol='tf', numFeatures=1000)
    reviews = hashingtf.transform(reviews)
    # Create the TF-IDF matrix
    idf = IDF().setInputCol('tf').setOutputCol('tfidf')
    tfidfModel = idf.fit(reviews)
    reviews = tfidfModel.transform(reviews)
    return reviews  # without this return, the transformed DataFrame would be lost
Example 14: _build_stages

from pyspark.ml.feature import Tokenizer, HashingTF, IDF
from pyspark.ml.classification import LogisticRegression

def _build_stages(self):
    # BeautifulSoupParser is a custom Transformer defined elsewhere in the project.
    self.bs_parser = BeautifulSoupParser(inputCol="review", outputCol="parsed")
    self.tokenizer = Tokenizer(inputCol=self.bs_parser.getOutputCol(), outputCol="words")
    self.hashing_tf = HashingTF(inputCol=self.tokenizer.getOutputCol(), outputCol="raw_features")
    self.idf_model = IDF(inputCol=self.hashing_tf.getOutputCol(), outputCol="features")
    self.lr = LogisticRegression(maxIter=10, regParam=0.01)
    return [self.bs_parser, self.tokenizer, self.hashing_tf, self.idf_model, self.lr]
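A usage sketch (hypothetical; it assumes this method sits on a class that assembles the stages into a pipeline elsewhere, and that train_df carries a "review" text column plus a "label" column):

from pyspark.ml import Pipeline

pipeline = Pipeline(stages=self._build_stages())  # called from inside the same class
model = pipeline.fit(train_df)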
Example 15: append_tf_idf

from pyspark import StorageLevel
from pyspark.ml.feature import HashingTF, IDF

def append_tf_idf(self, df):
    """
    Calculate term frequency and inverse document frequency, here over tokens
    that appear at least once per hour; it compares how often each token shows
    up hourly relative to the other tokens. Not used for the main purpose of
    the project.
    Args:
        :param df: Dataframe parameter.
    Returns:
        :return: Dataframe with term frequency and inverse document frequency
        added in the columns 'rawFeatures' and 'features' respectively.
    """
    # Create the TF column.
    hashingTF = HashingTF(inputCol="tokens", outputCol="rawFeatures", numFeatures=100000)
    tf = hashingTF.transform(df)
    tf.persist(StorageLevel.MEMORY_AND_DISK)
    # Create the IDF column.
    idf = IDF(inputCol="rawFeatures", outputCol="features")
    idfModel = idf.fit(tf)
    tfidf = idfModel.transform(tf)
    return tfidf