This article collects typical usage examples of the Python method pyspark.ml.feature.HashingTF. If you are unsure what feature.HashingTF does or how to use it, the curated examples below may help; you can also explore the rest of the pyspark.ml.feature module.
The following presents 4 code examples of feature.HashingTF, ordered by popularity.
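Before the collected examples, a minimal orientation sketch: HashingTF maps a column of token lists to fixed-length term-frequency vectors via the hashing trick. Everything here (the session, the DataFrame, and the column names) is illustrative and not taken from the examples below.

from pyspark.ml.feature import HashingTF
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
# Each row carries a pre-tokenized list of terms.
df = spark.createDataFrame([(["spark", "ml", "spark"],)], ["words"])
# Hash each token into one of numFeatures buckets and count occurrences.
hashing_tf = HashingTF(inputCol="words", outputCol="tf", numFeatures=16)
hashing_tf.transform(df).show(truncate=False)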
Example 1: compute_clusters
# Required imports:
from pyspark.ml import Pipeline
from pyspark.ml.clustering import BisectingKMeans
from pyspark.ml.feature import HashingTF, IDF
def compute_clusters(addons_df, num_clusters, random_seed):
""" Performs user clustering by using add-on ids as features.
"""
# Build the stages of the pipeline. We need hashing to make the next
# steps work.
hashing_stage = HashingTF(inputCol="addon_ids", outputCol="hashed_features")
idf_stage = IDF(
inputCol="hashed_features", outputCol="features", minDocFreq=1
)
# As a future improvement, we may add a sane value for the minimum cluster size
# to BisectingKMeans (e.g. minDivisibleClusterSize). For now, just make sure
# to pass along the random seed if needed for tests.
    # Check against None so that a seed of 0 is still honored.
    kmeans_kwargs = {"seed": random_seed} if random_seed is not None else {}
bkmeans_stage = BisectingKMeans(k=num_clusters, **kmeans_kwargs)
pipeline = Pipeline(stages=[hashing_stage, idf_stage, bkmeans_stage])
# Run the pipeline and compute the results.
model = pipeline.fit(addons_df)
return model.transform(addons_df).select(["client_id", "prediction"])
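A hedged usage sketch for compute_clusters: the client_id/addon_ids schema follows the function's final select, while the sample rows and add-on ids are invented.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
addons_df = spark.createDataFrame(
    [("client-a", ["addon-1", "addon-2"]),
     ("client-b", ["addon-1"]),
     ("client-c", ["addon-3"])],
    ["client_id", "addon_ids"],
)
# Two clusters, fixed seed for reproducibility.
compute_clusters(addons_df, num_clusters=2, random_seed=42).show()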
Example 2: build_sparkml_operator_name_map
# Required imports:
from pyspark.ml.feature import (
    Binarizer, BucketedRandomProjectionLSHModel, Bucketizer, ChiSqSelectorModel,
    CountVectorizerModel, DCT, ElementwiseProduct, HashingTF, IDFModel, ImputerModel,
    IndexToString, MaxAbsScalerModel, MinHashLSHModel, MinMaxScalerModel, NGram,
    Normalizer, OneHotEncoderModel, PCAModel, PolynomialExpansion, QuantileDiscretizer,
    RegexTokenizer, StandardScalerModel, StopWordsRemover, StringIndexerModel,
    Tokenizer, VectorAssembler, VectorIndexerModel, VectorSlicer, Word2VecModel)
from pyspark.ml.classification import (
    LinearSVCModel, LogisticRegressionModel, DecisionTreeClassificationModel,
    GBTClassificationModel, RandomForestClassificationModel, NaiveBayesModel,
    MultilayerPerceptronClassificationModel, OneVsRestModel)
from pyspark.ml.regression import (
    AFTSurvivalRegressionModel, DecisionTreeRegressionModel, GBTRegressionModel,
    GeneralizedLinearRegressionModel, IsotonicRegressionModel, LinearRegressionModel,
    RandomForestRegressionModel)
def build_sparkml_operator_name_map():
    """Map each supported Spark ML operator class to its fully qualified name."""
    res = {k: "pyspark.ml.feature." + k.__name__ for k in [
Binarizer, BucketedRandomProjectionLSHModel, Bucketizer,
ChiSqSelectorModel, CountVectorizerModel, DCT, ElementwiseProduct, HashingTF, IDFModel, ImputerModel,
IndexToString, MaxAbsScalerModel, MinHashLSHModel, MinMaxScalerModel, NGram, Normalizer, OneHotEncoderModel,
PCAModel, PolynomialExpansion, QuantileDiscretizer, RegexTokenizer,
StandardScalerModel, StopWordsRemover, StringIndexerModel, Tokenizer, VectorAssembler, VectorIndexerModel,
VectorSlicer, Word2VecModel
]}
res.update({k: "pyspark.ml.classification." + k.__name__ for k in [
LinearSVCModel, LogisticRegressionModel, DecisionTreeClassificationModel, GBTClassificationModel,
RandomForestClassificationModel, NaiveBayesModel, MultilayerPerceptronClassificationModel, OneVsRestModel
]})
res.update({k: "pyspark.ml.regression." + k.__name__ for k in [
        AFTSurvivalRegressionModel, DecisionTreeRegressionModel, GBTRegressionModel,
GeneralizedLinearRegressionModel, IsotonicRegressionModel, LinearRegressionModel, RandomForestRegressionModel
]})
return res
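A quick check of how the resulting map might be consulted; keys are the operator classes themselves (HashingTF is already imported above):

operator_names = build_sparkml_operator_name_map()
# Classes map to their fully qualified names.
print(operator_names[HashingTF])  # pyspark.ml.feature.HashingTF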
Example 3: main
# Required imports; this script also assumes module-level globals:
# sc (a SparkContext) plus the training_input, testing_input, and output path strings.
from pyspark.sql import SQLContext
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
def main():
# Read training data as a DataFrame
sqlCt = SQLContext(sc)
trainDF = sqlCt.read.parquet(training_input)
testDF = sqlCt.read.parquet(testing_input)
tokenizer = Tokenizer(inputCol="text", outputCol="words")
evaluator = BinaryClassificationEvaluator()
# no parameter tuning
hashingTF_notuning = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features", numFeatures=1000)
lr_notuning = LogisticRegression(maxIter=20, regParam=0.1)
pipeline_notuning = Pipeline(stages=[tokenizer, hashingTF_notuning, lr_notuning])
model_notuning = pipeline_notuning.fit(trainDF)
prediction_notuning = model_notuning.transform(testDF)
notuning_output = evaluator.evaluate(prediction_notuning)
# for cross validation
hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features")
lr = LogisticRegression(maxIter=20)
paramGrid = ParamGridBuilder()\
.addGrid(hashingTF.numFeatures, [1000, 5000, 10000])\
.addGrid(lr.regParam, [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])\
.build()
pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])
cv = CrossValidator(estimator=pipeline, estimatorParamMaps=paramGrid, evaluator=evaluator, numFolds=2)
cvModel = cv.fit(trainDF)
# Make predictions on test documents. cvModel uses the best model found.
best_prediction = cvModel.transform(testDF)
best_output = evaluator.evaluate(best_prediction)
s = str(notuning_output) + '\n' + str(best_output)
output_data = sc.parallelize([s])
output_data.saveAsTextFile(output)
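After a run like the one above, the selected hyperparameters can be read back off the fitted CrossValidatorModel; a sketch reusing cvModel, hashingTF, and lr from the example (avgMetrics is aligned index-for-index with the parameter grid, and the default areaUnderROC metric is higher-is-better):

best_idx = max(range(len(cvModel.avgMetrics)), key=cvModel.avgMetrics.__getitem__)
best_params = cvModel.getEstimatorParamMaps()[best_idx]
# ParamMaps are keyed by the Param objects themselves.
print(best_params[hashingTF.numFeatures], best_params[lr.regParam])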
Example 4: test_cv_lasso_with_mllib_featurization
# Required imports; the test fixtures self.sql (SQLContext), self.sc (SparkContext),
# and self.converter (a spark_sklearn Converter) are set up elsewhere in the suite.
from pyspark.ml import Pipeline
from pyspark.ml.feature import HashingTF, Tokenizer
from sklearn.linear_model import Lasso as SKL_Lasso
from sklearn.pipeline import Pipeline as SKL_Pipeline
from spark_sklearn import GridSearchCV
def test_cv_lasso_with_mllib_featurization(self):
data = [('hi there', 0.0),
('what is up', 1.0),
('huh', 1.0),
('now is the time', 5.0),
('for what', 0.0),
('the spark was there', 5.0),
('and so', 3.0),
('were many socks', 0.0),
('really', 1.0),
('too cool', 2.0)]
data = self.sql.createDataFrame(data, ["review", "rating"])
# Feature extraction using MLlib
tokenizer = Tokenizer(inputCol="review", outputCol="words")
hashingTF = HashingTF(inputCol="words", outputCol="features", numFeatures=20000)
pipeline = Pipeline(stages=[tokenizer, hashingTF])
data = pipeline.fit(data).transform(data)
df = self.converter.toPandas(data.select(data.features.alias("review"), "rating"))
pipeline = SKL_Pipeline([
('lasso', SKL_Lasso())
])
parameters = {
'lasso__alpha': (0.001, 0.005, 0.01)
}
grid_search = GridSearchCV(self.sc, pipeline, parameters)
skl_gs = grid_search.fit(df.review.values, df.rating.values)
assert len(skl_gs.cv_results_['params']) == len(parameters['lasso__alpha'])
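spark-sklearn's GridSearchCV mirrors the scikit-learn result object, so the winning alpha should be readable in the usual way; a sketch continuing from skl_gs (attribute names assume the scikit-learn convention):

# Best hyperparameters and the corresponding cross-validated score.
print(skl_gs.best_params_["lasso__alpha"])
print(skl_gs.best_score_)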