本文整理汇总了Python中sklearn.preprocessing.data.StandardScaler.get_params方法的典型用法代码示例。如果您正苦于以下问题:Python StandardScaler.get_params方法的具体用法?Python StandardScaler.get_params怎么用?Python StandardScaler.get_params使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.preprocessing.data.StandardScaler
的用法示例。
在下文中一共展示了StandardScaler.get_params方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: SkRanker
# 需要导入模块: from sklearn.preprocessing.data import StandardScaler [as 别名]
# 或者: from sklearn.preprocessing.data.StandardScaler import get_params [as 别名]
class SkRanker(Ranker, SkLearner):
'''
Basic ranker wrapping scikit-learn functions
'''
def train(self, dataset_filename,
          scale=True,
          feature_selector=None,
          feature_selection_params=None,
          feature_selection_threshold=.25,
          learning_params=None,
          optimize=True,
          optimization_params=None,
          scorers=None,
          attribute_set=None,
          class_name=None,
          metaresults_prefix="./0-",
          **kwargs):
    '''
    Train the wrapped scikit-learn learner on a dataset loaded from disk.

    @param dataset_filename: path of the dataset file to load
    @param scale: if True, standardize features with a StandardScaler
        (the fitted scaler is kept on self.scaler for prediction time)
    @param feature_selector: name of the feature-selection method, or None
    @param feature_selection_params: dict of parameters for the feature
        selector, or a string representation of such a dict
    @param feature_selection_threshold: threshold used by the selector
    @param learning_params: dict of parameters for the learning method
    @param optimize: whether to run hyper-parameter optimization
    @param optimization_params: dict of parameters for the optimizer
    @param scorers: list of scoring-function names (default: ['f1_score'])
    @param attribute_set: attributes used to build the feature vectors
    @param class_name: name of the class (label) attribute
    @param metaresults_prefix: filename prefix for diagnostic output
    @return: metadata produced by the feature-selection step
    '''
    # Fresh objects per call instead of shared mutable default arguments
    # (a dict/list default is created once and shared across all calls).
    if feature_selection_params is None:
        feature_selection_params = {}
    if learning_params is None:
        learning_params = {}
    if optimization_params is None:
        optimization_params = {}
    if scorers is None:
        scorers = ['f1_score']
    plot_filename = "{}{}".format(metaresults_prefix, "featureselection.pdf")
    data, labels = dataset_to_instances(dataset_filename, attribute_set, class_name, **kwargs)
    learner = self.learner
    # the class must remember the attribute_set and the class_name in order
    # to reproduce the vectors with the same layout at prediction time
    self.attribute_set = attribute_set
    self.class_name = class_name
    # scale data to the mean
    if scale:
        log.info("Scaling datasets...")
        log.debug("Data shape before scaling: {}".format(data.shape))
        self.scaler = StandardScaler()
        data = self.scaler.fit_transform(data)
        log.debug("Data shape after scaling: {}".format(data.shape))
        # NOTE(review): StandardScaler.std_ was renamed to .scale_ in newer
        # scikit-learn releases; this code targets the old attribute name.
        log.debug("Mean: {} , Std: {}".format(self.scaler.mean_, self.scaler.std_))
    # avoid any NaNs and Infs that may have occurred due to the scaling
    data = np.nan_to_num(data)
    # feature-selection parameters may arrive serialized as a string
    if isinstance(feature_selection_params, basestring):
        # SECURITY: eval() executes arbitrary code -- only trusted
        # configuration strings may be passed here (ast.literal_eval would
        # be safer if these strings are plain dict literals).
        feature_selection_params = eval(feature_selection_params)
    self.featureselector, data, metadata = self.run_feature_selection(data, labels, feature_selector, feature_selection_params, feature_selection_threshold, plot_filename)
    # initialize learning method and scoring functions and optimize
    self.learner, self.scorers = self.initialize_learning_method(learner, data, labels, learning_params, optimize, optimization_params, scorers)
    log.info("Data shape before fitting: {}".format(data.shape))
    self.learner.fit(data, labels)
    self.fit = True
    return metadata
def get_model_description(self):
    '''
    Build a flat dict describing the trained model's parameters.

    For an SVC with an RBF kernel this returns gamma, C and the per-class
    support-vector counts; for a linear kernel it returns one coefficient
    per pairwise attribute name. For any other estimator it returns only
    the parameters whose values can be converted to float. Falls back to
    an empty dict when nothing can be extracted.

    @return: dict mapping parameter names to numeric values
    '''
    params = {}
    # self.scaler is only set when train() ran with scale=True; the
    # original `if self.scaler` raised AttributeError otherwise.
    scaler = getattr(self, "scaler", None)
    if scaler:
        params = scaler.get_params(deep=True)
    try:  # these are for SVC
        if self.learner.kernel == "rbf":
            params["gamma"] = self.learner.gamma
            params["C"] = self.learner.C
            for i, n_support in enumerate(self.learner.n_support_):
                params["n_{}".format(i)] = n_support
            log.debug(len(self.learner.dual_coef_))
            return params
        elif self.learner.kernel == "linear":
            coefficients = self.learner.coef_
            att_coefficients = {}
            # pair each pairwise attribute name with its linear weight
            for attname, coeff in zip(self.attribute_set.get_names_pairwise(), coefficients[0]):
                att_coefficients[attname] = coeff
            return att_coefficients
    except AttributeError:
        # learner has no 'kernel' (not an SVC); fall through to the
        # generic estimator handling below
        pass
    try:  # adaboost etc.: keep only the numeric parameters
        params = self.learner.get_params()
        numeric_params = OrderedDict()
        for key, value in params.iteritems():
            try:
                value = float(value)
            except (ValueError, TypeError):
                # TypeError (e.g. float(None)) previously escaped this
                # handler and the outer bare except discarded ALL params;
                # now non-numeric values are simply skipped
                continue
            numeric_params[key] = value
        return numeric_params
    except Exception:
        # narrowed from a bare 'except:' so that SystemExit and
        # KeyboardInterrupt still propagate
        pass
    return {}
def get_ranked_sentence(self, parallelsentence, critical_attribute="rank_predicted",
new_rank_name="rank_hard",
del_orig_class_att=False,
bidirectional_pairs=False,
ties=True,
reconstruct='hard'):
"""
"""
if type(self.learner) == str:
if self.classifier:
#.........这里部分代码省略.........