This article collects typical usage examples of Bunch.kwords in Python, where Bunch comes from sklearn.datasets.base. If you are wondering what Bunch.kwords is or how to use it, the hand-picked code examples below may help; you can also read more about the containing class, sklearn.datasets.base.Bunch, for additional background.
The listing below shows 2 code examples that use Bunch.kwords, sorted by popularity by default.
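As background for the examples, here is a minimal sketch of how Bunch behaves: it is a dictionary whose keys can also be read and written as attributes, and kwords is simply a field that the examples below attach to their own Bunch instances (the name comes from the examples' dataset loader, not from scikit-learn itself). Note that Bunch now lives in sklearn.utils in current scikit-learn releases; the import path and sample strings below are illustrative assumptions.

import numpy as np
from sklearn.datasets.base import Bunch  # in newer scikit-learn: from sklearn.utils import Bunch

pool = Bunch()
pool.kwords = np.array(["first k words of doc 1", "first k words of doc 2"])  # attribute-style assignment

print(pool.kwords[0])       # attribute-style access
print(pool["kwords"][0])    # equivalent dict-style access

In both examples below, pool.kwords stores the first k words of each training document as raw text, which the query step later tokenizes to measure query size.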
Example 1: main
# Required import: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import kwords [as alias]
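# Note: besides the sklearn import noted above, this excerpt assumes project-specific
# helpers (StemTokenizer, load_dataset, parse_parameters_mat, set_cost_model, and the
# baseexpert / randomsampling modules), a parsed `args` namespace, and standard imports
# (defaultdict from collections, pickle, time, numpy as np, CountVectorizer from
# sklearn.feature_extraction.text, and linear_model from sklearn) defined elsewhere in
# the original script.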
def main():
accuracies = defaultdict(lambda: [])
aucs = defaultdict(lambda: [])
x_axis = defaultdict(lambda: [])
vct = CountVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=True, ngram_range=(1, 3),
token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer())
vct_analizer = vct.build_tokenizer()
print("Start loading ...")
# data fields: data, bow, file_names, target_names, target
########## NEWS GROUPS ###############
# easy to hard. see "Less is More" paper: http://axon.cs.byu.edu/~martinez/classes/678/Presentations/Clawson.pdf
categories = [['alt.atheism', 'talk.religion.misc'],
['comp.graphics', 'comp.windows.x'],
['comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware'],
['rec.sport.baseball', 'sci.crypt']]
min_size = max(100, args.fixk)
fixk_saved = "{0}{1}.p".format(args.train, args.fixk)
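    # Cache the preprocessed dataset in a pickle: reuse it when present, otherwise build and save it.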
    try:
        with open(fixk_saved, "rb") as fixk_file:
            data = pickle.load(fixk_file)
    except IOError:
        data = load_dataset(args.train, args.fixk, categories[0], vct, min_size, percent=.5)
        with open(fixk_saved, "wb") as fixk_file:
            pickle.dump(data, fixk_file)
# data = load_dataset(args.train, args.fixk, categories[0], vct, min_size)
print("Data %s" % args.train)
print("Data size %s" % len(data.train.data))
parameters = parse_parameters_mat(args.cost_model)
print "Cost Parameters %s" % parameters
cost_model = set_cost_model(args.cost_function, parameters=parameters)
print "\nCost Model: %s" % cost_model.__class__.__name__
#### STUDENT CLASSIFIER
clf = linear_model.LogisticRegression(penalty="l1", C=1)
print "\nStudent Classifier: %s" % clf
#### EXPERT CLASSIFIER
exp_clf = linear_model.LogisticRegression(penalty='l1', C=.3)
exp_clf.fit(data.test.bow, data.test.target)
expert = baseexpert.NeutralityExpert(exp_clf, threshold=args.neutral_threshold,
cost_function=cost_model.cost_function)
print "\nExpert: %s " % expert
#### ACTIVE LEARNING SETTINGS
step_size = args.step_size
bootstrap_size = args.bootstrap
evaluation_points = 200
print("\nExperiment: step={0}, BT={1}, plot points={2}, fixk:{3}, minsize:{4}".format(step_size, bootstrap_size,
evaluation_points, args.fixk,
min_size))
print ("Cheating experiment - use full uncertainty query k words")
t0 = time.time()
### experiment starts
    tx = []
tac = []
tau = []
for t in range(args.trials):
        trial_accu = []
trial_aucs = []
trial_x_axis = []
print "*" * 60
print "Trial: %s" % t
student = randomsampling.UncertaintyLearner(model=clf, accuracy_model=None, budget=args.budget, seed=t)
print "\nStudent: %s " % student
train_indices = []
train_x = []
train_y = []
pool = Bunch()
pool.data = data.train.bow.tocsr() # full words, for training
pool.fixk = data.train.bowk.tocsr() # k words BOW for querying
pool.target = data.train.target
pool.predicted = []
pool.kwords = np.array(data.train.kwords) # k words
pool.remaining = set(range(pool.data.shape[0])) # indices of the pool
bootstrapped = False
current_cost = 0
iteration = 0
while 0 < student.budget and len(pool.remaining) > step_size and iteration <= args.maxiter:
if not bootstrapped:
# ......... part of the code omitted here .........
Example 2: main
# Required import: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import kwords [as alias]
# ......... part of the code omitted here .........
expert = baseexpert.LookUpExpert(accuracy_value=accu_parameters, cost_function=cost_model.cost_function)
else:
raise Exception("We need a defined cost function options [fixed|log|linear]")
#expert = baseexpert.TrueOracleExpert(cost_function=cost_model.cost_function)
print "\nExpert: %s " % expert
#### ACTIVE LEARNING SETTINGS
step_size = args.step_size
bootstrap_size = args.bootstrap
evaluation_points = 200
    eval_range = 1 if (args.budget // evaluation_points) <= 0 else args.budget // evaluation_points
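    # eval_range: spacing between evaluation points, i.e. roughly evaluation_points measurements over the full budget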
print("\nExperiment: step={0}, BT={1}, plot points={2}, fixk:{3}, minsize:{4}".format(step_size, bootstrap_size,
evaluation_points, args.fixk,
50))
t0 = time.time()
### experiment starts
for t in range(args.trials):
print "*" * 60
print "Trial: %s" % t
# TODO shuffle the data??
#student = baselearner.BaseLearner(model=clf, cost_model=cost_model, accuracy_model=accuracy_model, budget=args.budget,
# seed=t)
student = randomsampling.RandomSamplingLearner(model=clf, accuracy_model=None, budget=args.budget, seed=t)
print "\nStudent: %s " % student
train_indices = []
train_x = []
train_y = []
pool = Bunch()
pool.data = data.train.bow.tocsr() # full words, for training
pool.fixk = data.train.bowk.tocsr() # k words BOW for querying
pool.target = data.train.target
pool.predicted = []
pool.kwords = np.array(data.train.kwords) # k words
pool.remaining = set(range(pool.data.shape[0])) # indices of the pool
#for x in pool.fixk:
# print x.todense().sum()
bootstrapped = False
current_cost = 0
iteration = 0
while 0 < student.budget and len(pool.remaining) > step_size and iteration <= args.maxiter:
if not bootstrapped:
## random bootstrap
#bt = randomsampling.BootstrapRandom(random_state=t * 10)
## random from each bootstrap
bt = randomsampling.BootstrapFromEach(t * 10)
query_index = bt.bootstrap(pool=pool, k=bootstrap_size)
bootstrapped = True
print "Bootstrap: %s " % bt.__class__.__name__
print
else:
query_index = student.pick_next(pool=pool, k=step_size)
query = pool.fixk[query_index] # query with k words
query_size = [len(vct_analizer(x)) for x in pool.kwords[query_index]]
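            # query_size: token count of each k-word query text (vct_analizer is presumably the vectorizer's tokenizer, as built in Example 1)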
#if query_size[0] >50:
# print "*** %s" % pool.kwords[query_index]