This page collects typical usage examples of the Python method sklearn.datasets.base.Bunch.data. If you are wondering what Bunch.data does, how to use it, or want concrete usage examples, the curated code samples below may help. You can also read more about the containing class, sklearn.datasets.base.Bunch.
Seven code examples of Bunch.data are shown below, sorted by popularity by default.
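Before diving in, a minimal sketch of Bunch itself may be useful: it is essentially a dict that also exposes its keys as attributes, which is why the examples below can freely create fields such as .data, .target, and .target_names on the fly. (Note that sklearn.datasets.base.Bunch is the import path of older scikit-learn releases; in current versions the class lives at sklearn.utils.Bunch.)

from sklearn.datasets.base import Bunch  # sklearn.utils.Bunch in newer releases

b = Bunch()
b.data = [[1, 2], [3, 4]]   # attribute assignment stores a dict key...
print(b['data'])            # ...so dict-style access sees the same value
print(b.data)               # attribute-style access works too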
Example 1: shuffleData
# Required imports: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import data [as alias]
def shuffleData(self, res):
    shuffle(res)

    train = Bunch()
    train.data = map(lambda x: x[1], res)
    train.target = map(lambda x: x[0], res)
    train.target_names = self.names
    return train
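Note that this is Python 2 code: there, map() returns a list. Under Python 3, map() returns a lazy iterator, so len(train.data) or indexing would fail downstream. A minimal Python 3-safe sketch of the same idea, with the method's self.names replaced by an explicit names argument:

from random import shuffle
from sklearn.utils import Bunch  # sklearn.datasets.base.Bunch in old releases

def shuffle_data(names, res):
    # res is assumed to be a list of (label, text) pairs, as in the original
    shuffle(res)
    train = Bunch()
    train.data = [x[1] for x in res]    # texts
    train.target = [x[0] for x in res]  # labels
    train.target_names = names
    return train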
Example 2: main
# Required imports: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import data [as alias]
def main():
    accuracies = defaultdict(lambda: [])
    aucs = defaultdict(lambda: [])
    x_axis = defaultdict(lambda: [])

    vct = CountVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=True, ngram_range=(1, 3),
                          token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer())
    vct_analizer = vct.build_tokenizer()

    print("Start loading ...")
    # data fields: data, bow, file_names, target_names, target

    ########## NEWS GROUPS ###############
    # easy to hard. see "Less is More" paper: http://axon.cs.byu.edu/~martinez/classes/678/Presentations/Clawson.pdf
    categories = [['alt.atheism', 'talk.religion.misc'],
                  ['comp.graphics', 'comp.windows.x'],
                  ['comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware'],
                  ['rec.sport.baseball', 'sci.crypt']]

    min_size = max(100, args.fixk)

    fixk_saved = "{0}{1}.p".format(args.train, args.fixk)
    try:
        fixk_file = open(fixk_saved, "rb")
        data = pickle.load(fixk_file)
    except IOError:
        data = load_dataset(args.train, args.fixk, categories[0], vct, min_size, percent=.5)
        fixk_file = open(fixk_saved, "wb")
        pickle.dump(data, fixk_file)

    # data = load_dataset(args.train, args.fixk, categories[0], vct, min_size)

    print("Data %s" % args.train)
    print("Data size %s" % len(data.train.data))

    parameters = parse_parameters_mat(args.cost_model)
    print "Cost Parameters %s" % parameters

    cost_model = set_cost_model(args.cost_function, parameters=parameters)
    print "\nCost Model: %s" % cost_model.__class__.__name__

    #### STUDENT CLASSIFIER
    clf = linear_model.LogisticRegression(penalty="l1", C=1)
    print "\nStudent Classifier: %s" % clf

    #### EXPERT CLASSIFIER
    exp_clf = linear_model.LogisticRegression(penalty='l1', C=.3)
    exp_clf.fit(data.test.bow, data.test.target)
    expert = baseexpert.NeutralityExpert(exp_clf, threshold=args.neutral_threshold,
                                         cost_function=cost_model.cost_function)
    print "\nExpert: %s " % expert

    #### ACTIVE LEARNING SETTINGS
    step_size = args.step_size
    bootstrap_size = args.bootstrap
    evaluation_points = 200

    print("\nExperiment: step={0}, BT={1}, plot points={2}, fixk:{3}, minsize:{4}".format(step_size, bootstrap_size,
                                                                                          evaluation_points, args.fixk,
                                                                                          min_size))
    print("Cheating experiment - use full uncertainty query k words")

    t0 = time.time()

    ### experiment starts
    tx = []
    tac = []
    tau = []
    for t in range(args.trials):
        trial_accu = []
        trial_aucs = []
        trial_x_axis = []

        print "*" * 60
        print "Trial: %s" % t

        student = randomsampling.UncertaintyLearner(model=clf, accuracy_model=None, budget=args.budget, seed=t)
        print "\nStudent: %s " % student

        train_indices = []
        train_x = []
        train_y = []

        pool = Bunch()
        pool.data = data.train.bow.tocsr()   # full words, for training
        pool.fixk = data.train.bowk.tocsr()  # k words BOW for querying
        pool.target = data.train.target
        pool.predicted = []
        pool.kwords = np.array(data.train.kwords)  # k words
        pool.remaining = set(range(pool.data.shape[0]))  # indices of the pool

        bootstrapped = False
        current_cost = 0
        iteration = 0
        while 0 < student.budget and len(pool.remaining) > step_size and iteration <= args.maxiter:
            if not bootstrapped:
#......... the rest of this code is omitted here .........
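The pool Bunch above is just an index-based view of the training data; judging by the loop condition, the elided body bootstraps the student and then repeatedly queries step_size documents, removing them from pool.remaining as budget is spent. A hypothetical sketch of one such query step (query_fn and oracle_label are made-up stand-ins, not names from the original):

def query_step(pool, step_size, query_fn, oracle_label):
    chosen = query_fn(pool, step_size)      # hypothetical: pick indices to label
    for i in chosen:
        pool.remaining.remove(i)            # queried items leave the pool
    x = pool.data[list(chosen)]             # CSR rows of the queried documents
    y = [oracle_label(i) for i in chosen]   # hypothetical oracle lookup
    return x, y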
Example 3: main
# Required imports: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import data [as alias]
def main():
    print args
    print

    accuracies = defaultdict(lambda: [])
    ora_accu = defaultdict(lambda: [])
    oracle_accuracies = []
    ora_cm = defaultdict(lambda: [])
    lbl_dit = defaultdict(lambda: [])
    aucs = defaultdict(lambda: [])
    x_axis = defaultdict(lambda: [])

    vct = TfidfVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=False, ngram_range=(1, 1),
                          token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer())

    print("Start loading ...")
    # data fields: data, bow, file_names, target_names, target

    ########## NEWS GROUPS ###############
    # easy to hard. see "Less is More" paper: http://axon.cs.byu.edu/~martinez/classes/678/Presentations/Clawson.pdf
    categories = [['alt.atheism', 'talk.religion.misc'],
                  ['comp.graphics', 'comp.windows.x'],
                  ['comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware'],
                  ['rec.sport.baseball', 'sci.crypt']]

    min_size = 10
    args.fixk = None

    data, vct = load_from_file(args.train, [categories[3]], args.fixk, min_size, vct, raw=True)

    print("Data %s" % args.train)
    print("Data size %s" % len(data.train.data))

    parameters = experiment_utils.parse_parameters_mat(args.cost_model)
    print "Cost Parameters %s" % parameters

    cost_model = experiment_utils.set_cost_model(args.cost_function, parameters=parameters)
    print "\nCost Model: %s" % cost_model.__class__.__name__

    ### SENTENCE TRANSFORMATION
    if args.train == "twitter":
        sent_detector = TwitterSentenceTokenizer()
    else:
        sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')

    ## replace <br> with "." so it is recognized as end of sentence
    data.train.data = experiment_utils.clean_html(data.train.data)
    data.test.data = experiment_utils.clean_html(data.test.data)

    print("Train:{}, Test:{}, {}".format(len(data.train.data), len(data.test.data), data.test.target.shape[0]))

    ## Get the features of the sentence dataset
    ## create splits of data: pool, test, oracle, sentences
    expert_data = Bunch()

    if not args.fulloracle:
        train_test_data = Bunch()

        expert_data.sentence, train_test_data.pool = split_data(data.train)
        expert_data.oracle, train_test_data.test = split_data(data.test)

        data.train.data = train_test_data.pool.train.data
        data.train.target = train_test_data.pool.train.target

        data.test.data = train_test_data.test.train.data
        data.test.target = train_test_data.test.train.target

    ## convert document to matrix
    data.train.bow = vct.fit_transform(data.train.data)
    data.test.bow = vct.transform(data.test.data)

    #### EXPERT CLASSIFIER: ORACLE
    print("Training Oracle expert")

    exp_clf = experiment_utils.set_classifier(args.classifier, parameter=args.expert_penalty)

    if not args.fulloracle:
        print "Training expert documents:%s" % len(expert_data.oracle.train.data)
        labels, sent_train = experiment_utils.split_data_sentences(expert_data.oracle.train, sent_detector, vct, limit=args.limit)
        expert_data.oracle.train.data = sent_train
        expert_data.oracle.train.target = np.array(labels)
        expert_data.oracle.train.bow = vct.transform(expert_data.oracle.train.data)
        exp_clf.fit(expert_data.oracle.train.bow, expert_data.oracle.train.target)
    else:
        # expert_data.data = np.concatenate((data.train.data, data.test.data))
        # expert_data.target = np.concatenate((data.train.target, data.test.target))
        expert_data.data = data.train.data
        expert_data.target = data.train.target
        expert_data.target_names = data.train.target_names

        labels, sent_train = experiment_utils.split_data_sentences(expert_data, sent_detector, vct, limit=args.limit)

        expert_data.bow = vct.transform(sent_train)
        expert_data.target = labels
        expert_data.data = sent_train
        exp_clf.fit(expert_data.bow, expert_data.target)
#......... the rest of this code is omitted here .........
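A detail worth copying from this example: the vectorizer is fit only once, on the training pool (vct.fit_transform), and then merely applied to the test split and the oracle's sentences (vct.transform), so every split shares one fixed vocabulary and no test data leaks into the features. A tiny self-contained sketch of the pattern (toy texts, not the newsgroups data):

from sklearn.feature_extraction.text import TfidfVectorizer

train_texts = ["the cat sat", "the dog sat", "the cat ran"]
test_texts = ["the bird sat"]

vct = TfidfVectorizer()
X_train = vct.fit_transform(train_texts)  # learns the vocabulary
X_test = vct.transform(test_texts)        # reuses it; unseen words are ignored
print(X_train.shape, X_test.shape)        # same number of columns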
Example 4: main
# Required imports: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import data [as alias]
def main():
    accuracies = defaultdict(lambda: [])
    aucs = defaultdict(lambda: [])
    x_axis = defaultdict(lambda: [])

    vct = CountVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=True, ngram_range=(1, 3),
                          token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer())
    vct_analizer = vct.build_tokenizer()

    print("Start loading ...")
    # data fields: data, bow, file_names, target_names, target

    ########## NEWS GROUPS ###############
    # easy to hard. see "Less is More" paper: http://axon.cs.byu.edu/~martinez/classes/678/Presentations/Clawson.pdf
    categories = [['alt.atheism', 'talk.religion.misc'],
                  ['comp.graphics', 'comp.windows.x'],
                  ['comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware'],
                  ['rec.sport.baseball', 'sci.crypt']]

    min_size = max(100, args.fixk)

    if args.fixk < 0:
        args.fixk = None

    fixk_saved = "{0}{1}.p".format(args.train, args.fixk)

    try:
        print "Loading existing file... %s " % args.train
        fixk_file = open(fixk_saved, "rb")
        data = pickle.load(fixk_file)
        fixk_file.close()

        vectorizer = open("{0}vectorizer.p".format(args.train), "rb")
        vct = pickle.load(vectorizer)
        vectorizer.close()
    except (IOError, ValueError):
        print "Loading from scratch..."
        data = load_dataset(args.train, args.fixk, categories[0], vct, min_size, percent=.5)

        fixk_file = open(fixk_saved, "wb")
        pickle.dump(data, fixk_file)
        fixk_file.close()

        vectorizer = open("{0}vectorizer.p".format(args.train), "wb")
        pickle.dump(vct, vectorizer)
        vectorizer.close()

    # data = load_dataset(args.train, args.fixk, categories[0], vct, min_size)

    print("Data %s" % args.train)
    print("Data size %s" % len(data.train.data))

    parameters = parse_parameters_mat(args.cost_model)
    print "Cost Parameters %s" % parameters

    cost_model = set_cost_model(args.cost_function, parameters=parameters)
    print "\nCost Model: %s" % cost_model.__class__.__name__

    #### STUDENT CLASSIFIER
    clf = linear_model.LogisticRegression(penalty="l1", C=1)
    # clf = set_classifier(args.classifier)
    print "\nStudent Classifier: %s" % clf

    #### EXPERT CLASSIFIER
    exp_clf = linear_model.LogisticRegression(penalty='l1', C=args.expert_penalty)
    exp_clf.fit(data.test.bow, data.test.target)
    expert = baseexpert.NeutralityExpert(exp_clf, threshold=args.neutral_threshold,
                                         cost_function=cost_model.cost_function)
    print "\nExpert: %s " % expert

    #### ACTIVE LEARNING SETTINGS
    step_size = args.step_size
    bootstrap_size = args.bootstrap
    evaluation_points = 200

    print("\nExperiment: step={0}, BT={1}, plot points={2}, fixk:{3}, minsize:{4}".format(step_size, bootstrap_size,
                                                                                          evaluation_points, args.fixk,
                                                                                          min_size))
    print("Anytime active learning experiment - use objective function to pick data")

    t0 = time.time()
    tac = []
    tau = []

    ### experiment starts
    for t in range(args.trials):
        trial_accu = []
        trial_aucs = []

        print "*" * 60
        print "Trial: %s" % t

        if args.student in "anyunc":
            student = randomsampling.AnytimeLearner(model=clf, accuracy_model=None, budget=args.budget, seed=t, vcn=vct,
                                                    subpool=250, cost_model=cost_model)
        elif args.student in "lambda":
            student = randomsampling.AnytimeLearnerDiff(model=clf, accuracy_model=None, budget=args.budget, seed=t, vcn=vct,
                                                        subpool=250, cost_model=cost_model, lambda_value=args.lambda_value)
        elif args.student in "anyzero":
            student = randomsampling.AnytimeLearnerZeroUtility(model=clf, accuracy_model=None, budget=args.budget, seed=t, vcn=vct,
                                                               subpool=250, cost_model=cost_model)
#......... the rest of this code is omitted here .........
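The try/except block at the top of this example is a load-or-build cache: unpickle the prepared dataset and vectorizer if the files exist, otherwise build and pickle them. A generic sketch of the same pattern, with placeholder names (load_or_build, cache_path, build) and with with-blocks handling the file closing:

import pickle

def load_or_build(cache_path, build):
    # build is a zero-argument callable that produces the object to cache
    try:
        with open(cache_path, "rb") as fh:
            return pickle.load(fh)
    except (IOError, ValueError):
        obj = build()
        with open(cache_path, "wb") as fh:
            pickle.dump(obj, fh)
        return obj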
Example 5: main
# Required imports: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import data [as alias]
def main():
    accuracies = defaultdict(lambda: [])
    aucs = defaultdict(lambda: [])
    x_axis = defaultdict(lambda: [])

    vct = CountVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=True, ngram_range=(1, 1),
                          token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer())
    vct_analizer = vct.build_tokenizer()

    print("Start loading ...")
    # data fields: data, bow, file_names, target_names, target

    ########## NEWS GROUPS ###############
    # easy to hard. see "Less is More" paper: http://axon.cs.byu.edu/~martinez/classes/678/Presentations/Clawson.pdf
    categories = [['alt.atheism', 'talk.religion.misc'],
                  ['comp.graphics', 'comp.windows.x'],
                  ['comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware'],
                  ['rec.sport.baseball', 'sci.crypt']]

    min_size = max(50, args.fixk)

    if "imdb" in args.train:
        ########## IMDB MOVIE REVIEWS ###########
        data = load_imdb(args.train, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
                         fix_k=args.fixk)  # should bring data as is
    elif "aviation" in args.train:
        raise Exception("We are not ready for that data yet")
    elif "20news" in args.train:
        ########## 20 NEWS GROUPS ##########
        data = load_20newsgroups(categories=categories[0], vectorizer=vct, min_size=min_size,
                                 fix_k=args.fixk)  # for testing purposes
    elif "dummy" in args.train:
        ########## DUMMY DATA ##########
        data = load_dummy("C:/Users/mramire8/Documents/code/python/data/dummy", shuffle=True,
                          rnd=2356, vct=vct, min_size=0, fix_k=args.fixk)
    else:
        raise Exception("We do not know that dataset")

    print("Data %s" % args.train)
    print("Data size %s" % len(data.train.data))
    # print(data.train.data[0])

    #### COST MODEL
    parameters = parse_parameters(args.cost_model)
    print "Cost Parameters %s" % parameters

    cost_model = set_cost_model(parameters)
    print "\nCost Model: %s" % cost_model.__class__.__name__

    #### ACCURACY MODEL
    # try:
    #     accu_parameters = parse_parameters(args.accu_model)
    # except ValueError:
    #     print("Error: Accuracy parameters didn't work")
    accu_parameters = parse_parameters_mat(args.accu_model)
    print "Accuracy Parameters %s" % accu_parameters

    # if "fixed" in args.accu_function:
    #     accuracy_model = base_models.FixedAccuracyModel(accuracy_value=.7)
    # elif "log" in args.accu_function:
    #     accuracy_model = base_models.LogAccuracyModel(model=parameters)
    # elif "linear" in args.accu_function:
    #     accuracy_model = base_models.LRAccuracyModel(model=parameters)
    # else:
    #     raise Exception("We need a defined cost function options [fixed|log|linear]")
    #
    # print "\nAccuracy Model: %s " % accuracy_model

    #### CLASSIFIER
    #### Informed priors
    # feature_counts = np.ones(x_train.shape[0]) * x_train
    # feature_frequencies = feature_counts / np.sum(feature_counts)
    # alpha = feature_frequencies
    alpha = 1
    clf = MultinomialNB(alpha=alpha)
    print "\nClassifier: %s" % clf

    #### EXPERT MODEL
    # expert = baseexpert.BaseExpert()
    if "fixed" in args.expert:
        expert = baseexpert.FixedAccuracyExpert(accuracy_value=accu_parameters[0],
                                                cost_function=cost_model.cost_function)  # average value of accuracy of the experts
    elif "true" in args.expert:
        expert = baseexpert.TrueOracleExpert(cost_function=cost_model.cost_function)
    elif "linear" in args.expert:
        # expert = baseexpert.LRFunctionExpert(model=[0.0019, 0.6363], cost_function=cost_model.cost_function)
        raise Exception("We do not know linear yet!!")
    elif "log" in args.expert:
        expert = baseexpert.LogFunctionExpert(model=accu_parameters, cost_function=cost_model.cost_function)
    elif "direct" in args.expert:
        expert = baseexpert.LookUpExpert(accuracy_value=accu_parameters, cost_function=cost_model.cost_function)
    else:
        raise Exception("We need a defined cost function options [fixed|log|linear]")
    # expert = baseexpert.TrueOracleExpert(cost_function=cost_model.cost_function)
    print "\nExpert: %s " % expert

    #### ACTIVE LEARNING SETTINGS
#......... the rest of this code is omitted here .........
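The commented-out "informed priors" lines above sketch deriving MultinomialNB's smoothing from corpus term frequencies instead of the flat alpha = 1 actually used. A hedged reconstruction of that idea on toy data (recent scikit-learn releases accept an array-like alpha of shape (n_features,); the x_train matrix here is a made-up stand-in for the real BOW):

import numpy as np
from sklearn.naive_bayes import MultinomialNB

x_train = np.array([[2, 0, 1],   # hypothetical (n_docs, n_terms) count matrix
                    [0, 1, 1],
                    [1, 1, 0]])
y_train = np.array([0, 1, 0])

feature_counts = x_train.sum(axis=0)            # per-term totals
alpha = feature_counts / feature_counts.sum()   # relative frequencies as priors
clf = MultinomialNB(alpha=alpha).fit(x_train, y_train)
print(clf.predict(x_train))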
Example 6: load_mask_images
# Required imports: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import data [as alias]
import gzip
import pickle

import numpy as np
from skimage import io
from sklearn.datasets.base import Bunch

from dip.load_data import load_image_files, load_mask_images
from dip.mask import bounding_rect_of_mask

datasets = load_mask_images()

data = []
for f, mask in zip(
        datasets.filenames,
        load_image_files(datasets.filenames),
):
    # rect: (min_x, min_y, max_x, max_y)
    rect = bounding_rect_of_mask(mask, negative=True)
    data.append(list(rect))
    print('{0}: {1}'.format(f, rect))

bunch = Bunch(name='mask rects')
bunch.data = np.array(data)
bunch.filenames = datasets.filenames
bunch.target = datasets.target
bunch.target_names = datasets.target_names
bunch.description = 'mask rects: (min_x, min_y, max_x, max_y)'

with gzip.open('rects.pkl.gz', 'wb') as f:
    pickle.dump(bunch, f)
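Reading the archive back is symmetric to the write; a small sketch using the same rects.pkl.gz produced above:

import gzip
import pickle

with gzip.open('rects.pkl.gz', 'rb') as f:
    bunch = pickle.load(f)

print(bunch.description)
print(bunch.data.shape)  # one (min_x, min_y, max_x, max_y) row per mask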
Example 7: main
# Required imports: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import data [as alias]
def main():
    accuracies = defaultdict(lambda: [])
    aucs = defaultdict(lambda: [])
    x_axis = defaultdict(lambda: [])

    vct = CountVectorizer(encoding='latin-1', min_df=5, max_df=1.0, binary=True, ngram_range=(1, 3),
                          token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer())
    vct_analizer = vct.build_tokenizer()

    print("Start loading ...")
    # data fields: data, bow, file_names, target_names, target

    ########## NEWS GROUPS ###############
    # easy to hard. see "Less is More" paper: http://axon.cs.byu.edu/~martinez/classes/678/Presentations/Clawson.pdf
    categories = [['alt.atheism', 'talk.religion.misc'],
                  ['comp.graphics', 'comp.windows.x'],
                  ['comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware'],
                  ['rec.sport.baseball', 'sci.crypt']]

    min_size = max(10, args.fixk)

    if args.fixk < 0:
        args.fixk = None

    # data = load_dataset(args.train, args.fixk, categories[0], vct, min_size, percent=.5)
    # fixk_saved = "{0}{1}.p".format(args.train, args.fixk)

    data, vct = load_from_file(args.train, categories, args.fixk, min_size, vct)

    print("Data %s" % args.train)
    print("Data size %s" % len(data.train.data))

    #### COST MODEL
    parameters = parse_parameters_mat(args.cost_model)
    print "Cost Parameters %s" % parameters

    cost_model = set_cost_model(args.cost_function, parameters=parameters)
    print "\nCost Model: %s" % cost_model.__class__.__name__

    #### ACCURACY MODEL
    accu_parameters = parse_parameters_mat(args.accu_model)

    #### CLASSIFIER
    clf = set_classifier(args.classifier)
    print "\nClassifier: %s" % clf

    #### EXPERT MODEL
    if "fixed" in args.expert:
        expert = baseexpert.FixedAccuracyExpert(accuracy_value=accu_parameters[0],
                                                cost_function=cost_model.cost_function)  # average value of accuracy of the experts
    elif "true" in args.expert:
        expert = baseexpert.TrueOracleExpert(cost_function=cost_model.cost_function)
    elif "linear" in args.expert:
        # expert = baseexpert.LRFunctionExpert(model=[0.0019, 0.6363], cost_function=cost_model.cost_function)
        raise Exception("We do not know linear yet!!")
    elif "log" in args.expert:
        expert = baseexpert.LogFunctionExpert(model=accu_parameters, cost_function=cost_model.cost_function)
    elif "direct" in args.expert:
        expert = baseexpert.LookUpExpert(accuracy_value=accu_parameters, cost_function=cost_model.cost_function)
    elif "neutral" in args.expert:
        exp_clf = LogisticRegression(penalty='l1', C=1)
        exp_clf.fit(data.test.bow, data.test.target)
        expert = baseexpert.NeutralityExpert(exp_clf, threshold=args.neutral_threshold,
                                             cost_function=cost_model.cost_function)
    else:
        raise Exception("We need a defined cost function options [fixed|log|linear]")

    exp_clf = LogisticRegression(penalty='l1', C=args.expert_penalty)
    exp_clf.fit(data.test.bow, data.test.target)
    print "\nExpert: %s " % expert

    coef = exp_clf.coef_[0]
    # print_features(coef, vct.get_feature_names())

    #### ACTIVE LEARNING SETTINGS
    step_size = args.step_size
    bootstrap_size = args.bootstrap
    evaluation_points = 200

    print("\nExperiment: step={0}, BT={1}, plot points={2}, fixk:{3}, minsize:{4}".format(step_size, bootstrap_size,
                                                                                          evaluation_points, args.fixk,
                                                                                          50))
    t0 = time.time()
    tac = []
    tau = []

    ### experiment starts
    for t in range(args.trials):
        trial_accu = []
        trial_aucs = []

        print "*" * 60
        print "Trial: %s" % t

        if args.student in "unc":
            student = randomsampling.UncertaintyLearner(model=clf, accuracy_model=None, budget=args.budget, seed=t,
                                                        subpool=250)
        else:
            student = randomsampling.RandomSamplingLearner(model=clf, accuracy_model=None, budget=args.budget, seed=t)
        print "\nStudent: %s " % student
#......... the rest of this code is omitted here .........
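One quirk to be aware of when reusing this code: conditions like args.student in "unc" test whether args.student is a substring of "unc", not equality, so "un", "c", and even the empty string select the uncertainty learner. The same reversed test appears in Example 4. A quick demonstration:

print("" in "unc")     # True - the empty string is a substring of anything
print("un" in "unc")   # True - partial names match too
print("unc" == "unc")  # True - an exact comparison is the safer test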