This article collects typical usage examples of Python's numpy.split function. If you have been wondering exactly how the split function works, how to call it, or what it looks like in real code, the curated examples below should help.
The following shows 15 code examples of the split function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
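Before the examples, a quick sketch of the two calling conventions that appear throughout this page: an integer second argument splits an array into that many equal sections (raising ValueError if the length is not divisible), while a list of indices splits at those offsets.

import numpy as np

a = np.arange(9)
# Integer: three equal sections.
np.split(a, 3)       # [array([0, 1, 2]), array([3, 4, 5]), array([6, 7, 8])]
# Indices: sections are a[:2], a[2:5], a[5:].
np.split(a, [2, 5])  # [array([0, 1]), array([2, 3, 4]), array([5, 6, 7, 8])]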
Example 1: _read_tile
def _read_tile(self, filename):
    with open(filename, "r") as tilefile:
        # this is reversed from the fortran b/c "in" is a reserved word
        self.ni, self.nj, self.nk = np.fromfile(tilefile, dtype="int32",
                                                count=3, sep=" ")
        raw_data = np.genfromtxt(tilefile,
                                 dtype=("int32", "float64", "float64", "float64", "float64"),
                                 names=("idx", "a", "b", "vla", "vlb"))
        # each column stacks the i-, j-, and k-direction values back to back,
        # so split every column at the cumulative offsets [ni, ni + nj]
        splits = [self.ni, self.ni + self.nj]
        self.ii, self.ij, self.ik = np.split(raw_data["idx"], splits)
        self.x1a, self.x2a, self.x3a = np.split(raw_data["a"], splits)
        self.x1b, self.x2b, self.x3b = np.split(raw_data["b"], splits)
        self.vl1a, self.vl2a, self.vl3a = np.split(raw_data["vla"], splits)
        self.vl1b, self.vl2b, self.vl3b = np.split(raw_data["vlb"], splits)
Example 2: test_stratified_batches
def test_stratified_batches():
data = np.array([('a', -1), ('b', 0), ('c', 1), ('d', -1), ('e', -1)],
dtype=[('x', np.str_, 8), ('y', np.int32)])
assert list(data['x']) == ['a', 'b', 'c', 'd', 'e']
assert list(data['y']) == [-1, 0, 1, -1, -1]
batch_generator = training_batches(data, batch_size=3, n_labeled_per_batch=1)
first_ten_batches = list(islice(batch_generator, 10))
labeled_batch_portions = [batch[:1] for batch in first_ten_batches]
unlabeled_batch_portions = [batch[1:] for batch in first_ten_batches]
labeled_epochs = np.split(np.concatenate(labeled_batch_portions), 5)
unlabeled_epochs = np.split(np.concatenate(unlabeled_batch_portions), 4)
assert ([sorted(items['x'].tolist()) for items in labeled_epochs] ==
[['b', 'c']] * 5)
assert ([sorted(items['y'].tolist()) for items in labeled_epochs] ==
[[0, 1]] * 5)
assert ([sorted(items['x'].tolist()) for items in unlabeled_epochs] ==
[['a', 'b', 'c', 'd', 'e']] * 4)
assert ([sorted(items['y'].tolist()) for items in unlabeled_epochs] ==
[[-1, -1, -1, -1, -1]] * 4)
Example 3: drop_samples
def drop_samples(game, prob):
    """Drop samples from a sample game.

    Samples are dropped independently with probability prob.
    """
sample_map = {}
for prof, pays in zip(np.split(game.profiles, game.sample_starts[1:]),
game.sample_payoffs):
num_profiles, _, num_samples = pays.shape
perm = rand.permutation(num_profiles)
prof = prof[perm]
pays = pays[perm]
new_samples, counts = np.unique(
rand.binomial(num_samples, prob, num_profiles), return_counts=True)
splits = counts[:-1].cumsum()
for num, prof_samp, pay_samp in zip(
new_samples, np.split(prof, splits), np.split(pays, splits)):
if num == 0:
continue
            prof_list, pay_list = sample_map.setdefault(num, ([], []))
            prof_list.append(prof_samp)
            pay_list.append(pay_samp[..., :num])
if sample_map:
profiles = np.concatenate(list(itertools.chain.from_iterable(
x[0] for x in sample_map.values())), 0)
sample_payoffs = tuple(np.concatenate(x[1]) for x
in sample_map.values())
else: # No data
profiles = np.empty((0, game.num_role_strats), dtype=int)
sample_payoffs = []
return rsgame.samplegame_copy(game, profiles, sample_payoffs, False)
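The unique/cumsum/split combination above is a compact way to partition an array into consecutive groups of given sizes. A minimal standalone sketch of the same pattern (toy data, not from the game library):

import numpy as np

a = np.arange(10)             # data, already shuffled upstream
counts = np.array([3, 3, 4])  # group sizes, e.g. from np.unique(..., return_counts=True)
groups = np.split(a, counts[:-1].cumsum())
# groups -> [array([0, 1, 2]), array([3, 4, 5]), array([6, 7, 8, 9])]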
Example 4: split_dataset
def split_dataset(dataset, N=4000):
perm = np.random.permutation(len(dataset['target']))
dataset['data'] = dataset['data'][perm]
dataset['target'] = dataset['target'][perm]
x_train, x_test = np.split(dataset['data'], [N])
y_train, y_test = np.split(dataset['target'], [N])
return x_train, y_train, x_test, y_test
Example 5: update_h
def update_h(sigma2, phi, y, mu, psi):
"""Updates the hidden variables using updated parameters.
This is an implementation of the equation:
.. math::
\\hat{h} = (\\sigma^2 I + \\sum_{n=1}^N \\Phi_n^T A^T A \\Phi_n)^{-1} \\sum_{n=1}^N \\Phi_n^T A^T (y_n - A \\mu_n - b)
"""
N = y.shape[0]
K = phi.shape[1]
A = psi.params[:2, :2]
b = psi.translation
partial_0 = 0
for phi_n in np.split(phi, N, axis=0):
partial_0 += phi_n.T @ A.T @ A @ phi_n
partial_1 = sigma2 * np.eye(K) + partial_0
partial_2 = np.zeros((K, 1))
for phi_n, y_n, mu_n in zip(np.split(phi, N, axis=0), y, mu.reshape(-1, 2)):
partial_2 += phi_n.T @ A.T @ (y_n - A @ mu_n - b).reshape(2, -1)
return np.linalg.inv(partial_1) @ partial_2
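A sanity check on the docstring formula, under the natural least-squares reading of these symbols (an assumption; the surrounding code does not spell out the model): if y_n \approx A(\mu_n + \Phi_n h) + b with isotropic noise of variance \sigma^2, then \hat{h} minimizes

\sigma^2 \lVert h \rVert^2 + \sum_{n=1}^N \lVert y_n - A(\mu_n + \Phi_n h) - b \rVert^2,

and setting the gradient to zero gives

\left(\sigma^2 I + \sum_{n=1}^N \Phi_n^T A^T A \Phi_n\right) \hat{h} = \sum_{n=1}^N \Phi_n^T A^T (y_n - A \mu_n - b),

which is exactly the linear system update_h solves. Note also that np.split(phi, N, axis=0) uses the integer form of split, so the row count of phi must be divisible by N.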
Example 6: split_data
def split_data(X, Y, degree):
    Testing_error = []   # testing errors of the 10-fold cross validation
    Training_error = []  # training errors of the 10-fold cross validation
    # np.split requires len(X) to be divisible by 10 (use np.array_split otherwise)
    X_sets = np.split(X, 10)
    Y_sets = np.split(Y, 10)
    for i in range(len(X_sets)):
        X_test = np.vstack(X_sets[i])
        Y_test = np.vstack(Y_sets[i])
        if i < len(X_sets) - 1:
            X_train = np.vstack(X_sets[i+1:])
            Y_train = np.vstack(Y_sets[i+1:])
            # prepend the folds that come before fold i
            while i > 0:
                tempX = np.vstack(X_sets[i-1])
                X_train = np.append(tempX, X_train)
                tempY = np.vstack(Y_sets[i-1])
                Y_train = np.append(tempY, Y_train)
                i = i - 1
        elif i == len(X_sets) - 1:
            # the last fold's training set is simply all earlier folds
            X_train = np.vstack(X_sets[:i])
            Y_train = np.vstack(Y_sets[:i])
        X_train = np.vstack(X_train)
        Y_train = np.vstack(Y_train)
        Z_train, theta, Z_test = polynomial_withCV(X_train, Y_train, degree, X_test)
        Testing_error.append(mse(Z_test, theta, Y_test))
        Training_error.append(mse(Z_train, theta, Y_train))
    return sum(Testing_error), sum(Training_error)
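np.split(X, 10) above raises a ValueError unless len(X) is divisible by 10. When fold sizes may be unequal, np.array_split is the forgiving variant; a minimal sketch:

import numpy as np

a = np.arange(10)
# np.split(a, 3) would raise ValueError: 10 does not divide evenly by 3.
folds = np.array_split(a, 3)
# folds -> [array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]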
Example 7: get_train_data
def get_train_data(self, label_types):
    labeled_images = self.get_labeled_images()
    x_train_all = np.asarray([labeled_image_file.get_image()
                              for labeled_image_file in labeled_images])
    y_train_all = np.asarray([label_to_output(labeled_image_file.get_label(), label_types)
                              for labeled_image_file in labeled_images])
    length = len(labeled_images)
    # shuffle the original data randomly
    indexes = np.random.permutation(length)
    x_train_all_rand = x_train_all[indexes]
    y_train_all_rand = y_train_all[indexes]
    # subtract the mean image
    mean = self.get_mean_image()
    if mean is not None:
        x_train_all_rand -= mean
    # normalize the shuffled copy (fancy indexing above made a copy)
    x_train_all_rand /= 255
    # use one fifth of the data for testing; np.split needs an integer index
    data_size = length * 4 // 5
    x_train, x_test = np.split(x_train_all_rand, [data_size])
    y_train, y_test = np.split(y_train_all_rand, [data_size])
    return x_train, x_test, y_train, y_test
Example 8: split_x
def split_x(x, split_pos):
    # NOTE: does not support multiple sentence tensors
    # handles sequence input, non-sequence input, and no non-sequence input
    if type(x) is not list:
        x = [x]
    if len(x) == 1:
        # sec1, sec2, sec3, ...
        # sent1, sent2, sent5
        x01, x02 = tuple(np.split(x[0], [split_pos]))
        cond_list = [x02 >= 0, x02 < 0]
        offset = x02[0][0]
        choice_list = [x02 - offset, x02]
        x02 = np.select(cond_list, choice_list)
        return ([x01], [x02])
    # doc1, doc2, doc3
    # sec1, sec2, ...
    # sent1, sent2, ...
    x01, x02 = tuple(np.split(x[0], [split_pos]))
    offset = x02[0][0]
    x1, x2 = split_x(x[1:], offset)
    cond_list = [x02 >= 0, x02 < 0]
    choice_list = [x02 - offset, x02]
    x02 = np.select(cond_list, choice_list)
    return ([x01] + x1, [x02] + x2)
Example 9: generate_svm
def generate_svm():
    digits, labels = load_digits(DIGITS_FN)
    print('preprocessing...')
    # shuffle digits
    rand = np.random.RandomState(321)
    shuffle = rand.permutation(len(digits))
    digits, labels = digits[shuffle], labels[shuffle]
    digits2 = list(map(deskew, digits))
    samples = preprocess_hog(digits2)
    train_n = int(0.9 * len(samples))
    cv2.imshow('test set', mosaic(25, digits[train_n:]))
    digits_train, digits_test = np.split(digits2, [train_n])
    samples_train, samples_test = np.split(samples, [train_n])
    labels_train, labels_test = np.split(labels, [train_n])
    print('training SVM...')
    model = SVM(C=2.67, gamma=5.383)
    model.train(samples_train, labels_train)
    vis = evaluate_model(model, digits_test, samples_test, labels_test)
    print('saving SVM as "digits_svm.dat"...')
    cv2.waitKey(0)  # keep the OpenCV windows open until a key is pressed
    return model
Example 10: k_fold_cross_validation_sets
def k_fold_cross_validation_sets(X, y, k, shuffle=True):
if shuffle:
X, y = shuffle_data(X, y)
n_samples = len(y)
left_overs = {}
n_left_overs = (n_samples % k)
if n_left_overs != 0:
left_overs["X"] = X[-n_left_overs:]
left_overs["y"] = y[-n_left_overs:]
X = X[:-n_left_overs]
y = y[:-n_left_overs]
X_split = np.split(X, k)
y_split = np.split(y, k)
sets = []
for i in range(k):
X_test, y_test = X_split[i], y_split[i]
X_train = np.concatenate(X_split[:i] + X_split[i + 1:], axis=0)
y_train = np.concatenate(y_split[:i] + y_split[i + 1:], axis=0)
sets.append([X_train, X_test, y_train, y_test])
    # Add left-over samples to the last set as training samples
    # (np.append returns a new array, so assign the result back)
    if n_left_overs != 0:
        sets[-1][0] = np.append(sets[-1][0], left_overs["X"], axis=0)
        sets[-1][2] = np.append(sets[-1][2], left_overs["y"], axis=0)
return np.array(sets)
Example 11: to_json
def to_json(self):
base = super().to_json()
base['offsets'] = self.payoff_to_json(self._offset)
base['coefs'] = self.payoff_to_json(self._coefs)
lengths = {}
for role, strats, lens in zip(
self.role_names, self.strat_names,
np.split(self._lengths, self.role_starts[1:])):
lengths[role] = {s: self.payoff_to_json(l)
for s, l in zip(strats, lens)}
base['lengths'] = lengths
profs = {}
for role, strats, data in zip(
self.role_names, self.strat_names,
np.split(np.split(self._profiles, self._size_starts[1:]),
self.role_starts[1:])):
profs[role] = {strat: [self.profile_to_json(p) for p in dat]
for strat, dat in zip(strats, data)}
base['profiles'] = profs
alphas = {}
for role, strats, alphs in zip(
self.role_names, self.strat_names,
np.split(np.split(self._alpha, self._size_starts[1:]),
self.role_starts[1:])):
alphas[role] = {s: a.tolist() for s, a in zip(strats, alphs)}
base['alphas'] = alphas
base['type'] = 'rbf.1'
return base
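The nested np.split calls above split a second time over the list returned by the inner split, regrouping per-size blocks by role. A toy sketch of the same two-level idea, with equal-sized blocks for simplicity (ragged blocks relied on object-array coercion in older NumPy versions):

import numpy as np

profiles = np.arange(12).reshape(6, 2)  # six profiles, two columns
size_starts = [2, 4]                    # inner split: blocks of two profiles
role_starts = [2]                       # outer split: first two blocks form role 0

blocks = np.split(profiles, size_starts)           # three (2, 2) blocks
by_role = np.split(np.array(blocks), role_starts)  # regroup the blocks
# by_role[0] has shape (2, 2, 2) (role 0); by_role[1] has shape (1, 2, 2) (role 1)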
Example 12: update_stipples
def update_stipples(self, cells):
""" Updates stipple locations from an image
cells should be an image of the same size as self.img
with pixel values representing which Voronoi cell that
pixel falls into
"""
indices = np.argsort(cells.flat)
_, boundaries = np.unique(cells.flat[indices], return_index=True)
gxs = np.split(self.gx.flat[indices], boundaries)[1:]
gys = np.split(self.gy.flat[indices], boundaries)[1:]
gws = np.split(1 - self.img.flat[indices], boundaries)[1:]
w = self.img.shape[1] / 2.0
h = self.img.shape[0] / 2.0
for i, (gx, gy, gw) in enumerate(zip(gxs, gys, gws)):
weight = np.sum(gw)
if weight > 0:
x = np.sum(gx * gw) / weight
y = np.sum(gy * gw) / weight
self.stipples[i,:] = [(x - w) / w, (y - h) / h]
else:
self.stipples[i,:] = np.random.uniform(-1, 1, size=2)
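The argsort/unique/split combination above is a vectorized group-by: sort pixel positions by cell id, find where each cell's run starts, and split at those boundaries. A standalone sketch with toy data:

import numpy as np

cells = np.array([2, 0, 1, 0, 2, 1, 0])                  # cell id per pixel
values = np.array([10., 20., 30., 40., 50., 60., 70.])   # quantity to group

order = np.argsort(cells)
_, boundaries = np.unique(cells[order], return_index=True)
groups = np.split(values[order], boundaries)[1:]  # [1:] drops the empty piece before index 0
# groups -> [array([20., 40., 70.]), array([30., 60.]), array([10., 50.])]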
Example 13: make_predictions
def make_predictions(net, data, labels, num_classes):
    data = np.require(data, requirements='C')
    labels = np.require(labels, requirements='C')
    preds = np.zeros((data.shape[1], num_classes), dtype=np.single)
    softmax_idx = net.get_layer_idx('probs', check_type='softmax')
    t0 = time.time()
    net.libmodel.startFeatureWriter(
        [data, labels, preds], softmax_idx)
    net.finish_batch()
    print("Predicted %s cases in %.2f seconds." % (
        labels.shape[1], time.time() - t0))
    if net.multiview_test:
        # We have to deal with num_samples * num_views predictions.
        num_views = net.test_data_provider.num_views
        num_samples = labels.shape[1] // num_views
        split_sections = range(
            num_samples, num_samples * num_views, num_samples)
        preds = np.split(preds, split_sections, axis=0)
        labels = np.split(labels, split_sections, axis=1)
        preds = functools.reduce(np.add, preds)  # sum predictions over views
        labels = labels[0]
    return preds, labels
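The split-then-reduce pattern at the end is a concise way to aggregate per-view predictions: split the stacked prediction matrix into equal per-view blocks and sum them elementwise. A minimal sketch, assuming two views of three samples each:

import numpy as np

num_views, num_samples = 2, 3
preds = np.arange(12, dtype=float).reshape(num_views * num_samples, 2)
views = np.split(preds, range(num_samples, num_views * num_samples, num_samples), axis=0)
summed = np.add.reduce(views)  # shape (3, 2): per-sample sums across both views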
Example 14: train
def train(self, trainfile_name):
    train_X, train_Y, num_classes = self.make_data(trainfile_name)
    accuracies = []
    fscores = []
    if self.cv:
        num_points = train_X.shape[0]
        rem = num_points % self.folds
        # np.split needs equal fold sizes, so trim off the remainder first
        X_folds = numpy.split(train_X, self.folds) if rem == 0 else numpy.split(train_X[:-rem], self.folds)
        Y_folds = numpy.split(train_Y, self.folds) if rem == 0 else numpy.split(train_Y[:-rem], self.folds)
        for i in range(self.folds):
            train_folds_X = []
            train_folds_Y = []
            for j in range(self.folds):
                if i != j:
                    train_folds_X.append(X_folds[j])
                    train_folds_Y.append(Y_folds[j])
            train_fold_X = numpy.concatenate(train_folds_X)
            train_fold_Y = numpy.concatenate(train_folds_Y)
            classifier = self.fit_model(train_fold_X, train_fold_Y, num_classes)
            predictions = self.classify(classifier, X_folds[i])
            accuracy, weighted_fscore, _ = self.evaluate(Y_folds[i], predictions)
            accuracies.append(accuracy)
            fscores.append(weighted_fscore)
        accuracies = numpy.asarray(accuracies)
        fscores = numpy.asarray(fscores)
        print("Accuracies:", accuracies, file=sys.stderr)
        print("Average: %0.4f (+/- %0.4f)" % (accuracies.mean(), accuracies.std() * 2), file=sys.stderr)
        print("Fscores:", fscores, file=sys.stderr)
        print("Average: %0.4f (+/- %0.4f)" % (fscores.mean(), fscores.std() * 2), file=sys.stderr)
    self.classifier = self.fit_model(train_X, train_Y, num_classes)
    # dump the model trained on the full data, not the last CV fold's classifier
    pickle.dump(self.classifier, open(self.trained_model_name, "wb"))
    #pickle.dump(tagset, open(self.stored_tagset, "wb"))
    print("Done", file=sys.stderr)
Example 15: conf2yap
def conf2yap(conf_fname, yap_filename):
print("Yap file : ", yap_filename)
positions, radii, meta = clff.read_conf_file(conf_fname)
positions[:, 0] -= float(meta['lx'])/2
positions[:, 1] -= float(meta['ly'])/2
positions[:, 2] -= float(meta['lz'])/2
if 'np_fixed' in meta:
# for conf with fixed particles
split_line = len(positions) - int(meta['np_fixed'])
pos_mobile, pos_fixed = np.split(positions, [split_line])
rad_mobile, rad_fixed = np.split(radii, [split_line])
yap_out = pyp.layer_switch(3)
yap_out = pyp.add_color_switch(yap_out, 3)
yap_out = np.row_stack((yap_out,
particles_yaparray(pos_mobile, rad_mobile)))
yap_out = pyp.add_layer_switch(yap_out, 4)
yap_out = pyp.add_color_switch(yap_out, 4)
yap_out = np.row_stack((yap_out,
particles_yaparray(pos_fixed, rad_fixed)))
else:
yap_out = pyp.layer_switch(3)
yap_out = pyp.add_color_switch(yap_out, 3)
yap_out = np.row_stack((yap_out,
particles_yaparray(positions, radii)))
pyp.savetxt(yap_filename, yap_out)