This article collects typical usage examples of the numpy.vsplit method in Python. If you are wondering what numpy.vsplit does, how to call it, or want to see it used in real code, the curated examples below should help. You can also explore other methods in the numpy module.
Below are 15 code examples of the numpy.vsplit method, sorted by popularity by default.
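As a quick primer before the examples: np.vsplit splits an array along its first (row) axis, either into a given number of equal parts or at explicit row indices. A minimal sketch:

import numpy as np

a = np.arange(12).reshape(4, 3)
top, bottom = np.vsplit(a, 2)                 # two equal halves, 2 rows each
print(top.shape, bottom.shape)                # (2, 3) (2, 3)
head, middle, tail = np.vsplit(a, [1, 3])     # split before rows 1 and 3
print(head.shape, middle.shape, tail.shape)   # (1, 3) (2, 3) (1, 3)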
Example 1: MAXPooling
# Required module: import numpy [as alias]
# Or: from numpy import vsplit [as alias]
def MAXPooling(Array, activation=1, ksize=2):
    assert len(Array) % ksize == 0
    # Integer division: np.vsplit/np.hsplit require an int section count
    V2list = np.vsplit(Array, len(Array) // ksize)
    VerticalElements = list()
    HorizontalElements = list()
    for x in V2list:
        H2list = np.hsplit(x, len(x[0]) // ksize)
        HorizontalElements.clear()
        for y in H2list:
            # y should be a ksize-by-ksize square
            HorizontalElements.append(y.max())
        VerticalElements.append(np.array(HorizontalElements))
    return np.array(np.array(VerticalElements) / activation, dtype=int)
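A quick usage sketch for the function above (assuming numpy is imported as np, per the header comment), pooling a 4x4 array with the default 2x2 window:

import numpy as np

feature_map = np.array([[1, 2, 5, 6],
                        [3, 4, 7, 8],
                        [9, 8, 3, 2],
                        [7, 6, 1, 0]])
print(MAXPooling(feature_map))
# [[4 8]
#  [9 3]]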
Example 2: random_colors
# Required module: import numpy [as alias]
# Or: from numpy import vsplit [as alias]
def random_colors(number_ofcolors):
    """
    Generate random RGB tuples.

    Parameters
    ----------
    number_ofcolors : int
        Number of tuples to generate

    Returns
    -------
    colors : list of tuples of floats
        List of ``len(number_ofcolors)``, the requested random colors
    """
    color_tmp = np.random.rand(number_ofcolors, 3)
    color_tmp = np.vsplit(color_tmp, number_ofcolors)
    colors = []
    for c in color_tmp:
        colors.append(c[0])
    return colors
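Calling it is straightforward; each returned element is a length-3 float array with values in [0, 1):

import numpy as np

colors = random_colors(4)
print(len(colors))       # 4
print(colors[0].shape)   # (3,)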
Example 3: bbox_overlaps
# Required module: import numpy [as alias]
# Or: from numpy import vsplit [as alias]
def bbox_overlaps(bboxes, ref_bboxes):
    """
    ref_bboxes: N x 4;
    bboxes: K x 4
    return: K x N
    """
    refx1, refy1, refx2, refy2 = np.vsplit(np.transpose(ref_bboxes), 4)
    x1, y1, x2, y2 = np.hsplit(bboxes, 4)
    minx = np.maximum(refx1, x1)
    miny = np.maximum(refy1, y1)
    maxx = np.minimum(refx2, x2)
    maxy = np.minimum(refy2, y2)
    # Clamp widths/heights at zero so disjoint boxes get zero intersection
    inter_area = np.maximum(maxx - minx + 1, 0) * np.maximum(maxy - miny + 1, 0)
    ref_area = (refx2 - refx1 + 1) * (refy2 - refy1 + 1)
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    iou = inter_area / (ref_area + area - inter_area)
    return iou
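A minimal sketch of calling it, with boxes as (x1, y1, x2, y2) rows (assuming numpy is imported as np):

import numpy as np

bboxes = np.array([[0, 0, 9, 9],
                   [10, 10, 19, 19]])      # K = 2 boxes
ref_bboxes = np.array([[0, 0, 9, 9]])      # N = 1 reference box
print(bbox_overlaps(bboxes, ref_bboxes))   # K x N matrix: [[1.], [0.]]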
Example 4: rev
# Required module: import numpy [as alias]
# Or: from numpy import vsplit [as alias]
def rev(self, lng, lat, z=None, _type=np.int32):
    if z is None:
        z = self._default_z
    if all(isinstance(var, (int, float, tuple)) for var in [lng, lat]):
        lng, lat = (np.array([lng]), np.array([lat]))
    if not all(isinstance(var, np.ndarray) for var in [lng, lat]):
        raise ValueError("lng, lat inputs must be of type int, float, tuple or numpy.ndarray")
    if not isinstance(z, np.ndarray):
        z = np.zeros_like(lng) + z
    coord = np.dstack([lng, lat, z])
    offset, scale = np.vsplit(self._offscl, 2)
    normed = coord * scale + offset
    X = self._rpc(normed)
    result = np.rollaxis(np.inner(self._A, X) / np.inner(self._B, X), 0, 3)
    rev_offset, rev_scale = np.vsplit(self._px_offscl_rev, 2)
    # needs to return x/y
    return np.rint(np.rollaxis(result * rev_scale + rev_offset, 2)).squeeze().astype(_type)[::-1]
Example 5: split_train_dev_test
# Required module: import numpy [as alias]
# Or: from numpy import vsplit [as alias]
def split_train_dev_test(X, y, train_per, dev_per, test_per):
    if train_per + dev_per + test_per > 1:
        print("Train Dev Test split should sum to one")
        return
    dim = y.shape[0]
    split1 = int(dim * train_per)
    if dev_per == 0:
        train_y, test_y = np.vsplit(y, [split1])
        dev_y = np.array([])
        train_X = X[0:split1, :]
        dev_X = np.array([])
        test_X = X[split1:, :]
    else:
        split2 = int(dim * (train_per + dev_per))
        print(split2)
        train_y, dev_y, test_y = np.vsplit(y, (split1, split2))
        train_X = X[0:split1, :]
        dev_X = X[split1:split2, :]
        test_X = X[split2:, :]
    return train_y, dev_y, test_y, train_X, dev_X, test_X
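For example, an 80/10/10 split of ten samples; note that y must be 2-D (e.g. a column vector), since np.vsplit requires at least two dimensions:

import numpy as np

X = np.arange(20).reshape(10, 2)
y = np.arange(10).reshape(10, 1)
train_y, dev_y, test_y, train_X, dev_X, test_X = split_train_dev_test(X, y, 0.8, 0.1, 0.1)
print(train_X.shape, dev_X.shape, test_X.shape)  # (8, 2) (1, 2) (1, 2)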
Example 6: _build_model
# Required module: import numpy [as alias]
# Or: from numpy import vsplit [as alias]
def _build_model(self, X, Y):
    # save mean and std of data for normalization
    self.x_std = np.std(X, axis=0)
    self.x_mean = np.mean(X, axis=0)
    self.y_mean = np.mean(Y, axis=0)  # was np.std in the original snippet; the mean is clearly intended here
    self.y_std = np.std(Y, axis=0)
    self.n_train_points = X.shape[0]
    # lazy learner - just store training data
    self.X_train = self._normalize_x(X)
    self.Y_train = Y
    # prepare Gaussians centered in the Y points
    self.locs_array = np.vsplit(Y, self.n_train_points)
    self.log_kernel = multivariate_normal(mean=np.ones(self.ndim_y)).logpdf
    # select / properly initialize bandwidth and epsilon
    if isinstance(self.bandwidth, (int, float)):
        self.bandwidth = self.y_std * self.bandwidth
    if self.param_selection == 'normal_reference':
        self.bandwidth = self._normal_reference()
    elif self.param_selection == 'cv_ml':
        self.bandwidth, self.epsilon = self._cv_ml()
Example 7: test_debug
# Required module: import numpy [as alias]
# Or: from numpy import vsplit [as alias]
def test_debug(self):
    image = imageio.imread("./temp/dump.png")
    grid_n = 6
    img_size = image.shape[1] // grid_n
    img_ch = image.shape[-1]
    images = np.vsplit(image, grid_n)
    images = [np.hsplit(i, grid_n) for i in images]
    images = np.reshape(np.array(images), [grid_n * grid_n, img_size, img_size, img_ch])
    with tf.Graph().as_default():
        with tf.Session() as sess:
            v_images_placeholder = tf.placeholder(dtype=tf.float32)
            v_images = tf.contrib.gan.eval.preprocess_image(v_images_placeholder)
            v_logits = tf.contrib.gan.eval.run_inception(v_images)
            v_score = tf.contrib.gan.eval.classifier_score_from_logits(v_logits)
            score, logits = sess.run([v_score, v_logits], feed_dict={v_images_placeholder: images})
            imageio.imwrite("./temp/inception_logits.png", logits)
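The vsplit/hsplit combination above is a general way to turn a tiled grid image back into a batch of tiles; a standalone sketch with a synthetic array:

import numpy as np

grid_n = 6
img_size, img_ch = 8, 3
image = np.random.rand(grid_n * img_size, grid_n * img_size, img_ch)
tiles = [np.hsplit(r, grid_n) for r in np.vsplit(image, grid_n)]
batch = np.reshape(np.array(tiles), [grid_n * grid_n, img_size, img_size, img_ch])
print(batch.shape)  # (36, 8, 8, 3)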
Example 8: load_digits_and_labels
# Required module: import numpy [as alias]
# Or: from numpy import vsplit [as alias]
def load_digits_and_labels(big_image):
    """Returns all the digits from the 'big' image and creates the corresponding labels for each image"""
    # Load the 'big' image containing all the digits:
    digits_img = cv2.imread(big_image, 0)
    # Get all the digit images from the 'big' image:
    number_rows = digits_img.shape[1] // SIZE_IMAGE  # integer division: np.hsplit/np.vsplit need ints
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)
    digits = []
    for row in rows:
        row_cells = np.hsplit(row, number_rows)
        for digit in row_cells:
            digits.append(digit)
    digits = np.array(digits)
    # Create the labels for each image:
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels
Developer: PacktPublishing | Project: Mastering-OpenCV-4-with-Python | Source: knn_handwritten_digits_recognition_k_training_testing_preprocessing_hog.py
Example 9: load_digits_and_labels
# Required module: import numpy [as alias]
# Or: from numpy import vsplit [as alias]
def load_digits_and_labels(big_image):
    """Returns all the digits from the 'big' image and creates the corresponding labels for each image"""
    # Load the 'big' image containing all the digits:
    digits_img = cv2.imread(big_image, 0)
    # Get all the digit images from the 'big' image:
    number_rows = digits_img.shape[1] // SIZE_IMAGE  # integer division: np.hsplit/np.vsplit need ints
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)
    digits = []
    for row in rows:
        row_cells = np.hsplit(row, number_rows)
        for digit in row_cells:
            digits.append(digit)
    digits = np.array(digits)
    # Create the labels for each image:
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels
Developer: PacktPublishing | Project: Mastering-OpenCV-4-with-Python | Source: knn_handwritten_digits_recognition_introduction.py
Example 10: trainBlock
# Required module: import numpy [as alias]
# Or: from numpy import vsplit [as alias]
def trainBlock(array, row, col):
    arrayShape = array.shape
    print(arrayShape)
    rowPara = divmod(arrayShape[1], row)  # divmod(a, b) returns the integer quotient and the remainder of a / b
    colPara = divmod(arrayShape[0], col)
    extractArray = array[:colPara[0] * col, :rowPara[0] * row]  # trim the surplus so the array splits evenly
    # print(extractArray.shape)
    hsplitArray = np.hsplit(extractArray, rowPara[0])
    vsplitArray = flatten_lst([np.vsplit(subArray, colPara[0]) for subArray in hsplitArray])
    dataBlock = flatten_lst(vsplitArray)
    print("Number of samples: %s" % (len(dataBlock)))  # the number of blocks equals the number of samples
    # Display one of the samples for inspection
    subShow = dataBlock[-10]
    print(subShow, '\n', subShow.max(), subShow.std())
    fig = plt.figure(figsize=(20, 12))
    ax = fig.add_subplot(111)
    plt.xticks([x for x in range(subShow.shape[0]) if x % 400 == 0])
    plt.yticks([y for y in range(subShow.shape[1]) if y % 200 == 0])
    ax.imshow(subShow)
    dataBlockStack = np.append(dataBlock[:-1], [dataBlock[-1]], axis=0)  # convert the list of blocks into an array
    print(dataBlockStack.shape)
    return dataBlockStack
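flatten_lst is a helper from the source project and is not defined in the snippet. A plausible stand-in that makes the example self-contained (an assumption, not the project's actual code) flattens nested lists while leaving ndarrays intact:

def flatten_lst(lst):
    # Recursively flatten nested lists; ndarrays are treated as leaves,
    # so a second pass over an already-flat list is a no-op copy.
    flat = []
    for item in lst:
        if isinstance(item, list):
            flat.extend(flatten_lst(item))
        else:
            flat.append(item)
    return flat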
Example 11: _merge_floating_point_errors
# Required module: import numpy [as alias]
# Or: from numpy import vsplit [as alias]
def _merge_floating_point_errors(polygons, tol=1e-10):
    stacked_polygons = np.vstack(polygons)
    x = stacked_polygons[:, 0]
    y = stacked_polygons[:, 1]
    polygon_indices = np.cumsum([len(p) for p in polygons])
    xfixed = _merge_nearby_floating_points(x, tol=tol)
    yfixed = _merge_nearby_floating_points(y, tol=tol)
    stacked_polygons_fixed = np.vstack([xfixed, yfixed]).T
    polygons_fixed = np.vsplit(stacked_polygons_fixed, polygon_indices[:-1])
    return polygons_fixed
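The stack-process-split pattern used here is worth noting on its own: np.cumsum over the per-polygon lengths yields the row offsets at which np.vsplit restores the original ragged list. A quick demonstration without the floating-point merge:

import numpy as np

polygons = [np.random.rand(3, 2), np.random.rand(5, 2), np.random.rand(4, 2)]
stacked = np.vstack(polygons)                    # shape (12, 2)
indices = np.cumsum([len(p) for p in polygons])  # [3, 8, 12]
restored = np.vsplit(stacked, indices[:-1])      # split before rows 3 and 8
print([p.shape for p in restored])               # [(3, 2), (5, 2), (4, 2)]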
Example 12: split2d
# Required module: import numpy [as alias]
# Or: from numpy import vsplit [as alias]
def split2d(img, cell_size, flatten=True):
    h, w = img.shape[:2]
    sx, sy = cell_size
    cells = [np.hsplit(row, w // sx) for row in np.vsplit(img, h // sy)]
    cells = np.array(cells)
    if flatten:
        cells = cells.reshape(-1, sy, sx)
    return cells
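For instance, cutting a 20x20 image into 5x5 cells (a sketch with a synthetic array):

import numpy as np

img = np.arange(400).reshape(20, 20)
cells = split2d(img, (5, 5))
print(cells.shape)  # (16, 5, 5)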
Example 13: computeFeaturesForVideoDataset
# Required module: import numpy [as alias]
# Or: from numpy import vsplit [as alias]
def computeFeaturesForVideoDataset(self, dataloader, pickle_path=None):
    """
    Computes Feature Vectors for the video dataset provided via a dataloader object
    :param dataloader: gulpIO Dataloader object which represents a dataset
    :param pickle_path: (optional) if provided the features are pickled to the specified location
    :return: (features, labels) - features as ndarray of shape (n_videos, n_frames, n_descriptors_per_image, n_dim_descriptor) and labels of videos
    """
    assert isinstance(dataloader, DataLoader)
    feature_batch_list = []
    labels = []
    n_batches = len(dataloader)
    for i, (data_batch, label_batch) in enumerate(dataloader):
        assert data_batch.ndim == 5
        n_frames = data_batch.shape[1]
        frames_batch = data_batch.reshape(
            (data_batch.shape[0] * n_frames, data_batch.shape[2], data_batch.shape[3], data_batch.shape[4]))
        frames_batch = frames_batch.astype('float32')
        feature_batch = self.computeFeatures(frames_batch)
        assert feature_batch.ndim == 2
        feature_batch = feature_batch.reshape((data_batch.shape[0], data_batch.shape[1], feature_batch.shape[1]))
        feature_batch_list.append(feature_batch)
        labels.extend(label_batch)
        print("batch %i of %i" % (i, n_batches))
    features = np.concatenate(feature_batch_list, axis=0)
    # reshape features to (n_videos, n_frames, n_descriptors_per_image, n_dim_descriptor)
    features = features.reshape((features.shape[0], features.shape[1], 1, features.shape[2]))
    assert features.shape[0] == len(labels) and features.ndim == 4
    # store as pandas dataframe
    if pickle_path:
        df = pd.DataFrame(data={'labels': labels, 'features': np.vsplit(features, features.shape[0])})
        print('Dumped feature dataframe to', pickle_path)
        df.to_pickle(pickle_path)
    return features, labels
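Note the np.vsplit(features, features.shape[0]) idiom when building the DataFrame: it turns the 4-D feature array into a list of one-video slices, so each DataFrame row holds one video's features. In isolation:

import numpy as np

features = np.random.rand(3, 10, 1, 128)    # (n_videos, n_frames, 1, n_dim)
per_video = np.vsplit(features, features.shape[0])
print(len(per_video), per_video[0].shape)   # 3 (1, 10, 1, 128)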
Example 14: computeFeaturesForVideoDataset
# Required module: import numpy [as alias]
# Or: from numpy import vsplit [as alias]
def computeFeaturesForVideoDataset(self, dataloader, pickle_path=None):
    """
    Computes Feature Vectors for the video dataset provided via a dataloader object
    :param dataloader: gulpIO Dataloader object which represents a dataset
    :param pickle_path: (optional) if provided the features are pickled to the specified location
    :return: (features, labels) - features as ndarray of shape (n_videos, n_frames, n_descriptors_per_image, n_dim_descriptor) and labels of videos
    """
    assert isinstance(dataloader, DataLoader)
    feature_batch_list = []
    labels = []
    n_batches = len(dataloader)
    for i, (data_batch, label_batch) in enumerate(dataloader):
        assert data_batch.ndim == 5
        n_frames = data_batch.shape[1]
        frames_batch = data_batch.reshape(
            (data_batch.shape[0] * n_frames, data_batch.shape[2], data_batch.shape[3], data_batch.shape[4]))
        frames_batch = frames_batch.astype('float32')
        feature_batch = self.computeFeatures(frames_batch)
        assert feature_batch.ndim == 2
        feature_batch = feature_batch.reshape((data_batch.shape[0], data_batch.shape[1], -1, feature_batch.shape[1]))
        feature_batch_list.append(feature_batch)
        labels.extend(label_batch)
        print("batch %i of %i" % (i, n_batches))
    features = np.concatenate(feature_batch_list, axis=0)
    assert features.shape[0] == len(labels) and features.ndim == 4
    if pickle_path:
        df = pd.DataFrame(data={'labels': labels, 'features': np.vsplit(features, features.shape[0])})
        print('Dumped feature dataframe to', pickle_path)
        df.to_pickle(pickle_path)
    return features, labels
Example 15: test_validation_curve_cv_splits_consistency
# Required module: import numpy [as alias]
# Or: from numpy import vsplit [as alias]
def test_validation_curve_cv_splits_consistency():
    n_samples = 100
    n_splits = 5
    X, y = make_classification(n_samples=100, random_state=0)
    scores1 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
                               'C', [0.1, 0.1, 0.2, 0.2],
                               cv=OneTimeSplitter(n_splits=n_splits,
                                                  n_samples=n_samples))
    # The OneTimeSplitter is a non-re-entrant cv splitter. Unless `split`
    # is called for each parameter, the following should produce identical
    # results for param setting 1 and param setting 2, as both have the
    # same C value.
    assert_array_almost_equal(*np.vsplit(np.hstack(scores1)[(0, 2, 1, 3), :],
                                         2))
    scores2 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
                               'C', [0.1, 0.1, 0.2, 0.2],
                               cv=KFold(n_splits=n_splits, shuffle=True))
    # For scores2, compare the 1st and 2nd parameter's scores
    # (Since the C value for the first two param settings is 0.1, they must be
    # consistent unless the train/test folds differ between the param settings)
    assert_array_almost_equal(*np.vsplit(np.hstack(scores2)[(0, 2, 1, 3), :],
                                         2))
    scores3 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
                               'C', [0.1, 0.1, 0.2, 0.2],
                               cv=KFold(n_splits=n_splits))
    # OneTimeSplitter is basically unshuffled KFold(n_splits=5). Sanity check.
    assert_array_almost_equal(np.array(scores3), np.array(scores1))
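The assertion idiom above (reorder the stacked scores so matching parameter settings are adjacent, then np.vsplit into two halves to compare) can be demonstrated with synthetic scores:

import numpy as np

train = np.random.rand(4, 5)   # 4 param settings x 5 folds
train[1] = train[0]            # settings 0 and 1 share a C value
train[3] = train[2]            # settings 2 and 3 likewise
stacked = np.hstack((train, train + 0.1))        # train and test scores side by side
half_a, half_b = np.vsplit(stacked[(0, 2, 1, 3), :], 2)
print(np.allclose(half_a, half_b))               # True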