This article collects typical usage examples of the scipy.concatenate method in Python. If you have been wondering what scipy.concatenate does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples from the scipy module that this method belongs to.
The following section presents 9 code examples of scipy.concatenate, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
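Before the examples, a note on the call itself: in the SciPy versions these snippets target, scipy.concatenate is simply NumPy's concatenate re-exported under the scipy namespace; newer SciPy releases deprecate and drop these top-level NumPy aliases, so numpy.concatenate is the portable spelling. A minimal sketch, assuming an older SciPy that still exposes the alias:

import scipy  # on a recent SciPy, replace scipy.* below with numpy.*

a = scipy.array([[1, 2], [3, 4]])
b = scipy.array([[5, 6], [7, 8]])

print(scipy.concatenate((a, b)))          # join along axis 0 -> shape (4, 2)
print(scipy.concatenate((a, b), axis=1))  # join along axis 1 -> shape (2, 4)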
Example 1: extend
# Required imports: import scipy [as alias]
# Or: from scipy import concatenate [as alias]
def extend(self, extracted_features):
    # This method reads the pkl files in a folder and adds them to the
    # existing data for processing in the TCData class.
    (data, labels, feature_string, width, height, winsize, nbins) = extracted_features
    npixels = width * height
    xlabel = 'Grayscale intensity'
    ylabel = 'Probability'
    xvals = scipy.arange(self.data.shape[0]).reshape(-1, 1)
    self.data = N.concatenate((self.data, data), axis=1)
    self.width = N.append(self.width, width)
    self.height = N.append(self.height, height)
    self.xvals = N.append(self.xvals, xvals)
    self.labels.extend(labels)
    self.img_label_split.extend([len(self.labels)])
    self.data_split.extend([self.data.shape[1]])
Example 2: find_log_ticks
# Required imports: import scipy [as alias]
# Or: from scipy import concatenate [as alias]
def find_log_ticks(start, stop):
    """
    finds tick values for linear axis
    """
    if (start < stop):
        min, max = start, stop
    else:
        min, max = stop, start
    # lists for ticks
    tick_0_list = []
    tick_1_list = []
    tick_2_list = []
    max_decade = math.ceil(math.log10(max))
    min_decade = math.floor(math.log10(min))
    start_ax = None
    stop_ax = None
    for decade in scipy.arange(min_decade, max_decade + 1, 1):
        # for number in scipy.concatenate((scipy.arange(1,2,0.2),scipy.arange(2,3,0.5),scipy.arange(3,10,1))):
        for number in [1, 1.2, 1.4, 1.6, 1.8, 2.0, 2.5, 3, 4, 5, 6, 7, 8, 9]:
            u = number * 10.0 ** decade
            if u >= min and u <= max:
                if start_ax == None:
                    start_ax = number
                stop_ax = number
                if number == 1:
                    tick_0_list.append(u)
                if number in [2, 3, 4, 5, 6, 7, 8, 9]:
                    tick_1_list.append(u)
                if number in [1.2, 1.4, 1.6, 1.8, 2.5]:
                    tick_2_list.append(u)
    # print tick_0_list
    # print tick_1_list
    # print tick_2_list
    return tick_0_list, tick_1_list, tick_2_list, start_ax, stop_ax
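The commented-out loop header above hints at an alternative way to build the tick multipliers: concatenate three differently spaced arange ranges instead of hard-coding the list. A small sketch of that variant, producing the same values:

import scipy

# Reconstruction of the commented-out variant: 1.0..1.8 in steps of 0.2,
# then 2.0 and 2.5, then the integers 3..9, joined into one array.
numbers = scipy.concatenate((scipy.arange(1, 2, 0.2),
                             scipy.arange(2, 3, 0.5),
                             scipy.arange(3, 10, 1)))
print(numbers)  # [1.  1.2 1.4 1.6 1.8 2.  2.5 3.  4.  5.  6.  7.  8.  9.]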
Example 3: _compose
# Required imports: import scipy [as alias]
# Or: from scipy import concatenate [as alias]
def _compose(orig, recon):
    _imgo = []
    _imgr = []
    for i in range(orig.shape[0]):
        _imgo.append(orig[i])
    for i in range(orig.shape[0]):
        _imgr.append(recon[i])
    _imgo = sp.concatenate(_imgo, 1)
    _imgr = sp.concatenate(_imgr, 1)
    _rv = sp.concatenate([_imgo, _imgr], 0)
    _rv = sp.clip(_rv, 0, 1)
    return _rv
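The function lays each batch out side by side along the width axis, then stacks the row of originals above the row of reconstructions. A hypothetical usage sketch (the batch shape is an assumption, with scipy imported as sp as in the example):

# Hypothetical 4-image batches in (batch, height, width, channels) layout.
orig = sp.random.rand(4, 32, 32, 3)
recon = sp.random.rand(4, 32, 32, 3)
grid = _compose(orig, recon)
print(grid.shape)  # (64, 128, 3): originals on the top row, reconstructions below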
Example 4: _compose_multi
# Required imports: import scipy [as alias]
# Or: from scipy import concatenate [as alias]
def _compose_multi(imgs):
    _imgs = []
    for i in range(len(imgs)):
        _imgs.append([])
        for j in range(imgs[i].shape[0]):
            _imgs[i].append(imgs[i][j])
        _imgs[i] = sp.concatenate(_imgs[i], 1)
    _rv = sp.concatenate(_imgs, 0)
    _rv = sp.clip(_rv, 0, 1)
    return _rv
Example 5: si_read_ppm
# Required imports: import scipy [as alias]
# Or: from scipy import concatenate [as alias]
def si_read_ppm(self, rawfilename, filename):
    # This function reads the ppm/jpg file and extracts the features if the
    # features pkl file doesn't exist. It also supports extending the feature
    # vector without recomputing features that were already computed.
    new_feature_string = []
    updated_feature = 0
    data = N.array([], dtype=int)
    if os.path.exists(filename):
        pkl_f = open(filename, 'r')
        (data, labels, feature_string, width, height, winsize, nbins) = pickle.load(pkl_f)
        self.winsize = winsize
        self.nbins = nbins
        new_feature_string = list(feature_string)
        pkl_f.close()
    if not new_feature_string.count('sift'):
        updated_feature = 1
        (sift_features, labels, width, height) = self.extract_sift(rawfilename, self.winsize, self.nbins)
        if data.size:
            data = scipy.concatenate((data.transpose(), sift_features.transpose()), 1).transpose()
        else:
            data = sift_features
        new_feature_string.append('sift')
    if updated_feature:
        outf = open(filename, 'w')
        pickle.dump((data, labels, new_feature_string, width, height, self.winsize, self.nbins), outf)
        outf.close()
        print('Saved data to %s.' % filename)
    return (data, labels, new_feature_string, width, height, self.winsize, self.nbins)
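Because data and sift_features are both stored as (features, samples) matrices, the transpose/concatenate/transpose pattern above appends the new SIFT rows below the existing feature rows; it is equivalent to concatenating along axis 0. A small sketch with hypothetical shapes:

import scipy

# Hypothetical: 10 existing features and 128 SIFT features for the same 50 samples.
data = scipy.zeros((10, 50))
sift_features = scipy.zeros((128, 50))

stacked = scipy.concatenate((data.transpose(), sift_features.transpose()), 1).transpose()
print(stacked.shape)  # (138, 50)
assert (stacked == scipy.concatenate((data, sift_features), 0)).all()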
Example 6: testpoly
# Required imports: import scipy [as alias]
# Or: from scipy import concatenate [as alias]
def testpoly():
    [x, y] = scipy.mgrid[0:10, 0:10]
    #print 'X'
    #print x
    #print 'Y'
    #print y
    u = scipy.zeros((10, 10))
    v = scipy.zeros((10, 10))
    # Random polynomials
    a0 = scipy.random.rand(1)
    a1 = 0.1*(scipy.random.rand(2)-0.5)
    a2 = 0.01*(scipy.random.rand(3)-0.5)
    a = scipy.concatenate((a0, a1))
    a = scipy.concatenate((a, a2))
    a[2] = 0.01*a[2]
    print('A coefficients')
    print(a)
    b0 = scipy.random.rand(1)
    b1 = 0.1*(scipy.random.rand(2)-0.5)
    b2 = 0.01*(scipy.random.rand(3)-0.5)
    b = scipy.concatenate((b0, b1))
    b = scipy.concatenate((b, b2))
    b[1] = 0.01*b[1]
    print('B coefficients')
    print(b)
    for i in range(10):
        for j in range(10):
            u[i, j] = poly(a, x[i, j], y[i, j], 2)  #+ scipy.random.normal(0.0, 0.01)
            v[i, j] = poly(b, x[i, j], y[i, j], 2)  #+ scipy.random.normal(0.0, 0.01)
    #print z
    s1 = polyFit2(u, x, y, 2)
    s2 = polyFit2(v, x, y, 2)
    print('S1', s1)
    print('S2', s2)
    uc = poly(s1, x, y, 2)
    vc = poly(s2, x, y, 2)
    P.figure(1)
    P.clf()
    P.grid(True)
    P.plot(u, v, 'gx')
    P.plot(uc, vc, 'r+')
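Here scipy.concatenate simply glues the constant, linear, and quadratic coefficient blocks into one flat parameter vector for the degree-2 polynomial, and the two chained calls can be written as a single call with three arrays. A sketch of that consolidation (the block sizes follow the example; the term ordering within each block is an assumption, since poly() is not shown):

import scipy

a0 = scipy.random.rand(1)                 # constant term
a1 = 0.1 * (scipy.random.rand(2) - 0.5)   # linear terms
a2 = 0.01 * (scipy.random.rand(3) - 0.5)  # quadratic terms
a = scipy.concatenate((a0, a1, a2))       # same result as the two chained calls
print(a.shape)  # (6,)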
Example 7: extract_sift
# Required imports: import scipy [as alias]
# Or: from scipy import concatenate [as alias]
def extract_sift(cls, rawfilename, winsize, nbins):
    """read_ppm(rawfilename, filename)
    Read in raw pixel data from rawfilename (.ppm).
    Create a histogram around each pixel to become
    the feature vector for that observation (pixel).
    Pickle the result and save it to filename.
    Note: does NOT update object fields.
    Follow this with a call to readin().
    """
    if cls._VL_SIFT_:
        # VLSIFT matlab
        im = Image.open(rawfilename)
        (width, height) = im.size
        mlab.bb_sift(N.array(im), 'temp.mat')
        sift_features = scipy.io.loadmat('temp.mat')
        kp = sift_features['f_']
        sift_features = sift_features['d_']
        sift_features = scipy.concatenate((sift_features.transpose(), kp[2:4].transpose()), 1).transpose()
        labels = []
        for ikp in kp.transpose():
            (x, y) = ikp[0:2]
            labels += ['(%d,%d)' % (y, x)]
    else:
        # OpenCV SIFT
        img = cv2.imread(rawfilename)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        height, width = gray.shape
        # Computing SIFT
        sift = cv2.SIFT(edgeThreshold=3)
        kp, des = sift.detectAndCompute(gray, None)
        labels = []
        sift_features = N.transpose(des)
        scale_angle = []
        for ikp in kp:
            (x, y) = ikp.pt
            scale_angle.append([ikp.size/12, ikp.angle])
            labels += ['(%d,%d)' % (y, x)]
        scale_angle = N.array(scale_angle)
        sift_features = scipy.concatenate((sift_features.transpose(), scale_angle), 1).transpose()
    return (sift_features, labels, width, height)
Example 8: extract_hist
# Required imports: import scipy [as alias]
# Or: from scipy import concatenate [as alias]
def extract_hist(cls, rawfilename, winsize, nbins):
    # This function extracts the histogram features from the image
    im = Image.open(rawfilename)
    (width, height) = im.size
    npixels = width * height
    pix = scipy.array(im)
    # Generate one feature vector (histogram) per pixel
    #winsize = 20 # for test.pgm
    #winsize = 0 # for RGB
    halfwin = int(winsize/2)
    bins = scipy.linspace(0, 255, nbins)
    # Only use windows that are fully populated
    mywidth = width - winsize
    myheight = height - winsize
    #data = scipy.zeros((nbins-1, mywidth * myheight))
    #data = scipy.zeros((3*winsize*winsize, mywidth * myheight))
    data = []
    labels = []
    # Pick up all windows, stepping by half of the window size
    for y in range(halfwin, height-halfwin, int(halfwin/2)):
        for x in range(halfwin, width-halfwin, int(halfwin/2)):
            # Read in data in row-major order
            ind = (y-halfwin)*mywidth + (x-halfwin)
            #data[:,ind] = \
            #    scipy.histogram(pix[y-halfwin:y+halfwin,
            #                        x-halfwin:x+halfwin],
            #                    bins)[0]
            # Just RGB
            #data[:,ind] = pix[y,x]
            # RGB window
            #data[:,ind] = pix[y-halfwin:y+halfwin,x-halfwin:x+halfwin].flat
            hist_features = TCData.extract_hist_subimg(pix[y-halfwin:y+halfwin, x-halfwin:x+halfwin])
            if data == []:
                data = hist_features.reshape(-1, 1)
            else:
                data = scipy.concatenate((data, hist_features.reshape(-1, 1)), 1)
            labels += ['(%d,%d)' % (y, x)]
    return (data, labels, width, height)
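Growing data one column at a time with scipy.concatenate reallocates the whole array on every window, which gets slow for large images; an alternative is to collect the columns in a list and join them with a single call. A minimal sketch of that idea (the column size and count are hypothetical):

import scipy

cols = [scipy.random.rand(64, 1) for _ in range(100)]  # one (nbins, 1) histogram per window
data = scipy.concatenate(cols, 1)                       # one call -> shape (64, 100)
print(data.shape)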
Example 9: read_ppm
# Required imports: import scipy [as alias]
# Or: from scipy import concatenate [as alias]
def read_ppm(self, rawfilename, filename):
    # This function reads the ppm/jpg file and extracts the features if the
    # features pkl file doesn't exist. It also supports extending the feature
    # vector without recomputing features that were already computed.
    new_feature_string = []
    updated_feature = 0
    data = N.array([], dtype=int)
    if os.path.exists(filename):
        pkl_f = open(filename, 'r')
        (data, labels, feature_string, width, height, winsize, nbins) = pickle.load(pkl_f)
        self.winsize = winsize
        self.nbins = nbins
        new_feature_string = list(feature_string)
        pkl_f.close()
    if not new_feature_string.count('dsift'):
        updated_feature = 1
        (sift_features, labels, width, height) = self.extract_dsift(rawfilename, self.winsize, self.nbins)
        if data.size:
            data = scipy.concatenate((data.transpose(), sift_features.transpose()), 1).transpose()
        else:
            data = sift_features
        new_feature_string.append('dsift')
    if not new_feature_string.count('histogram'):
        updated_feature = 1
        (hist_features, labels, width, height) = self.extract_hist(rawfilename, self.winsize, self.nbins)
        hist_features = hist_features/(self.winsize)
        if data.size:
            data = scipy.concatenate((data.transpose(), hist_features.transpose()), 1).transpose()
        else:
            data = hist_features
        new_feature_string.append('histogram')
    '''
    if not new_feature_string.count('position'):
        updated_feature = 1
        position_features = []
        for label in labels:
            (y,x) = map(int, label.strip('()').split(','))
            position_features.append([x,y])
        position_features = N.array(position_features)
        if data.size:
            data = scipy.concatenate((data.transpose(), position_features), 1).transpose()
        else:
            data = position_features
        new_feature_string.append('position')
    '''
    if updated_feature:
        outf = open(filename, 'w')
        pickle.dump((data, labels, new_feature_string, width, height, self.winsize, self.nbins), outf)
        outf.close()
        print('Saved data to %s.' % filename)
    return (data, labels, new_feature_string, width, height, self.winsize, self.nbins)