This article collects typical usage examples of the Python function scipy.misc.bytescale. If you are wondering what bytescale does, how to call it, or what real-world usage looks like, the curated code examples below should help.
15 code examples of the bytescale function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
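Note: scipy.misc.bytescale was deprecated in SciPy 1.0 and removed in SciPy 1.3, which is why several of the test examples below wrap the call in suppress_warnings. If you need the same behaviour on a current SciPy, a minimal NumPy sketch of the linear 8-bit mapping (keyword names follow the old bytescale signature; this is an illustrative reimplementation, not a drop-in replacement for masked arrays) could look roughly like this:

import numpy as np

def bytescale_sketch(data, cmin=None, cmax=None, high=255, low=0):
    """Linearly rescale data into uint8 values in [low, high] (bytescale-style sketch)."""
    data = np.asarray(data)
    if data.dtype == np.uint8:
        return data  # uint8 input is returned unchanged, as in the original function
    cmin = data.min() if cmin is None else cmin
    cmax = data.max() if cmax is None else cmax
    cscale = cmax - cmin
    if cscale == 0:
        cscale = 1  # degenerate range: every value maps to `low`
    scaled = (data - cmin) * float(high - low) / cscale + low
    return (scaled.clip(low, high) + 0.5).astype(np.uint8)

print(bytescale_sketch(np.array([0, 1, 2])))  # expected: [0, 128, 255], matching Example 2 below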
Example 1: imstretch
def imstretch(self):
    data = np.clip(self.data_array, self.threshold[0], self.threshold[1])
    if self.mode == "linear":
        pass
    elif self.mode == "logarithmic":
        data = np.reciprocal(1 + np.power(0.5 / data, self.factor))
    elif self.mode == "gamma":
        data = np.power(data, self.factor)
    elif self.mode == "arcsinh":
        mn = np.nanmin(data)
        mx = np.nanmax(data)
        tmp = bytescale(data, high=1.0)
        beta = np.clip(self.factor, 0.0, self.factor)
        sclbeta = (beta - mn) / (mx - mn)
        sclbeta = np.clip(sclbeta, 1.0e-12, sclbeta)
        nonlinearity = 1.0 / sclbeta
        extrema = np.arcsinh(np.array([0.0, nonlinearity]))
        data = np.clip(np.arcsinh(data * nonlinearity), extrema[0], extrema[1])
    elif self.mode == "square root":
        data = np.sqrt(np.fabs(data)) * np.sign(data)
    elif self.mode == "histogram equalization":
        # `normed` was removed from np.histogram; `density=True` is the modern equivalent
        imhist, bins = np.histogram(data.flatten(), 256, density=True)
        cdf = imhist.cumsum()  # cumulative distribution function
        cdf = 255 * cdf / cdf[-1]  # normalize
        im2 = np.interp(data.flatten(), bins[:-1], cdf)
        data = im2.reshape(data.shape)
    self.scaled = bytescale(data).flatten().tolist()
Example 2: test_bytescale
def test_bytescale(self):
    x = np.array([0, 1, 2], np.uint8)
    y = np.array([0, 1, 2])
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning)
        assert_equal(misc.bytescale(x), x)
        assert_equal(misc.bytescale(y), [0, 128, 255])
Example 3: test_bytescale_keywords
def test_bytescale_keywords(self):
    x = np.array([40, 60, 120, 200, 300, 500])
    res_lowhigh = misc.bytescale(x, low=10, high=143)
    assert_equal(res_lowhigh, [10, 16, 33, 56, 85, 143])
    res_cmincmax = misc.bytescale(x, cmin=60, cmax=300)
    assert_equal(res_cmincmax, [0, 0, 64, 149, 255, 255])
    assert_equal(misc.bytescale(np.array([3, 3, 3]), low=4), [4, 4, 4])
Example 4: test_bytescale_keywords
def test_bytescale_keywords(self):
    x = np.array([40, 60, 120, 200, 300, 500])
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning)
        res_lowhigh = misc.bytescale(x, low=10, high=143)
        assert_equal(res_lowhigh, [10, 16, 33, 56, 85, 143])
        res_cmincmax = misc.bytescale(x, cmin=60, cmax=300)
        assert_equal(res_cmincmax, [0, 0, 64, 149, 255, 255])
        assert_equal(misc.bytescale(np.array([3, 3, 3]), low=4), [4, 4, 4])
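The expected values in the two keyword tests above follow from bytescale's linear mapping, roughly (data - cmin) * (high - low) / (cmax - cmin) + low, clipped to [low, high] and rounded into uint8. A quick NumPy check of the cmin/cmax case (a sketch for verification, not part of the original test file):

import numpy as np

x = np.array([40, 60, 120, 200, 300, 500])
cmin, cmax, low, high = 60, 300, 0, 255
scaled = (x - cmin) * (high - low) / (cmax - cmin) + low
print((scaled.clip(low, high) + 0.5).astype(np.uint8))  # expected: 0, 0, 64, 149, 255, 255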
Example 5: getProjection
def getProjection(self, numFrame):
    """ Get projections onto yoz and xoz plane from xoy image """
    xoyImg, _ = self.getGestureRegion(numFrame)
    heightLen, widthLen = xoyImg.shape
    depthLen = 256
    size = xoyImg.size
    corMap = np.zeros((size, 3))
    corX = np.reshape(corMap[:, 0], (size, 1))
    corY = np.reshape(corMap[:, 1], (size, 1))
    corZ = np.reshape(xoyImg, (size, 1), 'F')  # using Fortran-like index order
    # generate x and y coordinates
    for i in range(widthLen):
        startIdx = i * heightLen
        endIdx = startIdx + heightLen
        corX[startIdx:endIdx] = np.ones((heightLen, 1)) * i
        tmpArray = np.array(range(0, heightLen))  # column indices 0..heightLen-1
        corY[startIdx:endIdx] = np.reshape(tmpArray, (tmpArray.size, 1))
    # corMap[:, 0] = corX
    # corMap[:, 1] = corY
    # corMap[:, 2] = corZ
    corMap = np.hstack([corX, corY, corZ])
    ##
    thresh = 10
    selectedCorMap = corMap[np.where(corMap[:, 2] > thresh)]
    selectedCorMap = selectedCorMap.astype(int)
    # yoz and xoz image
    xozImg = np.zeros((depthLen, widthLen))
    yozImg = np.zeros((heightLen, depthLen))
    rowNum, _ = selectedCorMap.shape
    for i in range(rowNum):
        xozImg[selectedCorMap[i, 2], selectedCorMap[i, 0]] += 1
        yozImg[selectedCorMap[i, 1], selectedCorMap[i, 2]] += 1
    xozImg = bytescale(xozImg)  # scale to 0-255
    yozImg = bytescale(yozImg)  # scale to 0-255
    return xozImg, yozImg
Example 6: test_bytescale_low_equals_high
def test_bytescale_low_equals_high(self):
    a = np.arange(3)
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning)
        actual = misc.bytescale(a, low=10, high=10)
    expected = [10, 10, 10]
    assert_equal(actual, expected)
Example 7: test_bytescale_rounding
def test_bytescale_rounding(self):
    a = np.array([-0.5, 0.5, 1.5, 2.5, 3.5])
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning)
        actual = misc.bytescale(a, cmin=0, cmax=10, low=0, high=10)
    expected = [0, 1, 2, 3, 4]
    assert_equal(actual, expected)
Example 8: test_bytescale_mask
def test_bytescale_mask(self):
    a = np.ma.MaskedArray(data=[1, 2, 3], mask=[False, False, True])
    actual = misc.bytescale(a)
    expected = [0, 255, 3]
    assert_equal(expected, actual)
    assert_mask_equal(a.mask, actual.mask)
    self.assertTrue(isinstance(actual, np.ma.MaskedArray))
Example 9: write_img
def write_img(self, filename=None,
              date=None, receiver=None,
              data=None,
              format='jpg', quality=80,
              data_directory=None,
              output_filename=None,
              output_directory=None,
              min_val=None, max_val=None,
              verbose=True, greyscale=True,
              reverse_color=True,
              download_file=False,
              delete_file=False,
              prep=False):
    """
    Write output image file containing the dynamical spectrum.
    """
    if greyscale:
        mode = 'L'
    else:
        mode = 'RGB'
    ext = format.lower()
    if data is None:
        data = self.get_data(date=date, receiver=receiver,
                             filename=filename,
                             data_directory=data_directory,
                             download_file=download_file,
                             delete_file=delete_file,
                             verbose=verbose, prep=prep)
        if data is None:
            return ""
    array = data.intensity
    if min_val is None:
        min_val = array.min()
    if max_val is None:
        max_val = array.max()
    array = array.clip(min_val, max_val)
    if "(db)" not in data.intensity_units.lower():
        array = to_dB(array)
    array = bytescale(array)
    if reverse_color:
        array = array.max() - array
    image = Image.fromarray(array, mode=mode)
    if output_filename is None:
        if filename is None:
            filename = self.get_filename(date, receiver=receiver)
        output_filename = os.path.basename(filename) + "." + ext
    if output_directory is None:
        output_path = output_filename
    else:
        output_path = os.path.join(output_directory, os.path.basename(output_filename))
    image.save(output_path, quality=quality)
    return output_path
Example 10: pibayerraw
def pibayerraw(fn, exposure_sec, bit8):
    with PiCamera() as cam:  # load camera driver
        print('camera startup gain autocal')
        # LED automatically turns on, this turns it off
        cam.led = False
        sleep(0.75)  # somewhere between 0.5..0.75 seconds to let camera settle to final gain value
        setparams(cam, exposure_sec)  # wait till after sleep() so that gains settle before turning off auto
        getparams(cam)
        counter = 1
        #%% main loop
        while True:
            # tic = time()
            img10 = grabframe(cam)
            # print('{:.1f} sec. to grab frame'.format(time()-tic))
            #%% linear scale 10-bit to 8-bit
            if bit8:
                img = bytescale(img10, 0, 1024, 255, 0)
            else:
                img = img10
            #%% write to PNG or JPG or whatever based on file extension
            max_value = img.max()
            print(max_value)
            if max_value > 50:
                idx = unravel_index(img.argmax(), img.shape)
                xidx = idx[0]
                yidx = idx[1]
                print(xidx, yidx)
                xlow = max(0, xidx - 25)
                ylow = max(0, yidx - 25)
                xhi = min(1944, xidx + 25)
                yhi = min(2592, yidx + 25)
                imsave(fn + '%03d' % counter + '.png', img[xlow:xhi, ylow:yhi])
                counter = counter + 1
            # break
    return img
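For reference, the positional call bytescale(img10, 0, 1024, 255, 0) in Example 10 corresponds, under the usual bytescale signature (data, cmin, cmax, high, low), to the keyword form below; this is my reading of the signature, not part of the original script:

img = bytescale(img10, cmin=0, cmax=1024, high=255, low=0)  # map 10-bit counts onto 0..255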
Example 11: make_pc_images
def make_pc_images(pca, shape):
    U = REACT2D.build_dct(shape[0], shape[1], 50)
    pca_images = np.empty((npca, shape[0], shape[1], 3))
    pca_images[:, :, :, 0] = pca.components_[:, :ncoefs].dot(U.T[:ncoefs, :]).reshape((npca, shape[0], shape[1]))
    pca_images[:, :, :, 1] = pca.components_[:, ncoefs:2 * ncoefs].dot(U.T[:ncoefs, :]).reshape((npca, shape[0], shape[1]))
    pca_images[:, :, :, 2] = pca.components_[:, 2 * ncoefs:].dot(U.T[:ncoefs, :]).reshape((npca, shape[0], shape[1]))
    npca_rows = 3
    npca_cols = 3
    nplots = 2
    pca_idx = 0
    for plot in range(nplots):
        idx = 1
        plt.clf()
        for row in range(npca_rows):
            for col in range(npca_cols):
                print(row, col, idx)
                plt.subplot(npca_rows, npca_cols, idx)
                plt.imshow(bytescale(pca_images[pca_idx, :, :, :]))
                plt.title('PC ' + str(pca_idx + 1))
                plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
                plt.tick_params(axis='y', which='both', bottom='off', top='off', labelbottom='off')
                idx += 1
                pca_idx += 1
        plt.savefig(plot_dir + 'PC_Images_' + str(plot + 1) + '.png')
        if doshow:
            plt.show()
Example 12: getGestureRegion
def getGestureRegion(self, frameNum):
    """ Get gesture region for the given frame """
    # get depth frame
    depthData = self.getFrame(self.depth, frameNum)
    depthGray = cv2.cvtColor(depthData, cv2.COLOR_RGB2GRAY)  # cv2.cv.CV_RGB2GRAY on OpenCV 2
    # get user segmentation frame
    userSeg = self.getFrame(self.user, frameNum)
    userSegGray = cv2.cvtColor(userSeg, cv2.COLOR_RGB2GRAY)
    userSegGray = cv2.medianBlur(userSegGray, 5)  # median filter on original user image
    # convert user segmentation to a binary image
    threshold = 128
    _, userBinImg = cv2.threshold(userSegGray, threshold, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    depthGray[np.where(userBinImg == 0)] = 0
    depthGray = cv2.medianBlur(depthGray, 5)
    depthRealValue = depthGray.astype(np.float32)  # depth value of real world (0-maxDepth)
    # convert to real-world depth values
    depthRealValue = depthRealValue / 255.0 * float(self.data['maxDepth'])
    depthRealValue = depthRealValue.round()
    depthRealValue = depthRealValue.astype(np.uint16)
    # scale depthGray to 0-255
    depthGray = depthGray.astype(np.uint16)
    depthGray = bytescale(depthGray)
    depthImgValue = np.copy(depthGray)
    return depthImgValue, depthRealValue
Example 13: hdf2video
def hdf2video(data, imgh5, outfn, clim):
    outfn = Path(outfn).expanduser()
    import cv2
    try:
        from cv2.cv import FOURCC as fourcc  # Windows needs from cv2.cv
    except ImportError:
        from cv2 import VideoWriter_fourcc as fourcc
    outfn = outfn.with_suffix('.ogv')
    cc4 = fourcc(*'THEO')
    # we use isColor=True because some codecs have trouble with grayscale
    hv = cv2.VideoWriter(str(outfn), cc4, fps=33,
                         frameSize=data.shape[1:][::-1],  # frameSize needs col,row
                         isColor=True)  # right now we're only using grayscale
    if not hv.isOpened():
        raise TypeError('trouble starting video file')
    for d in data:
        # RAM usage explodes if scaling all at once on GB class file
        # for d in bytescale(data,1000,4000):
        # for d in sixteen2eight(data,(1000,4000)):
        hv.write(gray2rgb(bytescale(d, clim[0], clim[1])))
    hv.release()
Example 14: test_bytescale_cscale_lowhigh
def test_bytescale_cscale_lowhigh(self):
    a = np.arange(10)
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning)
        actual = misc.bytescale(a, cmin=3, cmax=6, low=100, high=200)
    expected = [100, 100, 100, 100, 133, 167, 200, 200, 200, 200]
    assert_equal(actual, expected)
Example 15: main
def main(image):
    matplotlib.rcParams["font.size"] = 10

    def show_img(img, axes):
        """Plot the image as float"""
        # img = img_as_float(img)
        ax_img = axes
        ax_img.imshow(img, cmap=plt.cm.gray)
        ax_img.set_axis_off()
        return ax_img

    # Open and read in the FITS image
    try:
        fits = pyfits.open(image)
        # fits = Image.open(image)
    except IOError:
        print("Can not read the FITS image: " + image + " !!")
    # Check the input image
    img = fits[0].data
    # img = np.array(fits)
    if img.ndim != 2:
        raise NameError("Data need to be 2-D image !")
    # Logarithmic scaling of the image
    img_log = np.log10(img)
    img_log = img_as_float(img_log)
    # Contrast stretching
    p5, p95 = np.percentile(img, (2, 98))
    img_rescale = exposure.rescale_intensity(img, in_range=(p5, p95))
    # Adaptive equalization
    img_new = bytescale(img_rescale)
    img_ahe = exposure.equalize_adapthist(img_new, ntiles_x=16, ntiles_y=16, clip_limit=0.05, nbins=256)
    img_ahe = img_as_float(img_ahe)
    # Display results
    fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 5))
    # Original image
    ax_img = show_img(img_log, axes[0])
    ax_img.set_title("Original")
    # Contrast-enhanced image
    ax_img = show_img(img_rescale, axes[1])
    ax_img.set_title("Rescale")
    # AHE-enhanced image
    ax_img = show_img(img_ahe, axes[2])
    ax_img.set_title("AHE")
    # Prevent overlap of y-axis
    fig.subplots_adjust(bottom=0.1, right=0.9, top=0.9, left=0.1, wspace=0.05)
    # Save a PNG file
    plt.gcf().savefig("ahe_test.png")