This article collects typical usage examples of the numpy.uint8 function in Python. If you have been wondering what exactly uint8 does, how to call it, and what real code that uses it looks like, the curated examples below should help.
The following shows 15 code examples of the uint8 function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
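Before diving into the examples, a minimal sketch of what np.uint8 itself is: NumPy's unsigned 8-bit integer scalar type (range 0 to 255). Arithmetic that stays in uint8 wraps modulo 256, which is why it shows up so often in the image-processing and hardware-register code below:

import numpy as np

x = np.uint8(250)
y = x + np.uint8(10)             # stays uint8; 260 wraps to 4 (NumPy may warn on overflow)
print(y, y.dtype)                # 4 uint8
arr = np.array([0, 128, 255], np.uint8)
print(arr.itemsize)              # 1 byte per element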
Example 1: write
def write(self, struct_name, data_dict):
    """Write data_dict under the group struct_name in the open hdf5 file.

    :param struct_name: the identification of the structure to write in the hdf5
    :param data_dict: the python dictionary containing the information to write
    """
    if self.file is None:
        info = "No file currently open"
        logger.info(info)
        return

    group_l1 = self.file.create_group(struct_name)
    group_l1.attrs['OCTAVE_GLOBAL'] = np.uint8(1)
    group_l1.attrs['OCTAVE_NEW_FORMAT'] = np.uint8(1)
    group_l1.create_dataset("type", data=np.string_('scalar struct'), dtype="|S14")
    group_l2 = group_l1.create_group('value')
    for ftparams in data_dict:
        group_l3 = group_l2.create_group(ftparams)
        group_l3.attrs['OCTAVE_NEW_FORMAT'] = np.uint8(1)
        if isinstance(data_dict[ftparams], str):
            group_l3.create_dataset("type", (), data=np.string_('sq_string'), dtype="|S10")
            if self.octave_targetted_version < 3.8:
                group_l3.create_dataset("value", data=np.string_(data_dict[ftparams] + '0'))
            else:
                group_l3.create_dataset("value", data=np.string_(data_dict[ftparams]))
        else:
            group_l3.create_dataset("type", (), data=np.string_('scalar'), dtype="|S7")
            group_l3.create_dataset("value", data=data_dict[ftparams])
Example 2: __init__
def __init__(self, mode='dummy', address=None, high_duration=0.001,
             verbose=None):
    self._stamp_trigger = self._parallel_trigger
    if mode == 'parallel':
        if 'Linux' in platform.system():
            address = '/dev/parport0' if address is None else address
            import parallel as _p
            self._port = _p.Parallel(address)
            self._set_data = self._port.setData
        elif 'Windows' in platform.system():
            from ctypes import windll
            if not hasattr(windll, 'inpout32'):
                raise SystemError('Must have inpout32 installed')
            addr = 0x0378 if address is None else address
            # only strings like '0x378' need parsing; the default int passes through
            base = int(addr, 16) if isinstance(addr, str) and addr.startswith('0x') else addr
            self._port = windll.inpout32
            mask = np.uint8(1 << 5 | 1 << 6 | 1 << 7)
            # Use ECP to put the port into byte mode
            val = int((self._port.Inp32(base + 0x402) & ~mask) | (1 << 5))
            self._port.Out32(base + 0x402, val)
            # Now to make sure the port is in output mode we need to make
            # sure that bit 5 of the control register is not set
            val = int(self._port.Inp32(base + 2) & ~np.uint8(1 << 5))
            self._port.Out32(base + 2, val)

            def _set_data(data):
                return self._port.Out32(base, data)

            self._set_data = _set_data
        else:
            raise NotImplementedError
    else:  # mode == 'dummy'
        self._stamp_trigger = self._dummy_trigger
    self.high_duration = high_duration
Example 3: process
def process(self):
    # capture
    ret, frame = self.cam.read()
    #frame = self.first
    frame = cv.resize(frame, None, fx=0.5, fy=0.5)
    frame = cv.flip(frame, 1)
    self.orig_frame = frame
    for click in (c for c in list(zip(self.col_defs, self.col_poss,
                                      self.col_disps))
                  if (c[0] != self.undefined_col).all()
                  and (c[1] != self.undefined_pos).all()):
        col = click[0]
        pos = click[1]
        disp = click[2]
        thresh = self.thresh_slide.get()
        hlower = (col[0, 0, 0] - thresh) % 180
        hupper = (col[0, 0, 0] + thresh) % 180
        # in case we have gone under 0 or over 180
        invert = hlower > hupper
        if invert:
            hlower, hupper = hupper, hlower
        lower = np.uint8([[[hlower, 20, 20]]])
        upper = np.uint8([[[hupper, 255, 255]]])
        hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
        mask = cv.inRange(hsv, lower, upper)
        if not invert:
            mask = cv.bitwise_not(mask)
        frame = cv.bitwise_and(frame, frame, mask=mask)
    self.frame = frame
    self.root.after(50, self.process)
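The subtle part above is the hue wraparound: OpenCV hue lives in [0, 180), so a threshold window around a hue near the boundary splits into two intervals. A standalone sketch of the same arithmetic (the numbers are illustrative):

# hue 5 with threshold 10 wants the wrapped window [175, 180) U [0, 15)
hue, thresh = 5, 10
hlower, hupper = (hue - thresh) % 180, (hue + thresh) % 180  # 175, 15
invert = hlower > hupper            # True: the window crossed 0
if invert:
    hlower, hupper = hupper, hlower  # inRange over [15, 175] selects the COMPLEMENT
# which is why process() skips the bitwise_not exactly when invert is True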
Example 4: testInt
def testInt(self):
    # np.int was simply an alias of the builtin int (removed in NumPy 1.24),
    # so the builtin is used here
    num = 2562010
    self.assertEqual(int(ujson.decode(ujson.encode(num))), num)
    num = np.int8(127)
    self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)
    num = np.int16(2562010)
    self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)
    num = np.int32(2562010)
    self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)
    num = np.int64(2562010)
    self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)
    num = np.uint8(255)
    self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)
    num = np.uint16(2562010)
    self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)
    num = np.uint32(2562010)
    self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)
    num = np.uint64(2562010)
    self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
Example 5: crop_waffle
def crop_waffle(img):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    greyscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    lower_yellow = np.array([0, 50, 50])
    upper_yellow = np.array([70, 255, 255])
    mask = cv2.inRange(hsv, np.uint8(lower_yellow), np.uint8(upper_yellow))
    kernel = np.ones((9, 9), np.uint8)
    closed_mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    masked_img = cv2.bitwise_and(greyscale, greyscale, mask=closed_mask)
    # OpenCV >= 4 returns (contours, hierarchy); the original used the legacy
    # cv.CV_RETR_EXTERNAL / cv.CV_CHAIN_APPROX_SIMPLE constants of the old cv API
    contours, hierarchy = cv2.findContours(masked_img, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    # now find the largest contour
    max_area = 0
    max_contour = None
    for c in contours:
        # cv2.contourArea works on numpy arrays directly, so the old
        # cv.fromarray round-trip is no longer needed
        area = cv2.contourArea(c)
        if area > max_area:
            max_contour = c
            max_area = area
    shape = img.shape
    largest_blob_mask = np.zeros((shape[0], shape[1], 1), np.uint8)
    cv2.fillPoly(largest_blob_mask, pts=[max_contour], color=(255, 255, 255))
    print_rgb_hist(img, largest_blob_mask)
    return cv2.bitwise_and(img, img, mask=largest_blob_mask)
Example 6: write_sequence_file
def write_sequence_file(awgData, fileName, miniLLRepeat=1):
    '''
    Main function to pack channel LLs into an APS h5 file.
    '''
    # Preprocess the sequence data to handle APS restrictions
    LLs12, repeat12, wfLib12 = preprocess(awgData['ch12']['linkList'],
                                          awgData['ch12']['wfLib'],
                                          awgData['ch12']['correctionT'])
    LLs34, repeat34, wfLib34 = preprocess(awgData['ch34']['linkList'],
                                          awgData['ch34']['wfLib'],
                                          awgData['ch34']['correctionT'])
    assert repeat12 == repeat34, 'Failed to unroll sequence'
    if repeat12 != 0:
        miniLLRepeat *= repeat12

    # Merge the marker data into the IQ link lists
    merge_APS_markerData(LLs12, awgData['ch1m1']['linkList'], 1)
    merge_APS_markerData(LLs12, awgData['ch2m1']['linkList'], 2)
    merge_APS_markerData(LLs34, awgData['ch3m1']['linkList'], 1)
    merge_APS_markerData(LLs34, awgData['ch4m1']['linkList'], 2)

    # Open the HDF5 file
    if os.path.isfile(fileName):
        os.remove(fileName)
    with h5py.File(fileName, 'w') as FID:
        # List of which channels we have data for
        # TODO: actually handle incomplete channel data
        channelDataFor = [1, 2] if LLs12 else []
        channelDataFor += [3, 4] if LLs34 else []
        FID['/'].attrs['Version'] = 2.1
        FID['/'].attrs['channelDataFor'] = np.uint16(channelDataFor)
        FID['/'].attrs['miniLLRepeat'] = np.uint16(miniLLRepeat - 1)

        # Create the waveform vectors
        wfInfo = []
        for wfLib in (wfLib12, wfLib34):
            wfInfo.append(create_wf_vector({key: wf.real for key, wf in wfLib.items()}))
            wfInfo.append(create_wf_vector({key: wf.imag for key, wf in wfLib.items()}))

        LLData = [LLs12, LLs34]
        repeats = [0, 0]
        # Create the groups and datasets
        for chanct in range(4):
            chanStr = '/chan_{0}'.format(chanct + 1)
            chanGroup = FID.create_group(chanStr)
            chanGroup.attrs['isIQMode'] = np.uint8(1)
            # Write the waveformLib to file
            FID.create_dataset('{0}/waveformLib'.format(chanStr), data=wfInfo[chanct][0])
            # For A channels (1 & 3) we write link list data if we actually have any
            if (np.mod(chanct, 2) == 0) and LLData[chanct // 2]:
                groupStr = chanStr + '/linkListData'
                LLGroup = FID.create_group(groupStr)
                LLDataVecs, numEntries = create_LL_data(LLData[chanct // 2],
                                                        wfInfo[chanct][1],
                                                        os.path.basename(fileName))
                LLGroup.attrs['length'] = numEntries
                for key, dataVec in LLDataVecs.items():
                    FID.create_dataset(groupStr + '/' + key, data=dataVec)
            else:
                chanGroup.attrs['isLinkListData'] = np.uint8(0)
Example 7: partition_FOV_KMeans
def partition_FOV_KMeans(self, tradeoff_weight=.5, fx=.25, fy=.25, n_clusters=4, max_iter=500):
    """
    Partition the FOV in clusters that group pixels close in space and in mutual correlation

    Parameters
    ----------
    tradeoff_weight: between 0 and 1, weights the contributions of distance and correlation in the overall metric
    fx, fy: downsampling factors to apply to the movie
    n_clusters, max_iter: KMeans algorithm parameters

    Outputs
    -------
    fovs: 2D array encoding the partitions of the FOV
    mcoef: matrix of pairwise correlation coefficients
    distanceMatrix: matrix of pixel distances
    """
    _, h1, w1 = self.shape
    self.resize(fx, fy)
    T, h, w = self.shape
    Y = np.reshape(self, (T, h * w))
    mcoef = np.corrcoef(Y.T)
    idxA, idxB = np.meshgrid(list(range(w)), list(range(h)))
    coordmat = np.vstack((idxA.flatten(), idxB.flatten()))
    distanceMatrix = euclidean_distances(coordmat.T)
    distanceMatrix = old_div(distanceMatrix, np.max(distanceMatrix))
    estim = KMeans(n_clusters=n_clusters, max_iter=max_iter)
    kk = estim.fit(tradeoff_weight * mcoef - (1 - tradeoff_weight) * distanceMatrix)
    labs = kk.labels_
    fovs = np.reshape(labs, (h, w))
    # cast labels to uint8 for cv2.resize (assumes fewer than 256 clusters);
    # fx/fy must be passed by keyword, since the third positional argument is dst
    fovs = cv2.resize(np.uint8(fovs), (w1, h1),
                      fx=old_div(1., fx), fy=old_div(1., fy),
                      interpolation=cv2.INTER_NEAREST)
    return np.uint8(fovs), mcoef, distanceMatrix
Example 8: on_epoch_end
def on_epoch_end(self, callback_data, model, epoch):
    # convert to numpy arrays
    data_batch = model.data_batch.get()
    noise_batch = model.noise_batch.get()
    # value transform
    data_batch = self._value_transform(data_batch)
    noise_batch = self._value_transform(noise_batch)
    # shape transform
    data_canvas = self._shape_transform(data_batch)
    noise_canvas = self._shape_transform(noise_batch)
    # plotting options
    im_args = dict(interpolation="nearest", vmin=0., vmax=1.)
    if self.nchan == 1:
        im_args['cmap'] = plt.get_cmap("gray")

    fname = self.filename + '_data_' + '{:03d}'.format(epoch) + '.png'
    Image.fromarray(np.uint8(data_canvas * 255)).convert('RGB').save(fname)
    fname = self.filename + '_noise_' + '{:03d}'.format(epoch) + '.png'
    Image.fromarray(np.uint8(noise_canvas * 255)).convert('RGB').save(fname)

    # plot logged WGAN costs if logged
    if model.cost.costfunc.func == 'wasserstein':
        giter = callback_data['gan/gen_iter'][:]
        nonzeros = np.where(giter)
        giter = giter[nonzeros]
        cost_dis = callback_data['gan/cost_dis'][:][nonzeros]
        w_dist = medfilt(np.array(-cost_dis, dtype='float64'), kernel_size=101)
        plt.figure(figsize=(400 / self.dpi, 300 / self.dpi), dpi=self.dpi)
        plt.plot(giter, -cost_dis, 'k-', lw=0.25)
        plt.plot(giter, w_dist, 'r-', lw=2.)
        plt.title(self.filename, fontsize=self.font_size)
        plt.xlabel("Generator Iterations", fontsize=self.font_size)
        plt.ylabel("Wasserstein estimate", fontsize=self.font_size)
        plt.margins(0, 0, tight=True)
        plt.savefig(self.filename + '_training.png', bbox_inches='tight')
        plt.close()
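One caveat about the np.uint8(data_canvas * 255) conversions above: casting a float array to uint8 truncates and then wraps, so values outside [0, 1] alias instead of saturating. A defensive variant clips first:

import numpy as np
canvas = np.array([-0.1, 0.5, 1.2])
print(np.uint8(canvas * 255))                 # wrapped, e.g. [231 127  50]
print(np.uint8(np.clip(canvas, 0, 1) * 255))  # saturated: [  0 127 255]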
Example 9: calc_chans
def calc_chans(self):
    """Calculate lockout and inclusion chan neighbourhoods, max number of chans to use,
    and define the spike record dtype"""
    sort = self.sort
    self.enabledSiteLoc = {}
    for chan in self.chans:  # for all enabled chans
        self.enabledSiteLoc[chan] = sort.stream.probe.SiteLoc[chan]  # get its (x, y)
    # distance matrix for the chans enabled for this search, sorted by chans:
    self.dm = DistanceMatrix(self.enabledSiteLoc)
    # dict of neighbourhood of chanis for each chani
    self.locknbhdi = {}  # for lockout around a spike
    self.inclnbhdi = {}  # for inclusion of wavedata as part of a spike
    maxnchansperspike = 0
    for chani, distances in enumerate(self.dm.data):  # iterate over rows of distances
        # at what col indices does the returned row fall within lockr?:
        lockchanis, = np.uint8(np.where(distances <= self.lockr))
        # at what col indices does the returned row fall within inclr?:
        inclchanis, = np.uint8(np.where(distances <= self.inclr))
        self.locknbhdi[chani] = lockchanis
        self.inclnbhdi[chani] = inclchanis
        maxnchansperspike = max(maxnchansperspike, len(inclchanis))
    self.maxnchansperspike = maxnchansperspike
    self.SPIKEDTYPE = [('id', np.int32), ('nid', np.int16),
                       ('chan', np.uint8), ('nchans', np.uint8),
                       ('chans', np.uint8, self.maxnchansperspike),
                       ('chani', np.uint8),
                       ('t', np.int64), ('t0', np.int64), ('t1', np.int64),
                       ('dt', np.int16),  # time between peaks, in us
                       ('tis', np.uint8, (self.maxnchansperspike, 2)),  # peak positions
                       ('aligni', np.uint8),
                       ('V0', np.float32), ('V1', np.float32), ('Vpp', np.float32),
                       ('x0', np.float32), ('y0', np.float32),
                       ('sx', np.float32), ('sy', np.float32),
                       ]
Example 10: explain
def explain(model, img, topLabels, numSamples, numFeatures, hideRest, hideColor, positiveOnly):
    img, oldImg = transform_img_fn(img)
    img = img * (1. / 255)
    prediction = model.predict(img)
    explainer = lime_image.LimeImageExplainer()
    img = np.squeeze(img)
    explanation = explainer.explain_instance(img, model.predict, top_labels=topLabels,
                                             hide_color=hideColor, num_samples=numSamples)
    temp, mask = explanation.get_image_and_mask(getTopPrediction(prediction[0]),
                                                positive_only=positiveOnly,
                                                num_features=numFeatures,
                                                hide_rest=hideRest)
    tempMask = mask * 255
    temp = Image.fromarray(np.uint8(tempMask))
    temp = temp.resize((oldImg.width, oldImg.height))
    temp = image.img_to_array(temp)
    temp = temp * 1. / 255
    temp = temp.astype(np.int64)
    temp = np.squeeze(temp)
    oldImgArr = image.img_to_array(oldImg)
    oldImgArr = oldImgArr * (1. / 255)
    oldImgArr = oldImgArr.astype(np.float64)
    imgExplained = mark_boundaries(oldImgArr, temp)
    imgFinal = np.uint8(imgExplained * 255)
    img = Image.fromarray(imgFinal)
    imgByteArr = io.BytesIO()
    img.save(imgByteArr, format='JPEG')
    imgByteArr = imgByteArr.getvalue()
    return imgByteArr
Example 11: grabCut
def grabCut(img, rect=None, mask=None, ite=5):
    height, width, channels = img.shape
    # `is None` rather than `== None`: comparing a numpy mask with == would
    # broadcast elementwise and raise on truth-testing
    # if no arguments, try to segment using a large rectangle
    if rect is None and mask is None:
        rect = (int(width * 0.15), 15, int(width * 0.85), height - 15)
        initOpt = cv2.GC_INIT_WITH_RECT
    # if rectangle argument but no mask, init mask with rectangle
    elif mask is None:
        mask = np.zeros((height, width), np.uint8)
        initOpt = cv2.GC_INIT_WITH_RECT
    # if mask argument but no rectangle, use mask and leave rect full-frame
    elif rect is None:
        initOpt = cv2.GC_INIT_WITH_MASK
        rect = (0, 0, width, height)
        mask = np.uint8(mask)
    # if mask argument and rectangle, set pixels outside the mask as background
    else:
        mask = np.uint8(mask)
        rect = rectangleutil.checkRectangleBounds(rect, mask.shape)
        maskRect = rectangleutil.rectangle2mask(rect, mask.shape)
        mask[maskRect == 0] = cv2.GC_BGD
        initOpt = cv2.GC_INIT_WITH_MASK
    #imageblured = np.zeros(img.shape, img.dtype)
    #cv2.smooth(img, imageblured, cv.CV_GAUSSIAN, 5)
    # grabCut expects 1x65 float64 arrays for the background/foreground models
    tmp1 = np.zeros((1, 13 * 5))
    tmp2 = np.zeros((1, 13 * 5))
    cv2.grabCut(img, mask, rect, tmp1, tmp2, ite, initOpt)
    mask[mask == cv2.GC_BGD] = 0
    mask[mask == cv2.GC_PR_BGD] = 0
    mask[mask == cv2.GC_FGD] = 255
    mask[mask == cv2.GC_PR_FGD] = 255
    return mask
Example 12: __init__
def __init__(self):
    self.stack = np.zeros(STACK_SIZE, dtype=np.uint8)
    self.sp = np.uint8(0)
    self.pc = np.uint8(0)
    self.s_regs = np.zeros(8, dtype=np.uint8)
    self.m_regs = np.zeros(8, dtype=np.uint8)
    self.progmem = np.zeros(PROG_SIZE, dtype=np.uint8)
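Storing the stack pointer and program counter as np.uint8 gives the emulated registers hardware-like modulo-256 behaviour, provided increments are re-wrapped explicitly (mixing a uint8 with a plain Python int can promote to a wider type in some NumPy versions). A quick check, independent of the class above:

import numpy as np
pc = np.uint8(255)
pc = np.uint8(pc + 1)  # re-wrap explicitly; an 8-bit PC rolls over to 0
print(pc)              # 0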
Example 13: get_depth
def get_depth():
    """
    Returns numpy ndarrays representing the raw and ranged depth images.
    Outputs:
        dmap: depth map in mm, 2D ndarray, dtype=uint16, min=0, max=2**12-1
        d4d : depth for display, 3D ndarray, dtype=uint8, min=0, max=255
    Note1:
        frombuffer avoids a copy (fromstring is deprecated for binary data)
    Note2:
        .reshape(120,160)  # smaller image for faster response
            OMAP/ARM default video configuration
        .reshape(240,320)  # used to MATCH RGB image (OMAP/ARM)
            requires .set_video_mode
    """
    dmap = np.frombuffer(depth_stream.read_frame().get_buffer_as_uint16(),
                         dtype=np.uint16).reshape(h, w)
    # correct the range: depth images are 12 bits, so scale to 0-255
    # (note the parentheses -- `* 255 / 2**12 - 1` would subtract 1 at the end)
    d4d = dmap.astype(float) * 255 / (2**12 - 1)
    d4d = cv2.cvtColor(np.uint8(d4d), cv2.COLOR_GRAY2RGB)
    # split the depth into two channels; work in a signed dtype first, since
    # subtracting from uint16 would wrap around instead of going negative
    temp1 = dmap.astype(np.int32) - 2**8   # most significant: 2**8 - 2**15
    temp2 = dmap.copy()                    # least significant: 2**0 - 2**7
    temp1[temp1 < 0] = 0
    temp2[temp2 > 255] = 0
    dmap = np.uint8(np.dstack((temp2, temp1, temp2)))
    #print dmap.shape, type(dmap), dmap.dtype
    return dmap, d4d
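For reference, a bit-shift split of the uint16 depth into true high/low bytes (an alternative to the subtract-and-clamp above, not what the original demo does) round-trips exactly:

import numpy as np
dmap = np.array([[0, 255, 256, 4095]], dtype=np.uint16)
hi = (dmap >> 8).astype(np.uint8)    # top 8 bits
lo = (dmap & 0xFF).astype(np.uint8)  # bottom 8 bits
assert (hi.astype(np.uint16) * 256 + lo == dmap).all()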
Example 14: save_frame_and_response_map
def save_frame_and_response_map(frame, bbox, fig_n, crop_x, score, writer, fig):
    # fig = plt.figure(fig_n)
    plt.clf()
    ax = fig.add_subplot(131)
    ax.set_title('Tracked sequence')
    r = patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3],
                          linewidth=2, edgecolor='r', fill=False)
    ax.imshow(np.uint8(frame))
    ax.add_patch(r)
    ax2 = fig.add_subplot(132)
    ax2.set_title('Context region')
    ax2.imshow(np.uint8(crop_x))
    ax2.spines['left'].set_position('center')
    ax2.spines['right'].set_color('none')
    ax2.spines['bottom'].set_position('center')
    ax2.spines['top'].set_color('none')
    ax2.set_yticklabels([])
    ax2.set_xticklabels([])
    ax3 = fig.add_subplot(133)
    ax3.set_title('Response map')
    ax3.spines['left'].set_position('center')
    ax3.spines['right'].set_color('none')
    ax3.spines['bottom'].set_position('center')
    ax3.spines['top'].set_color('none')
    ax3.set_yticklabels([])
    ax3.set_xticklabels([])
    ax3.imshow(np.uint8(score))
    # ax3.grid()
    writer.grab_frame()
Example 15: array_colorkey
def array_colorkey(surface):
    """pygame.numpyarray.array_colorkey(Surface): return array

    Copy the colorkey values into a 2d array.

    Create a new array with the colorkey transparency value from each
    pixel. If the pixel matches the colorkey it will be fully
    transparent; otherwise it will be fully opaque.

    This will work on any type of Surface format. If the image has no
    colorkey a solid opaque array will be returned.

    This function will temporarily lock the Surface as pixels are
    copied.
    """
    colorkey = surface.get_colorkey()
    if colorkey is None:
        # No colorkey, return a solid opaque array.
        array = numpy.empty(surface.get_width() * surface.get_height(),
                            numpy.uint8)
        array.fill(0xff)
        array.shape = surface.get_width(), surface.get_height()
        return array

    # Taken from Alex Holkner's pygame-ctypes package. Thanks a lot.
    array = array2d(surface)
    # Check each pixel value for the colorkey and mark it as opaque or
    # transparent as needed.
    val = surface.map_rgb(colorkey)
    array = numpy.choose(numpy.equal(array, val),
                         (numpy.uint8(0xff), numpy.uint8(0)))
    array.shape = surface.get_width(), surface.get_height()
    return array