This article collects typical usage examples of Python's numpy.frombuffer function. If you have been wondering what numpy.frombuffer does, how to call it, and what idiomatic usage looks like, the hand-picked code samples below should help.
The following 15 code examples of frombuffer are drawn from real-world projects and are ordered by popularity by default.
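Before the examples, a quick orientation: np.frombuffer reinterprets an existing bytes-like object as a one-dimensional array without copying it. The dtype string sets the element type and byte order ('>' for big-endian, '<' for little-endian), and the resulting array is read-only whenever the source buffer is immutable. A minimal sketch:

import numpy as np

raw = b'\x00\x01\x00\x02'            # four raw bytes
a = np.frombuffer(raw, dtype='>u2')  # viewed as big-endian uint16
print(a)                             # [1 2]
print(a.flags.writeable)             # False: `raw` is immutable bytes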
Example 1: _read_dig_point_struct
def _read_dig_point_struct(fid, tag, shape, rlims):
    """Read dig point struct tag."""
    return dict(
        kind=int(np.frombuffer(fid.read(4), dtype=">i4")),
        ident=int(np.frombuffer(fid.read(4), dtype=">i4")),
        r=np.frombuffer(fid.read(12), dtype=">f4"),
        coord_frame=FIFF.FIFFV_COORD_UNKNOWN)
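Example 1 reads fixed-width big-endian fields one at a time from an open file. The same pattern can be exercised against an in-memory stream; the sketch below fabricates a matching byte layout (two big-endian int32 fields followed by three float32 coordinates) purely for illustration. Note that calling int() on a length-1 array is deprecated in recent NumPy, so the sketch indexes element 0 explicitly.

import io
import struct
import numpy as np

buf = struct.pack('>ii3f', 1, 7, 0.1, 0.2, 0.3)  # hypothetical dig-point record
fid = io.BytesIO(buf)
kind = int(np.frombuffer(fid.read(4), dtype='>i4')[0])
ident = int(np.frombuffer(fid.read(4), dtype='>i4')[0])
r = np.frombuffer(fid.read(12), dtype='>f4')
print(kind, ident, r)  # 1 7 [0.1 0.2 0.3] (float32-rounded)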
Example 2: eleventhPass
def eleventhPass(idxArray, ageArray, disPops, P_Age, length, name, myPipe):
    # collect sample stats
    subObs = {}
    subExp = {}
    sampleN = myPipe.recv()
    idxArray = np.frombuffer(idxArray.get_obj(), dtype=np.int32)
    ageArray = np.frombuffer(ageArray.get_obj(), dtype=np.int8)
    while sampleN != 'END':
        # sample = np.random.choice(idxArray, length, replace=True)
        randomIndexs = np.random.choice(range(idxArray.shape[0]), length, replace=True)
        sample = idxArray[randomIndexs]
        sampleAges = ageArray[randomIndexs]
        counts = Counter(sample)
        ageCounts = Counter(sampleAges)
        for i in range(len(disPops)):
            disPop = np.frombuffer(disPops[i].get_obj(), dtype=np.int32)
            P_Dis_Age = P_Age[i]
            obsSampled = set(disPop) & set(sample)
            expSampled = np.sum([P_Dis_Age[k] * v for k, v in ageCounts.items()])
            try:
                subObs[i].append(sum([counts[s] for s in obsSampled]))
                subExp[i].append(expSampled)
            except KeyError:
                subObs[i] = [sum([counts[s] for s in obsSampled])]
                subExp[i] = [expSampled]
        del sample
        if sampleN % 50 == 0:
            sys.stdout.write('.')
            sys.stdout.flush()
        sampleN = myPipe.recv()
    myPipe.send([subObs, subExp])
    # print len(subStats[0])
    return 0
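The key move in example 2 is wrapping multiprocessing shared arrays (via .get_obj()) with np.frombuffer, so each worker gets a NumPy view of shared memory instead of a copy. A minimal sketch of that pattern, assuming a platform where the ctypes 'i' code is a 32-bit int:

import multiprocessing as mp
import numpy as np

shared = mp.Array('i', 5)  # synchronized shared memory for five C ints
view = np.frombuffer(shared.get_obj(), dtype=np.int32)
view[:] = np.arange(5)     # writes land in the shared buffer
print(list(shared))        # [0, 1, 2, 3, 4]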
Example 3: get_trace
def get_trace(f, num_points, big):
    """
    Get a trace from an open RNMRTK file.

    Parameters
    ----------
    f : file object
        Open file object to read from.
    num_points : int
        Number of points in trace (R+I).
    big : bool
        True for data that is big-endian, False for little-endian.

    Returns
    -------
    trace : ndarray
        Raw trace of NMR data.
    """
    if big:
        bsize = num_points * np.dtype('>f4').itemsize
        return np.frombuffer(f.read(bsize), dtype='>f4')
    else:
        bsize = num_points * np.dtype('<f4').itemsize
        return np.frombuffer(f.read(bsize), dtype='<f4')
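A hedged usage sketch, reusing get_trace from the example above and feeding it a synthetic big-endian trace through io.BytesIO rather than a real RNMRTK file:

import io
import numpy as np

raw = np.arange(4, dtype='>f4').tobytes()           # fake trace: 4 float32 points
trace = get_trace(io.BytesIO(raw), num_points=4, big=True)
print(trace)                                        # [0. 1. 2. 3.]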
Example 4: test_create_with_metadata
def test_create_with_metadata(self):
    for length in range(0, 1000, 3):
        # Create an object id string.
        object_id = random_object_id()
        # Create a random metadata string.
        metadata = generate_metadata(length)
        # Create a new buffer and write to it.
        memory_buffer = np.frombuffer(self.plasma_client.create(object_id,
                                                                length,
                                                                metadata),
                                      dtype="uint8")
        for i in range(length):
            memory_buffer[i] = i % 256
        # Seal the object.
        self.plasma_client.seal(object_id)
        # Get the object.
        memory_buffer = np.frombuffer(
            self.plasma_client.get_buffers([object_id])[0], dtype="uint8")
        for i in range(length):
            assert memory_buffer[i] == i % 256
        # Get the metadata.
        metadata_buffer = np.frombuffer(
            self.plasma_client.get_metadata([object_id])[0], dtype="uint8")
        assert len(metadata) == len(metadata_buffer)
        for i in range(len(metadata)):
            assert metadata[i] == metadata_buffer[i]
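This test writes into the array returned by np.frombuffer, which only works because plasma's create() hands back a mutable buffer. np.frombuffer inherits writability from its source, as a small sketch shows:

import numpy as np

view = np.frombuffer(bytearray(4), dtype='uint8')
view[:] = [1, 2, 3, 4]         # fine: bytearray is writable
print(view)                    # [1 2 3 4]
frozen = np.frombuffer(bytes(4), dtype='uint8')
print(frozen.flags.writeable)  # False: bytes is immutable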
Example 5: prepare_np_frame
def prepare_np_frame(self, shape_str, buf_str):
    '''
    Convert raw frame buffer to numpy array and apply warp perspective
    transformation.

    Emits:
        frame-update : New numpy video frame available with perspective
            transform applied.
    '''
    height, width, channels = np.frombuffer(shape_str, count=3,
                                            dtype='uint32')
    im_buf = np.frombuffer(buf_str, dtype='uint8',
                           count=len(buf_str)).reshape(height, width, -1)
    # Warp and scale
    if self.frame_shape != (width, height):
        # Frame shape has changed.
        old_frame_shape = self.frame_shape
        self.frame_shape = width, height
        self.emit('frame-shape-changed', old_frame_shape, self.frame_shape)
    if self.shape is None:
        self.shape = width, height
    np_warped = cv2.warpPerspective(im_buf, self.transform, self.shape)
    self.emit('frame-update', np_warped)
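Example 5 receives the frame geometry as a 12-byte header (three uint32 values) alongside the pixel buffer. A round trip of that protocol, with hypothetical frame data and no cv2 dependency:

import numpy as np

frame = np.zeros((480, 640, 3), dtype='uint8')
shape_str = np.array(frame.shape, dtype='uint32').tobytes()  # 12-byte header
buf_str = frame.tobytes()

height, width, channels = np.frombuffer(shape_str, count=3, dtype='uint32')
im = np.frombuffer(buf_str, dtype='uint8').reshape(height, width, -1)
print(im.shape)  # (480, 640, 3)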
Example 6: v2_apply_symmetry
def v2_apply_symmetry(self, symmetry, content):
    """
    Apply a random symmetry to a v2 record.
    """
    assert symmetry >= 0 and symmetry < 8
    # unpack the record.
    (ver, probs, planes, to_move, winner) = self.v2_struct.unpack(content)
    planes = np.unpackbits(np.frombuffer(planes, dtype=np.uint8))
    # We use the full length reflection tables to apply symmetry
    # to all 16 planes simultaneously
    planes = planes[self.full_reflection_table[symmetry]]
    assert len(planes) == 19*19*16
    planes = np.packbits(planes)
    planes = planes.tobytes()
    probs = np.frombuffer(probs, dtype=np.float32)
    # Apply symmetries to the probabilities.
    probs = probs[self.prob_reflection_table[symmetry]]
    assert len(probs) == 362
    probs = probs.tobytes()
    # repack record.
    return self.v2_struct.pack(ver, probs, planes, to_move, winner)
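The bit-plane shuffle above works because np.unpackbits expands each byte into eight bits, so a per-bit permutation table can be indexed directly, and np.packbits restores the byte string afterwards. A minimal round trip:

import numpy as np

planes = bytes([0b10110000, 0b00001111])
bits = np.unpackbits(np.frombuffer(planes, dtype=np.uint8))
print(bits)  # [1 0 1 1 0 0 0 0 0 0 0 0 1 1 1 1]
assert np.packbits(bits).tobytes() == planes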
Example 7: _read_data
def _read_data(self, fh, byteorder='>'):
    """Return image data from open file as numpy array."""
    fh.seek(len(self.header))
    data = fh.read()
    dtype = 'u1' if self.maxval < 256 else byteorder + 'u2'
    depth = 1 if self.magicnum == b"P7 332" else self.depth
    shape = [-1, self.height, self.width, depth]
    size = functools.reduce(operator.mul, shape[1:], 1)  # prod()
    if self.magicnum in b"P1P2P3":
        data = numpy.array(data.split(None, size)[:size], dtype)
        data = data.reshape(shape)
    elif self.maxval == 1:
        shape[2] = int(math.ceil(self.width / 8))
        data = numpy.frombuffer(data, dtype).reshape(shape)
        data = numpy.unpackbits(data, axis=-2)[:, :, :self.width, :]
    else:
        size *= numpy.dtype(dtype).itemsize
        data = numpy.frombuffer(data[:size], dtype).reshape(shape)
    if data.shape[0] < 2:
        data = data.reshape(data.shape[1:])
    if data.shape[-1] < 2:
        data = data.reshape(data.shape[:-1])
    if self.magicnum == b"P7 332":
        rgb332 = numpy.array(list(numpy.ndindex(8, 8, 4)), numpy.uint8)
        rgb332 *= [36, 36, 85]
        data = numpy.take(rgb332, data, axis=0)
    return data
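In the 1-bit branch (maxval == 1), each byte packs eight pixels of a bitmap row, and unpackbits expands them along the row axis before the padding bits are sliced off. A simplified sketch of the same idea, using axis=-1 on two 4-pixel rows instead of the full 4-D layout above:

import numpy as np

packed = np.frombuffer(bytes([0b10100000, 0b01010000]), dtype='u1').reshape(2, 1)
bits = np.unpackbits(packed, axis=-1)[:, :4]  # drop the 4 padding bits per row
print(bits)  # [[1 0 1 0]
             #  [0 1 0 1]]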
Example 8: create_vectors
def create_vectors(self, verbs):
    """Create vectors with simple frequency."""
    self.logger.info('Creating frequency vectors for %d features with '
                     '%s...', len(self.feats), verbs)
    j_indices = array.array(str('i'))
    indptr = array.array(str('i'))
    indptr.append(0)
    values = array.array(str('i'))
    for verb in verbs:
        verb_ngrams = verb.ngrams()
        for ngram in verb_ngrams:
            try:
                j_indices.append(self.feats[ngram])
            except KeyError:
                pass
            else:
                values.append(verb_ngrams[ngram])
        indptr.append(len(j_indices))
    j_indices = np.frombuffer(j_indices, dtype=np.intc)
    indptr = np.frombuffer(indptr, dtype=np.intc)
    values = np.frombuffer(values, dtype=np.intc)
    dtm = sparse.csr_matrix((values, j_indices, indptr),
                            shape=(len(indptr) - 1, len(self.feats)))
    dtm.sum_duplicates()
    if self.should_normalize:
        normalize(dtm)
    return dtm
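Accumulating the CSR components in array.array and converting once at the end avoids repeated NumPy reallocation; the final np.frombuffer call is a zero-copy view. In isolation, assuming the usual case where array.array('i') and np.intc are both the platform's C int:

import array
import numpy as np

j_indices = array.array('i', [0, 2, 2, 1])
idx = np.frombuffer(j_indices, dtype=np.intc)  # zero-copy view of the array.array
print(idx)  # [0 2 2 1]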
Example 9: get_cbs
def get_cbs(self, gene_id, cb_type):
    if cb_type == 'ub':
        return numpy.frombuffer(self.ubs[gene_id])
    elif cb_type == 'lb':
        return numpy.frombuffer(self.lbs[gene_id])
    else:
        assert False, "Unrecognized confidence bound type '%s'" % cb_type
Example 10: load_data
def load_data():
    """Loads the Fashion-MNIST dataset.

    # Returns
        Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
    """
    dirname = os.path.join('datasets', 'fashion-mnist')
    base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
    files = ['train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
             't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz']
    paths = []
    for file in files:
        paths.append(get_file(file, origin=base + file, cache_subdir=dirname))
    with gzip.open(paths[0], 'rb') as lbpath:
        y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)
    with gzip.open(paths[1], 'rb') as imgpath:
        x_train = np.frombuffer(imgpath.read(), np.uint8,
                                offset=16).reshape(len(y_train), 28, 28)
    with gzip.open(paths[2], 'rb') as lbpath:
        y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)
    with gzip.open(paths[3], 'rb') as imgpath:
        x_test = np.frombuffer(imgpath.read(), np.uint8,
                               offset=16).reshape(len(y_test), 28, 28)
    return (x_train, y_train), (x_test, y_test)
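The offset=8 and offset=16 arguments skip the IDX headers (a magic number plus big-endian dimension sizes) so frombuffer starts at the payload. A sketch with a synthetic IDX1 label blob:

import struct
import numpy as np

blob = struct.pack('>II', 2049, 3) + bytes([7, 2, 9])  # magic, count, labels
magic, n = struct.unpack('>II', blob[:8])
labels = np.frombuffer(blob, np.uint8, offset=8)
print(magic, n, labels)  # 2049 3 [7 2 9]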
Example 11: micro_step
def micro_step(self, adve=True, cond=True):
    """ Defining microphysical step """
    libopts = libcl.lgrngn.opts_t()
    libopts.cond = cond
    libopts.adve = adve
    libopts.coal = libopts.sedi = False
    self.micro.step_sync(libopts, self.state_micro["th_d"], self.state_micro["rv"], self.state_micro["rho_d"])
    self.micro.step_async(libopts)
    # absolute number of super-droplets per grid cell
    self.micro.diag_sd_conc()
    self.state_micro["sd"][:] = np.frombuffer(self.micro.outbuf())
    # number of particles (per kg of dry air) with r_w < .5 um
    self.micro.diag_wet_rng(0, .5e-6)
    self.micro.diag_wet_mom(0)
    self.state_micro["na"][:] = np.frombuffer(self.micro.outbuf())
    # number of particles (per kg of dry air) with r_w > .5 um
    self.micro.diag_wet_rng(.5e-6, 1)
    self.micro.diag_wet_mom(0)
    self.state_micro["nc"][:] = np.frombuffer(self.micro.outbuf())
    # cloud water mixing ratio [kg/kg] (same size threshold as above)
    self.micro.diag_wet_mom(3)
    rho_H2O = 1e3
    self.state_micro["rc"][:] = 4./3 * math.pi * rho_H2O * np.frombuffer(self.micro.outbuf())
Example 12: bits_float
def bits_float(BYTES):
    d0 = np.frombuffer(BYTES[0::3], dtype='u1').astype(float)
    d1 = np.frombuffer(BYTES[1::3], dtype='u1').astype(float)
    d2 = np.frombuffer(BYTES[2::3], dtype='i1').astype(float)
    d0 += 256 * d1
    d0 += 65536 * d2
    return d0
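bits_float reassembles packed little-endian 24-bit samples: the two low bytes are read unsigned ('u1') and the top byte signed ('i1'), so after the *256 and *65536 shifts the sum is the correct two's-complement value. Checking it on known samples, reusing bits_float from the example above:

raw = bytes([0x01, 0x00, 0x00,   # 1
             0xFF, 0xFF, 0xFF])  # -1 in two's complement
print(bits_float(raw))           # [ 1. -1.]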
Example 13: _term_counts_to_matrix
def _term_counts_to_matrix(self, n_doc, i_indices, j_indices, values):
    """Construct COO matrix from indices and values.

    i_indices and j_indices should be constructed with _make_int_array.
    """
    # array("i") corresponds to np.intc, which is also what scipy.sparse
    # wants for indices, so they won't be copied by the coo_matrix ctor.
    # The length check works around a bug in old NumPy versions:
    # http://projects.scipy.org/numpy/ticket/1943
    if len(i_indices) > 0:
        i_indices = np.frombuffer(i_indices, dtype=np.intc)
    if len(j_indices) > 0:
        j_indices = np.frombuffer(j_indices, dtype=np.intc)
    if self.dtype == np.intc and len(values) > 0:
        values = np.frombuffer(values, dtype=np.intc)
    else:
        # In Python 3.2, SciPy 0.10.1, the coo_matrix ctor won't accept an
        # array.array.
        values = np.asarray(values, dtype=self.dtype)
    shape = (n_doc, max(six.itervalues(self.vocabulary_)) + 1)
    spmatrix = sp.coo_matrix((values, (i_indices, j_indices)),
                             shape=shape, dtype=self.dtype)
    if self.binary:
        spmatrix.data.fill(1)
    return spmatrix
Example 14: _get_data
def _get_data(self):
    if self._train:
        data, label = self._train_data, self._train_label
    else:
        data, label = self._test_data, self._test_label
    namespace = 'gluon/dataset/' + self._namespace
    data_file = download(_get_repo_file_url(namespace, data[0]),
                         path=self._root,
                         sha1_hash=data[1])
    label_file = download(_get_repo_file_url(namespace, label[0]),
                          path=self._root,
                          sha1_hash=label[1])
    with gzip.open(label_file, 'rb') as fin:
        struct.unpack(">II", fin.read(8))
        label = np.frombuffer(fin.read(), dtype=np.uint8).astype(np.int32)
    with gzip.open(data_file, 'rb') as fin:
        struct.unpack(">IIII", fin.read(16))
        data = np.frombuffer(fin.read(), dtype=np.uint8)
        data = data.reshape(len(label), 28, 28, 1)
    self._data = nd.array(data, dtype=data.dtype)
    self._label = label
Example 15: plotdata
def plotdata(self, offset, nsamples=spksamples):
    f = self.datafile
    f.seek(offset*nchannels*4)
    data = np.frombuffer(f.read(4*nsamples*nchannels), dtype=np.float32)
    nsamples = len(data) // nchannels
    t = np.arange(offset, offset+nsamples)/samplingrate
    axis = [t.min(), t.max(), -maxamp, maxamp]
    for p in self.p:
        if p: p.remove()
    for p in self.spk:
        p.remove()
    for i in range(nchannels):
        ax = self.ax[i]
        self.p[i], = ax.plot(t, data[i::nchannels], 'k-',
                             scalex=False, scaley=False)
        ax.axis(axis)
    f = self.validationdata
    f.seek(offset*4)
    data = np.frombuffer(f.read(4*nsamples), dtype=np.float32)
    data = np.convolve(self.filt, data)[self.filti:-self.filtj]
    ax = self.ax[nchannels]
    self.p[nchannels], = ax.plot(t, data, 'k-',
                                 scalex=False, scaley=False)
    ax.axis([t.min(), t.max(), -self.validationamp, self.validationamp])
    return nsamples
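The data[i::nchannels] stride in the plotting loop is how sample-interleaved float32 frames are split back into per-channel traces. In isolation, with tiny synthetic data:

import numpy as np

nchannels = 2
raw = np.array([0., 10., 1., 11., 2., 12.], dtype=np.float32).tobytes()
data = np.frombuffer(raw, dtype=np.float32)  # interleaved: ch0, ch1, ch0, ...
for i in range(nchannels):
    print(i, data[i::nchannels])  # 0 [0. 1. 2.] then 1 [10. 11. 12.]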