This page collects typical usage examples of the Python function sharedmem.empty. If you are wondering what the empty function does, how to call it, or want concrete usage samples, the curated code examples below may help.
The following presents 15 code examples of the empty function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
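Before the project examples, a minimal self-contained sketch of the pattern they all share may be useful: sharedmem.empty allocates a numpy-compatible array backed by shared memory, so worker processes forked by sharedmem.MapReduce can fill it in place. This sketch is illustrative and is not taken from any of the projects listed below.

import numpy
import sharedmem

# allocate a float64 array in shared memory; forked workers see it directly
squares = sharedmem.empty(16, dtype='f8')

with sharedmem.MapReduce() as pool:
    def work(i):
        # each worker writes its own slot in place; no pickling of results
        squares[i] = i * i
    pool.map(work, range(len(squares)))

assert (squares == numpy.arange(16) ** 2).all()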
Example 1: main
def main(A):
    """ Match the mean fraction by fitting the prefactors A(a) and B(a) on tau.

        Requires 'gaussian' to be finished.
        Run before convolve, though it uses functions in convolve
        for evaluating the cost function.
    """
    global meanfractionmodel
    global varfractionmodel

    varfractionmodel = VarFractionModel(A)
    meanfractionmodel = MeanFractionModel(A)

    Nbins = 8
    zBins = numpy.linspace(2.0, 4.0, Nbins + 1, endpoint=True)
    LogLamBins = numpy.log10(1216.0 * (1 + zBins))
    z = 0.5 * (zBins[1:] + zBins[:-1])

    # per-bin fit results are accumulated in shared memory
    Af = sharedmem.empty(z.shape)
    Bf = sharedmem.empty(z.shape)
    xmeanF = sharedmem.empty(z.shape)
    xstdF = sharedmem.empty(z.shape)

    def work(i):
        if i > 0:
            Afguess, Bfguess = Af[i - 1], Bf[i - 1]
        else:
            Afguess, Bfguess = (0.00015, 1.5)
        Af[i], Bf[i], xmeanF[i], xstdF[i] = fitRange(
                A, LogLamBins[i], LogLamBins[i + 1], Afguess, Bfguess)

    # map() is lazy in Python 3; iterate explicitly to force evaluation
    for i in range(Nbins):
        work(i)

    numpy.savez(A.MatchMeanFractionOutput, a=1 / (z + 1),
                Af=Af, Bf=Bf, xmeanF=xmeanF, xvarF=xstdF ** 2)
Example 2: __init__
def __init__(self, face_width, res_x, res_y, f_x, f_y, fan_position,
             visualize=False):
    # Initialize the multiprocessing.Process parent
    multiprocessing.Process.__init__(self)
    # Exit event for stopping the process
    self._exit = multiprocessing.Event()
    # Event that is set every time a new servo angle position has been computed
    self.newposition_event = multiprocessing.Event()
    # An array in shared memory for storing the current face position
    self._currentface = sharedmem.empty((4, 1), dtype='int16')
    # An array in shared memory for storing the current position angles
    self._currentangles = sharedmem.empty((2, 1), dtype='float')
    self._facewidth = face_width
    self._res_x = res_x
    self._res_y = res_y
    self._f_x = f_x
    self._f_y = f_y
    self._fan_position = fan_position
    # Defines whether to visualize the servo angle positions
    self._visualize = visualize
Example 3: test_memory_type
def test_memory_type():
    a = sharedmem.empty(100)
    b = sharedmem.empty(100)

    # sharedmem.empty returns a subclass of ndarray,
    # but ufunc results fall back to plain numpy arrays
    assert isinstance(b, type(a))
    assert not isinstance(a + 10, type(a))
    assert not isinstance(numpy.sum(a), type(a))
    assert not isinstance(a + b, type(a))
    assert not isinstance(a * b, type(a))
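A practical consequence of the type behavior tested above: since arithmetic results fall back to plain numpy arrays, a result must be written into a preallocated shared array to stay in shared memory. A small illustrative sketch:

a = sharedmem.empty(100)
b = sharedmem.empty(100)
out = sharedmem.empty(100)   # preallocate the result in shared memory
numpy.add(a, b, out=out)     # writes in place; `a + b` alone returns a plain ndarray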
Example 4: __init__
def __init__(self, config):
    def getfilename(mock):
        dir = os.path.join(config.prefix, mock)
        paramfile = os.path.join(config.prefix, mock, 'paramfile')
        c = Config(paramfile, basedir=dir)
        return os.path.join(c.datadir, 'bootstrap.npz')

    if config.UseMocks is None:
        filenames = sorted(list(glob(os.path.join(config.prefix,
                '[0-9]*', '*', 'bootstrap.npz'))))
    else:
        filenames = [getfilename(mock) for mock in config.UseMocks]

    files = [numpy.load(f) for f in filenames]
    print('using', len(filenames), 'files', filenames)

    self.r = files[0]['r']
    self.mu = files[0]['mu']
    # because they all share the same cosmology
    self.eigenmodes = EigenModes(
            numpy.load(config.EigenModesOutput)['eigenmodes'])

    self.DQDQ, self.RQDQ, self.RQRQ = sharedmem.empty(
            [3, len(files)] + list(files[0]['DQDQ'].shape))
    self.DQDFsum1, self.RQDFsum1, self.DFDFsum1 = sharedmem.empty(
            [3, len(files)] + list(files[0]['DQDQ'].shape))
    self.DQDFsum2, self.RQDFsum2, self.DFDFsum2 = sharedmem.empty(
            [3, len(files)] + list(files[0]['DQDQ'].shape))
    self.ND, self.NR = sharedmem.empty(
            [2, len(files)] + list(files[0]['Qchunksize'].shape))

    def read(i):
        file = files[i]
        self.DQDQ[i] = file['DQDQ']
        self.RQDQ[i] = file['RQDQ']
        self.RQRQ[i] = file['RQRQ']
        self.DQDFsum1[i] = file['DQDFsum1'][0]
        self.RQDFsum1[i] = file['RQDFsum1'][0]
        self.DFDFsum1[i] = file['DFDFsum1'][0]
        self.DQDFsum2[i] = file['DQDFsum2']
        self.RQDFsum2[i] = file['RQDFsum2']
        self.DFDFsum2[i] = file['DFDFsum2']
        self.ND[i] = file['Qchunksize']
        self.NR[i] = file['Rchunksize']

    chunkmap(read, range(len(files)), 1)

    self.Nchunks = self.DQDQ[0].shape[0]
    # build the correlation function on the first sample
    # and use it as a template
    self.dummy = self(0)
Example 5: getforest
def getforest(A, Zmin, Zmax, RfLamMin, RfLamMax, combine=1):
    spectra = SpectraOutput(A)
    meanFred = MeanFractionMeasured(A, kind='red')
    meanFreal = MeanFractionMeasured(A, kind='real')

    combine = numpy.minimum(spectra.sightlines.Npixels.max(), combine)

    # every `combine` pixels will be merged into one
    Npixels1 = spectra.sightlines.Npixels // combine
    Offset1 = numpy.concatenate([[0], numpy.cumsum(Npixels1)])
    Npixels = Npixels1.sum()

    print(Npixels1.min(), Npixels1.max())
    print(spectra.sightlines.Npixels.min(), spectra.sightlines.Npixels.max())

    data = sharedmem.empty(Npixels, ('f4', 3))
    DFred, DFreal, Delta = data.T

    pos = sharedmem.empty(Npixels, ('f4', 3))
    x, y, z = pos.T

    mask = sharedmem.empty(Npixels, '?')
    id = sharedmem.empty(Npixels, 'i4')

    # touch the (presumably lazily evaluated) tau attributes once,
    # so they are computed before the worker processes fork
    spectra.taured
    spectra.taureal

    def work(i):
        def combinepixels(value, method=numpy.mean):
            # reduce the number of pixels with 'method'
            return method(
                    value[:Npixels1[i] * combine]
                    .reshape([Npixels1[i]] + [combine]),
                    axis=-1)

        sl = slice(Offset1[i], Npixels1[i] + Offset1[i])

        a = spectra.a[i]
        Fred = numpy.exp(-spectra.taured[i]) / meanFred(a) - 1
        Freal = numpy.exp(-spectra.taureal[i]) / meanFreal(a) - 1
        DFred[sl] = combinepixels(Fred)
        DFreal[sl] = combinepixels(Freal)
        Delta[sl] = combinepixels(spectra.delta[i])

        p = spectra.position(i)
        x[sl] = combinepixels(p[:, 0])
        y[sl] = combinepixels(p[:, 1])
        z[sl] = combinepixels(p[:, 2])

        m = spectra.z[i] > Zmin
        m &= spectra.z[i] < Zmax
        m &= spectra.RfLam(i) > RfLamMin
        m &= spectra.RfLam(i) < RfLamMax
        mask[sl] = combinepixels(m, method=numpy.all)
        id[sl] = i

    chunkmap(work, range(len(spectra)), 100)

    return data[mask], pos[mask], id[mask]
Example 6: main
def main(config):
    global cov

    DB = BootstrapDB(config)

    MASK = DB.dummy.imesh >= 0
    MASK &= DB.dummy.rmesh <= config.rmax
    MASK &= DB.dummy.rmesh >= config.rmin

    print("dof in fitting", MASK.sum())

    # create a dummy to test the fitting
    p0 = [-0.2, 3.5, 1.5, 1.5]
    eigenmodes = DB.eigenmodes
    dummy = eigenmodes(p0)

    covfull = numpy.load(config.CovarianceMatrixOutput)["cov"]
    cov = covfull[MASK][:, MASK]

    print("inverting")
    INV = linalg.inv(covfull[MASK][:, MASK])
    print("inverted")

    x, chi = fit1(dummy, eigenmodes, INV, MASK)
    print("x =", x)
    print("p0 = bF, bQ, BF, BQ", p0)

    error = poles_err(dummy, covfull)

    # shared arrays collecting the per-sample fit results from the workers
    fitted = sharedmem.empty((len(DB), len(p0)))
    chi = sharedmem.empty(len(DB))
    samples, models = [], []

    sharedmem.set_debug(True)

    def work(i):
        sample = DB(i)
        print("fitting", i)
        fitted[i], chi[i] = fit1(sample, eigenmodes, INV, MASK)
        model = eigenmodes(fitted[i])
        print(list(zip(sample[0].monopole, model[0].monopole)))
        return i, sample, model

    def reduce(rt):
        i, s, m = rt
        samples.append((i, s))
        models.append((i, m))

    chunkmap(work, range(len(DB)), 100, reduce=reduce)

    samples = [s for i, s in sorted(samples)]
    models = [s for i, s in sorted(models)]

    numpy.savez("fit.npz", samples=samples, models=models,
                fittedparameters=fitted, chi=chi, error=error)
Example 7: argsort
from numpy import minimum, maximum, digitize, bincount

def argsort(ar):
    min = minimum.reduce(ar)
    max = maximum.reduce(ar)
    nchunk = sharedmem.cpu_count() * 2

    #bins = numpy.linspace(min, max, nchunk, endpoint=True)
    bins = numpy.array(
            1.0 * numpy.arange(nchunk + 1) * (max - min) / nchunk + min,
            min.dtype)

    # clip so that elements equal to the maximum land in the last chunk
    # instead of the overflow bin nchunk + 1, which the worker loop
    # below would never visit
    dig = digitize(ar, bins).clip(1, nchunk)

    binlength = bincount(dig, minlength=len(bins) + 1)
    binoffset = numpy.cumsum(binlength)
    out = sharedmem.empty(len(ar), dtype='intp')

    with sharedmem.MapReduce() as pool:
        def work(i):
            # this could be done a lot faster, but the speed is
            # already pretty good: each worker argsorts one value
            # range independently
            ind = numpy.nonzero(dig == i + 1)[0]
            myar = ar[ind]
            out[binoffset[i]:binoffset[i + 1]] = ind[myar.argsort()]
        pool.map(work, range(nchunk))
    return out
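A quick sanity check for the chunked argsort above (hypothetical test code, assuming numpy and sharedmem are importable):

data = numpy.random.uniform(size=1000000)
ind = argsort(data)
# the returned permutation must sort the data exactly
assert (data[ind] == numpy.sort(data)).all()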
Example 8: call
def call(self, args, axis=0, out=None, chunksize=1024 * 1024, **kwargs):
    """ axis is the axis along which the inputs are chopped into chunks.

        If self.altreduce is set, the results will be reduced with
        altreduce and returned; otherwise they are saved to out,
        and out is returned.
    """
    if self.altreduce is not None:
        ret = [None]
    else:
        if out is None:
            if self.outdtype is not None:
                dtype = self.outdtype
            else:
                try:
                    dtype = numpy.result_type(*[args[i] for i in self.ins] * 2)
                except Exception:
                    dtype = None
            out = sharedmem.empty(
                    numpy.broadcast(*[args[i] for i in self.ins] * 2).shape,
                    dtype=dtype)

    if axis != 0:
        for i in self.ins:
            args[i] = numpy.rollaxis(args[i], axis)
        # out is None when altreduce is set and nothing was preallocated
        if out is not None:
            out = numpy.rollaxis(out, axis)

    size = numpy.max([len(args[i]) for i in self.ins])

    with sharedmem.MapReduce() as pool:
        def work(i):
            sl = slice(i, i + chunksize)
            myargs = args[:]
            for j in self.ins:
                myargs[j] = myargs[j][sl]
                a, b, c = sl.indices(len(args[j]))
            if b == a:
                # empty chunk, nothing to do
                return None
            rt = self.ufunc(*myargs, **kwargs)
            if self.altreduce is not None:
                return rt
            else:
                out[sl] = rt

        def reduce(rt):
            if self.altreduce is None:
                return
            if ret[0] is None:
                ret[0] = rt
            elif rt is not None:
                ret[0] = self.altreduce(ret[0], rt)

        pool.map(work, range(0, size, chunksize), reduce=reduce)

    if self.altreduce is None:
        if axis != 0:
            out = numpy.rollaxis(out, 0, axis + 1)
        return out
    else:
        return ret[0]
Example 9: test_critical
def test_critical():
    t = sharedmem.empty((), dtype='i8')
    t[...] = 0
    # FIXME: if the system has only one core, this will never fail,
    # even if the critical section is not effective
    with sharedmem.MapReduce(np=8) as pool:
        def work(i):
            with pool.critical:
                t[...] = 1
                if i != 30:
                    time.sleep(0.01)
                assert_equal(t, 1)
                t[...] = 0

        pool.map(work, range(16))

        def work(i):
            t[...] = 1
            if i != 30:
                time.sleep(0.01)
            assert_equal(t, 1)
            t[...] = 0

        try:
            pool.map(work, range(16))
        except sharedmem.SlaveException as e:
            assert isinstance(e.reason, AssertionError)
            return

    raise AssertionError("Shall not reach here.")
Example 10: generate_shared_array
def generate_shared_array(unshared_arr, dtype):
    r"""Creates synchronized shared arrays from numpy arrays.

    The function takes a numpy array `unshared_arr` and returns a shared
    memory object, `shared_arr`. The user also specifies the data-type of
    the values in the array with the `dtype` argument. See
    multiprocessing.Array and ctypes for details on shared memory arrays
    and the data-types.

    Parameters
    ----------
    unshared_arr : ndarray
        The numpy array to copy into shared memory.
    dtype : ctypes instance
        The data-type specified has to be an instance of the ctypes
        library. See ctypes for details.

    Returns
    -------
    shared_arr : synchronized shared array
        An array that is read accessible from multiple processes/threads.
    """
    shared_arr = sharedmem.empty(unshared_arr.shape, dtype=dtype)
    shared_arr[:] = unshared_arr[:]
    return shared_arr
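A hypothetical usage sketch for the helper above; the ctypes.c_double choice is illustrative (numpy accepts ctypes types as dtypes):

import ctypes
import numpy

src = numpy.arange(10, dtype='f8')
shared = generate_shared_array(src, ctypes.c_double)
# processes forked after this point can read `shared` without copying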
Example 11: main
def main(config):
    DB = BootstrapDB(config)

    Ndof = len(DB.dummy.compress())

    if mpicov.world.rank == 0:
        numpy.random.seed(9999)
        seeds = numpy.random.randint(low=0, high=99999999999, size=config.BigN)
    else:
        seeds = []

    myseeds = mpicov.world.scatter(numpy.array_split(seeds, mpicov.world.size))
    print('This Task =', mpicov.world.rank,
          'Number of samples =', len(myseeds),
          'seed0 =', myseeds[0])

    myxi = sharedmem.empty((len(myseeds), Ndof), dtype='f8')

    def work(i):
        rng = numpy.random.RandomState(myseeds[i])
        choice = rng.choice(len(DB), size=DB.Nchunks)
        sample = DB(choice)
        myxi[i][...] = sample.compress()

    print('build samples')
    chunkmap(work, range(len(myxi)), 100)
    print('done samples')

    print('covariance matrix')
    cov = mpicov.cov(myxi, rowvar=0, ddof=0)
    print('done covariance matrix')

    print(numpy.nanmin(numpy.diag(cov)))

    if mpicov.world.rank == 0:
        numpy.savez(config.CovarianceMatrixOutput, cov=cov,
                    BigN=config.BigN, dummy=DB.dummy,
                    xi_cov=DB.dummy.copy().uncompress(myxi[0]),
                    r=DB.r, mu=DB.mu)
Example 12: create_video_pipe
def create_video_pipe(video, name=None, read_ahead=False):
    """ creates the two ends of a video pipe.

    The typical use case is

    def worker_process(self, video):
        ''' worker process processing a video '''
        expensive_function(video)

    if __name__ == '__main__':
        # load a video file
        video = VideoFile('test.mov')
        # create the video pipe
        sender, receiver = create_video_pipe(video)
        # create the worker process
        proc = multiprocessing.Process(target=worker_process,
                                       args=(receiver,))
        proc.start()
        sender.start()
    """
    # create the pipe used for communication
    pipe_sender, pipe_receiver = mp.Pipe(duplex=True)
    # create the buffer in memory that is used for passing frames
    frame_buffer = sharedmem.empty(video.shape[1:], np.uint8)
    # create the two ends of the video pipe
    sender = VideoPipeSender(video, pipe_sender, frame_buffer,
                             name, read_ahead)
    receiver = VideoPipeReceiver(pipe_receiver, frame_buffer,
                                 video.video_format, name)
    return sender, receiver
Example 13: test_local
def test_local():
    t = sharedmem.empty(800)
    with sharedmem.MapReduce(np=4) as pool:
        def work(i):
            time.sleep(0.1 * numpy.random.uniform())
            with pool.ordered:
                t[i] = pool.local.rank
        pool.map(work, range(800))
    assert_equal(numpy.unique(t), range(4))
Example 14: test_sum
def test_sum():
    """
    Integrate [0, ... 1.0) with the rectangle rule.

    Compare results from
    1. the direct sum of 'xdx' (filled by subprocesses),
    2. 'shmsum', accumulated via partial sums on each process,
    3. the sum of the partial sums from each process.
    """
    xdx = sharedmem.empty(1024 * 1024 * 128, dtype='f8')
    shmsum = sharedmem.empty((), dtype='f8')
    shmsum[...] = 0.0

    chunksize = 1024 * 1024

    with sharedmem.MapReduce() as pool:
        def work(i):
            s = slice(i, i + chunksize)
            start, end, step = s.indices(len(xdx))
            dx = 1.0 / len(xdx)
            myxdx = numpy.arange(start, end, step) \
                    * 1.0 / len(xdx) * dx
            xdx[s] = myxdx
            a = xdx[s].sum(dtype='f8')
            with pool.critical:
                shmsum[...] += a
            return i, a

        def reduce(i, a):
            # print('chunk', i, 'done, local sum', a)
            return a

        r = pool.map(work, range(0, len(xdx), chunksize), reduce=reduce)

    assert_almost_equal(numpy.sum(r, dtype='f8'), shmsum)
    assert_almost_equal(numpy.sum(xdx, dtype='f8'), shmsum)
Example 15: __init__
def __init__(self, x, y, scale_factor=1.1, minsize=(60, 60),
             classifier='haarcascade_frontalface_alt2.xml',
             use_lowpass=True, lowpass_rc=50,
             visualize=False):
    # Initialize the multiprocessing.Process parent
    multiprocessing.Process.__init__(self)
    # Exit event for stopping the process
    self._exit = multiprocessing.Event()
    # Event that is set every time a face is detected
    self.newface_event = multiprocessing.Event()
    # Event that pauses the main loop if set
    self._pause_event = multiprocessing.Event()
    # An array in shared memory to store the current image frame
    self._currentframe = sharedmem.empty((y, x), dtype='uint8')
    # Set camera parameters
    self._x = x
    self._y = y
    # Set parameters for the face detection algorithm
    self._scale_factor = scale_factor
    self._minsize = minsize
    self._classifier_file = classifier
    self._use_lowpass = use_lowpass
    self._lowpass_rc = lowpass_rc
    # Defines whether to visualize the camera output
    self._visualize = visualize
    # A tuple for storing the current width and height of a face
    self._currentface = sharedmem.empty((4, 1), dtype='float')
    # A tuple for storing the last width and height of a face
    self._lastface = sharedmem.empty((4, 1), dtype='float')
    # Set up a multiscale classifier
    self._classifier = cv2.CascadeClassifier(self._classifier_file)