This article collects typical usage examples of the multiprocessing.Array class in Python. If you have been wondering what exactly Array is for and how to use it in practice, the hand-picked class examples below should help.
The following presents 15 code examples of the Array class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
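
Before the examples, a quick note on what they all build on: multiprocessing.Array allocates a ctypes array in shared memory that parent and child processes can both read and write; unless created with lock=False, it also carries a lock for synchronized access. Here is a minimal sketch of the basic API (the typecode and sizes are illustrative choices, not taken from any example below):

import ctypes
from multiprocessing import Array, Process, Value

def fill(shared, counter):
    # the proxies passed as arguments refer to the same shared memory
    # in the child process
    with shared.get_lock():  # present unless lock=False was used
        for i in range(len(shared)):
            shared[i] = float(i)
    with counter.get_lock():
        counter.value += 1

if __name__ == "__main__":
    shared = Array(ctypes.c_double, 8)  # eight doubles, zero-initialized
    counter = Value(ctypes.c_int, 0)
    p = Process(target=fill, args=(shared, counter))
    p.start()
    p.join()
    print(shared[:], counter.value)  # [0.0, 1.0, ..., 7.0] 1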
Example 1: steps_multiprocessing
def steps_multiprocessing(self, number_of_steps, plot, plot_every_n):
    """
    Same as take_steps, but using multiprocessing.

    Parameters
    ----------
    number_of_steps : int
        Total number of time steps.
    plot : object
        A make_plot object.
    plot_every_n : int
        The plot is refreshed every plot_every_n time steps.
    """
    shared_array_base = Array(ctypes.c_double, len(self.bodies) * 2)
    shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
    shared_array = shared_array.reshape(2, len(self.bodies))
    counter = Value(ctypes.c_int64, 0)
    end_plot = Value(ctypes.c_int8, 1)
    old_counter = 1

    rk_fun = Process(target=self.rk_fun_task,
                     args=(number_of_steps, plot_every_n, shared_array, end_plot, counter))
    plot_fun = Process(target=self.plot_fun_task,
                       args=(old_counter, shared_array, end_plot, counter, plot))
    rk_fun.start()
    plot_fun.start()
    rk_fun.join()
    plot_fun.join()
Example 2: get_predict
def get_predict(args, ortho, model):
    xp = cuda.cupy if args.gpu >= 0 else np
    args.h_limit, args.w_limit = ortho.shape[0], ortho.shape[1]
    args.canvas_h = args.h_limit
    args.canvas_w = args.w_limit

    # to share 'canvas' between different processes
    canvas_ = Array(ctypes.c_float, args.canvas_h * args.canvas_w * args.channels)
    canvas = np.ctypeslib.as_array(canvas_.get_obj())
    canvas = canvas.reshape((args.canvas_h, args.canvas_w, args.channels))

    # prepare queues and worker processes
    patch_queue = Queue(maxsize=5)
    preds_queue = Queue()
    patch_worker = Process(target=create_minibatch, args=(args, ortho, patch_queue))
    canvas_worker = Process(target=tile_patches, args=(args, canvas, preds_queue))
    patch_worker.start()
    canvas_worker.start()

    while True:
        minibatch = patch_queue.get()
        if minibatch is None:
            break
        minibatch = Variable(xp.asarray(minibatch, dtype=xp.float32), volatile=True)
        preds = model(minibatch, None).data
        if args.gpu >= 0:
            preds = xp.asnumpy(preds)
        for pred in preds:
            preds_queue.put(pred)

    preds_queue.put(None)
    patch_worker.join()
    canvas_worker.join()
    return canvas
Example 3: conv_single_image
def conv_single_image(image):
    # allocate shared memory the size of the image and expose it as a numpy array
    shared_array_base = Array(ctypes.c_double, image.size)
    shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
    shared_array = shared_array.reshape(image.shape)
    shared_array[:] = image
    return shared_array
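
A caveat about this pattern: the numpy view created by np.ctypeslib.as_array shares memory with other processes only if the child inherits it (the fork start method) or receives the underlying Array explicitly as a Process argument; sending a plain numpy array to a child pickles a copy instead. A minimal sketch of the inheritance-based variant, with illustrative names, assuming the fork start method (the Linux default):

import ctypes
from multiprocessing import Array, Process
import numpy as np

shared_base = Array(ctypes.c_double, 4)
shared = np.ctypeslib.as_array(shared_base.get_obj()).reshape(2, 2)

def child():
    shared[:] = 1.0  # lands in shared memory, not in a copy

if __name__ == "__main__":
    p = Process(target=child)  # 'shared' is inherited under fork
    p.start()
    p.join()
    print(shared)  # all ones if the memory was truly shared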
Example 4: run
def run(args):
    # create dummy environment to be able to create model
    env = gym.make(args.environment)
    assert isinstance(env.observation_space, Box)
    assert isinstance(env.action_space, Discrete)
    print("Observation space:", env.observation_space)
    print("Action space:", env.action_space)

    # create main model
    model = create_model(env, args)
    model.summary()
    env.close()

    # use the spawn start method for better compatibility with Theano and Tensorflow
    multiprocessing.set_start_method('spawn')

    # create shared buffer for sharing weights
    blob = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
    shared_buffer = Array('c', len(blob))
    shared_buffer.raw = blob

    # force runner processes to use cpu
    os.environ["CUDA_VISIBLE_DEVICES"] = ""

    # create fifos and processes for all runners
    fifos = []
    for i in range(args.num_runners):
        fifo = Queue(args.queue_length)
        fifos.append(fifo)
        process = Process(target=runner, args=(shared_buffer, fifo, args))
        process.start()

    # start trainer in main thread
    trainer(model, fifos, shared_buffer, args)
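
The runner function is not part of this snippet. On the receiving side, the counterpart of shared_buffer.raw = blob is presumably to unpickle the byte content of the 'c'-typecode Array whenever fresh weights are needed. A hypothetical sketch (the signature follows the Process call above; the body is illustrative):

import pickle

def runner(shared_buffer, fifo, args):
    # .raw is the raw byte content of a 'c'-typecode Array
    weights = pickle.loads(shared_buffer.raw)
    # ... build a local CPU model, apply the weights, generate
    # experience, and push it into the fifo for the trainer ...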
Example 5: shared_np_array
def shared_np_array(shape, data=None, integer=False):
    """Create a new numpy array that resides in shared memory.

    Parameters
    ----------
    shape : tuple of ints
        The shape of the new array.
    data : numpy.array
        Data to copy to the new array. Has to have the same shape.
    integer : boolean
        Whether to use an integer array. Defaults to False, which means
        a float array.
    """
    size = np.prod(shape)
    if integer:
        array = Array(ctypes.c_int64, int(size))
        np_array = np.frombuffer(array.get_obj(), dtype="int64")
    else:
        array = Array(ctypes.c_double, int(size))
        np_array = np.frombuffer(array.get_obj())
    np_array = np_array.reshape(shape)

    if data is not None:
        if len(shape) != len(data.shape):
            raise ValueError("`data` must have the same dimensions "
                             "as the created array.")
        same = all(x == y for x, y in zip(shape, data.shape))
        if not same:
            raise ValueError("`data` must have the same shape "
                             "as the created array.")
        np_array[:] = data
    return np_array
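
Typical usage of this helper might look as follows (a sketch; shared_np_array is the function above):

import numpy as np

# float array initialized from existing data
weights = shared_np_array((2, 3), data=np.arange(6, dtype=float).reshape(2, 3))

# zero-filled int64 array, e.g. for counters updated by worker processes
counts = shared_np_array((4,), integer=True)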
Example 6: initialize
def initialize(self):
    # Create shared arrays (lock-free) and manager-backed lists.
    self.prev_values = Array('d', self.prev_values, lock=False)
    self.next_values = Array('d', self.next_values, lock=False)
    for key in self.records:
        self.records[key] = manager.list()
    for key in self.spikes:
        self.spikes[key] = manager.list()
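
The manager used here is not defined in the snippet; it is presumably a module-level multiprocessing.Manager(), which would make records and spikes proxied lists that worker processes can append to:

from multiprocessing import Manager

# assumed module-level setup shared by these helpers
manager = Manager()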
Example 7: __init__
class SynapseEnvironment:
    def __init__(self, noise=0.0):
        def beta(maximum, rate=1.0):
            return betav(maximum, noise=noise, rate=rate)
        self.beta = beta

        self.prev_concentrations = []
        self.next_concentrations = []

    def initialize(self):
        # Create shared (lock-free) arrays.
        self.prev_concentrations = Array('d', self.prev_concentrations, lock=False)
        self.next_concentrations = Array('d', self.next_concentrations, lock=False)
        self.dirty = Value('b', True, lock=False)

    def register(self, baseline_concentration):
        pool_id = len(self.prev_concentrations)
        self.prev_concentrations.append(baseline_concentration)
        self.next_concentrations.append(baseline_concentration)
        return pool_id

    def get_concentration(self, pool_id):
        try: self.dirty
        except AttributeError: self.initialize()
        return self.prev_concentrations[pool_id]

    def set_concentration(self, pool_id, new_concentration):
        try: self.dirty.value = True
        except AttributeError: self.initialize()
        self.next_concentrations[pool_id] = new_concentration

    def add_concentration(self, pool_id, molecules):
        try: self.dirty.value = True
        except AttributeError: self.initialize()
        self.next_concentrations[pool_id] += molecules

    def remove_concentration(self, pool_id, molecules):
        try: self.dirty.value = True
        except AttributeError: self.initialize()
        self.next_concentrations[pool_id] -= molecules
        self.next_concentrations[pool_id] = \
            max(0.0, self.next_concentrations[pool_id])

    def step(self):
        """
        Cycles the environment.
        Returns whether the environment is stable (not dirty, no changes).
        """
        try: self.dirty
        except AttributeError: self.initialize()

        if self.dirty.value:
            self.dirty.value = False
            for i in range(len(self.prev_concentrations)):
                self.prev_concentrations[i] = self.next_concentrations[i]
            return False
        else:
            return True
Example 8: __init__
def __init__(self, size=1000, data_type="int32"):
    self.data_type = data_type
    self.head = Value("i", 0)
    self.ring_buffer = Array(data_type[0], range(size))
    self.size = size
    for i in range(size):
        self.ring_buffer[i] = 0  # probably really slow, but not done often
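
Worth knowing: when Array receives an integer size instead of a sequence, the shared memory is zero-initialized, so the construction above could drop the explicit loop:

# equivalent, and zero-filled without the per-element loop
self.ring_buffer = Array(data_type[0], size)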
Example 9: calculatePearsonCorrelationMatrixMultiprocessing
def calculatePearsonCorrelationMatrixMultiprocessing(matrix, axis=0, symmetrical=True, getpvalmat=False):
    if axis == 1:
        matrix = matrix.T

    nRows = matrix.shape[0]

    # create shared arrays that can be used from multiple processes
    output_r_arr = Array(ctypes.c_double, matrix.shape[0] * matrix.shape[0])
    output_p_arr = Array(ctypes.c_double, matrix.shape[0] * matrix.shape[0])

    # wrap the shared memory in numpy arrays; the Array and the
    # numpy view share the same memory
    output_r = np.frombuffer(output_r_arr.get_obj())
    output_r = output_r.reshape((matrix.shape[0], matrix.shape[0]))  # make it two-dimensional
    output_p = np.frombuffer(output_p_arr.get_obj())
    output_p = output_p.reshape((matrix.shape[0], matrix.shape[0]))

    print('Calculating Pearson R for each row, multiprocessing')
    print(mp.cpu_count(), 'processes in pool')

    pool = None
    try:
        pool = mp.Pool(mp.cpu_count(),
                       initializer=_init_pool,
                       initargs=(matrix, output_r_arr, output_p_arr,
                                 nRows, symmetrical))
        for result in tqdm(pool.imap_unordered(_f, range(0, nRows)), total=nRows):
            pass
    finally:
        # make sure processes are closed in the end, even if errors happen
        if pool is not None:
            pool.close()
            pool.join()

    print(output_r)

    if getpvalmat:
        return output_r, output_p
    else:
        return output_r
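
The _init_pool and _f helpers are referenced but not included in this snippet. The usual shape of this pattern is an initializer that stashes the shared state in module-level globals of each worker, plus a work function that wraps the shared Arrays in numpy views before filling one row. A hypothetical sketch (the use of scipy.stats.pearsonr is an assumption):

import numpy as np
from scipy.stats import pearsonr

def _init_pool(matrix, r_arr, p_arr, n_rows, symmetrical):
    # runs once in every worker; stash shared state in globals
    global _matrix, _r_arr, _p_arr, _n_rows, _symmetrical
    _matrix, _r_arr, _p_arr = matrix, r_arr, p_arr
    _n_rows, _symmetrical = n_rows, symmetrical

def _f(i):
    # numpy views over the shared memory, local to this worker
    r = np.frombuffer(_r_arr.get_obj()).reshape((_n_rows, _n_rows))
    p = np.frombuffer(_p_arr.get_obj()).reshape((_n_rows, _n_rows))
    start = i if _symmetrical else 0
    for j in range(start, _n_rows):
        r[i, j], p[i, j] = pearsonr(_matrix[i], _matrix[j])
        if _symmetrical:
            r[j, i], p[j, i] = r[i, j], p[i, j]
    return i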
Example 10: initialize
def initialize(self, nChannels, nSamples, windowSize=1, nptype='float32'):
    '''
    Initializes the buffer with a new raw array.

    Parameters
    ----------
    nChannels : int
        dimensionality of a single sample
    nSamples : int
        the buffer capacity in samples
    windowSize : int, optional
        the size of the window to be used for reading the data;
        a pocket of this size will be created
    nptype : string, optional
        the type of the data to be stored
    '''
    self.__initialized = True

    # checking parameters
    if nChannels < 1:
        self.logger.warning('nChannels must be a positive integer, setting to 1')
        nChannels = 1
    if nSamples < 1:
        self.logger.warning('nSamples must be a positive integer, setting to 1')
        nSamples = 1
    if windowSize < 1:
        self.logger.warning('windowSize must be a positive integer, setting to 1')
        windowSize = 1

    # initializing: header plus (nSamples + windowSize) samples of payload
    sizeBytes = c.sizeof(BufferHeader) + \
        (nSamples + windowSize) * nChannels * np.dtype(nptype).itemsize
    raw = Array('c', sizeBytes)

    hdr = BufferHeader.from_buffer(raw.get_obj())
    hdr.bufSizeBytes = nSamples * nChannels * np.dtype(nptype).itemsize
    hdr.pocketSizeBytes = windowSize * nChannels * np.dtype(nptype).itemsize
    hdr.dataType = datatypes.get_code(nptype)
    hdr.nChannels = nChannels
    hdr.nSamplesWritten = 0

    self.initialize_from_raw(raw.get_obj())
Example 11: _calculate_phi
def _calculate_phi(self, x):
    C = self.workers
    neurons = self.neurons
    mu = self.mu
    sigmas = self.sigmas
    phi = self.phi = None
    n = self.n

    def heavy_lifting(c, phi):
        s = jobs[c][1] - jobs[c][0]
        for k, i in enumerate(range(jobs[c][0], jobs[c][1])):
            for j in range(neurons):
                # phi[i, j] = metrics(x[i,:], mu[j])**3
                # phi[i, j] = plateSpine(x[i,:], mu[j])
                # phi[i, j] = invMultiQuadric(x[i,:], mu[j], sigmas[j])
                phi[i, j] = multiQuadric(x[i,:], mu[j], sigmas[j])
                # phi[i, j] = gaussian(x[i,:], mu[j], sigmas[j])
            if k % 1000 == 0:
                percent = true_divide(k, s) * 100
                print(c, ': {:2.2f}%'.format(percent))
        print(c, ': Done')

    # distribute the work between C workers
    shared_array = Array(c_double, n * neurons)
    phi = frombuffer(shared_array.get_obj())
    phi = phi.reshape((n, neurons))

    jobs = []
    workers = []
    p = n // C
    m = n % C
    for c in range(C):
        # the last worker also takes the remainder,
        # e.g. n=10, C=4 -> jobs = [(0, 2), (2, 4), (4, 6), (6, 10)]
        jobs.append((c * p, (c + 1) * p + (m if c == C - 1 else 0)))
        worker = Process(target=heavy_lifting, args=(c, phi))
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
    return phi
Example 12: initialize
def initialize(self, n_channels, n_samples, np_dtype='float32'):
    """Initializes the buffer with a new array."""
    logger.debug('Initializing {}x{} {} buffer.'.format(n_channels, n_samples, np_dtype))

    # check parameters
    if n_channels < 1 or n_samples < 1:
        logger.error('n_channels and n_samples must be a positive integer')
        raise SharedBufferError(1)

    size_bytes = ct.sizeof(SharedBufferHeader) + \
        n_samples * n_channels * np.dtype(np_dtype).itemsize
    raw = Array('c', size_bytes)

    hdr = SharedBufferHeader.from_buffer(raw.get_obj())
    hdr.bufSizeBytes = size_bytes - ct.sizeof(SharedBufferHeader)
    hdr.dataType = DataTypes.get_code(np_dtype)
    hdr.nChannels = n_channels
    hdr.nSamples = n_samples
    hdr.position = 0

    self.initialize_from_raw(raw.get_obj())
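
SharedBufferHeader itself is not shown in either buffer example. Given the fields assigned above, it is presumably a ctypes.Structure laid out at the front of the raw 'c' Array, along these lines (the exact field types are an assumption):

import ctypes as ct

class SharedBufferHeader(ct.Structure):
    # hypothetical layout inferred from the fields used above
    _fields_ = [('bufSizeBytes', ct.c_long),
                ('dataType', ct.c_long),
                ('nChannels', ct.c_long),
                ('nSamples', ct.c_long),
                ('position', ct.c_long)]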
Example 13: load_data
def load_data(args, input_q, minibatch_q):
    c = args.channel
    s = args.size
    d = args.joint_num * 2

    input_data_base = Array(ctypes.c_float, args.batchsize * c * s * s)
    input_data = np.ctypeslib.as_array(input_data_base.get_obj())
    input_data = input_data.reshape((args.batchsize, c, s, s))

    label_base = Array(ctypes.c_float, args.batchsize * d)
    label = np.ctypeslib.as_array(label_base.get_obj())
    label = label.reshape((args.batchsize, d))

    x_queue, o_queue = Queue(), Queue()
    workers = [Process(target=transform,
                       args=(args, x_queue, args.datadir, args.fname_index,
                             args.joint_index, o_queue))
               for _ in range(args.batchsize)]
    for w in workers:
        w.start()

    while True:
        x_batch = input_q.get()
        if x_batch is None:
            break

        # data augmentation
        for x in x_batch:
            x_queue.put(x)
        j = 0
        while j != len(x_batch):
            a, b = o_queue.get()
            input_data[j] = a
            label[j] = b
            j += 1
        minibatch_q.put([input_data, label])

    for _ in range(args.batchsize):
        x_queue.put(None)
    for w in workers:
        w.join()
Example 14: __init__
def __init__(self):
    self.all_curves = Listing.index_all_curves()
    with open("outputs/index_file.txt", "w") as index_file:
        for index, item in enumerate(self.all_curves):
            index_file.write("%i,%s\n" % (index, str(item)))
    self.n = len(self.all_curves)
    self.total_costs_matrix_base = Array(ctypes.c_double, self.n * self.n)
    self.total_costs_matrix = numpy.ctypeslib.as_array(
        self.total_costs_matrix_base.get_obj())
    self.total_costs_matrix = self.total_costs_matrix.reshape(self.n, self.n)
Example 15: test_continuous_send_dialog
def test_continuous_send_dialog(self):
    self.add_signal_to_form("esaver.coco")
    self.__add_first_signal_to_generator()

    port = self.get_free_port()

    gframe = self.form.generator_tab_controller
    expected = np.zeros(gframe.total_modulated_samples, dtype=np.complex64)
    expected = gframe.modulate_data(expected)
    current_index = Value("L", 0)
    # 'f' floats: one complex64 sample = two float32 values, times two repeats
    buffer = Array("f", 4 * len(expected))
    process = Process(target=receive, args=(port, current_index, 2 * len(expected), buffer))
    process.daemon = True
    process.start()
    time.sleep(1)  # ensure server is up

    ContinuousModulator.BUFFER_SIZE_MB = 10

    continuous_send_dialog = self.__get_continuous_send_dialog()
    continuous_send_dialog.device.set_client_port(port)
    continuous_send_dialog.device_settings_widget.ui.spinBoxNRepeat.setValue(2)
    continuous_send_dialog.ui.btnStart.click()
    QTest.qWait(1000)
    time.sleep(1)
    process.join(1)

    # CI sometimes swallows a sample
    self.assertGreaterEqual(current_index.value, len(expected) - 1)

    buffer = np.frombuffer(buffer.get_obj(), dtype=np.complex64)
    for i in range(len(expected)):
        self.assertEqual(buffer[i], expected[i], msg=str(i))

    continuous_send_dialog.ui.btnStop.click()
    continuous_send_dialog.ui.btnClear.click()
    QTest.qWait(1)
    continuous_send_dialog.close()