This article collects typical usage examples of Python's multiprocessing.Array. If you are wondering what multiprocessing.Array does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the multiprocessing module itself.
The following shows 15 code examples of multiprocessing.Array, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
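Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic pattern most of them build on: allocate a typed shared array, hand it to a child process, and read the results back in the parent.

import multiprocessing as mp

def fill(shared_arr):
    # The child writes directly into the shared memory; nothing is copied back.
    for i in range(len(shared_arr)):
        shared_arr[i] = i * i

if __name__ == '__main__':
    arr = mp.Array('d', 5)  # five C doubles, zero-initialized, wrapped with a lock
    p = mp.Process(target=fill, args=(arr,))
    p.start()
    p.join()
    print(list(arr))  # [0.0, 1.0, 4.0, 9.0, 16.0]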
Example 1: test_chain_sampling_multidim_model
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Array [as alias]
def test_chain_sampling_multidim_model(self):
    """Test that sampling from DREAM history for multi-dimensional model when the history is known matches with expected possible samples."""
    self.params, self.like = multidmodel()
    model = Model(likelihood=self.like, sampled_parameters=self.params)
    dream = Dream(model=model)
    history_arr = mp.Array('d', [0]*2*dream.total_var_dimension)
    n = mp.Value('i', 0)
    pydream.Dream_shared_vars.history = history_arr
    pydream.Dream_shared_vars.count = n
    chains_added_to_history = []
    for i in range(2):
        start = i*dream.total_var_dimension
        end = start+dream.total_var_dimension
        chain = dream.draw_from_prior(model.sampled_parameters)
        pydream.Dream_shared_vars.history[start:end] = chain
        chains_added_to_history.append(chain)
    sampled_chains = dream.sample_from_history(nseedchains=2, DEpairs=1, ndimensions=dream.total_var_dimension)
    sampled_chains = np.array(sampled_chains)
    chains_added_to_history = np.array(chains_added_to_history)
    self.assertIs(np.array_equal(chains_added_to_history[chains_added_to_history[:,0].argsort()], sampled_chains[sampled_chains[:,0].argsort()]), True)
Example 2: test_history_recording_simple_model
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Array [as alias]
def test_history_recording_simple_model(self):
    """Test that history in memory matches with that recorded for test one-dimensional model."""
    self.param, self.like = onedmodel()
    model = Model(self.like, self.param)
    step = Dream(model=model, model_name='test_history_recording')
    history_arr = mp.Array('d', [0]*4*step.total_var_dimension)
    n = mp.Value('i', 0)
    nchains = mp.Value('i', 3)
    pydream.Dream_shared_vars.history = history_arr
    pydream.Dream_shared_vars.count = n
    pydream.Dream_shared_vars.nchains = nchains
    test_history = np.array([[1], [3], [5], [7]])
    for chainpoint in test_history:
        for point in chainpoint:
            step.record_history(nseedchains=0, ndimensions=step.total_var_dimension, q_new=point, len_history=len(history_arr))
    history_arr_np = np.frombuffer(pydream.Dream_shared_vars.history.get_obj())
    history_arr_np_reshaped = history_arr_np.reshape(np.shape(test_history))
    self.assertIs(np.array_equal(history_arr_np_reshaped, test_history), True)
    remove('test_history_recording_DREAM_chain_history.npy')
    remove('test_history_recording_DREAM_chain_adapted_crossoverprob.npy')
    remove('test_history_recording_DREAM_chain_adapted_gammalevelprob.npy')
Example 3: test_history_recording_multidim_model
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Array [as alias]
def test_history_recording_multidim_model(self):
    """Test that history in memory matches with that recorded for test multi-dimensional model."""
    self.param, self.like = multidmodel()
    model = Model(self.like, self.param)
    dream = Dream(model=model, model_name='test_history_recording')
    history_arr = mp.Array('d', [0]*4*dream.total_var_dimension*3)
    n = mp.Value('i', 0)
    nchains = mp.Value('i', 3)
    pydream.Dream_shared_vars.history = history_arr
    pydream.Dream_shared_vars.count = n
    pydream.Dream_shared_vars.nchains = nchains
    test_history = np.array([[[1, 2, 3, 4], [3, 4, 5, 6], [5, 6, 7, 8]], [[7, 8, 9, 10], [9, 12, 18, 20], [11, 14, 18, 8]], [[13, 14, 18, 4], [15, 17, 11, 8], [17, 28, 50, 4]], [[19, 21, 1, 18], [21, 19, 19, 11], [23, 4, 3, 2]]])
    for chainpoint in test_history:
        for point in chainpoint:
            dream.record_history(nseedchains=0, ndimensions=dream.total_var_dimension, q_new=point, len_history=len(history_arr))
    history_arr_np = np.frombuffer(pydream.Dream_shared_vars.history.get_obj())
    history_arr_np_reshaped = history_arr_np.reshape(np.shape(test_history))
    self.assertIs(np.array_equal(history_arr_np_reshaped, test_history), True)
    remove('test_history_recording_DREAM_chain_history.npy')
    remove('test_history_recording_DREAM_chain_adapted_crossoverprob.npy')
    remove('test_history_recording_DREAM_chain_adapted_gammalevelprob.npy')
Example 4: __init__
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Array [as alias]
def __init__(self, action_dim, observation_dim, **kwargs):
    # shared variables that all processes will see
    self.crash_flag = Value('i', 0)
    self.reset_call_flag = Value('i', 0)
    # Communicator parameters
    communicator_setups = {'generic1': {'Communicator': MockCommunicator,
                                        'kwargs': {}},
                           'generic2': {'Communicator': MockCommunicator,
                                        'kwargs': {}}
                           }
    self._uniform_array_ = np.frombuffer(Array('d', 3).get_obj(), dtype=np.float64)
    super().__init__(communicator_setups=communicator_setups,
                     action_dim=action_dim,
                     observation_dim=observation_dim,
                     **kwargs)
Example 5: test_random_state_array
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Array [as alias]
def test_random_state_array(self):
    rand_obj = np.random.RandomState(1)
    rand_state = rand_obj.get_state()
    original_uniform_values = rand_obj.uniform(-1, 1, 100)
    original_normal_values = rand_obj.randn(100)
    rand_state_array_type, rand_state_array_size, rand_state_array = get_random_state_array(rand_state)
    shared_rand_array = np.frombuffer(Array('b', rand_state_array_size).get_obj(), dtype=rand_state_array_type)
    np.copyto(shared_rand_array, np.frombuffer(rand_state_array, dtype=rand_state_array_type))
    new_rand_obj = np.random.RandomState()
    new_rand_obj.set_state(get_random_state_from_array(shared_rand_array))
    new_uniform_values = new_rand_obj.uniform(-1, 1, 100)
    new_normal_values = new_rand_obj.randn(100)
    assert np.all(original_uniform_values == new_uniform_values)
    assert np.all(original_normal_values == new_normal_values)
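The helpers get_random_state_array and get_random_state_from_array above are project-specific and their implementations are not shown here. Purely as an illustration of the idea (an assumption, not the project's actual code), a RandomState can be round-tripped through a shared byte buffer by serializing its state tuple:

import pickle
import numpy as np

# Hypothetical stand-ins for the helpers used in the test above.
def get_random_state_array(rand_state):
    # Serialize the RandomState state tuple into a flat uint8 buffer.
    state_bytes = np.frombuffer(pickle.dumps(rand_state), dtype=np.uint8)
    return state_bytes.dtype, state_bytes.size, state_bytes

def get_random_state_from_array(shared_rand_array):
    # Rebuild the state tuple from the shared buffer's raw bytes.
    return pickle.loads(shared_rand_array.astype(np.uint8).tobytes())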
Example 6: array
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Array [as alias]
def array(shape, dtype=_np.float64, autolock=False):
    """Factory method for shared memory arrays supporting all numpy dtypes."""
    assert _NP_AVAILABLE, "To use the shared array object, numpy must be available!"
    if not isinstance(dtype, _np.dtype):
        dtype = _np.dtype(dtype)
    # Not bothering to translate the numpy dtypes to ctype types directly,
    # because they're only partially supported. Instead, create a byte ctypes
    # array of the right size and use a view of the appropriate datatype.
    shared_arr = _multiprocessing.Array(
        "b", int(_np.prod(shape) * dtype.alignment), lock=autolock
    )
    with _warnings.catch_warnings():
        # For more information on why this is necessary, see
        # https://www.reddit.com/r/Python/comments/j3qjb/parformatlabpool_replacement
        _warnings.simplefilter("ignore", RuntimeWarning)
        data = _np.ctypeslib.as_array(shared_arr).view(dtype).reshape(shape)
    return data
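The core trick in this factory, allocating a raw byte Array and viewing it with the desired dtype, can be reproduced in a few self-contained lines (a sketch independent of the module above, sizing the buffer with itemsize):

import multiprocessing
import numpy as np

shape, dtype = (3, 4), np.dtype(np.float64)
raw = multiprocessing.Array("b", int(np.prod(shape) * dtype.itemsize), lock=False)
view = np.ctypeslib.as_array(raw).view(dtype).reshape(shape)
view[:] = np.arange(12).reshape(shape)  # writes go straight into the shared buffer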
Example 7: test_array
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Array [as alias]
def test_array(self, raw=False):
    seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
    if raw:
        arr = self.RawArray('i', seq)
    else:
        arr = self.Array('i', seq)
    self.assertEqual(len(arr), len(seq))
    self.assertEqual(arr[3], seq[3])
    self.assertEqual(list(arr[2:7]), list(seq[2:7]))
    arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
    self.assertEqual(list(arr[:]), seq)
    self.f(seq)
    p = self.Process(target=self.f, args=(arr,))
    p.daemon = True
    p.start()
    p.join()
    self.assertEqual(list(arr[:]), seq)
Example 8: test_sharedctypes
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Array [as alias]
def test_sharedctypes(self, lock=False):
    x = Value('i', 7, lock=lock)
    y = Value(c_double, 1.0/3.0, lock=lock)
    foo = Value(_Foo, 3, 2, lock=lock)
    arr = self.Array('d', list(range(10)), lock=lock)
    string = self.Array('c', 20, lock=lock)
    string.value = latin('hello')
    p = self.Process(target=self._double, args=(x, y, foo, arr, string))
    p.daemon = True
    p.start()
    p.join()
    self.assertEqual(x.value, 14)
    self.assertAlmostEqual(y.value, 2.0/3.0)
    self.assertEqual(foo.x, 6)
    self.assertAlmostEqual(foo.y, 4.0)
    for i in range(10):
        self.assertAlmostEqual(arr[i], i*2)
    self.assertEqual(string.value, latin('hellohello'))
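A detail worth noting in this test is the lock keyword: multiprocessing.Value and multiprocessing.Array return a synchronized wrapper by default, while lock=False yields a raw shared ctypes object with no locking. A minimal sketch of the difference, independent of the test harness above:

import multiprocessing as mp

arr = mp.Array('d', 10)              # lock=True by default: wrapper with get_lock()/get_obj()
raw = mp.Array('d', 10, lock=False)  # plain shared ctypes array, no wrapper

with arr.get_lock():                 # protect compound read-modify-write operations
    arr[0] += 1.0
raw[0] += 1.0                        # the caller is responsible for synchronization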
Example 9: __init__
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Array [as alias]
def __init__(self, make_env, observation_space):
    self.observation_space = observation_space
    if isinstance(observation_space, gym.spaces.Box):
        num_elems = len(np.array(observation_space.low).flatten())
        zeros = [0] * num_elems
        self._obs_buf = Array('b', zeros)
    else:
        self._obs_buf = None
    self._pipe, other_end = Pipe()
    self._proc = Process(target=self._worker,
                         args=(other_end,
                               self._obs_buf,
                               cloudpickle.dumps(make_env)),
                         daemon=True)
    self._proc.start()
    self._running_cmd = None
    other_end.close()
    self._pipe.send(('action_space', None))
    self.action_space = self._get_response()
Example 10: CompleteAndWriteOGMatrices
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Array [as alias]
def CompleteAndWriteOGMatrices(self, ogs, ogMatrices):
    """
    ogMatrices - each matrix is a list of mp.Array (so that each represents an nSeq x nSeq matrix)
    """
    for iog, (og, m) in enumerate(zip(ogs, ogMatrices)):
        # dendroblast scores
        n = len(m)
        max_og = -9e99
        # Careful not to overwrite a value and then attempt to use the old value
        for i in range(n):
            for j in range(i):
                m[i][j] = -np.log(m[i][j] + m[j][i])
                m[j][i] = m[i][j]
                max_og = max(max_og, m[i][j])
        self.WritePhylipMatrix(m, [g.ToString() for g in og], files.FileHandler.GetOGsDistMatFN(iog), max_og)
    return ogMatrices
Example 11: WritePhylipMatrix
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Array [as alias]
def WritePhylipMatrix(m, names, outFN, max_og):
    """
    m - list of mp.Array (so that each represents an nSeq x nSeq matrix)
    """
    max_og = 1.1*max_og
    sliver = 1e-6
    with open(outFN, 'w') as outfile:
        n = len(m)
        outfile.write("%d\n" % n)
        for i in range(n):
            outfile.write(names[i] + " ")
            # values could be -inf; these are the most distantly related, so replace with max_og
            V = [0. + (0. if i==j else m[i][j] if m[i][j] > -9e99 else max_og) for j in range(n)]  # "0. +": hack to avoid printing out "-0"
            V = [sliver if 0 < v < sliver else v for v in V]  # make sure scientific notation is not used (not accepted by fastme)
            values = " ".join(["%.6f" % v for v in V])
            outfile.write(values + "\n")
Example 12: SpeciesTreeDistances
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Array [as alias]
def SpeciesTreeDistances(self, ogs, ogMatrices, method=0):
    """
    ogMatrices - each matrix is a list of mp.Array (so that each represents an nSeq x nSeq matrix)
    """
    spPairs = list(itertools.combinations(self.ogSet.seqsInfo.speciesToUse, 2))
    D = [[] for _ in spPairs]
    if method == 0:
        """ closest distance for each species pair in each orthogroup """
        for og, m in zip(ogs, ogMatrices):
            spDict = defaultdict(list)
            for i, g in enumerate(og):
                spDict[g.iSp].append(i)
            for (sp1, sp2), d_list in zip(spPairs, D):
                distances = [m[i][j] for i in spDict[sp1] for j in spDict[sp2]]
                if len(distances) > 0: d_list.append(min(distances))
                # d_list.append(min(distances) if len(distances) > 0 else None)
    return D, spPairs
Example 13: test_DistanceMatrixEvalues
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Array [as alias]
def test_DistanceMatrixEvalues(self):
    if qBinary:
        self.skipTest("Skipping unit test. Test can be run on sourcecode version of OrthoFinder.")
    import orthologues
    m = [mp.Array('d', [0, 1e-9, 0.1, 1]), mp.Array('d', [1e-9, 0, 1, 1]), mp.Array('d', [0.1, 1, 0, 1]), mp.Array('d', [1, 1, 1, 0])]
    names = ["a", "b", "c", "d"]
    outFN = baseDir + "Input/Distances.phy"
    max_og = 1.
    orthologues.DendroBLASTTrees.WritePhylipMatrix(m, names, outFN, max_og)
    # read the values back and check that they are written in the correct format
    with open(outFN, 'r') as infile:
        next(infile)
        line = next(infile).rstrip().split()
        self.assertEqual('0.000000', line[1])  # expected format for writing 0
        self.assertEqual('0.000001', line[2])  # minimum non-zero value; should be written in decimal rather than scientific notation
        line = next(infile).rstrip().split()
        self.assertEqual('0.000001', line[1])
    os.remove(outFN)
Example 14: test_server_multiproc
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Array [as alias]
def test_server_multiproc(mocker, set_timeout, restore_signal, start_method):
    mpctx = mp.get_context(start_method)
    mocker.patch('aiotools.server.mp', mpctx)
    started = mpctx.Value('i', 0)
    terminated = mpctx.Value('i', 0)
    proc_idxs = mpctx.Array('i', 3)
    set_timeout(0.2, interrupt)
    aiotools.start_server(myserver_multiproc, num_workers=3,
                          args=(started, terminated, proc_idxs))
    assert started.value == 3
    assert terminated.value == 3
    assert list(proc_idxs) == [0, 1, 2]
    assert len(mp.active_children()) == 0
Example 15: test_server_multiproc_custom_stop_signals
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Array [as alias]
def test_server_multiproc_custom_stop_signals(
        mocker, set_timeout, restore_signal, start_method):
    mpctx = mp.get_context(start_method)
    mocker.patch('aiotools.server.mp', mpctx)
    started = mpctx.Value('i', 0)
    terminated = mpctx.Value('i', 0)
    received_signals = mpctx.Array('i', 2)
    proc_idxs = mpctx.Array('i', 2)
    set_timeout(0.2, interrupt_usr1)
    aiotools.start_server(myserver_multiproc_custom_stop_signals,
                          num_workers=2,
                          stop_signals={signal.SIGUSR1},
                          args=(started, terminated, received_signals, proc_idxs))
    assert started.value == 2
    assert terminated.value == 2
    assert list(received_signals) == [signal.SIGUSR1, signal.SIGUSR1]
    assert list(proc_idxs) == [0, 1]
    assert len(mpctx.active_children()) == 0