This article collects and summarizes typical usage examples of the Python method multiprocessing.Manager.dict. If you are wondering what Manager.dict does, how to call it, or what real-world code using it looks like, the curated examples below should help. You can also read further about its containing class, multiprocessing.Manager.
The following shows 15 code examples of Manager.dict, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
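Before looking at the examples, it may help to see the pattern they all share. The sketch below is not taken from any of the examples; the worker function and key names are illustrative. Manager().dict() returns a proxy object that can be passed to child processes; each process writes into it, and after join() the parent reads the results, typically copying them into a plain dict:

from multiprocessing import Manager, Process

def square_worker(shared, key):
    # each child process records its result in the shared proxy dict
    shared[key] = key * key

if __name__ == '__main__':
    manager = Manager()
    shared = manager.dict()   # dict proxy shared across processes
    procs = [Process(target=square_worker, args=(shared, i)) for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(dict(shared))       # e.g. {0: 0, 1: 1, 2: 4, 3: 9}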
Example 1: line_integration
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import dict [as alias]
def line_integration(rmid):
    print("Begin process for " + str(rmid))
    mjd_list = map(int, os.listdir(Location.project_loca + "data/raw/" +
                                   str(rmid)))
    os.chdir(Location.project_loca + "/result/flux_of_line/")
    try:
        os.mkdir(str(rmid))
    except OSError:
        pass
    pool = Pool(processes=32)
    m = Manager()
    lock = m.Lock()
    fe2dic = m.dict()
    hbetandic = m.dict()
    hbetabdic = m.dict()
    o3dic = m.dict()
    contdic = m.dict()
    func = partial(line_integration_single, rmid, lock, fe2dic, hbetandic,
                   hbetabdic, o3dic, contdic)
    pool.map(func, mjd_list)
    output_flux(rmid, dict(fe2dic), "Fe2")
    output_flux(rmid, dict(hbetandic), "Hbetan")
    output_flux(rmid, dict(hbetabdic), "Hbetab")
    output_flux(rmid, dict(contdic), "cont")
    output_flux(rmid, dict(o3dic), "O3")
    pool.close()
    pool.join()
Example 2: build_av_tf_idf_dv
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import dict [as alias]
def build_av_tf_idf_dv(docs, doc_num, model, save=True, save_file="doc_vector_tfidf.bin"):
    docs = list(docs)
    vectorizer = CountVectorizer()
    tfidf_transformer = TfidfTransformer()
    count_fv = vectorizer.fit_transform(util.word2sentence(docs))
    tfidf_fv = tfidf_transformer.fit_transform(count_fv)
    num_features = model.syn0.shape[1]
    manager = Manager()
    global_word_set = manager.dict(util.get_word_vec_dict(model))
    global_vocabulary = manager.dict(vectorizer.vocabulary_)
    global_doc_vector = mp.Array('d', doc_num*num_features, lock=False)
    pool = mp.Pool(initializer=initprocess, initargs=[global_doc_vector])
    index = 0
    # test(docs[0], global_word_set, 0, doc_num, global_vocabulary, global_doc_vector, global_tfidf_fv)
    for words in docs:
        pool.apply_async(single_av_tf_idf_dv, [words, global_word_set, index, doc_num, global_vocabulary, tfidf_fv[index]])
        index += 1
    pool.close()
    pool.join()
    doc_vector = np.frombuffer(global_doc_vector).reshape((doc_num, num_features))
    if save:
        np.save(save_file, doc_vector)
    return doc_vector
Example 3: __init__
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import dict [as alias]
def __init__(self, config):
    '''*config* can be obtained from the function :func:`cloudfusion.store.sugarsync.sugarsync_store.SugarsyncStore.get_config`,
    but you need to add user and password::

        config = SugarsyncStore.get_config()
        config['user'] = '[email protected]' #your account username/e-mail address
        config['password'] = 'MySecret!23$' #your account password

    Or you can use a configuration file that already has password and username set by specifying a path::

        path_to_my_config_file = '/home/joe/MySugarsync.ini'
        config = get_config(path_to_my_config_file)

    :param config: dictionary with key value pairs'''
    #self.dir_listing_cache = {}
    self._logging_handler = 'sugarsync'
    self.logger = logging.getLogger(self._logging_handler)
    self.logger = db_logging_thread.make_logger_multiprocessingsave(self.logger)
    manager = Manager()
    self.path_cache = manager.dict()
    # use a lock for synchronized appends
    self._dir_listing_cache = manager.dict()
    self._dir_listing_cache_lock = RLock()
    self._last_partial_cache = manager.list()
    self._time_of_last_partial_cache = 0
    #error handling for authorization error
    self.root = config["root"]
    try:
        self.client = SugarsyncClient(config)
    except Exception, e:
        raise StoreAutorizationError(repr(e), 0)
Example 4: process_job_parallel
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import dict [as alias]
def process_job_parallel(scheduler, job, nr_cores, nr_iter, parameters=None):
    Logger.log_level = 2
    processes = []
    manager = Manager()
    return_values = manager.dict()
    extremes = manager.dict()
    start_time = datetime.datetime.now()
    for i in range(nr_cores):
        p = Process(target=worker, args=(i, nr_cores, scheduler, job, nr_iter, return_values, extremes, parameters,))
        processes.append(p)
        p.start()
    for process in processes:
        process.join()
    # reduce
    results = []
    for value in return_values.values():
        for entry in value:
            results.append(entry)
    min = None
    max = None
    for extreme in extremes.values():
        if min is None or extreme[0].total_time < min.total_time:
            min = extreme[0]
        if max is None or extreme[1].total_time > max.total_time:
            max = extreme[1]
    Logger.warning("Min: %s" % min.total_time)
    Logger.warning("Max: %s" % max.total_time)
    duration = datetime.datetime.now() - start_time
    Logger.warning("Simulation complete. Duration: %s" % (duration))
    return results, (min, max)
Example 5: run
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import dict [as alias]
def run():
    # build the mdp
    start = time.time()
    room_size = 3
    num_rooms = 5
    mdp = maze_mdp.MazeMDP(room_size=room_size, num_rooms=num_rooms)
    # build the agent
    m = Manager()
    init_dict = {(s, a): 0 for s in mdp.states for a in mdp.ACTIONS + [None]}
    shared_weights = m.dict(init_dict)
    shared_value_weights = m.dict(init_dict)
    agent = async_actor_critic.AsyncActorCritic(actions=mdp.ACTIONS, discount=mdp.DISCOUNT,
                                                weights=shared_weights, value_weights=shared_value_weights, tau=.3, learning_rate=.5)
    # build a single experiment
    rewards = m.list()
    start_state_values = m.list()
    max_steps = (2 * room_size * num_rooms) ** 2
    exp = experiment.Experiment(mdp=mdp, agent=agent, num_episodes=800, max_steps=max_steps,
                                rewards=rewards, start_state_values=start_state_values)
    # run the experiment
    multiexperiment = experiment.MultiProcessExperiment(experiment=exp, num_agents=NUM_PROCESSES)
    multiexperiment.run()
    # report results
    end = time.time()
    print 'took {} seconds to converge'.format(end - start)
    mdp.print_state_values(shared_value_weights)
    optimal = mdp.EXIT_REWARD + (2 * room_size * num_rooms * mdp.MOVE_REWARD)
    utils.plot_values(rewards, optimal, 'rewards')
    utils.plot_values(start_state_values, optimal, 'start state value')
Example 6: main_parallel
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import dict [as alias]
def main_parallel():
    Component.resetPfKeeping()
    Component.resetCostKeeping()
    manager = Manager()
    Component.pfkeeping = manager.dict(Component.pfkeeping)
    Component.costkeeping = manager.dict(Component.costkeeping)
    pool = Pool(processes=3)
    toolbox.register("map", pool.map)
    print "MULTIOBJECTIVE OPTIMIZATION: parallel version"
    start_delta_time = time.time()
    # optimization
    random.seed(64)
    npop = 100
    ngen = 50
    stats = tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("avg", np.mean, axis=0)
    stats.register("std", np.std, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)
    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "avg", "std", "min", "max"
    pop = toolbox.population(n=npop)
    fits = toolbox.map(toolbox.evaluate, pop)
    for fit, ind in zip(fits, pop):
        ind.fitness.values = fit
    nevals = npop
    allpop = []
    for gen in range(ngen):
        allpop = allpop + pop
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=nevals, **record)
        print(logbook.stream)
        offspring = algorithms.varOr(pop, toolbox, lambda_=npop, cxpb=0.5, mutpb=0.1)
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        nevals = len(invalid_ind)
        fits = toolbox.map(toolbox.evaluate, invalid_ind)
        for fit, ind in zip(fits, invalid_ind):
            ind.fitness.values = fit
        pop = toolbox.select(offspring + pop, k=npop)
    front = toolbox.sort(allpop, k=int(ngen*npop), first_front_only=True)
    pool.close()
    pool.join()
    delta_time = time.time() - start_delta_time
    print 'DONE: {} s'.format(str(datetime.timedelta(seconds=delta_time)))
    return allpop, logbook, front
Example 7: launch_multiprocess
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import dict [as alias]
def launch_multiprocess(launchpad, fworker, loglvl, nlaunches, num_jobs, sleep_time,
                        total_node_list=None, ppn=1, timeout=None, exclude_current_node=False,
                        local_redirect=False):
    """
    Launch the jobs in the job packing mode.

    Args:
        launchpad (LaunchPad)
        fworker (FWorker)
        loglvl (str): level at which to output logs
        nlaunches (int): 0 means 'until completion', -1 or "infinite" means to loop forever
        num_jobs(int): number of sub jobs
        sleep_time (int): secs to sleep between rapidfire loop iterations
        total_node_list ([str]): contents of NODEFILE (doesn't affect execution)
        ppn (int): processors per node (doesn't affect execution)
        timeout (int): # of seconds after which to stop the rapidfire process
        exclude_current_node: Don't use the script launching node as a compute node
        local_redirect (bool): redirect standard input and output to local file
    """
    # parse node file contents
    if exclude_current_node:
        host = get_my_host()
        l_dir = launchpad.get_logdir() if launchpad else None
        l_logger = get_fw_logger('rocket.launcher', l_dir=l_dir, stream_level=loglvl)
        if host in total_node_list:
            log_multi(l_logger, "Remove the current node \"{}\" from compute node".format(host))
            total_node_list.remove(host)
        else:
            log_multi(l_logger, "The current node is not in the node list, keep the node list as is")
    node_lists, sub_nproc_list = split_node_lists(num_jobs, total_node_list, ppn)
    # create shared dataserver
    ds = DataServer.setup(launchpad)
    port = ds.address[1]
    manager = Manager()
    running_ids_dict = manager.dict()
    firing_state_dict = manager.dict()
    # launch rapidfire processes
    processes = start_rockets(fworker, nlaunches, sleep_time, loglvl, port, node_lists,
                              sub_nproc_list, timeout=timeout, running_ids_dict=running_ids_dict,
                              local_redirect=local_redirect, firing_state_dict=firing_state_dict)
    FWData().Running_IDs = running_ids_dict
    FWData().FiringState = firing_state_dict
    # start pinging service
    ping_stop = threading.Event()
    ping_thread = threading.Thread(target=ping_multilaunch, args=(port, ping_stop))
    ping_thread.start()
    # wait for completion
    for p in processes:
        p.join()
    ping_stop.set()
    ping_thread.join()
    ds.shutdown()
Example 8: main
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import dict [as alias]
def main():
    parser = argparse.ArgumentParser(description='Takes deduplicated bam files and preprocess\'s for analysis\n')
    parser.add_argument('-c', '--config', help='Conditions containing Sam/Bam files, values are naming', required=True)
    parser.add_argument('-g', '--genome', help='Genome the samples are aligned to, options include mm10/mm9/hg19', required=True)
    parser.add_argument('-o', '--outdir', help='Output directory, will create transdense, nfree and npres directories', required=True)
    parser.add_argument('-t', '--threads', help='threads, default=1', default=1, required=False)
    parser.add_argument('-b', action='store_true', help='Use if Config contains bam files', required=False)
    parser.add_argument('-n', action='store_true', help='Runs just nfree <60 and >60', required=False)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = vars(parser.parse_args())
    Config = ConfigParser.ConfigParser()
    Config.optionxform = str
    Config.read(args["config"])
    conditions = ConfigSectionMap("Conditions", Config)
    chrom = pkg_resources.resource_filename('pyatactools', 'data/{}.chrom.sizes'.format(args["genome"]))
    if not os.path.isfile(chrom):
        raise Exception("Unsupported Genome!")
    transdense_dir = os.path.join(args["outdir"], "transdense")
    nfree_dir = os.path.join(args["outdir"], "nfree")
    npres_dir = os.path.join(args["outdir"], "npres")
    pool = Pool(int(args["threads"]))
    if not os.path.isdir(transdense_dir):
        os.makedirs(transdense_dir)
        os.makedirs(nfree_dir)
        os.makedirs(npres_dir)
    ddup_bams = list(conditions.keys())
    if args["n"]:
        manager = Manager()
        return_dict = manager.dict()
        pool = Pool(int(args["threads"]))
        return_dict = manager.dict()
        nfree_dir1 = os.path.join(args["outdir"], "nfree_small")
        nfree_dir2 = os.path.join(args["outdir"], "nfree_large")
        if not os.path.isdir(nfree_dir1):
            os.makedirs(nfree_dir1)
            os.makedirs(nfree_dir2)
        pool.map(function5, itertools.izip(ddup_bams, itertools.repeat(nfree_dir1), itertools.repeat(nfree_dir2), itertools.repeat(return_dict)))
        pool.map(function4, itertools.izip(list(return_dict.keys()), itertools.repeat(chrom)))
    else:
        manager = Manager()
        return_dict = manager.dict()
        pool = Pool(int(args["threads"]))
        pool.map(function1, itertools.izip(ddup_bams, itertools.repeat(transdense_dir), itertools.repeat(return_dict)))
        pool.map(function4, itertools.izip(list(return_dict.keys()), itertools.repeat(chrom)))
        return_dict = manager.dict()
        pool.map(function2, itertools.izip(ddup_bams, itertools.repeat(nfree_dir), itertools.repeat(return_dict)))
        pool.map(function4, itertools.izip(list(return_dict.keys()), itertools.repeat(chrom)))
        return_dict = manager.dict()
        pool.map(function3, itertools.izip(ddup_bams, itertools.repeat(npres_dir), itertools.repeat(return_dict)))
        pool.map(function4, itertools.izip(list(return_dict.keys()), itertools.repeat(chrom)))
Example 9: plot_utrs
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import dict [as alias]
def plot_utrs(conditions, rev_conds, outdir):
    pool = Pool(24)
    manager = Manager()
    return_dict = manager.dict()
    genes = manager.dict()
    pool.map(function1, itertools.izip(list(conditions.keys()), itertools.repeat(outdir), itertools.repeat(return_dict), itertools.repeat(genes)))
    combined_profiles = {}
    normal = {}
    #Have to add all conditions together
    today = date.today()
    date_format = "{}_{}_{}".format(today.day, today.month, today.year)
    pp = PdfPages("{}/{}_UTR_averaged.pdf".format(outdir, date_format))
    #First need to average over all genes:
    averaged_profiles = {}
    len_genes = {}
    for key1, key2 in genes.keys(): #Chromosome, bam
        if key2 not in len_genes:
            len_genes[key2] = {}
            len_genes[key2][key1] = 1
        else:
            len_genes[key2][key1] = 1
    for key in return_dict.keys():
        averaged_profiles[key] = return_dict[key]/len(len_genes[key].keys())
    for key in rev_conds:
        fig = pyplot.figure()
        pyplot.rc('axes', color_cycle=['b','r', 'c', 'm', 'y', 'k', 'gray', "green"])
        for fasta in rev_conds[key]:
            name = re.sub(".fa", "", fasta)
            name = os.path.basename(name)
            uniq_count = read_reports('{}/{}_report.txt'.format(outdir, name))
            normal[fasta] = uniq_count
            norm = 100000/float(uniq_count)
            normalised_profile = norm * averaged_profiles[fasta]
            if key not in combined_profiles:
                combined_profiles[fasta] = normalised_profile
            else:
                combined_profiles[fasta] += normalised_profile
            pyplot.plot( numpy.arange( -200, 200 ), normalised_profile, label=name)
        pyplot.legend(prop={'size':6})
        pyplot.title(key)
        pp.savefig(fig)
        pyplot.close()
    fig = pyplot.figure()
    pyplot.rc('axes', color_cycle=['b','r', 'c', 'm', 'y', 'k', 'gray', "green"])
    for key in combined_profiles:
        name = re.sub(".fa", "", key)
        name = os.path.basename(name)
        pyplot.plot( numpy.arange( -200, 200 ), combined_profiles[key], label=name)
    pyplot.legend(prop={'size':6})
    pp.savefig(fig)
    pp.close()
Example 10: __init__
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import dict [as alias]
def __init__(self, **kwargs):
    """
    F1 class constructor

    :param codi_r1: R1 code of the company
    :type codi_r1: str
    :param year: Year of the resolution
    :type year: int
    """
    super(F1, self).__init__(**kwargs)
    self.codi_r1 = kwargs.pop('codi_r1')
    self.year = kwargs.pop('year', datetime.now().year - 1)
    manager = Manager()
    self.cts = manager.dict()
    self.cnaes = manager.dict()
    self.base_object = 'CUPS'
    self.report_name = 'F1 - CUPS'
    self.reducir_cups = kwargs.get("reducir_cups", False)
    mod_all_year = self.connection.GiscedataPolissaModcontractual.search([
        ("data_inici", "<=", "{}-01-01".format(self.year)),
        ("data_final", ">=", "{}-12-31".format(self.year))],
        0, 0, False, {"active_test": False}
    )
    mods_ini = self.connection.GiscedataPolissaModcontractual.search(
        [("data_inici", ">=", "{}-01-01".format(self.year)),
         ("data_inici", "<=", "{}-12-31".format(self.year))],
        0, 0, False, {"active_test": False}
    )
    mods_fi = self.connection.GiscedataPolissaModcontractual.search(
        [("data_final", ">=", "{}-01-01".format(self.year)),
         ("data_final", "<=", "{}-12-31".format(self.year))],
        0, 0, False, {"active_test": False}
    )
    self.modcons_in_year = set(mods_fi + mods_ini + mod_all_year)
    self.default_o_cod_tfa = None
    self.default_o_cnae = None
    search_params = [
        ('name', '=', 'libcnmc_4_2015_default_f1')
    ]
    id_config = self.connection.ResConfig.search(
        search_params
    )
    self.generate_derechos = kwargs.pop("derechos", False)
    if id_config:
        config = self.connection.ResConfig.read(id_config[0], [])
        default_values = literal_eval(config['value'])
        if default_values.get('o_cod_tfa'):
            self.default_o_cod_tfa = default_values.get('o_cod_tfa')
        if default_values.get('o_cnae'):
            self.default_o_cnae = default_values.get('o_cnae')
Example 11: _get
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import dict [as alias]
def _get(self, args):
    draft_id = args[0]
    id = args[1] if len(args) > 1 else None
    q = self.db.query(Player)
    if id is not None:
        player = q.filter(Player.id == int(id)).first()
        team = self.db.query(Team).filter(and_(Team.is_owner == True,
                                               Team.draft_id == draft_id)).first()
        available_players = self.db.query(Player).join(Player.core).filter(and_(PlayerCore.rank != None,
                                                                                PlayerCore.target_price != None,
                                                                                PlayerCore.points > 0,
                                                                                Player.draft_id == draft_id,
                                                                                Player.team_id == None,
                                                                                Player.id != player.id)).order_by(PlayerCore.rank).all()
        min_price = 1
        max_price = min(player.core.target_price + 21, team.money)
        manager = Manager()
        max_starters_points = manager.dict()
        max_bench_points = manager.dict()
        pool = Pool(processes=8)
        starters, bench = get_starters_and_bench(self.db, team.id)
        max_starters_points[0] = optimizer.optimize_roster(starters, available_players, team.money - (constants.BENCH_SIZE - len(bench)))[1]
        for m in range(min_price, 10):
            pool.apply_async(wrap_optimizer, args=(starters, available_players, team.money - m - (constants.BENCH_SIZE - len(bench)) + 1, max_bench_points, m))
        full_starters = True
        for s in starters:
            if s is None:
                full_starters = False
        if not full_starters:
            starters_clone = list(starters)
            bench_clone = list(bench)
            place_player(player, starters_clone, bench_clone)
            for m in range(min_price, max_price):
                pool.apply_async(wrap_optimizer, args=(starters_clone, available_players, team.money - m - (constants.BENCH_SIZE - len(bench_clone)), max_starters_points, m))
        pool.close()
        pool.join()
        ret = player.to_dict(['core'])
        ret['max_starters_points'] = dict(max_starters_points)
        ret['max_bench_points'] = dict(max_bench_points)
        return ret
    else:
        players = q.join(PlayerCore).filter(and_(Player.draft_id == int(draft_id),
                                                 PlayerCore.rank != None,
                                                 PlayerCore.target_price != None)).all()
        return {'players': [p.to_dict(['core']) for p in players]}
Example 12: run_parallel
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import dict [as alias]
def run_parallel(
        self, test_suites, test_runner, result_type=None,
        results_path=None):
    exit_code = 0
    proc = None
    unittest.installHandler()
    processes = []
    manager = Manager()
    results = manager.dict()
    manager.dict()
    start = time.time()
    test_mapping = {}
    for test_suite in test_suites:
        # Give each test suite an uuid so it can be
        # matched to the correct test result
        test_id = str(uuid.uuid4())
        test_mapping[test_id] = test_suite
        proc = Process(
            target=self.execute_test,
            args=(test_runner, test_id, test_suite, results))
        processes.append(proc)
        proc.start()
    for proc in processes:
        proc.join()
    finish = time.time()
    errors, failures, _ = self.dump_results(start, finish, results)
    if result_type is not None:
        all_results = []
        for test_id, result in results.items():
            tests = test_mapping[test_id]
            result_parser = SummarizeResults(
                vars(result), tests, (finish - start))
            all_results += result_parser.gather_results()
        reporter = Reporter(
            result_parser=result_parser, all_results=all_results)
        reporter.generate_report(
            result_type=result_type, path=results_path)
    if failures or errors:
        exit_code = 1
    return exit_code
Example 13: parallel_peak_file_plot
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import dict [as alias]
def parallel_peak_file_plot(ibams, bed_file, size_dict, halfwinwidth, norm, controls):
    positions = set()
    for line in open(bed_file):
        fields = line.split( "\t" )
        name = re.sub("chr", "", fields[0])
        window = HTSeq.GenomicInterval( name, int(fields[1]), int(fields[2]), "." )
        positions.add(window)
    if controls == None:
        manager = Manager()
        return_dict = manager.dict()
        pool = Pool(8)
        if norm: #Normalisation provided
            pool.map(read_bam_function, itertools.izip(list(ibams.keys()), itertools.repeat(positions), itertools.repeat(halfwinwidth),
                                                       itertools.repeat(return_dict), itertools.repeat(norm), itertools.repeat(None))) ##Running annotation in parallel
        else:
            pool.map(read_bam_function, itertools.izip(list(ibams.keys()), itertools.repeat(positions), itertools.repeat(halfwinwidth),
                                                       itertools.repeat(return_dict), itertools.repeat(None), itertools.repeat(size_dict)))
        pool.close()
        pool.join()
        for key in return_dict.keys():
            pyplot.plot( numpy.arange( -halfwinwidth, halfwinwidth ), return_dict[key], label=ibams[key])
        pyplot.legend(prop={'size':8})
        pyplot.savefig("Average_peak_profile.pdf".format(ibams[key]))
    else:
        manager = Manager()
        return_dict = manager.dict()
        pool = Pool(8)
        pool.map(read_bam_function, itertools.izip(list(ibams.keys()), itertools.repeat(positions), itertools.repeat(halfwinwidth),
                                                   itertools.repeat(return_dict), itertools.repeat(None), itertools.repeat(None)))
        control_dict = manager.dict()
        control_bam = []
        for key in controls:
            control_bam.append(controls[key])
        control_sizes = sam_size(control_bam)
        pool = Pool(8)
        pool.map(read_bam_function, itertools.izip((control_bam), itertools.repeat(positions), itertools.repeat(halfwinwidth),
                                                   itertools.repeat(control_dict), itertools.repeat(None), itertools.repeat(None)))
        pool.close()
        pool.join()
        # colors = ["b", "g", "r", "y", "k"] #To make it more robust, just use default colors
        c = 0
        for key in return_dict.keys():
            control = controls[key]
            new_profile = return_dict[key] - control_dict[control] #Unsure if working properly, maybe make it more intelligent?
            gapdh = read_counts(norm[key])
            constant = 1000/float(gapdh)
            new_profile = new_profile*constant
            pyplot.plot( numpy.arange( -halfwinwidth, halfwinwidth ), new_profile, label=ibams[key])#, color=colors[c])
        pyplot.legend(prop={'size':8})
        pyplot.savefig("Average_peak_profile.pdf".format(ibams[key]))
Example 14: __init__
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import dict [as alias]
def __init__(self, **kwargs):
    super(CreateCelles, self).__init__(**kwargs)
    self.header = [
        'name', 'tipus_element', 'installacio', 'tipus_posicio',
        'inventari', 'aillament', 'cini', 'propietari', 'perc_financament',
        'tensio'
    ]
    self.search_keys = [('name')]
    self.fields_read_ct = ['perc_financament', 'propietari']
    self.fields_read_at_tram = ['perc_financament']
    self.object = self.connection.GiscedataCellesCella
    manager = Manager()
    self.cts = manager.dict()
    self.at_suports = manager.dict()
Example 15: search
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import dict [as alias]
def search(self, links=False):
    """
    Get links from the search engines and fill them to the respective lists.
    It gets self.pages of links from Search Engines, sends them to the formatter functions and gets the lists.
    :return: nothing
    """
    if self.type == "text":
        mg = Manager()
        ret = mg.dict()
        jobs = []
        p1 = Process(target=self.google_proc, args=(ret,))
        jobs.append(p1)
        p2 = Process(target=self.yahoo_proc, args=(ret,))
        jobs.append(p2)
        p3 = Process(target=self.bing_proc, args=(ret,))
        jobs.append(p3)
        p1.start()
        p2.start()
        p3.start()
        for proc in jobs:
            proc.join()
        temp = ret.values()[0] + ret.values()[1] + ret.values()[2]
        print temp
        for i in temp:
            f = 0
            for j in self.uniquelinks:
                if i[1] == j[1]:
                    f = 1
            if f == 0:
                self.uniquelinks.append(i)
        if links:
            return self.uniquelinks
        else: # [[title, link, data], [title, link, data] ...]
            mg = Manager()
            ret = mg.dict()
            jobs = []
            n = 0
            for li in self.uniquelinks[0:3]:
                p = Process(target=self.data_collector, args=(n, li[1], ret))
                n += 1
                jobs.append(p)
                p.start()
            for proc in jobs:
                proc.join()
            print ret.values()
            print len(ret.values())