This article collects typical usage examples of Python's multiprocessing.cpu_count function. If you have been wondering what exactly cpu_count does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.
The sections that follow show 15 code examples of the cpu_count function, sorted by popularity by default.
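Before the examples, a minimal sketch of the call itself. Note that multiprocessing.cpu_count() reports the number of CPUs in the machine, not the number the current process is allowed to use; on platforms with CPU affinity (e.g. Linux), len(os.sched_getaffinity(0)) reports the latter.

import multiprocessing
import os

# Total number of CPUs in the system.
print(multiprocessing.cpu_count())

# Number of CPUs the current process may actually run on
# (only on platforms that expose CPU affinity, e.g. Linux).
if hasattr(os, "sched_getaffinity"):
    print(len(os.sched_getaffinity(0)))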
Example 1: main
def main(treeModelPath, dataInputPath, resultOutPath, debug):
    # read model
    treeModel = readModel(treeModelPath)
    # create output dir
    if not os.path.isdir(resultOutPath):
        os.mkdir(resultOutPath)
    if debug:
        pool = multiprocessing.Pool(processes=1)
    else:
        pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    print("Number of cores: %d" % multiprocessing.cpu_count())
    start_time = datetime.now()
    jobN = 0
    for filename in os.listdir(dataInputPath):
        if ".json" in filename:
            if debug:
                # debug mode: test just 1 file in 1 process
                pool.apply_async(filterFiles, (jobN, filename, dataInputPath, resultOutPath, treeModel, debug))
                break
            else:
                pool.apply_async(filterFiles, (jobN, filename, dataInputPath, resultOutPath, treeModel, debug))
            jobN += 1
    pool.close()
    pool.join()
    diff = datetime.now() - start_time
    print("Spent %.2f seconds" % diff.total_seconds())
Example 2: get_needle_tips
def get_needle_tips(images):
    """Get sample tips from images."""
    tips = []
    results = []
    # Do not make more processes than needed for the number of images.
    if len(images) > multiprocessing.cpu_count():
        proc_count = multiprocessing.cpu_count()
    else:
        proc_count = len(images)
    pool = Pool(processes=proc_count)
    for image in images:
        results.append(pool.apply_async(_get_ellipse_point,
                                        args=(image,)))
    for result in results:
        tip = result.get()
        if tip is not None:
            tips.append(tip)
    if len(tips) == 0:
        raise ValueError("No sample tip points found.")
    return tips
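The cap in Example 2 (never start more workers than there are images or cores) is a common pattern; a minimal equivalent sketch using min():

proc_count = min(len(images), multiprocessing.cpu_count())
pool = Pool(processes=proc_count)

Note that both versions assume images is non-empty; Pool(processes=0) raises a ValueError.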
Example 3: parse_arguments
def parse_arguments():
    """
    Encapsulate the use of argparse
    @param: None
    @return: The parsed arguments as an argparse.Namespace
    """
    parser = argparse.ArgumentParser(description="Create some random charts")
    # Required
    # Nothing
    # Optional
    parser.add_argument("-e", "--executable",
                        help="The executable to use [default: None]",
                        type=str,
                        default=None)
    parser.add_argument("-n", "--number",
                        help="Number of charts to randomly create [default: %(default)s]",
                        type=check_positive,
                        default=1)
    parser.add_argument("-t", "--threads",
                        help="Number of threads to use [default: %(default)s]",
                        type=int,
                        default=multiprocessing.cpu_count() - 1,
                        choices=range(1, multiprocessing.cpu_count()))
    return parser.parse_args()
Example 4: profile_locks
def profile_locks(cmd):
    (c_samples, c, cc, c_dev, sections) = lttng_session("profile_c",
        cmd(1, 'c'),
        ['memcached:c_begin', 'memcached:c_end', 'memcached:inside_cc'],
        measure_c)
    n = lttng_session("profile_n",
        cmd(multiprocessing.cpu_count(), 'c'),
        ['memcached:contention'],
        measure_n)[1]
    (nsamples, blk_samples) = lttng_session("profile_block_costs",
        cmd(1, 'c'),
        ['memcached:block_id'],
        measure_blocks)
    blk_cnts = lttng_session("profile_block_counts",
        cmd(1, 'c'),
        ['memcached:blk_cnts'],
        count_blocks)
    blk_costs = {k: ((blk_samples[k] * c / nsamples), v)
                 for k, v in blk_cnts.items() if k in blk_samples}
    nn = lttng_session("profile_contention",
        cmd(multiprocessing.cpu_count(), 'f'),
        ['memcached:contention'],
        measure_n)[1]
    return ((c, c_dev), cc, n, nn, sections, blk_costs)
Example 5: ProcessStuff
def ProcessStuff(spp_list):
    print('cpu_count() = %d\n' % multiprocessing.cpu_count())
    NUMBER_OF_PROCESSES = multiprocessing.cpu_count()
    TASKS = [(CallMaxent, (spp_list[i],)) for i in range(len(spp_list))]
    # Create queues
    task_queue = Queue()
    done_queue = Queue()
    # Submit tasks
    for task in TASKS:
        task_queue.put(task)
    # Start worker processes
    for i in range(NUMBER_OF_PROCESSES):
        Process(target=worker, args=(task_queue, done_queue)).start()
    # Get and print results
    print('Unordered results:')
    for i in range(len(TASKS)):
        print('\t', done_queue.get())
    # Tell child processes to stop
    for i in range(NUMBER_OF_PROCESSES):
        task_queue.put('STOP')
Example 6: GetParallelProcessCount
def GetParallelProcessCount(self):
    # limit based on free memory: budget roughly 80,000 KB per process
    f = os.popen('vmstat', 'r')
    f.readline()
    f.readline()
    line = f.readline()
    f.close()
    freeRAM = line.split()[3]
    cache = line.split()[5]
    ppCount = int((float(freeRAM) + float(cache)) / 80000.0)
    if ppCount > multiprocessing.cpu_count():  # never exceed the core count
        ppCount = multiprocessing.cpu_count()
    if ppCount < 1:  # need at least one process
        ppCount = 1
    # now limit based on CPU load
    f = open('/proc/loadavg', 'r')
    line = f.readline()
    f.close()
    load = float(line.split()[0])
    if load > (float(multiprocessing.cpu_count()) + 0.5) and ppCount > 3:
        ppCount = 3
    if load > (float(multiprocessing.cpu_count()) + 1.0) and ppCount > 2:
        ppCount = 2
    if load > (float(multiprocessing.cpu_count()) + 1.5) and ppCount > 1:
        ppCount = 1
    return ppCount
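Example 6 budgets roughly 80,000 KB (about 80 MB) of free-plus-cached RAM per worker and then backs off as the 1-minute load average climbs past cpu_count(). A sketch of the same idea, assuming the third-party psutil package is installed, avoids parsing vmstat output by hand:

import multiprocessing
import os
import psutil  # assumption: psutil is installed

def parallel_process_count(ram_per_worker=80 * 1024 * 1024):
    # budget ~80 MB of available RAM per worker, capped at the core count
    available = psutil.virtual_memory().available
    count = min(available // ram_per_worker, multiprocessing.cpu_count())
    # back off when the 1-minute load average already saturates the CPUs
    # (os.getloadavg() is Unix-only)
    if os.getloadavg()[0] > multiprocessing.cpu_count() + 0.5:
        count = min(count, 3)
    return max(count, 1)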
Example 7: take_screenshots_async
def take_screenshots_async(source_folder, config_path, verbose=False):
    sizes = load_ss_config(config_path)
    slides = parse_slide_folders(source_folder)
    dests = list(map(lambda pair: pair[0], slides[0]))
    urls = list(map(lambda pair: os.path.join(pair[0], pair[1]), slides[0]))
    shots = list(gen_configs(urls, dests, sizes, local_slide_name))
    q = mp.JoinableQueue()
    procs = []
    for i in range(mp.cpu_count() * 2):
        p = mp.Process(target=ss_q, args=(q, verbose))
        procs.append(p)
        p.start()
    for item in shots:
        q.put(tuple(item))
    q.join()
    for i in range(mp.cpu_count() * 2):
        q.put(None)
    for proc in procs:
        proc.join()
Example 8: retrieve_clusters
def retrieve_clusters(ne_lat, ne_lng, sw_lat, sw_lng, start_date, end_date, fatal, severe, light, inaccurate, zoom):
    marker_boxes = divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng)
    result_futures = []
    logging.info("number of cores: " + str(multiprocessing.cpu_count()))
    with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:
        for marker_box in marker_boxes:
            markers_in_box = Marker.bounding_box_query(
                marker_box[0],
                marker_box[1],
                marker_box[2],
                marker_box[3],
                start_date,
                end_date,
                fatal,
                severe,
                light,
                inaccurate,
            ).all()
            result_futures.append(executor.submit(calculate_clusters, markers_in_box, zoom))
    completed_futures = concurrent.futures.wait(result_futures)
    result = []
    for future in completed_futures.done:
        result.extend(future.result())
    return result
Example 9: parallel
def parallel(df, func):
    if len(df) > 0:
        p = Pool(cpu_count())
        df = p.map(func, np.array_split(df, cpu_count()))
        df = pd.concat(df, axis=0, ignore_index=True).reset_index(drop=True)
        p.close()
        p.join()
    return df
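A hedged usage sketch for Example 9's parallel helper, assuming pandas and numpy are imported as pd and np and using a hypothetical per-chunk function; note that func receives a whole DataFrame chunk (one piece of np.array_split), not a single row:

def add_double(chunk):  # hypothetical: operates on one DataFrame chunk
    chunk["double"] = chunk["value"] * 2
    return chunk

df = pd.DataFrame({"value": range(1000)})
df = parallel(df, add_double)  # split into cpu_count() chunks, map, re-concatenate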
Example 10: concurrent_test_jobs
def concurrent_test_jobs(platform):
    if platform == "windows":
        return str(multiprocessing.cpu_count() // 4)
    elif platform == "macos":
        return str(multiprocessing.cpu_count() // 2)
    else:
        return str(multiprocessing.cpu_count())
Example 11: ProcessStuff
def ProcessStuff(spp_list, epoch_list, model_list):
    print('cpu_count() = %d\n' % multiprocessing.cpu_count())
    NUMBER_OF_PROCESSES = multiprocessing.cpu_count()
    task_queue = Queue()
    done_queue = Queue()
    for spp in spp_list:
        for model in model_list:
            for epoch in epoch_list:
                TASKS = [(CallMaxEnt, (spp.rstrip("\r\n"), epoch.rstrip("\r\n"), model.rstrip("\r\n"), str(i))) for i in range(10)]
                print(" " + spp)
                # Submit tasks
                for task in TASKS:
                    task_queue.put(task)
                # Start worker processes
                for i in range(NUMBER_OF_PROCESSES):
                    Process(target=worker, args=(task_queue, done_queue)).start()
                # Get and print results
                print('Unordered results for ' + spp.rstrip("\r\n") + ':')
                for i in range(len(TASKS)):
                    print('\t', done_queue.get())
                # Tell child processes to stop
                for i in range(NUMBER_OF_PROCESSES):
                    task_queue.put('STOP')
Example 12: main
def main():
    total_work = multiprocessing.cpu_count()
    burnin = 30000
    significance_samples = 100000
    # integer division so each worker gets a whole number of samples
    per_process_samples = significance_samples // multiprocessing.cpu_count()
    alpha_count_slow = 0.001
    alpha_count_fast = find_optimal_decay(alpha_count_slow)
    alpha_mu_slow = 0.01
    alpha_mu_fast = 0.01
    buckets_slow = 50
    buckets_fast = 50
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    runs = pool.map(
        ergodic_chain,
        [[burnin, per_process_samples,
          alpha_count_slow, alpha_count_fast,
          alpha_mu_slow, alpha_mu_fast,
          buckets_slow, buckets_fast] for _ in range(total_work)])
    aggregator = [[] for _ in range(len(FUNC_LIST))]
    for run in runs:
        for i, data_list in enumerate(run):
            aggregator[i] += data_list
    colors = ['red', 'green', 'blue', 'purple']
    for label, data in zip(FUNC_LABELS, aggregator):
        _, _, patches = pylab.hist(
            data, 250, label=label,
            density=True, histtype='stepfilled')
        pylab.setp(patches, 'alpha', 0.4)
    pylab.legend()
    pylab.show()
Example 13: check_num_cpus
def check_num_cpus(n_cpus, table_size, min_table_size):
    messages = []
    if multiprocessing.current_process().daemon and n_cpus != 1:
        messages.append("WARNING: you chose n_cpus = %d but integrate already runs inside a "
                        "daemon process, which is not allowed. Therefore n_cpus is set to 1." % n_cpus)
        n_cpus = 1
    if n_cpus < 0:
        # negative values count back from the total number of cores
        n_cpus = multiprocessing.cpu_count() + n_cpus
    if n_cpus <= 0:
        messages.append("WARNING: you requested to use %d cores, "
                        "we use a single core instead!" % n_cpus)
        n_cpus = 1
    if n_cpus > 1 and table_size < min_table_size:
        messages.append("INFO: as the table has less than %d rows, we switch to one-cpu mode"
                        % min_table_size)
        n_cpus = 1
    elif n_cpus > multiprocessing.cpu_count():
        messages.append("WARNING: more processes demanded than available cpu cores, this might be "
                        "inefficient")
    return messages, n_cpus
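A short sketch of how Example 13's negative n_cpus convention plays out, with assumed values (8 physical cores, hypothetical table sizes):

# n_cpus = -2 means "all cores but two", so 6 on an 8-core machine
messages, n_cpus = check_num_cpus(-2, table_size=10000, min_table_size=500)
# -> n_cpus == 6, no messages

# small tables fall back to a single core with an INFO message
messages, n_cpus = check_num_cpus(-2, table_size=100, min_table_size=500)
# -> n_cpus == 1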
Example 14: __init__
def __init__(self, tasks, num_cpus=0, profiling=False):
    self.log = LoggingManager.get_logger('kraken')
    try:
        self.num_processes = int(num_cpus)
        if self.num_processes < 1:
            raise ValueError()
        if self.num_processes > cpu_count():
            self.log.warning("Requested number of cores (%d) is larger than available." % self.num_processes)
            raise ValueError()
    except (ValueError, TypeError):
        self.log.warning("Number of cores has not been specified or is incorrect. Using all available cores.")
        self.num_processes = cpu_count()
    self.log.info("Kraken has %d tentacles (cpu cores)" % self.num_processes)
    self.tasks = tasks
    self.num_tasks = len(tasks)
    self.tentacles = []
    # deal the tasks out round-robin, one slice per tentacle
    tentacle_tasks = [tasks[i::self.num_processes] for i in range(self.num_processes)]
    for i in range(self.num_processes):
        tentacle = Tentacle(tentacle_tasks[i], profiling)
        self.tentacles.append(tentacle)
    self.log.info("%d ships ready to be smashed" % self.num_tasks)
Example 15: main
def main(opts):
    """The main loop of the module, do the renaming in parallel etc."""
    log = logging.getLogger("exif2timestream")
    setup_logs(opts)
    # beginneth the actual main loop
    start_time = time()
    cameras = parse_camera_config_csv(opts["-c"])
    n_images = 0
    for camera in cameras:
        msg = "Processing experiment {}, location {}\n".format(
            camera[FIELDS["expt"]],
            camera[FIELDS["location"]],
        )
        msg += "Images are coming from {}, being put in {}".format(
            camera[FIELDS["source"]],
            camera[FIELDS["destination"]],
        )
        print(msg)
        log.info(msg)
        for ext, images in find_image_files(camera).items():
            images = sorted(images)
            n_cam_images = len(images)
            print("{0} {1} images from this camera".format(n_cam_images, ext))
            log.info("Have {0} {1} images from this camera".format(
                n_cam_images, ext))
            n_images += n_cam_images
            last_date = None
            subsec = 0
            count = 0
            # TODO: sort out the whole subsecond clusterfuck
            if "-1" in opts and opts["-1"]:
                log.info("Using 1 process (What is this? Fucking 1990?)")
                for image in images:
                    count += 1
                    print("Processed {: 5d} Images".format(count), end='\r')
                    process_image((image, camera, ext))
            else:
                from multiprocessing import Pool, cpu_count
                if "-t" in opts and opts["-t"] is not None:
                    try:
                        threads = int(opts["-t"])
                    except ValueError:
                        threads = cpu_count() - 1
                else:
                    threads = cpu_count() - 1
                # Ensure that we're using at least one thread
                threads = max(threads, 1)
                log.info("Using {0:d} processes".format(threads))
                # set the function's camera-wide arguments
                args = zip(images, cycle([camera]), cycle([ext]))
                pool = Pool(threads)
                for _ in pool.imap(process_image, args):
                    count += 1
                    print("Processed {: 5d} Images".format(count), end='\r')
                pool.close()
                pool.join()
            print("Processed {: 5d} Images. Finished this cam!".format(count))
    secs_taken = time() - start_time
    print("\nProcessed a total of {0} images in {1:.2f} seconds".format(
        n_images, secs_taken))