This article collects typical code examples of the multiprocessing.freeze_support method in Python. If you have been wondering how exactly to use multiprocessing.freeze_support, or what it looks like in real code, the curated examples here may help. You can also explore further usage examples of the multiprocessing module it belongs to.
Below, 12 code examples of multiprocessing.freeze_support are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
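As background, the standard library documentation says freeze_support() should be called immediately after the if __name__ == '__main__': line of the main module when a program that uses multiprocessing is frozen into a Windows executable (for example with py2exe or PyInstaller); in a normal interpreter run, and on other operating systems, the call has no effect. A minimal sketch of that canonical pattern:

import multiprocessing

def worker(x):
    # Runs in a child process; on Windows the main module is re-imported
    # there, which is why the __main__ guard below is required.
    return x * x

if __name__ == '__main__':
    multiprocessing.freeze_support()  # no-op unless running as a frozen Windows executable
    with multiprocessing.Pool(processes=2) as pool:
        print(pool.map(worker, [1, 2, 3]))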
Example 1: print_sys_info
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import freeze_support [as alias]
def print_sys_info(opend_ip=None, opend_port=None):
    if futu.IS_PY2:
        mp.freeze_support()
    opend_version = get_opend_version(opend_ip, opend_port)
    futu_path = os.path.abspath(os.path.realpath(futu.__file__))
    log_dir = _get_log_dir()
    print('Futu path: ', futu_path)
    print('Futu version: ', futu.__version__)
    print('OpenD version:', opend_version)
    print('Python path: ', sys.executable)
    print('Python version: ', platform.python_version())
    print('OS: ', sys.platform)
    print('Platform: ', platform.platform())
    print('Arch: ', platform.architecture())
    print('Module search path: ', sys.path)
    print('Log dir: ', log_dir)
Example 2: main
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import freeze_support [as alias]
def main():
    multiprocessing.freeze_support()
    sys.excepthook = handle_exceptions
    parse_config()
    run_pool()
Example 3: parts
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import freeze_support [as alias]
def parts(self):
    words = set(self.voc.keys())
    multiprocessing.freeze_support()
    cores = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(processes=cores - 2)
    result = pool.map(self.com_idf, words)
    idf_dict = dict()
    for r in result:
        k = list(r.keys())[0]
        v = list(r.values())[0]
        idf_dict[k] = idf_dict.get(k, 0) + v
    with codecs.open(self.file_idf, 'w', encoding='utf-8') as f:
        f.write(json.dumps(idf_dict, ensure_ascii=False, indent=2, sort_keys=False))
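Example 3 fans work out with Pool.map and then merges the per-word results; note that calling it from an instance method requires self to be picklable on platforms that spawn worker processes. A standalone sketch of the same fan-out/merge pattern, with a hypothetical compute_idf worker standing in for self.com_idf:

import multiprocessing

def compute_idf(word):
    # Hypothetical worker: returns a one-entry mapping for a single word.
    return {word: 1.0}

if __name__ == '__main__':
    multiprocessing.freeze_support()
    with multiprocessing.Pool(processes=2) as pool:
        partials = pool.map(compute_idf, ['alpha', 'beta', 'gamma'])
    idf_dict = {}
    for part in partials:
        for k, v in part.items():
            idf_dict[k] = idf_dict.get(k, 0) + v
    print(idf_dict)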
Example 4: main_close
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import freeze_support [as alias]
def main_close(main_locals=None):
    # import utool as ut
    # if ut.VERBOSE:
    #     print('main_close')
    # _close_parallel()
    _reset_signals()

# if __name__ == '__main__':
#     multiprocessing.freeze_support()
Example 5: main
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import freeze_support [as alias]
def main():
    multiprocessing.freeze_support()
    create_and_run_plugin(BNetPlugin, sys.argv)
Example 6: Main
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import freeze_support [as alias]
def Main():
    multiprocessing.freeze_support()
    Config.Config.ClearLogs()
    ### GET OPTIONS ###
    arguments = GetArguements()
    options = arguments.parse_args()
    # Check if there is a geodb if frozen
    if getattr(sys, 'frozen', False):
        geodb_file = os.path.join(
            'geodb',
            'GeoLite2-City.mmdb'
        )
        if not os.path.isfile(geodb_file):
            if GetYesNo(("There is no geodb found, would you like to download it? "
                         "This is required for using basic Geo IP support within the "
                         "report queries. If you choose not to use this functionality, "
                         "expect errors for templates that use custom functions calling "
                         "geoip functions.")):
                InitGeoDb(geodb_file)
        else:
            SqliteCustomFunctions.GEO_MANAGER.AttachGeoDbs('geodb')

    if options.subparser_name == "process":
        options.db_name = os.path.join(
            options.output_path,
            options.evidencename + '.db'
        )
        manager = WindowsEventManager.WindowsEventManager(
            options
        )
        manager.ProcessEvents()
        CreateReports(options)
    elif options.subparser_name == "report":
        CreateReports(options)
    else:
        raise Exception("Unknown subparser: {}".format(options.subparser_name))
Example 7: run_view
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import freeze_support [as alias]
def run_view():
    if len(sys.argv) > 1 and "fork" in sys.argv[1]:
        multiprocessing.freeze_support()
        exit(0)

    def callback():
        run(False)

    url = "http://%s:%s" % ("127.0.0.1", 5000)
    view = Webview(url)
    view.callback = callback
    view.title = "RD-USB"
    view.width = 1250
    view.height = 800
    view.start()
Example 8: main
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import freeze_support [as alias]
def main():
    multiprocessing.freeze_support()
    cli.run_cli(sys.argv)
Example 9: main
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import freeze_support [as alias]
def main():
    # These two lines are for Windows threads
    if __name__ == '__main__':
        multiprocessing.freeze_support()
    endOrNot = setup(False)
    if endOrNot == True:
        if len(sys.argv) > 1:
            arg = sys.argv[1]
            for i in range(len(sys.argv)):
                if str(sys.argv[i]) == "--debug" or str(sys.argv[i]) == "debug":
                    v.debugMode = True
                if str(sys.argv[i]) == "--full" or str(sys.argv[i]) == "full":
                    v.fullMode = True
            if v.fullMode == True:
                print("Full mode: now doing predictions with linear regression.")
            if isInt(arg, len(v.desc)):
                Pred(int(arg))
            elif str(arg) == "help":
                print("For unimodal prediction, here is the correspondence")
                for i in range(len(v.desc)):
                    print(i, v.nameMod[i])
            elif str(arg) == "--debug" or str(arg) == "debug" or str(arg) == "full" or str(arg) == "--full":
                Pred(None)
            else:
                print("Error on arguments")
                print("For unimodal prediction, here is the correspondence")
                for i in range(len(v.desc)):
                    print(i, v.nameMod[i])
                print("For full mode (using linear regressions for predictions) type --full or full")
                print("For debug mode, type --debug or debug")
        else:
            Pred(None)
    else:
        print("Error on setup, please check files")
Example 10: main
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import freeze_support [as alias]
def main():
    # os.chdir(get_script_dir())
    os.chdir(os.path.dirname(sys.argv[0]))
    print("changing to folder %s" % os.path.dirname(sys.argv[0]))
    multiprocessing.freeze_support()
    app = OpenroastApp()
    app.run()
Example 11: multi_proc
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import freeze_support [as alias]
def multi_proc(configs):
    global t_conf
    multiprocessing.freeze_support()
    proc = multiprocessing.Pool(16)
    proc_result = []
    if isinstance(configs, dict):
        t = []
        t.append(configs.copy())
        configs = t
    for i, ei in enumerate(configs):
        r = proc.apply_async(sub_proc, args=(i, ei, t_conf))
        proc_result.append(r)
    proc.close()
    proc.join()
    configs_all = []
    for k in proc_result:
        configs_all.append(k.get())
    info = []
    configs_good_temp = []
    configs_bad_temp = []
    configs_bad = []
    configs_good = []
    for j in configs_all:
        info.append((j[1], j[2]))
        if j[1] == 9:
            configs_bad_temp.append(j[0])
        else:
            configs_good_temp.append(j)
    if configs_good_temp:
        configs_good_temp.sort(key=lambda x: x[2])
        configs_good_temp.sort(key=lambda x: x[1])
        for i in configs_good_temp:
            r = re.match(r'^\d_\d\.\d{2}_(.*)', i[0].get('remarks'))
            if r:
                remarks = r.group(1)
            else:
                remarks = i[0].get('remarks')
            remarks = '{}_{}_{}'.format(i[1], i[2], remarks)
            i[0]['remarks'] = remarks[:60]
            configs_good.append(i[0])
    if configs_bad_temp:
        for k in configs_bad_temp:
            r = re.match(r'^\d_\d\.\d{2}_(.*)', k.get('remarks'))
            if r:
                remarks = r.group(1)
            else:
                remarks = k.get('remarks')
            remarks = '{}_{}_HCR_{}'.format('9', '9.99', remarks)
            k['remarks'] = remarks[:60]
            configs_bad.append(k)
    return configs_good, configs_bad, info
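Example 11 uses the apply_async / close / join sequence: tasks are submitted asynchronously, the pool is closed to new work, join() waits for the workers, and each AsyncResult's get() then returns (or re-raises) the worker's outcome. A minimal sketch of that collection pattern, with a hypothetical probe worker standing in for sub_proc:

import multiprocessing

def probe(i, x):
    # Hypothetical worker: returns its index and a computed score.
    return i, x * x

if __name__ == '__main__':
    multiprocessing.freeze_support()
    pool = multiprocessing.Pool(processes=4)
    pending = [pool.apply_async(probe, args=(i, x)) for i, x in enumerate([2, 3, 5])]
    pool.close()  # no further tasks may be submitted
    pool.join()   # block until all submitted tasks finish
    results = [p.get() for p in pending]
    print(results)  # [(0, 4), (1, 9), (2, 25)]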
Example 12: train_classifier
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import freeze_support [as alias]
def train_classifier(test, blocker=False):
    number_train = 20
    number_valid = 30
    number_test = 25
    steps = 1000
    batch_size = 1024
    conv_layers = 3
    if test:
        number_train = 2
        number_valid = 2
        number_test = 2
        steps = 50
        batch_size = 20
        conv_layers = 2
    multiprocessing.freeze_support()
    episode_paths = frame.episode_paths(input_path)
    print('Found {} episodes'.format(len(episode_paths)))
    np.random.seed(seed=42)
    np.random.shuffle(episode_paths)
    if blocker:
        common_hparams = dict(use_action=True, expected_positive_weight=0.05)
        labeller = humanrl.pong_catastrophe.PongBlockerLabeller()
    else:
        common_hparams = dict(use_action=False)
        labeller = humanrl.pong_catastrophe.PongClassifierLabeller()
    data_loader = DataLoader(labeller, TensorflowClassifierHparams(**common_hparams))
    datasets = data_loader.split_episodes(episode_paths,
                                          number_train, number_valid, number_test,
                                          use_all=False)
    hparams_list = [
        dict(image_crop_region=((34, 34 + 160), (0, 160)),  # image_shape=[42, 42, 1],
             convolution2d_stack_args=[(4, [3, 3], [2, 2])] * conv_layers,
             batch_size=batch_size, multiprocess=False,
             fully_connected_stack_args=[50, 10],
             use_observation=False, use_image=True,
             verbose=True)
    ]
    start_experiment = time.time()
    print('Run experiment params: ', dict(number_train=number_train, number_valid=number_valid,
                                          number_test=number_test, steps=steps,
                                          batch_size=batch_size, conv_layers=conv_layers))
    print('hparams', common_hparams, hparams_list[0])
    logdir = save_classifier_path
    run_experiments(
        logdir, data_loader, datasets, common_hparams, hparams_list,
        steps=steps, log_every=int(.1 * steps))
    time_experiment = time.time() - start_experiment
    print('Steps: {}. Time in mins: {}'.format(steps, (1 / 60) * time_experiment))
    run_classifier_metrics()