This article collects typical usage examples of the tornado.process.fork_processes function in Python. If you have been wondering what exactly fork_processes does, how to call it, or what real-world uses look like, the curated examples here should help.
The sections below show 15 code examples of fork_processes, sorted by popularity by default. Upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
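Before diving into the examples, here is a minimal, self-contained sketch of the function (written for this article, not taken from the examples below; the port number is arbitrary). fork_processes forks the requested number of child processes and, in each child, returns that child's task id; passing 0 forks one child per CPU core while the parent only supervises and restarts children.

from tornado import ioloop, process, web

class HelloHandler(web.RequestHandler):
    def get(self):
        self.write('hello')

def main():
    # Fork one worker per CPU core; returns this worker's task id (0, 1, ...).
    task_id = process.fork_processes(0)
    app = web.Application([(r'/', HelloHandler)])
    # Give each worker its own port, as several examples below do.
    app.listen(8000 + task_id)
    ioloop.IOLoop.current().start()

if __name__ == '__main__':
    main()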
Example 1: main

def main():
    options.parse_command_line()
    _port = options.options.port
    _process_num = options.options.process
    _debug_level = options.options.debug * 10
    # Fork the requested number of workers; each gets its own task id.
    process.fork_processes(_process_num, max_restarts=3)
    # Give every worker its own port, offset by its task id.
    process_port = _port + process.task_id()
    # Debug mode (autoreload) only works with a single process.
    process_debug = _process_num <= 1 and _debug_level < 30
    print('Service Running on %d ...' % process_port)
    app = web.Application((
        (r'/', views.base.IndexHandler),
        (r'/home', views.base.HomeHandler),
        (r'/auth/redirect', views.auth.OAuth2RedirectHandler),
        (r'/auth/revoke', views.auth.OAuth2RevokeHandler),
        (r'/auth/authorize', views.auth.OAuth2AuthorizeHandler),
        (r'/auth/info', views.auth.OAuth2InfoHandler),
        (r'/user/info', views.rest.UserInfoHandler),
        (r'/user/option', views.rest.UserOptionHandler),
        (r'/weibo/public', views.rest.WeiboPublicHandler),
        (r'/weibo/sync', views.rest.WeiboSyncHandler),
        (r'/weibo/query', views.rest.WeiboQueryHandler),
        (r'/weibo/redirect', views.rest.WeiboRedirectHandler),
        (r'/emotion/query', views.rest.EmotionQueryHandler),
    ), debug=process_debug, cookie_secret=setting.COOKIE_SECRET)
    app.listen(process_port, xheaders=True)
    loop = ioloop.IOLoop.instance()
    loop.start()
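Note the structure above: the fork happens first, and each worker then calls app.listen on its own port (the base port plus its task id), which presumes a load balancer in front. The next example takes the opposite approach, binding the sockets once before forking so that all workers share a single port.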
Example 2: main

def main():
    script_path = os.path.dirname(os.path.realpath(__file__))
    config_path = os.path.join(script_path, 'etc/harvest.cfg')
    config = ConfigParser()
    config.read(config_path)
    # Bind the listening sockets in the parent so every forked child
    # inherits and shares them.
    sockets = bind_sockets(config.get('server', 'port'),
                           config.get('server', 'address'))
    fork_processes(config.getint('server', 'instances'))
    datastore = DataStore(config.get('datastore', 'host'),
                          config.getint('datastore', 'port'),
                          config.get('datastore', 'username'),
                          config.get('datastore', 'password'),
                          config.get('datastore', 'database'))
    app = Application([(r"/rpc/store", Handler,
                        {'datastore': datastore,
                         'api_key': config.get('server', 'api_key')})])
    server = HTTPServer(app,
                        # getboolean, not get: a raw config string such
                        # as "false" would be truthy.
                        no_keep_alive=config.getboolean('server', 'no_keep_alive'),
                        ssl_options={
                            'certfile': config.get('server', 'certfile'),
                            'keyfile': config.get('server', 'keyfile')})
    server.add_sockets(sockets)
    IOLoop.instance().start()
Example 3: start

def start(self, num_processes: Optional[int] = 1, max_restarts: Optional[int] = None) -> None:
    """Starts this server in the `.IOLoop`.

    By default, we run the server in this process and do not fork any
    additional child process.

    If num_processes is ``None`` or <= 0, we detect the number of cores
    available on this machine and fork that number of child
    processes. If num_processes is given and > 1, we fork that
    specific number of sub-processes.

    Since we use processes and not threads, there is no shared memory
    between any server code.

    Note that multiple processes are not compatible with the autoreload
    module (or the ``autoreload=True`` option to `tornado.web.Application`
    which defaults to True when ``debug=True``).

    When using multiple processes, no IOLoops can be created or
    referenced until after the call to ``TCPServer.start(n)``.

    Values of ``num_processes`` other than 1 are not supported on Windows.

    The ``max_restarts`` argument is passed to `.fork_processes`.

    .. versionchanged:: 6.0
       Added ``max_restarts`` argument.
    """
    assert not self._started
    self._started = True
    if num_processes != 1:
        process.fork_processes(num_processes, max_restarts)
    sockets = self._pending_sockets
    self._pending_sockets = []
    self.add_sockets(sockets)
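For context, a caller normally drives this method as in the following minimal sketch (the port is illustrative; HTTPServer is one of the TCPServer subclasses this applies to): bind() queues the listening sockets, and start(0) forks one worker per core before attaching the sockets in each child.

from tornado import ioloop, web
from tornado.httpserver import HTTPServer

class PingHandler(web.RequestHandler):
    def get(self):
        self.write('pong')

app = web.Application([(r'/ping', PingHandler)])
server = HTTPServer(app)
server.bind(8888)  # stores the sockets in _pending_sockets
server.start(0)    # forks, then add_sockets() runs in every child
ioloop.IOLoop.current().start()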
Example 4: main

def main():
    """ entry """
    try:
        conf = __import__('conf')
    except ImportError as e:
        app_log.critical("Unable to load site config. ({})".format(e))
        raise SystemExit()
    parse_command_line()
    if options.debug:
        app_log.setLevel(logging.DEBUG)
    if not options.debug:
        # In production, fork one worker per CPU core.
        fork_processes(None)
    # Each worker listens on its own port, offset by its task id
    # (task_id() is None in single-process mode).
    options.port += task_id() or 0
    if not os.path.isdir(conf.app_path):
        app_log.critical("{p} isn't accessible, maybe "
                         "create it?".format(p=conf.app_path))
        raise SystemExit()
    app_log.debug("Starting {name} on port {port}".format(name=conf.name,
                                                          port=options.port))
    # initialize the application
    tornado.httpserver.HTTPServer(Application(options,
                                              conf)).listen(options.port,
                                                            '0.0.0.0')
    ioloop = tornado.ioloop.IOLoop.instance()
    if options.debug:
        tornado.autoreload.start(ioloop)
    # enter the Tornado IO loop
    ioloop.start()
Example 5: start

def start(self, num_processes=1):
    """Starts this server in the IOLoop.

    By default, we run the server in this process and do not fork any
    additional child process.

    If num_processes is ``None`` or <= 0, we detect the number of cores
    available on this machine and fork that number of child
    processes. If num_processes is given and > 1, we fork that
    specific number of sub-processes.

    Since we use processes and not threads, there is no shared memory
    between any server code.

    Note that multiple processes are not compatible with the autoreload
    module (or the ``debug=True`` option to `tornado.web.Application`).

    When using multiple processes, no IOLoops can be created or
    referenced until after the call to ``TCPServer.start(n)``.
    """
    assert not self._started
    self._started = True
    if num_processes != 1:
        process.fork_processes(num_processes)
    sockets = self._pending_sockets
    self._pending_sockets = []
    self.add_sockets(sockets)
Example 6: start

def start(self, num_processes=1):
    r"""Starts the server in the IOLoop."""
    assert not self._started
    self._started = True
    if num_processes != 1:
        process.fork_processes(num_processes)
    sockets, self._pending_sockets = self._pending_sockets, []
    self.add_sockets(sockets)
Example 7: main

def main():
    try:
        options, datafile_json = optparse_lib().parse_args()
        # Launch the statistics server in the background.
        cmdstr = """nohup python easystatserver.py > /dev/null 2>&1 &"""
        # cmdstr = """nohup ./easystatserver > /dev/null 2>&1 &"""
        # status, output = cmd_execute(cmdstr)
        import os
        os.system(cmdstr)
        # print(u"start benchmark test...")
        if options.processnum != -1:
            process.fork_processes(options.processnum)
        # (Alternative: a per-process DEBUG log file named by
        # tornado.process.task_id().)
        logging.basicConfig(level=logging.ERROR,
                            format='[%(levelname)s] [%(asctime)s] [%(filename)s-line:%(lineno)d] [%(funcName)s-%(threadName)s] %(message)s',
                            datefmt='%a,%Y-%m-%d %H:%M:%S',
                            filename="./log/easyhttpbenchmark.log",
                            filemode='a')
        easyhttpbc = easyhttpbenchmark(options.maxclientnum, options.clientnum,
                                       options.testtime, options.flag, datafile_json)
        easyhttpbc.benchmark_test()
        # print(u"benchmark test end...")
    except Exception as e:
        logging.error(str(e))
    try:
        from xmlrpclib import ServerProxy  # Python 2; xmlrpc.client on Python 3
        cfg_json = json.load(open("./conf/easyhttpbenchmark.conf", "r"), encoding='utf-8')
        stat_rpc_server = cfg_json['stat_rpc_server']
        stat_rpc_port = cfg_json['stat_rpc_port']
        svr = ServerProxy("http://" + stat_rpc_server + ":" + stat_rpc_port)
        # (Debug prints of the raw req/res/err/nul counters omitted.)
        import multiprocessing
        cpu_count = multiprocessing.cpu_count()
        # Report aggregate statistics to the RPC stat server; client totals
        # are scaled by the number of worker processes, since each forked
        # process kept its own counters.
        if options.processnum != 0:
            svr.stat_maxclientnum(options.processnum * options.maxclientnum)
            svr.stat_clientnum(options.processnum * options.clientnum)
        else:
            svr.stat_maxclientnum(cpu_count * options.maxclientnum)
            svr.stat_clientnum(cpu_count * options.clientnum)
        svr.set_test_time(easyhttpbc.testtime)
        svr.stat_total_req_cnt(easyhttpbc.total_req_cnt)
        svr.stat_total_res_cnt(easyhttpbc.total_res_cnt)
        svr.stat_total_err_cnt(easyhttpbc.total_err_cnt)
        svr.stat_total_nul_cnt(easyhttpbc.total_nul_cnt)
        svr.stat_total_below_10(easyhttpbc.below_10)
        svr.stat_total_between_10_20(easyhttpbc.between_10_20)
        svr.stat_total_between_20_30(easyhttpbc.between_20_30)
        svr.stat_total_over_30(easyhttpbc.over_30)
        svr.stat_total_res_time(easyhttpbc.total_res_time)
    except Exception as e:
        logging.error(str(e))
Example 8: server_forever

def server_forever(self, *args, **kwargs):
    try:
        if self._multiprocess:
            info('starting tornado server in multi-process mode')
            fork_processes(self._num_processes)
        else:
            info('starting tornado server in single-process mode')
        self._server = HTTPServer(self._app)
        self._server.add_sockets(self._sockets)
        IOLoop.instance().start()
    except Exception as e:
        error("exception in serve_forever: %s", e)
Example 9: main

def main():
    parse_command_line()
    settings = {
        'debug': options.debug
    }
    # Bind first, then fork: fork_processes(0) starts one worker per
    # CPU core, all sharing the bound sockets.
    sockets = bind_sockets(options.port)
    fork_processes(0)
    application = router(settings)
    _http_server = HTTPServer(application)
    _http_server.add_sockets(sockets)
    IOLoop.current().start()
Example 10: main

def main():
    options, DataFile = OptParseLib().parse_args()
    ConfFile = json.load(open(ConfFilePath, "r"), encoding='utf-8')
    # print(ConfFile)
    # print(options.ProcessNUM, options.ClientNUM, options.TEST_TIME, DataFile)
    print(u"Start Benchmark Test...")
    if options.ProcessNUM != -1:
        process.fork_processes(options.ProcessNUM)
    # One log file per forked process, named by its task id.
    logging.basicConfig(level=logging.DEBUG,
                        format='[%(levelname)s] (%(asctime)s) <%(message)s>',
                        datefmt='%a,%Y-%m-%d %H:%M:%S',
                        filename=ConfFile["LogFilePath"] + str(tornado.process.task_id()) + ".log",
                        filemode='w')
    easyBC = easyBenchmarkTesttool(options.ClientNUM, options.TEST_TIME, DataFile)
    easyBC.benchmark_test()
    print(u"Benchmark Test End...")
Example 11: run

def run(self):
    if options.debug:
        app_log.setLevel(logging.DEBUG)
    if not options.debug:
        fork_processes(None)
    options.port += task_id() or 0
    app_log.debug("Starting %s on port %s" % (cfg.platform_name, options.port))
    # initialize the application
    tornado.httpserver.HTTPServer(Application(self.commons)).listen(options.port, '0.0.0.0')
    ioloop = tornado.ioloop.IOLoop.instance()
    if options.debug:
        tornado.autoreload.start(ioloop)
    # enter the Tornado IO loop
    ioloop.start()
Example 12: main

def main():
    numProcs = inventory.NUM_INDEX_SHARDS + inventory.NUM_DOC_SHARDS + 1
    # Fork one process per role; the returned task id decides which
    # role (and which port) this process gets.
    taskID = process.fork_processes(numProcs, max_restarts=0)
    port = inventory.BASE_PORT + taskID
    if taskID == 0:
        # Task 0 is the front end.
        app = httpserver.HTTPServer(tornado.web.Application([
            (r"/search", Web),
            (r"/upload", UploadHandler),
            (r"/(.*)", IndexDotHTMLAwareStaticFileHandler, dict(path=SETTINGS['static_path']))
        ], **SETTINGS))
        logging.info("Front end is listening on " + str(port))
    elif taskID <= inventory.NUM_INDEX_SHARDS:
        # Tasks 1..NUM_INDEX_SHARDS serve the inverted-index shards.
        shardIx = taskID - 1
        # data = pickle.load(open("data/index%d.pkl" % (shardIx), "rb"))
        inverted_path = os.path.join(os.getcwd(), "../assignment5/df_jobs/%d.out" % (shardIx))
        logging.info("Inverted file path: %s" % inverted_path)
        data = pickle.load(open(inverted_path, 'rb'))  # binary mode for pickle
        idf_path = os.path.join(os.getcwd(), "../assignment5/idf_jobs/0.out")
        logIDF = pickle.load(open(idf_path, 'rb'))
        app = httpserver.HTTPServer(web.Application([(r"/index", index.Index, dict(data=data, logIDF=logIDF))]))
        logging.info("Index shard %d listening on %d" % (shardIx, port))
    else:
        # The remaining tasks serve the document shards.
        shardIx = taskID - inventory.NUM_INDEX_SHARDS - 1
        # data = pickle.load(open("data/doc%d.pkl" % (shardIx), "rb"))
        doc_path = os.path.join(os.getcwd(), "../assignment5/i_df_jobs/%d.out" % (shardIx))
        logging.info("Doc Server path %s" % doc_path)
        data = pickle.load(open(doc_path, "rb"))
        app = httpserver.HTTPServer(web.Application([(r"/doc", doc.Doc, dict(data=data))]))
        logging.info("Doc shard %d listening on %d" % (shardIx, port))
    app.add_sockets(netutil.bind_sockets(port))
    IOLoop.current().start()
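Example 12 makes the most of the return value: the task id assigns each forked process a distinct role (front end, index shard, or doc shard) and a distinct port. The following condensed, self-contained sketch of that pattern uses illustrative shard counts, ports, and handler names rather than the originals.

from tornado import httpserver, ioloop, netutil, process, web

NUM_SHARDS = 2    # hypothetical
BASE_PORT = 8000  # hypothetical

class RoleHandler(web.RequestHandler):
    def initialize(self, role):
        self.role = role

    def get(self):
        self.write(self.role)

# max_restarts=0: do not respawn crashed children.
task_id = process.fork_processes(NUM_SHARDS + 1, max_restarts=0)
port = BASE_PORT + task_id
role = 'frontend' if task_id == 0 else 'shard-%d' % (task_id - 1)
app = web.Application([(r'/', RoleHandler, dict(role=role))])
server = httpserver.HTTPServer(app)
server.add_sockets(netutil.bind_sockets(port))
ioloop.IOLoop.current().start()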
Example 13: fork

def fork():
    """
    Fork the processes off, set process titles (master, worker) and return
    an ioloop.

    Returns an instance of tornado.ioloop.IOLoop
    """
    # Set the custom process title of the master
    set_process_title()
    # Fork and create the ioloop
    options.workers = opts.workers()
    process.fork_processes(options.workers)
    io_loop = ioloop.IOLoop.instance()
    # Set the custom process title of the workers
    set_process_title()
    return io_loop
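The set_process_title helper is not shown in this excerpt. A plausible implementation (an assumption, using the third-party setproctitle package) hinges on the fact that tornado.process.task_id() returns None before fork_processes has been called and the worker's id afterwards, which is why calling the same helper before and after the fork labels master and workers differently:

from setproctitle import setproctitle  # third-party package (assumed)
from tornado import process

def set_process_title():
    task_id = process.task_id()
    if task_id is None:
        # Not forked yet: this is the master.
        setproctitle('myapp: master')
    else:
        setproctitle('myapp: worker %d' % task_id)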
Example 14: main

def main():
    if options.urls_file:
        with open(options.urls_file) as f:
            lines = [line for line in f.read().splitlines()
                     if not line.startswith('#')]
            if options.url_template:
                lines = [options.url_template % line for line in lines]
            requests.extend(lines)
    if not requests:
        sys.exit(0)
    # -1 means "do not fork"; otherwise run that many benchmark processes.
    if options.multi_processes != -1:
        process.fork_processes(options.multi_processes)
    bc = BenchClient(requests, options.timeout, options.max_clients,
                     options.time_len)
    bc.bench()
Example 15: main

def main():
    entries = {}
    for urls_file in urls_files:
        with open(urls_file) as f:
            for line in f.read().splitlines():
                if not line.startswith("#"):
                    for host in options.hosts.split(","):
                        entry = Entry.make(line, host.strip(), options.retry_times)
                        entries[entry.url] = entry
    if not entries:
        sys.exit(0)
    if options.multi_processes != -1:
        process.fork_processes(options.multi_processes)
    bc = Checker(entries.values(), options.timeout, options.max_clients)
    bc.check()
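As in Example 14, -1 serves as a "do not fork" sentinel for multi_processes; any other value forks that many checker processes, each running its own client pool, so the effective concurrency is roughly max_clients multiplied by the number of processes.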