本文整理汇总了Python中worker.Worker.run方法的典型用法代码示例。如果您正苦于以下问题:Python Worker.run方法的具体用法?Python Worker.run怎么用?Python Worker.run使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类worker.Worker
的用法示例。
在下文中一共展示了Worker.run方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run
# 需要导入模块: from worker import Worker [as 别名]
# 或者: from worker.Worker import run [as 别名]
def run(self):
    """Thread entry point: expose this thread's ident as ``self.tid``
    while the Worker loop runs, and reset it when the loop exits.
    """
    # set thread ID — Python 2 name-mangled access to threading.Thread's
    # private __ident attribute (the public `ident` property on Python 3).
    self.tid = self._Thread__ident
    try:
        Worker.run(self)
    finally:
        # invalidate thread ID
        self.tid = None
示例2: main
# 需要导入模块: from worker import Worker [as 别名]
# 或者: from worker.Worker import run [as 别名]
def main():
    """CLI entry point: build a Worker from the parsed arguments and run
    it until it finishes or the user interrupts; always print a report.
    """
    url, max_workers, retry = parse_args()
    enable_pretty_logging()
    target = url if url else read_url()
    conn = Connection.use_connection(target)
    worker = Worker(conn, max_workers, retry)
    try:
        worker.run()
    except KeyboardInterrupt:
        # Ctrl-C: shut the worker down cleanly before reporting.
        worker.stop()
    worker.report()
示例3: spawn_worker
# 需要导入模块: from worker import Worker [as 别名]
# 或者: from worker.Worker import run [as 别名]
def spawn_worker(self):
    """Fork a child process that runs a fresh Worker.

    Returns the child's pid in the parent (and records the worker in
    self.workers). The child runs the worker loop and exits via
    sys.exit(0) on success.
    """
    sleep(0.1)
    worker = Worker(self.pid, self.app.server, self.app.args)
    pid = os.fork()
    if pid != 0:
        # parent process
        self.workers[pid] = worker
        return pid
    # child process
    try:
        worker.run()
        # SystemExit derives from BaseException, so it is NOT swallowed by
        # the `except Exception` below; the finally clause still runs.
        sys.exit(0)
    except Exception as e:
        # NOTE(review): after printing, the child falls out of this method
        # instead of exiting, so it would keep executing the master's code
        # path — consider an explicit non-zero exit here. TODO confirm.
        print e
    finally:
        worker.stop(self.app.args.grace)
示例4: spawn_worker
# 需要导入模块: from worker import Worker [as 别名]
# 或者: from worker.Worker import run [as 别名]
def spawn_worker(self):
    """Fork a child process that initialises and runs a fresh Worker.

    Returns the child's pid in the parent (and records the worker in
    self.workers). The child initialises, runs the worker loop, and
    exits via sys.exit(0) on success.
    """
    sleep(0.1)
    worker = Worker(self.pid, self.app.server, self.app.args)
    pid = os.fork()
    if pid != 0:
        # parent process
        self.workers[pid] = worker
        return pid
    # child process
    try:
        worker.init_worker()
        worker.run()
        # SystemExit derives from BaseException, so it is NOT swallowed by
        # the `except Exception` below; the finally clause still runs.
        sys.exit(0)
    except Exception as e:
        # NOTE(review): after logging, the child falls out of this method
        # instead of exiting, so it would keep executing the master's code
        # path — consider an explicit non-zero exit here. TODO confirm.
        self.logger.exception('Exception: %s', e)
    finally:
        worker.stop()
示例5: main
# 需要导入模块: from worker import Worker [as 别名]
# 或者: from worker.Worker import run [as 别名]
def main(args):
    """Train an A3C-style model: build a global net plus one Worker per
    thread, run each worker in its own thread, and optionally run a
    periodic evaluator.

    args is expected to carry save_path, threads, model_path and
    eval_every (TODO confirm against the arg parser).
    """
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    summary_writer = tf.summary.FileWriter(os.path.join(args.save_path, 'log'))
    global_steps_counter = itertools.count()  # thread-safe
    global_net = Net(S_DIM, A_DIM, 'global', args)
    num_workers = args.threads
    workers = []
    # create workers; only the first worker writes training summaries
    for i in range(1, num_workers + 1):
        # BUG FIX: the loop starts at 1, so the original `i == 0` test never
        # matched and no worker ever received the summary writer.
        worker_summary_writer = summary_writer if i == 1 else None
        worker = Worker(i, make_env(args), global_steps_counter,
                        worker_summary_writer, args)
        workers.append(worker)
    saver = tf.train.Saver(max_to_keep=5)
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        if args.model_path is not None:
            print('Loading model...\n')
            ckpt = tf.train.get_checkpoint_state(args.model_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('Initializing a new model...\n')
            sess.run(tf.global_variables_initializer())
            print_params_nums()
        # Start work process for each worker in a separate thread
        worker_threads = []
        for worker in workers:
            # BUG FIX: bind the loop variable as a default argument; a bare
            # `lambda: worker.run(...)` late-binds `worker`, so a thread
            # could end up running the wrong (later-assigned) worker.
            t = threading.Thread(target=lambda w=worker: w.run(sess, coord, saver))
            t.start()
            time.sleep(0.5)
            worker_threads.append(t)
        if args.eval_every > 0:
            evaluator = Evaluate(
                global_net, summary_writer, global_steps_counter, args)
            evaluate_thread = threading.Thread(
                target=lambda: evaluator.run(sess, coord))
            evaluate_thread.start()
        coord.join(worker_threads)
示例6: Worker
# 需要导入模块: from worker import Worker [as 别名]
# 或者: from worker.Worker import run [as 别名]
#!/usr/bin/env python
# Encoding: utf-8
# -----------------------------------------------------------------------------
# Project : Time Reader
# -----------------------------------------------------------------------------
# Author : Edouard Richard <[email protected]>
# -----------------------------------------------------------------------------
# License : GNU Lesser General Public License
# -----------------------------------------------------------------------------
# Creation : 08-Jul-2013
# Last mod : 08-Jul-2013
# -----------------------------------------------------------------------------
from worker import Worker
import sys

# Module-level, synchronous worker instance (created at import time).
# NOTE(review): `async` became a reserved keyword in Python 3.7, so this
# keyword argument only parses on Python 2 / early Python 3.
worker = Worker(async=False)

if __name__ == "__main__":
    # Forward any command-line arguments straight to the worker.
    worker.run(*sys.argv[1:])
# EOF
示例7: plugin
# 需要导入模块: from worker import Worker [as 别名]
# 或者: from worker.Worker import run [as 别名]
'container. "ALL" will crawl all namespaces including the host '
'itself. This option is only valid for INVM crawl mode. Example: '
'--crawlContainers 5f3380d2319e,681be3e32661',
)
# Register the environment-selection flag, configure logging, and run the
# Docker-container log linker under a Worker until interrupted.
parser.add_argument(
    '--environment',
    dest='environment',
    type=str,
    default='cloudsight',
    # FIX: corrected "speficies" -> "specifies" in the user-visible help text.
    help='This specifies some environment specific behavior, like how '
         'to name a container. The way to add a new behavior is by '
         'implementing a plugin (see plugins/cloudsight_environment.py '
         'as an example. Defaults to "cloudsight".',
)

# Route crawler and plugin-framework logs to dedicated files.
misc.setup_logger('crawlutils', 'linker.log')
misc.setup_logger('yapsy', 'yapsy.log')

args = parser.parse_args()

crawler = DockerContainersLogsLinker(environment=args.environment,
                                     user_list=args.crawlContainers,
                                     host_namespace=args.namespace)
worker = Worker(emitters=None,
                frequency=args.frequency,
                crawler=crawler)
try:
    worker.run()
except KeyboardInterrupt:
    # Ctrl-C is the normal way to stop the linker; exit quietly.
    pass
示例8: main
# 需要导入模块: from worker import Worker [as 别名]
# 或者: from worker.Worker import run [as 别名]
def main():
    """CLI entry point for the crawler: requires root, then builds the
    argument parser for crawler options.

    NOTE(review): this listing is truncated (the omission marker follows),
    so the tail of the last add_argument call and the startup code are
    not visible here.
    """
    euid = os.geteuid()
    if euid != 0:
        # Crawling requires root privileges.
        print 'Need to run this as root.'
        exit(1)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--options',
        dest='options',
        type=json_parser,
        default={},
        help='JSON dict of crawler options used to be passed as arguments'
        'to the crawler plugins.'
    )
    parser.add_argument(
        '--url',
        dest='url',
        type=csv_list,
        default=['stdout://'],
        help='Send the snapshot data to URL. Defaults to the console.',
    )
    parser.add_argument(
        '--namespace',
        dest='namespace',
        type=str,
        nargs='?',
        # Default is computed once, at parser-construction time.
        default=misc.get_host_ipaddr(),
        help='Data source this crawler is associated with. Defaults to '
        '/localhost',
    )
    parser.add_argument(
        '--features',
        dest='features',
        type=csv_list,
        default=['os', 'cpu'],
        help='Comma-separated list of feature-types to crawl. Defaults to '
        'os,cpu',
    )
    parser.add_argument(
        '--frequency',
        dest='frequency',
        type=int,
        default=-1,
        help='Target time period for iterations. Defaults to -1 which '
        'means only run one iteration.'
    )
    parser.add_argument(
        '--compress',
        dest='compress',
        action='store_true',
        default=False,
        help='Whether to GZIP-compress the output frame data, must be one of '
        '{true,false}. Defaults to false',
    )
    parser.add_argument('--logfile', dest='logfile', type=str,
                        default='crawler.log',
                        help='Logfile path. Defaults to crawler.log'
                        )
    parser.add_argument(
        '--crawlmode',
        dest='crawlmode',
        type=str,
        choices=[
            Modes.INVM,
            Modes.OUTVM,
            Modes.MOUNTPOINT,
            Modes.OUTCONTAINER,
            Modes.MESOS,
        ],
        default=Modes.INVM,
        help='The crawler mode: '
        '{INVM,OUTVM,MOUNTPOINT,OUTCONTAINER}. '
        'Defaults to INVM',
    )
    parser.add_argument(
        '--mountpoint',
        dest='mountpoint',
        type=str,
        default='/',
        help='Mountpoint location used as the / for features like packages,'
        'files, config'
    )
    parser.add_argument(
        '--format',
        dest='format',
        type=str,
        default='csv',
        choices=['csv', 'graphite', 'json'],
        help='Emitted data format.',
    )
    parser.add_argument(
        '--crawlContainers',
        dest='crawlContainers',
        type=str,
        nargs='?',
        default='ALL',
        help='List of containers to crawl as a list of Docker container IDs'
        '(only Docker is supported at the moment). ' 'Defaults to all '
#.........这里部分代码省略.........
示例9: smooth
# 需要导入模块: from worker import Worker [as 别名]
# 或者: from worker.Worker import run [as 别名]
policy_net=policy_net,
value_net=value_net,
global_counter=global_counter,
returns_list=returns_list,
discount_factor = 0.99,
max_global_steps=MAX_GLOBAL_STEPS)
workers.append(worker)
# Run every configured worker in its own thread, wait for them to finish,
# then plot the raw and smoothed episode returns.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()

    # Start worker threads
    worker_threads = []
    for worker in workers:
        # BUG FIX: bind `worker` as a default argument. The original bare
        # `lambda: worker.run(...)` late-binds the loop variable, so threads
        # racing the loop could all run the same (last) worker. This file's
        # other example (PolicyMonitor) already uses the bound form.
        worker_fn = lambda worker=worker: worker.run(sess, coord, STEPS_PER_UPDATE)
        t = threading.Thread(target=worker_fn)
        t.start()
        worker_threads.append(t)

    # Wait for all workers to finish
    coord.join(worker_threads, stop_grace_period_secs=300)

    # Plot the smoothed returns
    x = np.array(returns_list)
    y = smooth(x)
    plt.plot(x, label='orig')
    plt.plot(y, label='smoothed')
    plt.legend()
    plt.show()
示例10: PolicyMonitor
# 需要导入模块: from worker import Worker [as 别名]
# 或者: from worker.Worker import run [as 别名]
# Build a policy-evaluation monitor that shares the global policy net.
pe = PolicyMonitor(
    env=make_env(wrap=False),
    policy_net=policy_net,
    summary_writer=summary_writer,
    saver=saver)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()

    # Load a previous checkpoint if it exists
    latest_checkpoint = tf.train.latest_checkpoint(CHECKPOINT_DIR)
    if latest_checkpoint:
        print("Loading model checkpoint: {}".format(latest_checkpoint))
        saver.restore(sess, latest_checkpoint)

    # Start worker threads
    worker_threads = []
    for worker in workers:
        # Default-argument binding freezes the current `worker` for each
        # thread, avoiding the late-binding closure pitfall.
        worker_fn = lambda worker=worker: worker.run(sess, coord, FLAGS.t_max)
        t = threading.Thread(target=worker_fn)
        t.start()
        worker_threads.append(t)

    # Start a thread for policy eval task
    monitor_thread = threading.Thread(target=lambda: pe.continuous_eval(FLAGS.eval_every, sess, coord))
    monitor_thread.start()

    # Wait for all workers to finish
    coord.join(worker_threads)
示例11: serve
# 需要导入模块: from worker import Worker [as 别名]
# 或者: from worker.Worker import run [as 别名]
def serve(port, profile, daemon):
    """Build the application for *profile* and serve it on *port* via a
    Worker, optionally daemonizing."""
    application = make_app(profile=profile)
    from worker import Worker
    server = Worker(port=port, daemon=daemon, app=application)
    server.run()
示例12: MyForm
# 需要导入模块: from worker import Worker [as 别名]
# 或者: from worker.Worker import run [as 别名]
#.........这里部分代码省略.........
self.ui.taskslists[8].setDate("thisweek")
self.ui.taskslists[8].week=getWeekNr()
self.ui.lineeditlist[8].setDate("thisweek")
tasks=self.db.getForWeek(getWeekNr())
assigned=[]
for j in tasks:
n=Task(j[1],j[0],j[6])
if j[2]!="thisweek":
assigned.append(n)
n.setFlags(QtCore.Qt.ItemFlags())
else:
self.ui.taskslists[8].addItem(n)
for i in assigned:
self.ui.taskslists[8].addItem(i)
def moveTask(self, item, date, week):
    """Persist a task's new due date/week after a drag-and-drop move."""
    self.db.moveForDate(item.itemid, date, week)
    if date != "thisweek":
        # The task left the current-week list; rebuild that list.
        self.loadThisWeek()
def editTask(self, item):
    """Rename a task in place; if the new name collides with an existing
    task, revert the item's text and show an error dialog."""
    name = str(item.text()).strip()
    # checkIfNew returns a truthy record when the name already exists.
    ifnew = self.db.checkIfNew(name)
    if not ifnew:
        self.db.editTask(item.itemid, name)
        self.loadThisWeek()
    else:
        # Collision: restore the previous text from the database.
        old = self.db.getForId(item.itemid)
        item.setText(str(old[0][1]))
        msg = QtGui.QMessageBox(self)
        msg.setWindowTitle("Error")
        msg.setText("Task already exists. It's marked due date: %s" % str(ifnew[2]))
        msg.show()
def createNewTask(self, name, tdate, due_week):
    """Create a task named *name* due on *tdate* (week *due_week*),
    refusing duplicates with an error dialog."""
    name = str(name).strip()
    existing = self.db.checkIfNew(name)
    if existing:
        # Duplicate name: tell the user when the existing task is due.
        box = QtGui.QMessageBox(self)
        box.setWindowTitle("Error")
        box.setText("Task already exists. It's marked due date: %s" % str(existing[2]))
        box.show()
        return
    newid = self.db.createTask(name, tdate, due_week)
    # Add the new item to whichever list shows its due date.
    for tasklist in self.ui.taskslists:
        if tasklist.date == tdate:
            tasklist.addItem(Task(name, newid))
    if due_week == getWeekNr():
        self.loadThisWeek()
@QtCore.pyqtSlot()
def on_next_clicked(self):
    # Shift the visible range forward by one step (the *_week slots
    # step by 7) and redraw.
    self.v += 1
    self.fillWeek()
@QtCore.pyqtSlot()
def on_back_clicked(self):
    # Shift the visible range back by one step and redraw.
    self.v -= 1
    self.fillWeek()
@QtCore.pyqtSlot()
def on_next_week_clicked(self):
    # Jump the visible range forward a full week (7 steps) and redraw.
    self.v += 7
    self.fillWeek()
@QtCore.pyqtSlot()
def on_back_week_clicked(self):
    # Jump the visible range back a full week (7 steps) and redraw.
    self.v -= 7
    self.fillWeek()
@QtCore.pyqtSlot()
def on_bottomPanel_btn_clicked(self):
    """Toggle the bottom panel's visibility, recording the new state in
    the options dict."""
    was_hidden = self.options['bottomPnlHidden']
    self.options['bottomPnlHidden'] = not was_hidden
    if was_hidden:
        self.ui.bottomPnl.show()
    else:
        self.ui.bottomPnl.hide()
def createTask(self, name):
    # Factory hook: wrap a plain name in a Task list item.
    return Task(name)
def taskDone(self, itemid, done):
    """Persist a task's done/undone flag and refresh both task views."""
    self.db.setToDone(itemid, done)
    # Debug trace (Python 2 print statement).
    print itemid, done
    self.loadThisWeek()
    self.loadOutDated()
def resortTask(self, widget):
    """Persist the current on-screen ordering of *widget*'s enabled items
    via a background Worker, then reload the week view when it finishes.

    Items with empty flags are disabled placeholders and are skipped;
    positions are renumbered as if they were absent.
    """
    ids = []
    positions = []
    skipped = 0
    for row in range(widget.count()):
        entry = widget.item(row)
        if entry.flags() == QtCore.Qt.ItemFlags():
            skipped += 1
        else:
            ids.append(entry.itemid)
            positions.append(row - skipped)
    self.worker = Worker(ids, positions, self)
    self.connect(self.worker, QtCore.SIGNAL("finished"), self.loadThisWeek)
    self.worker.run()
def ldropEvent(self, e):
    """Handle a drop onto the delete target: remove the dragged task item
    from its source list and delete it from the database."""
    e.accept()
    r = e.source().row(e.source().currentItem())
    item = e.source().takeItem(r)
    self.db.deleteTask(item.itemid)
    del(item)
    self.loadThisWeek()
def ldragMoveEvent(self, e):
    # Accept every drag-move so drops are allowed anywhere over the target.
    e.accept()
示例13: main
# 需要导入模块: from worker import Worker [as 别名]
# 或者: from worker.Worker import run [as 别名]
def main(args):
worker = Worker(args)
return_code = worker.run()
if not isinstance(return_code, return_codes.Success):
print return_code
return return_code.code
示例14: main
# 需要导入模块: from worker import Worker [as 别名]
# 或者: from worker.Worker import run [as 别名]
def main(argv=sys.argv[1:]):
    """Worker daemon entry point: start|stop|status|version.

    Parses options, configures logging, then dispatches on the command;
    "start" either runs the worker in the foreground under a pid-file
    lock or daemonizes it.

    NOTE(review): the default `argv=sys.argv[1:]` is evaluated once at
    import time, so callers that mutate sys.argv later won't be seen.
    NOTE(review): this listing is truncated — the DaemonContext call at
    the end is cut off.
    """
    global worker_obj
    usage = "%prog [options] start|stop|status|version (defaults to start)"
    parser = OptionParser(usage=usage)
    parser.add_option("-f", "--foreground", dest="foreground", default=False,
                      action="store_true",
                      help="If specified, run worker in the foreground instead of daemonizing")
    parser.add_option("-l", "--log-dir", dest="log_dir", default="~/djm/log",
                      help="Directory to use for log and pid files (defaults to ~/djm/log)")
    parser.add_option("--pid-file", dest="pid_file", default=None,
                      help="File to store pid information (defaults to <log_dir>/worker.pid)")
    parser.add_option("--port", default=DEFAULT_WORKER_PORT, type=int,
                      help="Port to listen for requests (defaults to %d)" %
                      DEFAULT_WORKER_PORT)
    parser.add_option("--restart", default=False, action="store_true",
                      help="If already running, force a restart to ensure settings are used")
    parser.add_option("--log-main", default=False, action="store_true",
                      help="If specified, log main program details to worker_main.log")
    (options, args) = parser.parse_args(argv)
    # setup logging
    log_dir = os.path.abspath(os.path.expanduser(options.log_dir))
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    root_logger = logging.getLogger()
    if len(root_logger.handlers) == 0:
        # we only mess with the root logger if there isn't already
        # handlers
        root_logger.setLevel(logging.DEBUG)
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setLevel(logging.INFO)
        root_logger.addHandler(console_handler)
    if options.log_main:
        # Optional verbose file log for the launcher itself.
        handler = logging.FileHandler(os.path.join(log_dir, "worker_main.log"))
        handler.setLevel(logging.DEBUG)
        root_logger.addHandler(handler)
    logger.debug("Running worker_main with command line %s" % argv)
    if options.pid_file:
        pid_file = os.path.abspath(os.path.expanduser(options.pid_file))
    else:
        pid_file = os.path.join(log_dir, "worker.pid")
    # Default command is "start"; more than one positional is an error.
    if len(args) == 0:
        cmd = "start"
    elif len(args) > 1:
        parser.error("Too many arguments")
    else:
        cmd = args[0]
    VALID_COMMANDS = ["start", "stop", "status", "version"]
    if cmd not in VALID_COMMANDS:
        parser.error("Command must be one of %s" % VALID_COMMANDS)
    if cmd != "start" and options.restart:
        parser.error("--restart option only valid for start command")
    if cmd == "stop":
        return stop(pid_file)
    elif cmd == "status":
        pid = get_daemon_pid(pid_file)
        if not pid:
            # No running daemon found: non-zero status.
            return 1
        else:
            logger.info("Worker process %d is running" % pid)
            return 0
    elif cmd == "version":
        logger.info("%s" % version.VERSION)
        return 0
    # otherwise we are starting the worker
    if os.path.exists(pid_file) and lockfile.FileLock(pid_file).is_locked():
        logger.info("Pid file %s is locked - daemon is already running" %
                    pid_file)
        if options.restart:
            # Stop the running instance first, then fall through to start.
            rc = stop(pid_file)
            if rc != 0:
                return rc
            logger.info("Will now start new worker")
        else:
            return 0
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    if options.foreground:
        # Foreground mode: hold the pid-file lock for the worker's lifetime.
        lock = lockfile.FileLock(pid_file)
        with lock:
            worker_obj = Worker(log_dir, pid_file,
                                port=options.port,
                                running_in_fg=True)
            worker_obj.setup()
            signal.signal(signal.SIGTERM, terminate)
            logger.info("running in Foreground")
            return worker_obj.run()
    else:
        logger.info("Daemonizing, pid file will be at %s, port is %d" % \
                    (pid_file, options.port))
        with daemon.DaemonContext(signal_map=signal_map,
#.........这里部分代码省略.........