

Python worker.Worker Method Code Examples

This article compiles typical usage examples of the worker.Worker method in Python. If you are wondering what worker.Worker does, how to call it, or want to see concrete usage, the selected code examples below may help. You can also explore further usage examples from the worker module these snippets come from.


The following presents 12 code examples of the worker.Worker method, sorted by popularity by default.
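
The Worker classes in these examples come from different projects and take different constructor arguments, but the calling pattern is similar everywhere: build a Worker around its collaborators (a crawler, an environment, a shared model, a queue), then drive it with a method such as iterate(), work(), run(), or start(). Below is a minimal, self-contained sketch of that pattern; the Worker, EchoCrawler, and PrintEmitter classes here are hypothetical placeholders, not the API of any project listed on this page.

# Hypothetical, self-contained sketch of the pattern shared by the examples
# below: construct a Worker around its collaborators, then drive it.
class Worker:
    def __init__(self, crawler, emitters=None, frequency=-1):
        self.crawler = crawler
        self.emitters = emitters
        self.frequency = frequency

    def iterate(self):
        # One unit of work: crawl once, then hand the results to the emitters.
        frames = self.crawler.crawl()
        if self.emitters is not None:
            self.emitters.emit(frames)


class EchoCrawler:
    def crawl(self):
        return [{'feature': 'os'}]


class PrintEmitter:
    def emit(self, frames):
        print(frames)


if __name__ == '__main__':
    Worker(crawler=EchoCrawler(), emitters=PrintEmitter()).iterate()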

Example 1: testCrawlContainerKafka2

# Required module: import worker [as alias]
# Or: from worker import Worker [as alias]
def testCrawlContainerKafka2(self):
        emitters = EmittersManager(urls=['kafka://localhost:9092/test'])
        crawler = ContainersCrawler(
            features=['os', 'process'],
            user_list=self.container['Id'])
        worker = Worker(emitters=emitters, frequency=-1,
                        crawler=crawler)
        worker.iterate()
        kafka = pykafka.KafkaClient(hosts='localhost:9092')
        topic = kafka.topics['test']
        consumer = topic.get_simple_consumer()
        message = consumer.consume()
        assert '"cmd":"/bin/sleep 60"' in message.value

        for i in range(1, 5):
            worker.iterate()
            message = consumer.consume()
            assert '"cmd":"/bin/sleep 60"' in message.value 
Developer: cloudviz, Project: agentless-system-crawler, Lines: 20, Source: test_functional_containers_crawler.py
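
For reference, the test above checks the raw Kafka payload with a substring match. A rough sketch of reading the emitted frames back with pykafka and decoding any JSON lines might look as follows; the exact frame format is defined by the crawler's emitter, so the line-by-line JSON parsing here is only an assumption.

import json

import pykafka

# Hedged sketch: consume what the Worker emitted to the 'test' topic used in
# example 1 and try to decode each line as JSON. The frame layout is assumed.
client = pykafka.KafkaClient(hosts='localhost:9092')
consumer = client.topics['test'].get_simple_consumer(consumer_timeout_ms=1000)
for message in consumer:
    for line in message.value.decode('utf-8').splitlines():
        try:
            print(json.loads(line))
        except ValueError:
            print(line)  # header or metadata lines that are not plain JSON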

Example 2: make_worker

# Required module: import worker [as alias]
# Or: from worker import Worker [as alias]
def make_worker(env_producer, i, q, w_in_queue):
    return Worker(env_producer, i, q, w_in_queue) 
Developer: jet-black, Project: ppo-lstm-parallel, Lines: 4, Source: master.py

Example 3: main

# Required module: import worker [as alias]
# Or: from worker import Worker [as alias]
def main():
  '''Example of A3C running on MountainCar environment'''
  tf.reset_default_graph()

  history = []

  with tf.device('/{}:0'.format(DEVICE)):
    sess = tf.Session()
    global_model = ac_net.AC_Net(
        STATE_SIZE,
        ACTION_SIZE,
        LEARNING_RATE,
        'global',
        n_h1=N_H1,
        n_h2=N_H2)
    workers = []
    for i in xrange(NUM_WORKERS):
      env = gym.make(ENV_NAME)
      env._max_episode_steps = MAX_STEPS
      workers.append(worker.Worker(env,
                                   state_size=STATE_SIZE, action_size=ACTION_SIZE,
                                   worker_name='worker_{}'.format(i), global_name='global',
                                   lr=LEARNING_RATE, gamma=GAMMA, t_max=T_MAX, sess=sess,
                                   history=history, n_h1=N_H1, n_h2=N_H2, logdir=LOG_DIR))

    sess.run(tf.global_variables_initializer())

    for workeri in workers:
      worker_work = lambda: workeri.work(NUM_EPISODES)
      thread = threading.Thread(target=worker_work)
      thread.start() 
Developer: yrlu, Project: reinforcement_learning, Lines: 33, Source: mountaincar_a3c.py
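
One detail worth noting in examples 3 through 5: the thread target is a lambda that closes over the loop variable workeri. Python closures capture variables by reference, so if a thread happens to be scheduled only after the loop has moved on, it can end up calling work() on a different worker than intended. A common defensive variant, sketched below under the same names used above (workers and NUM_EPISODES are assumed to exist as in the example), binds the current worker as a default argument at lambda-definition time and also keeps the threads so they can be joined.

import threading

# Hedged variant of the thread-spawning loop from examples 3-5: bind the
# current worker as a default argument so each thread keeps its own worker
# even if it starts running after the loop variable has been rebound.
threads = []
for workeri in workers:
    worker_work = lambda w=workeri: w.work(NUM_EPISODES)
    thread = threading.Thread(target=worker_work)
    thread.start()
    threads.append(thread)

for thread in threads:
    thread.join()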

Example 4: main

# Required module: import worker [as alias]
# Or: from worker import Worker [as alias]
def main():
  '''Example of A3C running on Cartpole environment'''
  tf.reset_default_graph()

  history = []

  with tf.device('/{}:0'.format(DEVICE)):
    sess = tf.Session()
    global_model = ac_net.AC_Net(
        STATE_SIZE,
        ACTION_SIZE,
        LEARNING_RATE,
        'global',
        n_h1=N_H1,
        n_h2=N_H2)
    workers = []
    for i in xrange(NUM_WORKERS):
      env = gym.make('CartPole-v0')
      env._max_episode_steps = 200
      workers.append(worker.Worker(env,
                                   state_size=STATE_SIZE, action_size=ACTION_SIZE,
                                   worker_name='worker_{}'.format(i), global_name='global',
                                   lr=LEARNING_RATE, gamma=GAMMA, t_max=T_MAX, sess=sess,
                                   history=history, n_h1=N_H1, n_h2=N_H2, logdir=LOG_DIR))

    sess.run(tf.global_variables_initializer())

    for workeri in workers:
      worker_work = lambda: workeri.work(NUM_EPISODES)
      thread = threading.Thread(target=worker_work)
      thread.start() 
Developer: yrlu, Project: reinforcement_learning, Lines: 33, Source: cartpole_a3c.py

Example 5: main

# Required module: import worker [as alias]
# Or: from worker import Worker [as alias]
def main():
  '''Example of A3C running on Acrobot environment'''
  tf.reset_default_graph()

  history = []

  with tf.device('/{}:0'.format(DEVICE)):
    sess = tf.Session()
    global_model = ac_net.AC_Net(
        STATE_SIZE,
        ACTION_SIZE,
        LEARNING_RATE,
        'global',
        n_h1=N_H1,
        n_h2=N_H2)
    workers = []
    for i in xrange(NUM_WORKERS):
      env = gym.make('Acrobot-v1')
      env._max_episode_steps = 3000
      workers.append(worker.Worker(env,
                                   state_size=STATE_SIZE, action_size=ACTION_SIZE,
                                   worker_name='worker_{}'.format(i), global_name='global',
                                   lr=LEARNING_RATE, gamma=GAMMA, t_max=T_MAX, sess=sess,
                                   history=history, n_h1=N_H1, n_h2=N_H2, logdir=LOG_DIR))

    sess.run(tf.global_variables_initializer())

    for workeri in workers:
      worker_work = lambda: workeri.work(NUM_EPISODES)
      thread = threading.Thread(target=worker_work)
      thread.start() 
Developer: yrlu, Project: reinforcement_learning, Lines: 33, Source: acrobot_a3c.py

Example 6: testLinkUnlinkContainer

# Required module: import worker [as alias]
# Or: from worker import Worker [as alias]
def testLinkUnlinkContainer(self):
        docker_log = os.path.join(HOST_LOG_BASEDIR, self.host_namespace,
                                  self.container_name, 'docker.log')
        messages_log = os.path.join(HOST_LOG_BASEDIR, self.host_namespace,
                                    self.container_name, 'var/log/messages')
        crawler = DockerContainersLogsLinker(
            environment='cloudsight',
            user_list='ALL',
            host_namespace=self.host_namespace)
        worker = Worker(crawler=crawler)

        self.startContainer()
        worker.iterate()
        with open(docker_log, 'r') as log:
            assert 'hi' in log.read()
        with open(messages_log, 'r') as log:
            assert 'hi' in log.read()
        assert os.path.exists(docker_log)
        assert os.path.exists(messages_log)
        assert os.path.islink(docker_log)
        assert os.path.islink(messages_log)

        self.removeContainer()
        worker.iterate()
        assert not os.path.exists(docker_log)
        assert not os.path.exists(messages_log)
        assert not os.path.islink(docker_log)
        assert not os.path.islink(messages_log)

        self.startContainer()
        worker.iterate()
        assert os.path.exists(docker_log)
        with open(docker_log, 'r') as log:
            assert 'hi' in log.read()
        with open(messages_log, 'r') as log:
            assert 'hi' in log.read()
        assert os.path.exists(messages_log)
        assert os.path.islink(docker_log)
        assert os.path.islink(messages_log)

        self.removeContainer() 
Developer: cloudviz, Project: agentless-system-crawler, Lines: 43, Source: test_functional_logs_linker.py

Example 7: start

# Required module: import worker [as alias]
# Or: from worker import Worker [as alias]
def start(cls):
        cls.init()
        # Start the task-distribution thread
        Distributor(FETCH_MID_FROM, FETCH_MID_TO + 1).start()
        # Start the crawler threads
        for i in range(0, THREADS_NUM):
            Worker(f'Worker-{i}').start() 
Developer: cwjokaka, Project: bilibili_member_crawler, Lines: 9, Source: bilibili_member_crawler.py

Example 8: main

# Required module: import worker [as alias]
# Or: from worker import Worker [as alias]
def main():
    env = gym.make(env_name)
    env.seed(500)
    torch.manual_seed(500)

    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.n
    print('state size:', num_inputs)
    print('action size:', num_actions)

    online_net = QNet(num_inputs, num_actions)
    target_net = QNet(num_inputs, num_actions)
    target_net.load_state_dict(online_net.state_dict())
    online_net.share_memory()
    target_net.share_memory()

    optimizer = SharedAdam(online_net.parameters(), lr=lr)
    global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue()

    writer = SummaryWriter('logs')

    online_net.to(device)
    target_net.to(device)
    online_net.train()
    target_net.train()

    workers = [Worker(online_net, target_net, optimizer, global_ep, global_ep_r, res_queue, i) for i in range(mp.cpu_count())]
    [w.start() for w in workers]
    res = []
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
            [ep, ep_r, loss] = r
            writer.add_scalar('log/score', float(ep_r), ep)
            writer.add_scalar('log/loss', float(loss), ep)
        else:
            break
    [w.join() for w in workers] 
Developer: g6ling, Project: Reinforcement-Learning-Pytorch-Cartpole, Lines: 41, Source: train.py
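
Examples 8 through 10 all share the same queue convention on the consumer side: the main process keeps reading from res_queue until it receives None, then joins the workers. This implies that, on the producer side, each Worker process pushes [episode, reward, loss] records while training and a final None sentinel when it finishes. The helper functions below are a hypothetical sketch of that producer-side convention, not code from the repository.

import torch.multiprocessing as mp

# Hypothetical producer-side counterpart of the consumer loop in examples 8-10:
# push [episode, reward, loss] records during training and a None sentinel when
# the worker is done, so the main loop knows it can stop reading.
def record_result(res_queue, episode, episode_reward, loss):
    res_queue.put([episode, episode_reward, loss])

def signal_done(res_queue):
    res_queue.put(None)  # sentinel: lets the consumer loop break

if __name__ == '__main__':
    q = mp.Queue()
    record_result(q, episode=1, episode_reward=21.0, loss=0.05)
    signal_done(q)
    while True:
        item = q.get()
        if item is None:
            break
        print(item)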

Example 9: main

# Required module: import worker [as alias]
# Or: from worker import Worker [as alias]
def main():
    env = gym.make(env_name)
    env.seed(500)
    torch.manual_seed(500)

    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.n
    env.close()

    global_model = Model(num_inputs, num_actions)
    global_average_model = Model(num_inputs, num_actions)
    global_model.share_memory()
    global_average_model.share_memory()
    global_optimizer = SharedAdam(global_model.parameters(), lr=lr)
    global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue()

    writer = SummaryWriter('logs')

    n = mp.cpu_count()
    workers = [Worker(global_model, global_average_model, global_optimizer, global_ep, global_ep_r, res_queue, i) for i in range(n)]
    [w.start() for w in workers]
    res = []
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
            [ep, ep_r, loss] = r
            writer.add_scalar('log/score', float(ep_r), ep)
            writer.add_scalar('log/loss', float(loss), ep)
        else:
            break
    [w.join() for w in workers] 
Developer: g6ling, Project: Reinforcement-Learning-Pytorch-Cartpole, Lines: 34, Source: train.py

Example 10: main

# Required module: import worker [as alias]
# Or: from worker import Worker [as alias]
def main():
    env = gym.make(env_name)
    env.seed(500)
    torch.manual_seed(500)

    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.n
    global_model = Model(num_inputs, num_actions)
    global_model.share_memory()
    global_optimizer = SharedAdam(global_model.parameters(), lr=lr)
    global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue()

    writer = SummaryWriter('logs')

    workers = [Worker(global_model, global_optimizer, global_ep, global_ep_r, res_queue, i) for i in range(mp.cpu_count())]
    [w.start() for w in workers]
    res = []
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
            [ep, ep_r, loss] = r
            writer.add_scalar('log/score', float(ep_r), ep)
            writer.add_scalar('log/loss', float(loss), ep)
        else:
            break
    [w.join() for w in workers] 
Developer: g6ling, Project: Reinforcement-Learning-Pytorch-Cartpole, Lines: 29, Source: train.py

Example 11: main

# Required module: import worker [as alias]
# Or: from worker import Worker [as alias]
def main(args):
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    summary_writer = tf.summary.FileWriter(os.path.join(args.save_path, 'log'))
    global_steps_counter = itertools.count()  # thread-safe

    global_net = Net(S_DIM, A_DIM, 'global', args)
    num_workers = args.threads
    workers = []

    # create workers
    for i in range(1, num_workers + 1):
        worker_summary_writer = summary_writer if i == 0 else None
        worker = Worker(i, make_env(args), global_steps_counter,
                        worker_summary_writer, args)
        workers.append(worker)

    saver = tf.train.Saver(max_to_keep=5)

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        if args.model_path is not None:
            print('Loading model...\n')
            ckpt = tf.train.get_checkpoint_state(args.model_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('Initializing a new model...\n')
            sess.run(tf.global_variables_initializer())
        print_params_nums()
        # Start work process for each worker in a separated thread
        worker_threads = []
        for worker in workers:
            t = threading.Thread(target=lambda: worker.run(sess, coord, saver))
            t.start()
            time.sleep(0.5)
            worker_threads.append(t)

        if args.eval_every > 0:
            evaluator = Evaluate(
                global_net, summary_writer, global_steps_counter, args)
            evaluate_thread = threading.Thread(
                target=lambda: evaluator.run(sess, coord))
            evaluate_thread.start()

        coord.join(worker_threads) 
Developer: borgwang, Project: reinforce_py, Lines: 48, Source: train_A3C.py

Example 12: main

# Required module: import worker [as alias]
# Or: from worker import Worker [as alias]
def main(args):
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    tf.reset_default_graph()

    global_ep = tf.Variable(
        0, dtype=tf.int32, name='global_ep', trainable=False)
    
    env = Doom(visiable=False)
    Net(env.state_dim, env.action_dim, 'global', None)
    num_workers = args.parallel
    workers = []

    # create workers
    for i in range(num_workers):
        w = Worker(i, Doom(), global_ep, args)
        workers.append(w)

    print('%d workers in total.\n' % num_workers)
    saver = tf.train.Saver(max_to_keep=3)

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        if args.model_path is not None:
            print('Loading model...')
            ckpt = tf.train.get_checkpoint_state(args.model_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('Initializing a new model...')
            sess.run(tf.global_variables_initializer())
        print_net_params_number()

        # Start work process for each worker in a separated thread
        worker_threads = []
        for w in workers:
            run_fn = lambda: w.run(sess, coord, saver)
            t = threading.Thread(target=run_fn)
            t.start()
            time.sleep(0.5)
            worker_threads.append(t)
        coord.join(worker_threads) 
Developer: borgwang, Project: reinforce_py, Lines: 44, Source: train_A3C.py
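
Examples 11 and 12 drive their workers through a tf.train.Coordinator: each thread's run() loop is expected to poll coord.should_stop(), and coord.join(worker_threads) blocks until every thread has exited after someone calls coord.request_stop(). The sketch below is a self-contained, hypothetical illustration of that handshake with a dummy worker; the real Worker classes in the repository perform the actual A3C training.

import threading

import tensorflow as tf

# Hypothetical illustration of the coordinator handshake used in examples
# 11-12: workers loop until coord.should_stop(), and one of them eventually
# calls coord.request_stop() so that coord.join() can return.
class DummyWorker(object):
    def __init__(self, name, steps):
        self.name = name
        self.steps = steps

    def run(self, coord):
        done = 0
        while not coord.should_stop():
            done += 1
            if done >= self.steps:
                coord.request_stop()

coord = tf.train.Coordinator()
workers = [DummyWorker('worker_%d' % i, steps=1000) for i in range(4)]
worker_threads = []
for w in workers:
    t = threading.Thread(target=lambda w=w: w.run(coord))
    t.start()
    worker_threads.append(t)
coord.join(worker_threads)
print('all workers stopped')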


Note: The worker.Worker method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or use should follow the corresponding project's license. Do not reproduce without permission.