This article collects typical usage examples of the ray.get method in Python. If you are unsure what exactly ray.get does, how to call it, or what real code that uses it looks like, the curated examples below should help; you can also look further into usage examples of the ray module itself.
The following shows 15 code examples of ray.get, ordered by popularity.
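Before the examples, a minimal sketch of the pattern all of them build on may help: .remote() schedules work and immediately returns an ObjectRef, while ray.get() blocks until the corresponding result is available. The function square here is purely illustrative.

import ray

ray.init()  # start (or connect to) a local Ray instance

@ray.remote
def square(x):
    return x * x

# .remote() returns an ObjectRef immediately; ray.get() blocks for the value.
ref = square.remote(4)
assert ray.get(ref) == 16

# Passing a list of refs to a single ray.get() call fetches them all at once.
refs = [square.remote(i) for i in range(3)]
assert ray.get(refs) == [0, 1, 4]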
Example 1: gen_exp_name
# Required module: import ray [as alias]
# Or: from ray import get [as alias]
def gen_exp_name(model_class, model_kwargs):
    """Generates an experiment name from the model class and parameters.

    :param model_class: (type) the class, one of GaussianMixture, PCAPreDensity or KernelDensity.
    :param model_kwargs: (dict) constructor arguments to the class.
    :return: A string succinctly encoding the class and parameters."""
    if model_class == GaussianMixture:
        n_components = model_kwargs.get("n_components", 1)
        covariance_type = model_kwargs.get("covariance_type", "full")
        return f"gmm_{n_components}_components_{covariance_type}"
    elif model_class == PCAPreDensity:
        if model_kwargs["density_class"] == KernelDensity:
            return "pca_kde"
        elif model_kwargs["density_class"] == GaussianMixture:
            return "pca_gmm"
        else:
            return "pca_unknown"
    elif model_class == KernelDensity:
        return "kde"
    else:
        return "default"
Example 2: db_jobs
# Required module: import ray [as alias]
# Or: from ray import get [as alias]
def db_jobs(self):
    # print("Running db_jobs")
    while not self.has_been_stopped.is_set():
        try:
            item = self.db_jobs_queue.get()  # timeout=1)
            self.db_jobs_queue.task_done()
        except Empty:
            if self.has_been_stopped.is_set():
                break
        else:
            if item is None or self.has_been_stopped.is_set():
                break
            db_job: RayDbJob = item
            # self.print_timecheck("Doing db job", item)
            try:
                db_job.execute()
            except Exception as e:
                if db_job.error is None:
                    print(traceback.format_exc())
                    self._print_timecheck(
                        "Continuing after error running DB job:", e
                    )
                    sleep(1)
            # else:
            #     self.print_timecheck("Done db job", item)
Example 3: ray_init
# Required module: import ray [as alias]
# Or: from ray import get [as alias]
def ray_init(self):
    """
    Connects to a Ray cluster, or starts one if none exists.
    """
    self.logger.info("Initializing Ray cluster with executor spec:")
    for spec_key, value in self.executor_spec.items():
        self.logger.info("{}: {}".format(spec_key, value))

    # Avoid accidentally starting local redis clusters.
    if 'redis_address' not in self.executor_spec:
        self.logger.warning("No redis address provided, starting local redis server.")
    ray.init(
        redis_address=self.executor_spec.get('redis_address', None),
        num_cpus=self.executor_spec.get('num_cpus', None),
        num_gpus=self.executor_spec.get('num_gpus', None)
    )
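Note that redis_address comes from older Ray releases; current Ray uses a single address argument instead. A rough modern equivalent of the snippet above might look like the sketch below (executor_spec is the same dict as in the example; the key name 'address' is our assumption).

import ray

address = executor_spec.get('address')  # e.g. "auto" to join a running cluster
if address is not None:
    # When joining an existing cluster, resource counts are fixed by the cluster.
    ray.init(address=address)
else:
    # No address given: start a fresh local instance with the requested resources.
    ray.init(
        num_cpus=executor_spec.get('num_cpus'),
        num_gpus=executor_spec.get('num_gpus'),
    )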
Example 4: result_by_worker
# Required module: import ray [as alias]
# Or: from ray import get [as alias]
def result_by_worker(self, worker_index=None):
    """
    Retrieves the full episode-reward time series for one worker, by index
    (or for the first worker in the registry if None).

    Args:
        worker_index (Optional[int]): Index of the worker to fetch.

    Returns:
        dict: Full results for this worker.
    """
    if worker_index is not None:
        ray_worker = self.ray_env_sample_workers[worker_index]
    else:
        # Otherwise just pick the first worker.
        ray_worker = self.ray_env_sample_workers[0]

    task = ray_worker.get_workload_statistics.remote()
    metrics = ray.get(task)

    # Return the full reward series.
    return dict(
        episode_rewards=metrics["episode_rewards"],
        episode_timesteps=metrics["episode_timesteps"]
    )
Example 5: get_all_worker_results
# Required module: import ray [as alias]
# Or: from ray import get [as alias]
def get_all_worker_results(self):
    """
    Retrieves the full episode-reward time series for all workers.

    Returns:
        list: List of dicts with worker results (timesteps and rewards).
    """
    results = list()
    for ray_worker in self.ray_env_sample_workers:
        task = ray_worker.get_workload_statistics.remote()
        metrics = ray.get(task)
        results.append(dict(
            episode_rewards=metrics["episode_rewards"],
            episode_timesteps=metrics["episode_timesteps"],
            episode_total_times=metrics["episode_total_times"],
            episode_sample_times=metrics["episode_sample_times"]
        ))
    return results
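One caveat worth noting: the loop above calls ray.get() once per worker, so each fetch blocks before the next worker is even asked. Launching all the remote calls first and gathering them with a single ray.get() lets the workers compute their statistics in parallel. A sketch of that variant, reusing the attribute names from the example:

# Launch every remote call up front, then gather once; the workers run
# their get_workload_statistics() in parallel instead of serially.
tasks = [w.get_workload_statistics.remote() for w in self.ray_env_sample_workers]
all_metrics = ray.get(tasks)  # one blocking call for the whole batch
results = [
    dict(
        episode_rewards=m["episode_rewards"],
        episode_timesteps=m["episode_timesteps"],
        episode_total_times=m["episode_total_times"],
        episode_sample_times=m["episode_sample_times"],
    )
    for m in all_metrics
]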
Example 6: test_worker_update
# Required module: import ray [as alias]
# Or: from ray import get [as alias]
def test_worker_update(self):
    """
    Tests whether a worker can update from an external batch correctly,
    including all corrections and postprocessing, using the pong spec.

    N.b. this test does not use a Ray executor.
    """
    ray.init()
    agent_config = config_from_path("configs/ray_apex_for_pong.json")
    ray_spec = agent_config["execution_spec"].pop("ray_spec")
    worker_cls = RayValueWorker.as_remote().remote
    ray_spec["worker_spec"]["worker_sample_size"] = 198
    ray_spec["worker_spec"]["worker_executes_exploration"] = True
    ray_spec["worker_spec"]["ray_exploration"] = 0.4

    worker = worker_cls(agent_config, ray_spec["worker_spec"], self.env_spec)

    time.sleep(5)
    start = time.perf_counter()
    task = worker.execute_and_get_with_count.remote()
    result, count = ray.get(task)
    task_time = time.perf_counter() - start
    print("internal result metrics = {}, external task time = {}, "
          "external throughput = {}".format(result.get_metrics(), task_time, 198 / task_time))
Example 7: __init__
# Required module: import ray [as alias]
# Or: from ray import get [as alias]
def __init__(self, logdir, config_dict=None, **kwargs):
    if ray is None:
        raise ImportError("No module named 'ray'.")
    logger = SafeLifeLogger(logdir, **kwargs)
    self.logdir = logdir
    self.actor = self.SafeLifeLoggingActor.remote(logger, config_dict)
    self._cstats = logger.cumulative_stats.copy()
    # _promises stores references to remote updates to cumulative_stats
    # that will be received in response to having sent a log item. There
    # is no point exposing this state because there is in general no way
    # to get up-to-date statistics to any thread, and therefore no benefit
    # from knowing whether you're waiting for an update.
    self._promises = []
    self._last_update = time.time()
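A _promises list like this is typically drained opportunistically: each time the owner checks in, it collects whichever cumulative_stats updates have already arrived without blocking on the rest. A minimal sketch of that drain step, assuming each promise resolves to an updated stats dict (the method name update_stats is ours):

def update_stats(self):
    if not self._promises:
        return
    # ray.wait(timeout=0) partitions the refs into ready / not-ready without
    # blocking; only the ready ones are fetched with ray.get().
    ready, pending = ray.wait(
        self._promises, num_returns=len(self._promises), timeout=0)
    for stats in ray.get(ready):
        self._cstats.update(stats)
    self._promises = pending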
Example 8: step
# Required module: import ray [as alias]
# Or: from ray import get [as alias]
def step(self, action):
    observation, reward, done, info = self.env.step(action)
    game = self.env.game  # needed below even when history is not recorded
    if self.record_history and not self._did_log_episode:
        self._episode_history['board'].append(game.board)
        self._episode_history['goals'].append(game.goals)
        self._episode_history['orientation'].append(game.orientation)
    if done and not self._did_log_episode and self.logger is not None:
        self._did_log_episode = True
        self.logger.log_episode(
            game, info.get('episode', {}),
            self._episode_history if self.record_history else None,
            self.is_training)
    return observation, reward, done, info
Example 9: step_walkers
# Required module: import ray [as alias]
# Or: from ray import get [as alias]
def step_walkers(self) -> None:
    """
    Make the walkers evolve to their next state, sampling an action from the \
    :class:`Model` and applying it to the :class:`Environment`.
    """
    model_states = self.walkers.get("model_states")
    env_states = self.walkers.get("env_states")
    walkers_states = self.walkers.get("states")
    parent_ids = (
        copy.deepcopy(self.walkers.get("id_walkers")) if self.tree is not None else None
    )

    model_states = self.model.predict(
        env_states=env_states, model_states=model_states, walkers_states=walkers_states
    )
    step_id = self.env.step.remote(model_states=model_states, env_states=env_states)
    env_states = ray.get(step_id)
    self.walkers.update_states(
        env_states=env_states, model_states=model_states,
    )
    self.update_tree(parent_ids)
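The original snippet used await on the remote call, which only works inside a coroutine; the version above blocks with ray.get instead, matching the hint in the source's own comment. The awaiting variant is also valid when the surrounding code runs in an asyncio event loop, because Ray ObjectRefs are awaitable. A self-contained sketch of that pattern (remote_step is purely illustrative):

import asyncio
import ray

ray.init()

@ray.remote
def remote_step(x):
    return x + 1

async def main():
    # `await ref` behaves like ray.get(ref) but yields control to the
    # event loop while the remote work is in flight.
    ref = remote_step.remote(41)
    result = await ref
    assert result == 42

asyncio.run(main())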
Example 10: _ensure_resources
# Required module: import ray [as alias]
# Or: from ray import get [as alias]
def _ensure_resources(self, instances):
    """Checks that we have enough Ray resources to create the request.

    TODO: This doesn't really work with more than one receiver, as they
    create and check in parallel. In any case, Ray will not error if we
    create an actor without resources; it will just wait, unused, until
    it can be run.
    """
    available = ray.available_resources()
    required = {
        "CPU": self._cpus_per_worker,
        **self._custom_resources_per_worker
    }
    required = {key: val * instances for key, val in required.items()}
    if not np.all(
            [available.get(key, 0) >= required[key] for key in required]):
        raise RuntimeError(
            "Not enough Ray resources to start the actor pool. "
            f"Need: {required} Available: {available}")
Example 11: main
# Required module: import ray [as alias]
# Or: from ray import get [as alias]
def main(self):
    while True:
        flag, data = ray.get(self.ds.pull.remote())
        packet_id, mode, datatype = flag.split('_')
        if mode == 'STOP':
            break
        if len(data) > 0:
            if mode == 'infer':
                if datatype == 'float':
                    data = np.asarray(data)
                    results = self.model.predict(data, batch_size=self.batch_size)
                    self.ds.push.remote(results, packet_id)
                elif datatype == 'int8':
                    data = np.asarray(data)
                    data = np.float16(data / 255)
                    results = self.model.predict(data, batch_size=self.batch_size)
                    self.ds.push.remote(results, packet_id)
                else:
                    raise UserWarning("Invalid datatype flag {}".format(datatype))
            else:
                raise UserWarning("Invalid mode flag {}".format(mode))
        else:
            time.sleep(self.wait_time)
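The flag here is a simple three-field protocol, "<packet_id>_<mode>_<datatype>", which flag.split('_') unpacks at the top of the loop. A quick illustration of the convention (the concrete values are ours):

# The flag encodes "<packet_id>_<mode>_<datatype>" in one string:
flag = "0001_infer_float"
packet_id, mode, datatype = flag.split('_')
assert (packet_id, mode, datatype) == ("0001", "infer", "float")
# A packet whose mode field is "STOP" (e.g. "0000_STOP_none") ends the loop.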
Example 12: get
# Required module: import ray [as alias]
# Or: from ray import get [as alias]
def get(self):
    st = time()
    worker_data = [
        self.rollout_queue.get(True)  # block until a rollout is available
        for _ in range(self.num_rollouts)
    ]
    et = time()
    self._host_wait_time += et - st

    rollouts = []
    terminal_rewards = []
    terminal_infos = []
    for w in worker_data:
        r, t, i = w["rollout"], w["terminal_rewards"], w["terminal_infos"]
        rollouts.append(r)
        terminal_rewards.append(t)
        terminal_infos.append(i)
    return rollouts, terminal_rewards, terminal_infos
Example 13: train
# Required module: import ray [as alias]
# Or: from ray import get [as alias]
def train(self):
    """Spawn processes and run the training loop."""
    print("Spawning and initializing communication...")

    # Spawn processes
    self._spawn()

    # Initialize communication
    for proc in self.processes:
        proc.init_communication.remote()

    # Run main training loop
    print("Running main training loop...")
    run_procs = [proc.run.remote() for proc in self.processes]
    futures = ray.get(run_procs)

    # Retrieve workers' data and write to wandb
    # NOTE: Logger logs the mean scores of each episode per update step
    if self.args.log:
        worker_logs = [f for f in futures if f is not None]
        self.logger.write_worker_log.remote(worker_logs)
    print("Exiting training...")
Example 14: test_base_experiment_gpu
# Required module: import ray [as alias]
# Or: from ray import get [as alias]
def test_base_experiment_gpu(tmpdir, request):
    skip_if_no_gpu(request.config)
    tmpdir_path = Path(tmpdir)
    ds = MockDataset.load()
    MockExperiment(
        MockModel,
        ds,
        data_dir=tmpdir_path / "test",
        ray_kwargs={"num_gpus": 1},
        ignore_ray_initialized_error=True,
    )

    # Make sure GPUs are available in a mock remote function; they won't
    # necessarily be visible on the master process.
    @ray.remote(num_gpus=1)
    def find_gpus():
        return ray.get_gpu_ids()

    assert len(ray.get(find_gpus.remote())) > 0
Example 15: full_grad
# Required module: import ray [as alias]
# Or: from ray import get [as alias]
def full_grad(theta):
    theta_id = ray.put(theta)
    grad_ids = [grad.remote(theta_id, xs_id, ys_id) for (xs_id, ys_id) in batch_ids]
    # The conversion to float64 is necessary for use with fmin_l_bfgs_b.
    return sum(ray.get(grad_ids)).astype("float64")
# From the perspective of scipy.optimize.fmin_l_bfgs_b, full_loss is simply a
# function which takes some parameters theta and computes a loss. Similarly,
# full_grad is a function which takes some parameters theta and computes the
# gradient of the loss. Internally, these functions use Ray to distribute the
# computation of the loss and the gradient over the data represented by the
# remote object IDs x_batches and y_batches, which is potentially distributed
# over a cluster. These details are hidden from scipy.optimize.fmin_l_bfgs_b,
# however, which simply uses them to run the L-BFGS algorithm.
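Putting the pieces together, the optimizer call itself would look roughly like the sketch below. full_loss and the initial parameter vector theta_init are assumptions based on the surrounding description (784 * 10 matches a linear model over flattened MNIST images, but the true dimension depends on the model).

from scipy.optimize import fmin_l_bfgs_b
import numpy as np

# Assumed starting point; full_loss/full_grad are the functions described above.
theta_init = np.zeros(784 * 10)
# fmin_l_bfgs_b returns the minimizing parameters, the final loss value,
# and a dict of convergence information.
theta_opt, final_loss, info = fmin_l_bfgs_b(full_loss, theta_init, fprime=full_grad)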
# Load the mnist data and turn the data into remote objects.