This article collects typical usage examples of the Python method ray.remote. If you are wondering what exactly ray.remote does, how to call it, or what real code that uses it looks like, the curated samples below should help. You can also explore further usage examples from the ray module itself.
The following shows 15 code examples of ray.remote, sorted by popularity by default.
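Before the examples, here is a minimal, self-contained sketch of the basic ray.remote pattern (not taken from any of the examples below): a plain function becomes a remote task, .remote() schedules it and returns an object ref, and ray.get() fetches the result.

import ray

ray.init()

@ray.remote
def square(x):
    # Runs as a Ray task, potentially in another process or on another machine.
    return x * x

refs = [square.remote(i) for i in range(4)]   # schedule four tasks
print(ray.get(refs))                          # [0, 1, 4, 9]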
Example 1: __init__
# Required import: import ray [as alias]
# Or: from ray import remote [as alias]
def __init__(self, logdir, config_dict=None, **kwargs):
    if ray is None:
        raise ImportError("No module named 'ray'.")
    logger = SafeLifeLogger(logdir, **kwargs)
    self.logdir = logdir
    self.actor = self.SafeLifeLoggingActor.remote(logger, config_dict)
    self._cstats = logger.cumulative_stats.copy()
    # _promises stores references to remote updates to cumulative_stats
    # that will be received in response to having sent a log item. There
    # is no point exposing this state because there is in general no way
    # to get up-to-date statistics to any thread, and therefore no benefit
    # from knowing whether you're waiting for an update.
    self._promises = []
    self._last_update = time.time()
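The _promises list above simply accumulates Ray object refs. A hedged sketch of how such refs might later be drained, assuming a hypothetical update_stats helper that is not part of the original class:

def update_stats(self):
    # Hypothetical helper (not in the original class): resolve any promises
    # that have already completed, without blocking on the rest.
    if not self._promises:
        return
    ready, pending = ray.wait(
        self._promises, num_returns=len(self._promises), timeout=0)
    for ref in ready:
        # Assumption: each promise resolves to a dict of cumulative stats.
        self._cstats.update(ray.get(ref))
    self._promises = pending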
Example 2: step_walkers
# Required import: import ray [as alias]
# Or: from ray import remote [as alias]
async def step_walkers(self) -> None:
    """
    Make the walkers evolve to their next state, sampling an action from the
    :class:`Model` and applying it to the :class:`Environment`.
    """
    model_states = self.walkers.get("model_states")
    env_states = self.walkers.get("env_states")
    walkers_states = self.walkers.get("states")
    parent_ids = (
        copy.deepcopy(self.walkers.get("id_walkers")) if self.tree is not None else None
    )
    model_states = self.model.predict(
        env_states=env_states, model_states=model_states, walkers_states=walkers_states
    )
    # The remote call returns an ObjectRef, which is awaited inside this coroutine.
    env_states = await self.env.step.remote(model_states=model_states, env_states=env_states)
    # env_states = ray.get(step_id)
    self.walkers.update_states(
        env_states=env_states, model_states=model_states,
    )
    self.update_tree(parent_ids)
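For self.env.step.remote(...) to be valid, self.env has to be a handle to a Ray actor wrapping the environment. A rough sketch of what such a wrapper could look like; the class and attribute names here are assumptions, not the library's actual API:

import ray

@ray.remote
class RemoteEnvironment:
    def __init__(self, env):
        self._env = env

    def step(self, model_states, env_states):
        # Delegates to the wrapped environment inside the actor process.
        return self._env.step(model_states=model_states, env_states=env_states)

# self.env = RemoteEnvironment.remote(local_env)
# Each call to self.env.step.remote(...) then returns an awaitable ObjectRef.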
Example 3: main
# Required import: import ray [as alias]
# Or: from ray import remote [as alias]
def main(self):
    while True:
        flag, data = ray.get(self.ds.pull.remote())
        packet_id, mode, datatype = flag.split('_')
        if mode == 'STOP':
            break
        if len(data) > 0:
            if mode == 'infer':
                if datatype == 'float':
                    data = np.asarray(data)
                    results = self.model.predict(data, batch_size=self.batch_size)
                    self.ds.push.remote(results, packet_id)
                elif datatype == 'int8':
                    data = np.asarray(data)
                    data = np.float16(data / 255)
                    results = self.model.predict(data, batch_size=self.batch_size)
                    self.ds.push.remote(results, packet_id)
                else:
                    raise UserWarning("Invalid datatype flag {}".format(datatype))
            else:
                raise UserWarning("Invalid mode flag {}".format(mode))
        else:
            time.sleep(self.wait_time)
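The self.ds handle used above must point at a Ray actor exposing pull and push. A hypothetical sketch of such an actor; the real data store is defined elsewhere in the project, and the class name, attributes, and flag format are assumptions:

import ray

@ray.remote
class DataStore:
    def __init__(self):
        self.inbox = []     # queued (flag, data) work items
        self.results = {}   # packet_id -> predictions

    def pull(self):
        # Hand out the next work item, or an empty payload so the caller sleeps.
        if self.inbox:
            return self.inbox.pop(0)
        return "0_infer_float", []

    def push(self, results, packet_id):
        self.results[packet_id] = results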
Example 4: as_remote
# Required import: import ray [as alias]
# Or: from ray import remote [as alias]
def as_remote(
    cls,
    num_cpus=None,
    num_gpus=None,
    memory=None,
    object_store_memory=None,
    resources=None,
):
    # A worker can't use more than one GPU, but it can also be CPU-only.
    assert num_gpus is None or num_gpus <= 1
    return ray.remote(
        num_cpus=num_cpus,
        num_gpus=num_gpus,
        memory=memory,
        object_store_memory=object_store_memory,
        resources=resources,
    )(cls)
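A brief usage sketch, assuming as_remote is bound as a classmethod on a Worker class (neither the class nor the do_work method is shown in this snippet; both are illustrative assumptions):

RemoteWorker = Worker.as_remote(num_cpus=1, num_gpus=0)  # returns a Ray actor class
worker = RemoteWorker.remote()                           # start the actor
result = ray.get(worker.do_work.remote())                # do_work is a hypothetical method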
Example 5: main_async
# Required import: import ray [as alias]
# Or: from ray import remote [as alias]
def main_async():
    import asyncio
    from ray.experimental import async_api
    ray.init(num_cpus=4)
    remote_worker = Worker.remote()
    loop = asyncio.get_event_loop()
    t_zero = time.time()
    tasks = [
        async_api.as_future(remote_worker.sleep.remote(i)) for i in range(1, 3)
    ]
    loop.run_until_complete(asyncio.gather(*tasks))
    print("delta", time.time() - t_zero)
Example 6: test_base_experiment_gpu
# Required import: import ray [as alias]
# Or: from ray import remote [as alias]
def test_base_experiment_gpu(tmpdir, request):
    skip_if_no_gpu(request.config)
    tmpdir_path = Path(tmpdir)
    ds = MockDataset.load()
    MockExperiment(
        MockModel,
        ds,
        data_dir=tmpdir_path / "test",
        ray_kwargs={"num_gpus": 1},
        ignore_ray_initialized_error=True,
    )

    # Make sure GPUs are available in a mock remote function;
    # they won't necessarily be available on the master process.
    @ray.remote(num_gpus=1)
    def find_gpus():
        return ray.get_gpu_ids()

    assert len(ray.get(find_gpus.remote())) > 0
Example 7: full_grad
# Required import: import ray [as alias]
# Or: from ray import remote [as alias]
def full_grad(theta):
    theta_id = ray.put(theta)
    grad_ids = [grad.remote(theta_id, xs_id, ys_id) for (xs_id, ys_id) in batch_ids]
    # This conversion is necessary for use with fmin_l_bfgs_b.
    return sum(ray.get(grad_ids)).astype("float64")

# From the perspective of scipy.optimize.fmin_l_bfgs_b, full_loss is simply a
# function which takes some parameters theta and computes a loss. Similarly,
# full_grad is a function which takes some parameters theta and computes the
# gradient of the loss. Internally, these functions use Ray to distribute the
# computation of the loss and the gradient over the data represented by the
# remote object IDs x_batches and y_batches, which is potentially distributed
# over a cluster. However, these details are hidden from
# scipy.optimize.fmin_l_bfgs_b, which simply uses them to run the L-BFGS
# algorithm.

# Load the MNIST data and turn the data into remote objects.
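The block comment above describes how full_loss and full_grad plug into SciPy. A hedged sketch of that call; full_loss is defined elsewhere in the same example, while theta_init and dim are illustrative assumptions:

from scipy.optimize import fmin_l_bfgs_b

# dim is assumed to match the model's parameter count.
theta_init = 1e-2 * np.random.normal(size=dim)
result, loss_value, info = fmin_l_bfgs_b(full_loss, theta_init, fprime=full_grad)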
Example 8: shuffle_pair
# Required import: import ray [as alias]
# Or: from ray import remote [as alias]
def shuffle_pair(first_batch, second_batch):
    """Shuffle two batches of data.

    Args:
        first_batch (Tuple[ObjectID, ObjectID]): The first batch to be shuffled. The
            first component is the object ID of a batch of images, and the second
            component is the object ID of the corresponding batch of labels.
        second_batch (Tuple[ObjectID, ObjectID]): The second batch to be shuffled.
            The first component is the object ID of a batch of images, and the second
            component is the object ID of the corresponding batch of labels.

    Returns:
        Tuple[ObjectID, ObjectID]: The first batch of shuffled data.
        Tuple[ObjectID, ObjectID]: The second batch of shuffled data.
    """
    images1, labels1, images2, labels2 = shuffle_arrays.remote(
        first_batch[0], first_batch[1], second_batch[0], second_batch[1]
    )
    return (images1, labels1), (images2, labels2)
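shuffle_arrays itself is not shown in this snippet; for the four-way unpacking above to work it must be a remote function declared with four return values. A plausible reconstruction, offered as an assumption rather than the original code (recent Ray spells the option num_returns):

import numpy as np
import ray

@ray.remote(num_returns=4)
def shuffle_arrays(images1, labels1, images2, labels2):
    # Concatenate both batches, permute them jointly, and split back in two.
    images = np.concatenate([images1, images2])
    labels = np.concatenate([labels1, labels2])
    perm = np.random.permutation(len(images))
    images, labels = images[perm], labels[perm]
    half = len(images1)
    return images[:half], labels[:half], images[half:], labels[half:]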
Example 9: testComputationGraph
# Required import: import ray [as alias]
# Or: from ray import remote [as alias]
def testComputationGraph(self):
    ray.init(start_ray_local=True, num_workers=1)

    @ray.remote
    def f(x):
        return x

    @ray.remote
    def g(x, y):
        return x, y

    a = f.remote(1)
    b = f.remote(1)
    c = g.remote(a, b)
    c = g.remote(a, 1)
    # Make sure that we can produce a computation_graph visualization.
    ray.visualize_computation_graph(view=False)
    ray.worker.cleanup()
Example 10: testPythonMode
# Required import: import ray [as alias]
# Or: from ray import remote [as alias]
def testPythonMode(self):
    reload(test_functions)
    ray.init(start_ray_local=True, driver_mode=ray.PYTHON_MODE)

    @ray.remote
    def f():
        return np.ones([3, 4, 5])

    xref = f.remote()
    assert_equal(xref, np.ones([3, 4, 5]))  # Remote functions should return by value.
    assert_equal(xref, ray.get(xref))  # ray.get should be the identity.
    y = np.random.normal(size=[11, 12])
    assert_equal(y, ray.put(y))  # ray.put should be the identity.

    # Make sure objects are immutable. This example is why we need to copy
    # arguments before passing them into remote functions in Python mode.
    aref = test_functions.python_mode_f.remote()
    assert_equal(aref, np.array([0, 0]))
    bref = test_functions.python_mode_g.remote(aref)
    assert_equal(aref, np.array([0, 0]))  # python_mode_g should not mutate aref.
    assert_equal(bref, np.array([1, 0]))
    ray.worker.cleanup()
Example 11: testAttachingToCluster
# Required import: import ray [as alias]
# Or: from ray import remote [as alias]
def testAttachingToCluster(self):
    node_ip_address = "127.0.0.1"
    scheduler_port = np.random.randint(40000, 50000)
    scheduler_address = "{}:{}".format(node_ip_address, scheduler_port)
    ray.services.start_scheduler(scheduler_address, cleanup=True)
    time.sleep(0.1)
    ray.services.start_node(scheduler_address, node_ip_address, num_workers=1, cleanup=True)
    ray.init(node_ip_address=node_ip_address, scheduler_address=scheduler_address)

    @ray.remote
    def f(x):
        return x + 1

    self.assertEqual(ray.get(f.remote(0)), 1)
    ray.worker.cleanup()
Example 12: testAttachingToClusterWithMultipleObjectStores
# Required import: import ray [as alias]
# Or: from ray import remote [as alias]
def testAttachingToClusterWithMultipleObjectStores(self):
    node_ip_address = "127.0.0.1"
    scheduler_port = np.random.randint(40000, 50000)
    scheduler_address = "{}:{}".format(node_ip_address, scheduler_port)
    ray.services.start_scheduler(scheduler_address, cleanup=True)
    time.sleep(0.1)
    ray.services.start_node(scheduler_address, node_ip_address, num_workers=5, cleanup=True)
    ray.services.start_node(scheduler_address, node_ip_address, num_workers=5, cleanup=True)
    ray.services.start_node(scheduler_address, node_ip_address, num_workers=5, cleanup=True)
    ray.init(node_ip_address=node_ip_address, scheduler_address=scheduler_address)

    @ray.remote
    def f(x):
        return x + 1

    self.assertEqual(ray.get(f.remote(0)), 1)
    ray.worker.cleanup()
Example 13: close
# Required import: import ray [as alias]
# Or: from ray import remote [as alias]
def close(self):
    super(RayRunner, self).close()
    for process in self.ray_processes.values():
        process.stop.remote()
Example 14: __push_prompts
# Required import: import ray [as alias]
# Or: from ray import remote [as alias]
def __push_prompts(self):
    try:
        item = self.downstream_prompt_queue.get()  # timeout=1
        self.downstream_prompt_queue.task_done()
        # Todo: Instead, drain the queue and consolidate prompts.
    except Empty:
        self._print_timecheck(
            "timed out getting item from downstream prompt queue"
        )
        if self.has_been_stopped.is_set():
            return
    else:
        # self.print_timecheck("task done on downstream prompt queue")
        if item is None or self.has_been_stopped.is_set():
            return
        elif isinstance(item, PromptToPull):
            if item.head_notification_id:
                head_notification_id = item.head_notification_id
            else:
                head_notification_id = self._get_max_notification_id()
            prompt = RayPrompt(
                self.process_application.name,
                self.process_application.pipeline_id,
                head_notification_id,
            )
        else:
            prompt = item
        # self._print_timecheck('pushing prompt with', prompt.notification_ids)
        prompt_response_ids = []
        # self.print_timecheck("pushing prompts", prompt)
        for downstream_name, ray_process in self.downstream_processes.items():
            prompt_response_ids.append(ray_process.prompt.remote(prompt))
            if self.has_been_stopped.is_set():
                return
            # self._print_timecheck("pushed prompt to", downstream_name)
        ray.get(prompt_response_ids)
        # self._print_timecheck("pushed prompts")
Example 15: __call__
# Required import: import ray [as alias]
# Or: from ray import remote [as alias]
def __call__(self, *args, **kwargs):
    ray_id = self.ray_process.call.remote(self.attribute_name, *args, **kwargs)
    return_value = ray.get(ray_id)
    if isinstance(return_value, ExceptionWrapper):
        raise return_value.e
    else:
        return return_value
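The proxy above relies on the remote process catching its own exceptions and shipping them back wrapped, so that ray.get succeeds and the caller can re-raise locally. A minimal sketch of that convention; the real ExceptionWrapper and call method live in the surrounding project, and the class names here are stand-ins:

class ExceptionWrapper:
    def __init__(self, e):
        self.e = e

class RayProcessSketch:
    # Hypothetical stand-in for the remote-side process actor.
    def call(self, attribute_name, *args, **kwargs):
        try:
            return getattr(self, attribute_name)(*args, **kwargs)
        except Exception as e:
            # Returned rather than raised, so ray.get on the caller side
            # succeeds and the proxy can re-raise the original exception.
            return ExceptionWrapper(e)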