This article collects typical usage examples of the Python method datasets.nav_env.get_multiplexer_class. If you have been wondering how exactly to use nav_env.get_multiplexer_class in Python, the curated examples below may help; you can also explore the enclosing module, datasets.nav_env, further.
Two code examples of nav_env.get_multiplexer_class are shown below, ordered by popularity by default.
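Both examples share the same call pattern: build a navigation-task config, pass it with a task number to nav_env.get_multiplexer_class to obtain an environment multiplexer, then sample episodes from it. The sketch below condenses that pattern; sample_first_episode is a hypothetical helper, and the navtask config is the one produced by sna.get_args_for_config(...) in the examples that follow.

# Condensed call pattern assembled from the examples below (hypothetical helper).
import numpy as np
from datasets import nav_env

def sample_first_episode(navtask, task_number=0):
  R = nav_env.get_multiplexer_class(navtask, task_number)
  rng_data = [np.random.RandomState(0), np.random.RandomState(0)]
  e = R.sample_env(rng_data)   # pick an environment and episode
  e.reset(rng_data)            # initialize the agent in that episode
  return e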
Example 1: _compute_hardness
# Required import: from datasets import nav_env [as alias]
# Or: from datasets.nav_env import get_multiplexer_class [as alias]
def _compute_hardness():
  # Load the stanford data to compute the hardness.
  if FLAGS.type == '':
    args = sna.get_args_for_config(FLAGS.config_name + '+bench_' + FLAGS.imset)
  else:
    args = sna.get_args_for_config(
        FLAGS.type + '.' + FLAGS.config_name + '+bench_' + FLAGS.imset)
  args.navtask.logdir = None
  R = lambda: nav_env.get_multiplexer_class(args.navtask, 0)
  R = R()

  rng_data = [np.random.RandomState(0), np.random.RandomState(0)]

  # Sample a room.
  h_dists = []
  gt_dists = []
  for i in range(250):
    e = R.sample_env(rng_data)
    nodes = e.task.nodes

    # Initialize the agent.
    init_env_state = e.reset(rng_data)

    gt_dist_to_goal = [e.episode.dist_to_goal[0][j][s]
                       for j, s in enumerate(e.episode.start_node_ids)]

    for j in range(args.navtask.task_params.batch_size):
      start_node_id = e.episode.start_node_ids[j]
      end_node_id = e.episode.goal_node_ids[0][j]
      # Heuristic distance between start and goal vs. the true geodesic
      # distance to the goal along the graph.
      h_dist = graph_utils.heuristic_fn_vec(
          nodes[[start_node_id], :], nodes[[end_node_id], :],
          n_ori=args.navtask.task_params.n_ori,
          step_size=args.navtask.task_params.step_size)[0][0]
      gt_dist = e.episode.dist_to_goal[0][j][start_node_id]
      h_dists.append(h_dist)
      gt_dists.append(gt_dist)

  h_dists = np.array(h_dists)
  gt_dists = np.array(gt_dists)

  # Re-seed and fetch the top-down occupancy map of the first environment.
  e = R.sample_env([np.random.RandomState(0), np.random.RandomState(0)])
  inputs = e.get_common_data()  # renamed from `input` to avoid shadowing the builtin
  orig_maps = inputs['orig_maps'][0, 0, :, :, 0]
  return h_dists, gt_dists, orig_maps
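Since the heuristic distance is an optimistic estimate of the true geodesic distance, the ratio of the two gives a per-episode hardness score. A minimal sketch of how the returned arrays might be summarized, assuming the flags used by the example are already configured and numpy is imported as np; the percentile summary is an illustration, not part of the original script:

# Hypothetical summary of the hardness data computed above.
h_dists, gt_dists, orig_maps = _compute_hardness()
hardness = gt_dists / np.maximum(h_dists, 1e-6)  # >= 1 when the heuristic underestimates
print('hardness percentiles (50/90/99):',
      np.percentile(hardness, [50, 90, 99]))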
Example 2: _train
# Required import: from datasets import nav_env [as alias]
# Or: from datasets.nav_env import get_multiplexer_class [as alias]
def _train(args):
  container_name = ""

  R = lambda: nav_env.get_multiplexer_class(args.navtask, args.solver.task)
  m = utils.Foo()
  m.tf_graph = tf.Graph()

  config = tf.ConfigProto()
  config.device_count['GPU'] = 1

  with m.tf_graph.as_default():
    with tf.device(tf.train.replica_device_setter(args.solver.ps_tasks,
                                                  merge_devices=True)):
      with tf.container(container_name):
        # Build the training graph and the custom train-step kwargs.
        m = args.setup_to_run(m, args, is_training=True,
                              batch_norm_is_training=True, summary_mode='train')
        train_step_kwargs = args.setup_train_step_kwargs(
            m, R(), os.path.join(args.logdir, 'train'),
            rng_seed=args.solver.task,
            is_chief=args.solver.task == 0,
            num_steps=(args.navtask.task_params.num_steps *
                       args.navtask.task_params.num_goals),
            iters=1,
            train_display_interval=args.summary.display_interval,
            dagger_sample_bn_false=args.arch.dagger_sample_bn_false)

        # Stagger worker startup; // keeps the step count an integer on Python 3.
        delay_start = ((args.solver.task * (args.solver.task + 1)) // 2 *
                       FLAGS.delay_start_iters)
        logging.error('delaying start for task %d by %d steps.',
                      args.solver.task, delay_start)

        additional_args = {}
        final_loss = slim.learning.train(
            train_op=m.train_op,
            logdir=args.logdir,
            master=args.solver.master,
            is_chief=args.solver.task == 0,
            number_of_steps=args.solver.max_steps,
            train_step_fn=tf_utils.train_step_custom_online_sampling,
            train_step_kwargs=train_step_kwargs,
            global_step=m.global_step_op,
            init_op=m.init_op,
            init_fn=m.init_fn,
            sync_optimizer=m.sync_optimizer,
            saver=m.saver_op,
            startup_delay_steps=delay_start,
            summary_op=None, session_config=config, **additional_args)
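Note the startup schedule: worker k waits for the k-th triangular number times FLAGS.delay_start_iters steps, so replicas come online staggered rather than all at once. A self-contained illustration of that formula with a hypothetical flag value:

# Hypothetical illustration of the staggered startup schedule above.
delay_start_iters = 300  # example stand-in for FLAGS.delay_start_iters
for task in range(4):
  print('task %d starts after %d steps' %
        (task, (task * (task + 1)) // 2 * delay_start_iters))
# task 0 starts after 0 steps, task 1 after 300, task 2 after 900, task 3 after 1800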