This page collects typical usage examples of the Python method src.utils.Foo. If you are wondering how exactly utils.Foo is used in Python, how to call it, or what working examples look like, the hand-picked code examples below should help. You can also explore further usage examples from the containing module, src.utils.
A total of 12 code examples of the utils.Foo method are shown below, sorted by popularity by default.
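All of the examples below use utils.Foo as a lightweight, mutable container whose keyword arguments become attributes. The class itself is not shown on this page; the following is a minimal sketch of such a container, written here only for orientation and not taken from src.utils (the real implementation may differ, e.g. in how it prints itself):

class Foo(object):
  """A minimal attribute container: keyword arguments become attributes."""

  def __init__(self, **kwargs):
    self.__dict__.update(kwargs)

  def __str__(self):
    # Render the attributes as 'name: value' lines so the container can be
    # logged directly, as the examples below do via logging.
    return '\n'.join('{:s}: {:s}'.format(k, str(v))
                     for k, v in sorted(vars(self).items()))

# Example with hypothetical attribute names:
args = Foo(batch_size=32, learning_rate=1e-3)
print(args.batch_size)   # 32
args.batch_size = 16     # attributes can be reassigned or added freely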
Example 1: get_arch_vars
# Required import: from src import utils [as alias]
# Or: from src.utils import Foo [as alias]
def get_arch_vars(arch_str):
  if arch_str == '': vals = []
  else: vals = arch_str.split('_')
  ks = ['ver', 'lstm_dim', 'dropout']
  # Exp Ver.
  if len(vals) == 0: vals.append('v0')
  # LSTM dimensions.
  if len(vals) == 1: vals.append('lstm2048')
  # Dropout.
  if len(vals) == 2: vals.append('noDO')
  assert(len(vals) == 3)
  vars = utils.Foo()
  for k, v in zip(ks, vals):
    setattr(vars, k, v)
  logging.error('arch_vars: %s', vars)
  return vars
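As a hedged illustration of how this parser is meant to be called (the architecture string below is invented for this example and is not from the original code), a partially specified string is split on underscores and the missing trailing fields fall back to their defaults:

# Hypothetical call; assumes the imports listed above (src.utils, logging).
arch_vars = get_arch_vars('v1_lstm1024')
# arch_vars.ver == 'v1', arch_vars.lstm_dim == 'lstm1024', and
# arch_vars.dropout == 'noDO' (filled in by the default).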
Example 2: get_arch_vars
# Required import: from src import utils [as alias]
# Or: from src.utils import Foo [as alias]
def get_arch_vars(arch_str):
  if arch_str == '': vals = []
  else: vals = arch_str.split('_')
  ks = ['var1', 'var2', 'var3']
  ks = ks[:len(vals)]
  # Exp Ver.
  if len(vals) == 0: ks.append('var1'); vals.append('v0')
  # Custom arch.
  if len(vals) == 1: ks.append('var2'); vals.append('')
  # Map scape for projection baseline.
  if len(vals) == 2: ks.append('var3'); vals.append('fr2')
  assert(len(vals) == 3)
  vars = utils.Foo()
  for k, v in zip(ks, vals):
    setattr(vars, k, v)
  logging.error('arch_vars: %s', vars)
  return vars
Example 3: _write_map_files
# Required import: from src import utils [as alias]
# Or: from src.utils import Foo [as alias]
def _write_map_files(b_in, b_out, transform):
  cats = get_categories()
  env = utils.Foo(padding=10, resolution=5, num_point_threshold=2,
                  valid_min=-10, valid_max=200, n_samples_per_face=200)
  robot = utils.Foo(radius=15, base=10, height=140, sensor_height=120,
                    camera_elevation_degree=-15)
  building_loader = factory.get_dataset('sbpd')
  for flip in [False, True]:
    b = nav_env.Building(b_out, robot, env, flip=flip,
                         building_loader=building_loader)
    logging.info("building_in: %s, building_out: %s, transform: %d", b_in,
                 b_out, transform)
    maps = _get_semantic_maps(b_in, transform, b.map, flip, cats)
    maps = np.transpose(np.array(maps), axes=[1, 2, 0])
    # Write the semantic maps to the cache.
    file_name = '{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.pkl'
    file_name = file_name.format(b.building_name, b.map.size[0], b.map.size[1],
                                 b.map.origin[0], b.map.origin[1],
                                 b.map.resolution, flip)
    out_file = os.path.join(DATA_DIR, 'processing', 'class-maps', file_name)
    logging.info('Writing semantic maps to %s.', out_file)
    save_variables(out_file, [maps, cats], ['maps', 'cats'], overwrite=True)
Example 4: get_default_summary_ops
# Required import: from src import utils [as alias]
# Or: from src.utils import Foo [as alias]
def get_default_summary_ops():
  return utils.Foo(summary_ops=None, print_summary_ops=None,
                   additional_return_ops=[], arop_summary_iters=[],
                   arop_eval_fns=[])
Example 5: adjust_args_for_mode
# Required import: from src import utils [as alias]
# Or: from src.utils import Foo [as alias]
def adjust_args_for_mode(args, mode):
  if mode == 'train':
    args.control.train = True
  elif mode == 'val1':
    # Same settings as for training, to make sure nothing wonky is happening
    # there.
    args.control.test = True
    args.control.test_mode = 'val'
    args.navtask.task_params.batch_size = 32
  elif mode == 'val2':
    # No data augmentation, not sampling but taking the argmax action, not
    # sampling from the ground truth at all.
    args.control.test = True
    args.arch.action_sample_type = 'argmax'
    args.arch.sample_gt_prob_type = 'zero'
    args.navtask.task_params.data_augment = \
        utils.Foo(lr_flip=0, delta_angle=0, delta_xy=0, relight=False,
                  relight_fast=False, structured=False)
    args.control.test_mode = 'val'
    args.navtask.task_params.batch_size = 32
  elif mode == 'bench':
    # Actually testing the agent in settings that are kept the same between
    # different runs.
    args.navtask.task_params.batch_size = 16
    args.control.test = True
    args.arch.action_sample_type = 'argmax'
    args.arch.sample_gt_prob_type = 'zero'
    args.navtask.task_params.data_augment = \
        utils.Foo(lr_flip=0, delta_angle=0, delta_xy=0, relight=False,
                  relight_fast=False, structured=False)
    args.summary.test_iters = 250
    args.control.only_eval_when_done = True
    args.control.reset_rng_seed = True
    args.control.test_mode = 'test'
  else:
    logging.fatal('Unknown mode: %s.', mode)
    assert(False)
  return args
Example 6: get_solver_vars
# Required import: from src import utils [as alias]
# Or: from src.utils import Foo [as alias]
def get_solver_vars(solver_str):
  if solver_str == '': vals = []
  else: vals = solver_str.split('_')
  ks = ['clip', 'dlw', 'long', 'typ', 'isdk', 'adam_eps', 'init_lr']
  ks = ks[:len(vals)]
  # Gradient clipping or not.
  if len(vals) == 0: ks.append('clip'); vals.append('noclip')
  # Data loss weight.
  if len(vals) == 1: ks.append('dlw'); vals.append('dlw20')
  # How long to train for.
  if len(vals) == 2: ks.append('long'); vals.append('nolong')
  # Adam.
  if len(vals) == 3: ks.append('typ'); vals.append('adam2')
  # Regularization loss weight.
  if len(vals) == 4: ks.append('rlw'); vals.append('rlw1')
  # isd_k.
  if len(vals) == 5: ks.append('isdk'); vals.append('isdk415')  # 415, inflexion at 2.5k.
  # Adam epsilon.
  if len(vals) == 6: ks.append('adam_eps'); vals.append('aeps1en8')
  # Initial learning rate.
  if len(vals) == 7: ks.append('init_lr'); vals.append('lr1en3')
  assert(len(vals) == 8)
  vars = utils.Foo()
  for k, v in zip(ks, vals):
    setattr(vars, k, v)
  logging.error('solver_vars: %s', vars)
  return vars
Example 7: get_navtask_vars
# Required import: from src import utils [as alias]
# Or: from src.utils import Foo [as alias]
def get_navtask_vars(navtask_str):
  if navtask_str == '': vals = []
  else: vals = navtask_str.split('_')
  ks_all = ['dataset_name', 'modality', 'task', 'history', 'max_dist',
            'num_steps', 'step_size', 'n_ori', 'aux_views', 'data_aug']
  ks = ks_all[:len(vals)]
  # All data or not.
  if len(vals) == 0: ks.append('dataset_name'); vals.append('sbpd')
  # Modality.
  if len(vals) == 1: ks.append('modality'); vals.append('rgb')
  # Semantic task?
  if len(vals) == 2: ks.append('task'); vals.append('r2r')
  # Number of history frames.
  if len(vals) == 3: ks.append('history'); vals.append('h0')
  # Maximum distance.
  if len(vals) == 4: ks.append('max_dist'); vals.append('32')
  # Number of steps.
  if len(vals) == 5: ks.append('num_steps'); vals.append('40')
  # Step size.
  if len(vals) == 6: ks.append('step_size'); vals.append('8')
  # n_ori.
  if len(vals) == 7: ks.append('n_ori'); vals.append('4')
  # Auxiliary views.
  if len(vals) == 8: ks.append('aux_views'); vals.append('nv0')
  # Normal data augmentation, as opposed to structured data augmentation (if
  # set to straug).
  if len(vals) == 9: ks.append('data_aug'); vals.append('straug')
  assert(len(vals) == 10)
  for i in range(len(ks)):
    assert(ks[i] == ks_all[i])
  vars = utils.Foo()
  for k, v in zip(ks, vals):
    setattr(vars, k, v)
  logging.error('navtask_vars: %s', vals)
  return vars
Example 8: get_default_args
# Required import: from src import utils [as alias]
# Or: from src.utils import Foo [as alias]
def get_default_args():
  summary_args = utils.Foo(display_interval=1, test_iters=26,
                           arop_full_summary_iters=14)
  control_args = utils.Foo(train=False, test=False,
                           force_batchnorm_is_training_at_test=False,
                           reset_rng_seed=False, only_eval_when_done=False,
                           test_mode=None)
  return summary_args, control_args
Example 9: get_args_for_config
# Required import: from src import utils [as alias]
# Or: from src.utils import Foo [as alias]
def get_args_for_config(config_name):
  args = utils.Foo()
  args.summary, args.control = get_default_args()
  exp_name, mode_str = config_name.split('+')
  arch_str, solver_str, navtask_str = exp_name.split('.')
  logging.error('config_name: %s', config_name)
  logging.error('arch_str: %s', arch_str)
  logging.error('navtask_str: %s', navtask_str)
  logging.error('solver_str: %s', solver_str)
  logging.error('mode_str: %s', mode_str)
  args.solver = cc.process_solver_str(solver_str)
  args.navtask = cc.process_navtask_str(navtask_str)
  args = process_arch_str(args, arch_str)
  args.arch.isd_k = args.solver.isd_k
  # Train, test, etc.
  mode, imset = mode_str.split('_')
  args = cc.adjust_args_for_mode(args, mode)
  args.navtask.building_names = args.navtask.dataset.get_split(imset)
  args.control.test_name = '{:s}_on_{:s}'.format(mode, imset)
  # Log the arguments.
  logging.error('%s', args)
  return args
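The config_name string therefore has the shape '<arch_str>.<solver_str>.<navtask_str>+<mode>_<imset>', where the three dot-separated components are consumed by the parsers shown in the earlier examples. A hedged sketch of a call follows; every component value is purely illustrative, and whether these particular strings are accepted by cc.process_solver_str and the other helpers in the surrounding module is an assumption:

# Hypothetical config name, assembled only to show the expected layout.
config_name = 'v0_lstm2048_noDO.noclip_dlw20.sbpd_rgb_r2r+bench_test'
args = get_args_for_config(config_name)
# args.control.test_name would then be 'bench_on_test'.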
Example 10: get_default_cmp_args
# Required import: from src import utils [as alias]
# Or: from src.utils import Foo [as alias]
def get_default_cmp_args():
  batch_norm_param = {'center': True, 'scale': True,
                      'activation_fn': tf.nn.relu}
  mapper_arch_args = utils.Foo(
      dim_reduce_neurons=64,
      fc_neurons=[1024, 1024],
      fc_out_size=8,
      fc_out_neurons=64,
      encoder='resnet_v2_50',
      deconv_neurons=[64, 32, 16, 8, 4, 2],
      deconv_strides=[2, 2, 2, 2, 2, 2],
      deconv_layers_per_block=2,
      deconv_kernel_size=4,
      fc_dropout=0.5,
      combine_type='wt_avg_logits',
      batch_norm_param=batch_norm_param)
  readout_maps_arch_args = utils.Foo(
      num_neurons=[],
      strides=[],
      kernel_size=None,
      layers_per_block=None)
  arch_args = utils.Foo(
      vin_val_neurons=8, vin_action_neurons=8, vin_ks=3, vin_share_wts=False,
      pred_neurons=[64, 64], pred_batch_norm_param=batch_norm_param,
      conv_on_value_map=0, fr_neurons=16, fr_ver='v2', fr_inside_neurons=64,
      fr_stride=1, crop_remove_each=30, value_crop_size=4,
      action_sample_type='sample', action_sample_combine_type='one_or_other',
      sample_gt_prob_type='inverse_sigmoid_decay', dagger_sample_bn_false=True,
      vin_num_iters=36, isd_k=750., use_agent_loc=False, multi_scale=True,
      readout_maps=False, rom_arch=readout_maps_arch_args)
  return arch_args, mapper_arch_args
Example 11: make_map
# Required import: from src import utils [as alias]
# Or: from src.utils import Foo [as alias]
def make_map(padding, resolution, vertex=None, sc=1.):
"""Returns a map structure."""
min_, max_ = _get_xy_bounding_box(vertex*sc, padding=padding)
sz = np.ceil((max_ - min_ + 1) / resolution).astype(np.int32)
max_ = min_ + sz * resolution - 1
map = utils.Foo(origin=min_, size=sz, max=max_, resolution=resolution,
padding=padding)
return map
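A hedged usage sketch follows; the vertex coordinates are invented, and the helper _get_xy_bounding_box referenced above is assumed to be available in the same module:

import numpy as np
# Hypothetical 2D vertices of an environment, in the same units as resolution.
vertex = np.array([[0., 0.], [37., 110.], [212., 65.]])
m = make_map(padding=10, resolution=5, vertex=vertex, sc=1.)
# m.origin, m.size, m.max and m.resolution describe the resulting grid.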
Example 12: reset
# Required import: from src import utils [as alias]
# Or: from src.utils import Foo [as alias]
def reset(self, rngs):
  rng = rngs[0]; rng_perturb = rngs[1]
  nodes = self.task.nodes
  tp = self.task_params
  start_node_ids, goal_node_ids, dists, target_class = \
      _nav_env_reset_helper(tp.type, rng, self.task.nodes, tp.batch_size,
                            self.task.gtG, tp.max_dist, tp.num_steps,
                            tp.num_goals, tp.data_augment,
                            **(self.task.reset_kwargs))
  start_nodes = [tuple(nodes[_, :]) for _ in start_node_ids]
  goal_nodes = [[tuple(nodes[_, :]) for _ in __] for __ in goal_node_ids]
  data_augment = tp.data_augment
  perturbs = _gen_perturbs(rng_perturb, tp.batch_size,
                           (tp.num_steps + 1) * tp.num_goals,
                           data_augment.lr_flip, data_augment.delta_angle,
                           data_augment.delta_xy, data_augment.structured)
  perturbs = np.array(perturbs)  # batch x steps x 4
  end_perturbs = perturbs[:, -(tp.num_goals):, :]*1  # Fixed perturb for the goal.
  perturbs = perturbs[:, :-(tp.num_goals), :]*1
  history = -np.ones((tp.batch_size, tp.num_steps * tp.num_goals),
                     dtype=np.int32)
  self.episode = utils.Foo(
      start_nodes=start_nodes, start_node_ids=start_node_ids,
      goal_nodes=goal_nodes, goal_node_ids=goal_node_ids, dist_to_goal=dists,
      perturbs=perturbs, goal_perturbs=end_perturbs, history=history,
      target_class=target_class, history_frames=[])
  return start_node_ids