This article collects typical usage examples of Python's attrdict.AttrDict. If you are wondering what attrdict.AttrDict does, how to use it, or where to find it used in practice, the curated code samples below should help. You can also explore further usage examples from the attrdict module.
The following presents 15 code examples of attrdict.AttrDict, sorted by popularity by default. Upvoting the examples you find useful helps the system recommend better Python code samples.
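For readers new to the library, here is a minimal sketch (not taken from any of the projects below) of what AttrDict provides: a mapping whose keys can be read and written as attributes, with nested mappings wrapped on attribute access.

from attrdict import AttrDict

config = AttrDict({'lr': 0.01, 'model': {'hidden_size': 128}})
print(config.lr)                 # 0.01, attribute-style read of a plain key
print(config.model.hidden_size)  # 128, nested dicts are wrapped on attribute access
config.epochs = 10               # attribute-style assignment adds a new key
print(config['epochs'])          # 10, normal dict access still works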
Example 1: __init__
# Required import: import attrdict [as alias]
# Or: from attrdict import AttrDict [as alias]
def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
    super().__init__()
    self.train_mode = train_mode
    self.loader_params = AttrDict(loader_params)
    self.dataset_params = AttrDict(dataset_params)
    self.augmentation_params = AttrDict(augmentation_params)

    self.mask_transform = None
    self.image_transform = None

    self.image_augment_train = None
    self.image_augment_inference = None
    self.image_augment_with_target_train = None
    self.image_augment_with_target_inference = None

    self.dataset = None
Example 2: transform
# Required import: import attrdict [as alias]
# Or: from attrdict import AttrDict [as alias]
def transform(self, X):
    try:
        res = []
        for idx, row in tqdm(X.iterrows(), total=len(X)):
            res.append(self.tokenizer.tokenize(**row)[1:])

        res = pd.DataFrame(res, columns=['tokens', 'pronoun_offset_token',
                                         'a_offset_token', 'b_offset_token', 'a_span',
                                         'b_span', 'pronoun_token', 'a_tokens', 'b_tokens'])

        # Convert the set of leftover columns to a list: newer pandas versions
        # reject sets as indexers.
        cols = list(set(X.columns).difference(res.columns))
        X = pd.concat([X[cols], res], axis=1)

        return AttrDict({'X': X})
    except Exception as e:
        print(row.text)
        raise e
Example 3: example_to_debug
# Required import: import attrdict [as alias]
# Or: from attrdict import AttrDict [as alias]
def example_to_debug(self, X, idx):
    ex = AttrDict(X['X'].to_dict(orient='records')[idx])

    # Insert <A>, <B> and <P> markers into the raw text. The +3 offsets account
    # for the three characters of the '<A>' tag already inserted (the snippet
    # assumes the A mention precedes B).
    text = ex.text
    text = '{}<A>{}'.format(text[:ex.a_offset], text[ex.a_offset:])
    text = '{}<B>{}'.format(text[:ex.b_offset + 3], text[ex.b_offset + 3:])

    offset = ex.pronoun_offset
    if ex.pronoun_offset > ex.a_offset:
        offset += 3
    if ex.pronoun_offset > ex.b_offset:
        offset += 3
    text = '{}<P>{}'.format(text[:offset], text[offset:])

    ex.a_offset = text.index('<A>')
    ex.b_offset = text.index('<B>')
    ex.pronoun_offset = text.index('<P>')
    ex.text = text

    return ex
Example 4: __init__
# Required import: import attrdict [as alias]
# Or: from attrdict import AttrDict [as alias]
def __init__(self):
    self.font_width = 7
    self.lane = AttrDict({
        "xs": 20,     # tmpgraphlane0.width
        "ys": 20,     # tmpgraphlane0.height
        "xg": 120,    # tmpgraphlane0.x
        "yg": 0,      # head gap
        "yh0": 0,     # head gap title
        "yh1": 0,     # head gap
        "yf0": 0,     # foot gap
        "yf1": 0,     # foot gap
        "y0": 5,      # tmpgraphlane0.y
        "yo": 30,     # tmpgraphlane1.y - y0
        "tgo": -10,   # tmptextlane0.x - xg
        "ym": 15,     # tmptextlane0.y - y0
        "xlabel": 6,  # tmptextlabel.x - xg
        "xmax": 1,
        "scale": 1,
        "head": {},
        "foot": {}
    })
Example 5: exec
# Required import: import attrdict [as alias]
# Or: from attrdict import AttrDict [as alias]
def exec(self, cmd: str, quiet=False, supress_error=False) -> AttrDict:
    ret = AttrDict()
    if not quiet:
        self.log.debug(cmd)

    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, close_fds=True)

    # Output processing. Note: reading stdout and stderr sequentially like this
    # can block if the command writes a lot to stderr first; Popen.communicate()
    # is the safer choice for large outputs.
    out = p.stdout.read().decode().strip()
    ret.out = out
    err = p.stderr.read().decode().strip()
    ret.err = err

    output = '{} <output> {}'.format(cmd, out if out else 'Nothing')
    if err and not supress_error:
        output += ' <error> ' + err
    if not quiet:
        self.log.debug(output)

    return ret
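A usage sketch for the method above (the `runner` object is a placeholder for whatever class defines exec(); only the `out` and `err` fields come from the example itself):

# Hypothetical caller
result = runner.exec('ls -la /tmp', quiet=True)
if result.err:
    print('command failed:', result.err)
else:
    print(result.out)

Returning an AttrDict instead of a tuple lets callers pick the fields they care about by name.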
Example 6: load
# Required import: import attrdict [as alias]
# Or: from attrdict import AttrDict [as alias]
def load(config, **unused_kwargs):
    del unused_kwargs

    if not os.path.exists(config.data_folder):
        os.makedirs(config.data_folder)

    dataset = input_data.read_data_sets(config.data_folder)
    train_data = {'imgs': dataset.train.images, 'labels': dataset.train.labels}
    valid_data = {'imgs': dataset.validation.images, 'labels': dataset.validation.labels}

    # tensors_from_data turns a dictionary of numpy.ndarrays into tensors.
    train_tensors = tensors_from_data(train_data, config.batch_size, shuffle=True)
    valid_tensors = tensors_from_data(valid_data, config.batch_size, shuffle=False)

    data_dict = AttrDict(
        train_img=train_tensors['imgs'],
        valid_img=valid_tensors['imgs'],
        train_label=train_tensors['labels'],
        valid_label=valid_tensors['labels'],
    )

    return data_dict
Example 7: load
# Required import: import attrdict [as alias]
# Or: from attrdict import AttrDict [as alias]
def load(config, **unused_kwargs):
    del unused_kwargs

    if not os.path.exists(config.data_folder):
        os.makedirs(config.data_folder)

    dataset = input_data.read_data_sets(config.data_folder)
    train_data = {'imgs': dataset.train.images, 'labels': dataset.train.labels}
    valid_data = {'imgs': dataset.validation.images, 'labels': dataset.validation.labels}

    train_tensors = tensors_from_data(train_data, config.batch_size, shuffle=True)
    valid_tensors = tensors_from_data(valid_data, config.batch_size, shuffle=False)

    data_dict = AttrDict(
        train_img=train_tensors['imgs'],
        valid_img=valid_tensors['imgs'],
        train_label=train_tensors['labels'],
        valid_label=valid_tensors['labels'],
    )

    return data_dict
Example 8: __init__
# Required import: import attrdict [as alias]
# Or: from attrdict import AttrDict [as alias]
def __init__(self, strings=6, frets=(0, 5), inlays=None, style=None):
    self.frets = list(range(max(frets[0] - 1, 0), frets[1] + 1))
    self.strings = [attrdict.AttrDict({
        'color': None,
        'label': None,
        'font_color': None,
    }) for x in range(strings)]
    self.markers = []
    self.inlays = inlays if inlays is not None else self.inlays
    self.layout = attrdict.AttrDict()
    self.style = attrdict.AttrDict(
        dict_merge(
            copy.deepcopy(self.default_style),
            style or {}
        )
    )
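dict_merge in the example above (and in Example 9 below) is a project helper that is not shown on this page; the sketch below is an assumption about its behaviour, a recursive merge of user overrides into a deep-copied default style, not the project's actual implementation:

import copy
from attrdict import AttrDict

def dict_merge(base, override):
    # Recursively merge `override` into `base`, returning a new dict.
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = dict_merge(merged[key], value)
        else:
            merged[key] = value
    return merged

default_style = {'marker': {'radius': 10, 'color': 'black'}, 'string': {'color': 'gray'}}
user_style = {'marker': {'radius': 12}}
style = AttrDict(dict_merge(copy.deepcopy(default_style), user_style))
print(style.marker.radius)  # 12, the override wins
print(style.string.color)   # 'gray', untouched defaults survive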
Example 9: __init__
# Required import: import attrdict [as alias]
# Or: from attrdict import AttrDict [as alias]
def __init__(self, positions=None, fingers=None, style=None):
    if positions is None:
        positions = []
    elif '-' in positions:
        positions = positions.split('-')
    else:
        positions = list(positions)
    self.positions = list(map(lambda p: int(p) if p.isdigit() else None, positions))
    self.fingers = list(fingers) if fingers else []
    self.style = attrdict.AttrDict(
        dict_merge(
            copy.deepcopy(self.default_style),
            style or {}
        )
    )
Example 10: __init__
# Required import: import attrdict [as alias]
# Or: from attrdict import AttrDict [as alias]
def __init__(self, train_mode, loader_params, dataset_params):
    super().__init__()
    self.train_mode = train_mode
    self.loader_params = AttrDict(loader_params)
    self.dataset_params = AttrDict(dataset_params)

    sampler_name = self.dataset_params.sampler_name
    if sampler_name == 'fixed':
        self.sampler = FixedSizeSampler
    elif sampler_name == 'aspect ratio':
        self.sampler = AspectRatioSampler
    else:
        msg = "expected sampler name from (fixed, aspect ratio), got {} instead".format(sampler_name)
        raise Exception(msg)

    self.target_encoder = DataEncoder(**self.dataset_params.data_encoder)
    self.dataset = ImageDetectionDataset

    self.image_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=MEAN, std=STD),
    ])
    self.image_augment = aug_seq
Example 11: get_generator
# Required import: import attrdict [as alias]
# Or: from attrdict import AttrDict [as alias]
def get_generator(checkpoint):
    # Wrap the saved hyperparameter dict so it reads like the original argparse namespace.
    args = AttrDict(checkpoint['args'])
    generator = TrajectoryGenerator(
        obs_len=args.obs_len,
        pred_len=args.pred_len,
        embedding_dim=args.embedding_dim,
        encoder_h_dim=args.encoder_h_dim_g,
        decoder_h_dim=args.decoder_h_dim_g,
        mlp_dim=args.mlp_dim,
        num_layers=args.num_layers,
        noise_dim=args.noise_dim,
        noise_type=args.noise_type,
        noise_mix_type=args.noise_mix_type,
        pooling_type=args.pooling_type,
        pool_every_timestep=args.pool_every_timestep,
        dropout=args.dropout,
        bottleneck_dim=args.bottleneck_dim,
        neighborhood_size=args.neighborhood_size,
        grid_size=args.grid_size,
        batch_norm=args.batch_norm)
    generator.load_state_dict(checkpoint['g_state'])
    generator.cuda()
    generator.train()
    return generator
Example 12: main
# Required import: import attrdict [as alias]
# Or: from attrdict import AttrDict [as alias]
def main(args):
    if os.path.isdir(args.model_path):
        filenames = os.listdir(args.model_path)
        filenames.sort()
        paths = [
            os.path.join(args.model_path, file_) for file_ in filenames
        ]
    else:
        paths = [args.model_path]

    for path in paths:
        checkpoint = torch.load(path)
        generator = get_generator(checkpoint)
        _args = AttrDict(checkpoint['args'])
        path = get_dset_path(_args.dataset_name, args.dset_type)
        _, loader = data_loader(_args, path)
        ade, fde = evaluate(_args, loader, generator, args.num_samples)
        print('Dataset: {}, Pred Len: {}, ADE: {:.2f}, FDE: {:.2f}'.format(
            _args.dataset_name, _args.pred_len, ade, fde))
Example 13: main
# Required import: import attrdict [as alias]
# Or: from attrdict import AttrDict [as alias]
def main():
    parser = argparse.ArgumentParser(description='Parse the config path')
    parser.add_argument("-c", "--config", dest="path",
                        help='The path to the config file. e.g. python run.py --config dc_config.json')
    config = parser.parse_args()
    with open(config.path) as f:
        args = json.load(f)
        args = AttrDict(args)
    device = torch.device(args.device)
    args.model = onssen.nn.chimera(args.model_options)
    args.model.to(device)
    args.train_loader = data.edinburgh_tts_dataloader(args.model_name, args.feature_options, 'train', args.cuda_option, device)
    args.valid_loader = data.edinburgh_tts_dataloader(args.model_name, args.feature_options, 'validation', args.cuda_option, device)
    args.optimizer = utils.build_optimizer(args.model.parameters(), args.optimizer_options)
    args.loss_fn = loss.loss_chimera_psa
    trainer = onssen.utils.trainer(args)
    trainer.run()
    tester = onssen.utils.tester(args)
    tester.eval()
Example 14: main
# Required import: import attrdict [as alias]
# Or: from attrdict import AttrDict [as alias]
def main():
    config_path = './config.json'
    with open(config_path) as f:
        args = json.load(f)
        args = AttrDict(args)
    device = torch.device(args.device)
    args.device = device
    args.model = nn.ConvTasNet(**args["model_options"])
    args.model.to(device)
    args.train_loader = data.wsj0_2mix_dataloader(args.model_name, args.feature_options, 'tr', device)
    args.valid_loader = data.wsj0_2mix_dataloader(args.model_name, args.feature_options, 'cv', device)
    args.test_loader = data.wsj0_2mix_dataloader(args.model_name, args.feature_options, 'tt', device)
    args.optimizer = utils.build_optimizer(args.model.parameters(), args.optimizer_options)
    args.loss_fn = loss.si_snr_loss
    trainer = utils.trainer(args)
    trainer.run()
    tester = tester_tasnet(args)
    tester.eval()
Example 15: main
# Required import: import attrdict [as alias]
# Or: from attrdict import AttrDict [as alias]
def main():
    parser = argparse.ArgumentParser(description='Parse the config path')
    parser.add_argument("-c", "--config", dest="path",
                        help='The path to the config file. e.g. python run.py --config config.json')
    config = parser.parse_args()
    with open(config.path) as f:
        args = json.load(f)
        args = AttrDict(args)
    device = torch.device(args.device)
    args.model = nn.deep_clustering(**(args['model_options']))
    args.model.to(device)
    args.train_loader = data.wsj0_2mix_dataloader(args.model_name, args.feature_options, 'tr', device)
    args.valid_loader = data.wsj0_2mix_dataloader(args.model_name, args.feature_options, 'cv', device)
    args.test_loader = data.wsj0_2mix_dataloader(args.model_name, args.feature_options, 'tt', device)
    args.optimizer = utils.build_optimizer(args.model.parameters(), args.optimizer_options)
    args.loss_fn = loss.loss_dc
    trainer = utils.trainer(args)
    trainer.run()
    tester = tester_dc(args)
    tester.eval()
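Examples 13-15 all follow the same pattern: parse a JSON config file, wrap it in an AttrDict, and then attach models, data loaders, optimizers and loss functions to that same object as attributes. Below is a minimal, self-contained sketch of that pattern; the file name and keys are hypothetical, not the ones used by the projects above.

import json
from attrdict import AttrDict

def load_config(path):
    # Wrap the parsed JSON so options read as attributes (args.device, args.batch_size, ...).
    with open(path) as f:
        return AttrDict(json.load(f))

args = load_config('config.json')  # hypothetical config file
print(args.device)                 # e.g. 'cuda:0', instead of args['device']
args.run_id = 'experiment-1'       # runtime objects can be attached the same way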