本文整理汇总了Python中json5.load方法的典型用法代码示例。如果您正苦于以下问题:Python json5.load方法的具体用法?Python json5.load怎么用?Python json5.load使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块 json5 的用法示例。
在下文中一共展示了json5.load方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_sample_file
# 需要导入模块: import json5 [as 别名]
# 或者: from json5 import load [as 别名]
def test_sample_file(self):
    """Load the bundled sample.json5 fixture and check its parsed contents.

    Exercises json5-specific syntax: trailing commas, multi-line strings,
    hex literals, Infinity, unquoted keys, etc.
    """
    sample_path = os.path.join(os.path.dirname(__file__), '..', 'sample.json5')
    with open(sample_path) as handle:
        parsed = json5.load(handle)

    expected = {
        u'oh': [
            u"we shouldn't forget",
            u"arrays can have",
            u"trailing commas too",
        ],
        u"this": u"is a multi-line string",
        u"delta": 10,
        u"hex": 3735928559,       # 0xdeadbeef in the source file
        u"finally": "a trailing comma",
        u"here": "is another",
        u"to": float("inf"),      # json5 Infinity literal
        u"while": True,
        u"half": 0.5,
        u"foo": u"bar",
    }
    self.assertEqual(expected, parsed)
示例2: _load_param
# 需要导入模块: import json5 [as 别名]
# 或者: from json5 import load [as 别名]
def _load_param(root, file: str):
    """Load a json5 config located under *root* and return the parsed object.

    The '.json5' extension is appended when *file* does not already end
    with it, so callers may pass either a bare name or a full filename.
    """
    path = os.path.join(root, file)
    if not path.endswith('.json5'):
        path = path + '.json5'
    with open(path) as handle:
        return json5.load(handle)
示例3: main
# 需要导入模块: import json5 [as 别名]
# 或者: from json5 import load [as 别名]
def main():
    """Read a json5 site-pin description from stdin, rebase site coordinates
    so the lowest-numbered site becomes X0Y0, map site pins to wires, and
    write the result as JSON to stdout.

    Fixes: the original called json.dumps(...) and discarded the returned
    string, so the program produced no output at all; the serialized result
    is now written to sys.stdout.
    """
    site_pins = json5.load(sys.stdin)

    output_site_pins = {}
    output_site_pins["tile_type"] = site_pins["tile_type"]
    # Deep-copy so the rewrites below don't mutate the parsed input.
    output_site_pins["sites"] = copy.deepcopy(site_pins["sites"])

    site_pin_to_wires = create_site_pin_to_wire_maps(
        site_pins['tile_name'], site_pins['nodes'])
    min_x_coord, min_y_coord = find_origin_coordinate(site_pins['sites'])

    for site in output_site_pins['sites']:
        orig_site_name = site['name']
        coordinate = SITE_COORDINATE_PATTERN.match(orig_site_name)
        x_coord = int(coordinate.group(2))
        y_coord = int(coordinate.group(3))

        # Rebase coordinates relative to the tile origin.
        site['name'] = 'X{}Y{}'.format(
            x_coord - min_x_coord, y_coord - min_y_coord)
        site['prefix'] = coordinate.group(1)
        site['x_coord'] = x_coord - min_x_coord
        site['y_coord'] = y_coord - min_y_coord

        for site_pin in site['site_pins']:
            # Pin names are expected to be qualified as "<site>/<pin>".
            assert site_pin['name'].startswith(orig_site_name + '/')

            if site_pin['name'] in site_pin_to_wires:
                site_pin['wire'] = site_pin_to_wires[site_pin['name']]
            else:
                print(
                    (
                        '***WARNING***: Site pin {} for tile type {} is not connected, '
                        'make sure all instances of this tile type have this site_pin '
                        'disconnected.').format(
                        site_pin['name'], site_pins['tile_type']),
                    file=sys.stderr)

            # Strip the "<site>/" prefix from the pin name.
            site_pin['name'] = site_pin['name'][len(orig_site_name) + 1:]

    # BUG FIX: original used json.dumps and threw the string away.
    json.dump(output_site_pins, sys.stdout, indent=2, sort_keys=True)
示例4: _post_process
# 需要导入模块: import json5 [as 别名]
# 或者: from json5 import load [as 别名]
def _post_process(args: Object):
    """Finalize parsed arguments in place.

    Derives output/summary directories, pins or validates the per-directory
    data configuration, normalizes metric names, and converts *_samples
    settings into optimizer-step counts.
    """
    # Keep every run under the top-level "models" directory.
    if not args.output_dir.startswith('models'):
        args.output_dir = os.path.join('models', args.output_dir)
    os.makedirs(args.output_dir, exist_ok=True)

    # Default the run name to a timestamp, and recreate its summary
    # directory from a clean slate.
    if not args.name:
        args.name = str(datetime.now())
    args.summary_dir = os.path.join(args.output_dir, args.name)
    if os.path.exists(args.summary_dir):
        shutil.rmtree(args.summary_dir)
    os.makedirs(args.summary_dir)

    # Data settings are pinned per output_dir: refuse to run when they
    # differ from a previous run, otherwise persist them for next time.
    data_config_file = os.path.join(args.output_dir, 'data_config.json5')
    if os.path.exists(data_config_file):
        with open(data_config_file) as f:
            saved = json5.load(f)
        for key, value in saved.items():
            if not hasattr(args, key) or getattr(args, key) != value:
                print('ERROR: Data configurations are different. Please use another output_dir or '
                      'remove the older one manually.')
                exit()
    else:
        keys = ['data_dir', 'min_df', 'max_vocab', 'max_len', 'min_len', 'lower_case',
                'pretrained_embeddings', 'embedding_mode']
        with open(data_config_file, 'w') as f:
            json5.dump({k: getattr(args, k) for k in keys}, f)

    # Metrics are matched case-insensitively; the main metric is always watched.
    args.metric = args.metric.lower()
    args.watch_metrics = [m.lower() for m in args.watch_metrics]
    if args.metric not in args.watch_metrics:
        args.watch_metrics.append(args.metric)

    args.cuda = args.cuda and torch.cuda.is_available()
    # Fixing embeddings only makes sense when pretrained ones are supplied.
    args.fix_embeddings = args.pretrained_embeddings and args.fix_embeddings

    def samples2steps(n):
        # Number of optimizer updates needed to consume n samples.
        return int(math.ceil(n / args.batch_size))

    # Fill in step-based settings from their *_samples counterparts unless
    # the user supplied them explicitly.
    derived = [
        ('log_per_updates', 'log_per_samples'),
        ('eval_per_updates', 'eval_per_samples'),
        ('eval_per_updates_warmup', 'eval_per_samples_warmup'),
        ('eval_warmup_steps', 'eval_warmup_samples'),
        ('min_steps', 'min_samples'),
        ('early_stopping', 'tolerance_samples'),
        ('lr_warmup_steps', 'lr_warmup_samples'),
        ('lr_decay_steps', 'lr_decay_samples'),
    ]
    for target, source in derived:
        if not hasattr(args, target):
            setattr(args, target, samples2steps(getattr(args, source)))
    if not hasattr(args, 'summary_per_updates'):
        args.summary_per_updates = args.summary_per_logs * args.log_per_updates

    assert args.lr >= args.min_lr, 'initial learning rate must be larger than min_lr'
示例5: _post_process
# 需要导入模块: import json5 [as 别名]
# 或者: from json5 import load [as 别名]
def _post_process(args: Object):
    """Finalize parsed arguments in place.

    Derives output/summary directories, checks or writes the per-directory
    data configuration, normalizes metric names, and converts *_samples
    settings into optimizer-step counts.
    """
    # Keep every run under the top-level "models" directory.
    if not args.output_dir.startswith('models'):
        args.output_dir = os.path.join('models', args.output_dir)
    os.makedirs(args.output_dir, exist_ok=True)
    # Default the run name to a timestamp.
    if not args.name:
        args.name = str(datetime.now())
    args.summary_dir = os.path.join(args.output_dir, args.name)
    # Recreate the per-run summary directory from a clean slate.
    if os.path.exists(args.summary_dir):
        shutil.rmtree(args.summary_dir)
    os.makedirs(args.summary_dir)
    # Data settings are pinned per output_dir: an earlier run's config must
    # match, otherwise bail out; a first run writes the config for later.
    data_config_file = os.path.join(args.output_dir, 'data_config.json5')
    if os.path.exists(data_config_file):
        with open(data_config_file) as f:
            config = json5.load(f)
            for k, v in config.items():
                if not hasattr(args, k) or getattr(args, k) != v:
                    print('ERROR: Data configurations are different. Please use another output_dir or '
                          'remove the older one manually.')
                    exit()
    else:
        with open(data_config_file, 'w') as f:
            keys = ['data_dir', 'min_df', 'max_vocab', 'max_len', 'min_len', 'lower_case',
                    'pretrained_embeddings', 'embedding_mode']
            json5.dump({k: getattr(args, k) for k in keys}, f)
    # Metrics are matched case-insensitively; the main metric is always watched.
    args.metric = args.metric.lower()
    args.watch_metrics = [m.lower() for m in args.watch_metrics]
    if args.metric not in args.watch_metrics:
        args.watch_metrics.append(args.metric)
    # NOTE(review): assert is stripped under `python -O`; a raise would be safer.
    assert args.pretrained_embeddings, 'pretrained embeddings must be provided.'

    def samples2steps(n):
        # Number of optimizer updates needed to consume n samples.
        return int(math.ceil(n / args.batch_size))

    # Fill in step-based settings from their *_samples counterparts unless
    # the user supplied them explicitly.
    if not hasattr(args, 'log_per_updates'):
        args.log_per_updates = samples2steps(args.log_per_samples)
    if not hasattr(args, 'eval_per_updates'):
        args.eval_per_updates = samples2steps(args.eval_per_samples)
    if not hasattr(args, 'eval_per_updates_warmup'):
        args.eval_per_updates_warmup = samples2steps(args.eval_per_samples_warmup)
    if not hasattr(args, 'eval_warmup_steps'):
        args.eval_warmup_steps = samples2steps(args.eval_warmup_samples)
    if not hasattr(args, 'min_steps'):
        args.min_steps = samples2steps(args.min_samples)
    if not hasattr(args, 'early_stopping'):
        args.early_stopping = samples2steps(args.tolerance_samples)
    if not hasattr(args, 'lr_warmup_steps'):
        args.lr_warmup_steps = samples2steps(args.lr_warmup_samples)
    if not hasattr(args, 'lr_decay_steps'):
        args.lr_decay_steps = samples2steps(args.lr_decay_samples)
    # NOTE(review): this excerpt may be truncated here — confirm against the
    # original source whether the function continues.