This article collects and summarizes typical usage examples of the ujson.load function in Python. If you have been wondering what exactly the load function does, how to call it, and how it is used in practice, then congratulations: the curated code examples here may help you.
A total of 15 code examples of the load function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
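One point worth noting before the examples: ujson.load reads JSON from a file-like object (anything with a read() method), whereas ujson.loads parses a JSON string. Mixing the two up is a common mistake, and it is exactly what Example 1 below tests for. A minimal sketch (the file name is hypothetical):

import ujson

# ujson.load takes an open file-like object
with open("config.json") as fh:
    data = ujson.load(fh)

# ujson.loads takes a JSON document as a string
data = ujson.loads('{"key": "value"}')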
Example 1: test_loadFileArgsError
def test_loadFileArgsError(self):
    # ujson.load expects a file-like object; a plain string must raise TypeError
    try:
        ujson.load("[]")
    except TypeError:
        pass
    else:
        assert False, "expected TypeError"
Example 2: load_cooc_dict
def load_cooc_dict():
    global cw_dict, c_dict
    liblogger.info("load cooc dict")
    pxy_cache_file = cooc_dict_file + ".pxy.cache"
    py_cache_file = cooc_dict_file + ".py.cache"
    if using_cache and os.path.exists(pxy_cache_file) and os.path.exists(py_cache_file):
        cw_dict = json.load(open(pxy_cache_file))
        c_dict = json.load(open(py_cache_file))
        return
    cooc_dict = json.load(open(cooc_dict_file))
    cw_dict = defaultdict(int)
    c_dict = defaultdict(int)
    for w in cooc_dict:
        for ctx in cooc_dict[w]:
            count = cooc_dict[w][ctx]
            cw = (w, ctx)
            cw_dict[cw] += count
            c_dict[ctx] += count
    liblogger.info("norm cooc dict for P(x, y)")
    cw_sum = float(sum(cw_dict.values()))
    for cw in cw_dict:
        cw_dict[cw] = math.log(cw_dict[cw] / cw_sum)
    json.dump(cw_dict, open(pxy_cache_file, "w"))
    liblogger.info("ctx dict P(y)")
    c_sum = float(sum(c_dict.values()))
    for c in c_dict:
        c_dict[c] = math.log(c_dict[c] / c_sum)
    json.dump(c_dict, open(py_cache_file, "w"))
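A side note on the example above: passing bare open() calls to json.load and json.dump leaks file handles. A minimal sketch of the same cache-or-recompute pattern with context managers (load_with_cache and compute are hypothetical names, not from the source):

import os
import ujson

def load_with_cache(source_path, cache_path, compute):
    # Reuse the cached result when it exists; otherwise compute and cache it.
    if os.path.exists(cache_path):
        with open(cache_path) as fh:
            return ujson.load(fh)
    with open(source_path) as fh:
        result = compute(ujson.load(fh))
    with open(cache_path, "w") as fh:
        ujson.dump(result, fh)
    return result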
Example 3: extract_json_data
def extract_json_data(self, filename, option):
    '''
    Imports a .json file from peeringdb and returns a list of dictionaries
    with all the retrieved IXP information.
    Input:
        a) filename: A .json file name.
        b) option: Flag to download the file.
    Output:
        a) A list of dictionaries.
    '''
    try:
        with open(self.homepath + '/database' + filename) as data_file:
            obj = ujson.load(data_file)
    except (IOError, ValueError):
        print(filename + ' was not found.')
        if not self.downloader.download_peering(option):
            print("Could not download " + filename +
                  ". Copying from the default database.")
            try:
                copyfile(self.libpath + '/database/Default' + filename,
                         self.homepath + '/database' + filename)
            except IOError:
                print('Could not copy ' + filename +
                      ' from the default database.')
        try:
            with open(self.homepath + '/database' + filename) as data_file:
                obj = ujson.load(data_file)
        except (IOError, ValueError):
            print('Could not open ' + filename + '. Exiting.')
            exit(1)  # non-zero exit code to signal failure
    return obj['data']
Example 4: get_translation_percentage
def get_translation_percentage(self, locale_path: Text, locale: Text) -> int:
    # backend stats
    po = polib.pofile(self.get_po_filename(locale_path, locale))
    not_translated = len(po.untranslated_entries())
    total = len(po.translated_entries()) + not_translated
    # frontend stats
    with open(self.get_json_filename(locale_path, locale)) as reader:
        for key, value in ujson.load(reader).items():
            total += 1
            if value == '':
                not_translated += 1
    # mobile stats
    with open(os.path.join(locale_path, 'mobile_info.json')) as mob:
        mobile_info = ujson.load(mob)
    try:
        info = mobile_info[locale]
    except KeyError:
        if self.strict:
            raise
        info = {'total': 0, 'not_translated': 0}
    total += info['total']
    not_translated += info['not_translated']
    return (total - not_translated) * 100 // total
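The final line computes a floor-rounded integer percentage: with, say, total = 200 entries of which not_translated = 50, it returns (200 - 50) * 100 // 200 == 75. Since // floors, 149 translated out of 200 yields 74, not 75.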
Example 5: load_place_savers
def load_place_savers(user_dir):
    """
    This function loads the following place-saving parameters:
        1. cur_hop - Current hop of the collection algorithm
        2. cur_user_list - List of users collected during the current hop
        3. next_user_list - List of users to collect on the next hop
        4. added_topics_for_cur_hop - Topics added from the current hop (if relevant to the sampling method)
        5. unavailable_accounts - List of unavailable accounts
        6. finished_users - Users that have already been collected
    :param user_dir: Directory where profile information is saved
    :return place_saver_obj: Python dictionary of the aforementioned fields
    """
    # Load object
    try:
        jfid = open(os.path.join(user_dir, "place_saver_v1.txt"))
        place_saver_obj = ujson.load(jfid)
        jfid.close()
    except ValueError:
        jfid = open(os.path.join(user_dir, "place_saver_v2.txt"))
        place_saver_obj = ujson.load(jfid)
        jfid.close()
    except IOError:
        print("The object 'place_saver' does not exist, creating it now")
        place_saver_obj = {}
    # Make all necessary fields in case they don't already exist
    if "cur_user_list" not in place_saver_obj:
        place_saver_obj["cur_user_list"] = set([])
    if "next_user_list" not in place_saver_obj:
        place_saver_obj["next_user_list"] = set([])
    if "cur_hop" not in place_saver_obj:
        place_saver_obj["cur_hop"] = 0
    if "added_topics_for_cur_hop" not in place_saver_obj:
        place_saver_obj["added_topics_for_cur_hop"] = set([])
    if "unavailable_accounts" not in place_saver_obj:
        place_saver_obj["unavailable_accounts"] = set([])
    if "finished_users" not in place_saver_obj:
        place_saver_obj["finished_users"] = {}
    jsons = [fn for fn in os.listdir(user_dir) if re.match("userInfo_*", fn)]
    for jj in range(len(jsons)):
        if jj % 200 == 0:
            print("Check profile JSON {} of {}".format(jj + 1, len(jsons)))
        try:
            full_filename = os.path.join(user_dir, jsons[jj])
            if os.path.getsize(full_filename) == 0:
                continue
            jfid = open(full_filename)
            profile = ujson.load(jfid)
            jfid.close()
            if profile["id"] in place_saver_obj["finished_users"]:
                continue
            place_saver_obj["finished_users"][profile["id"]] = jsons[jj]
        except ValueError:
            continue
    # Ensure that all fields are set objects
    for kk in place_saver_obj.keys():
        if kk != "finished_users" and kk != "cur_hop":
            place_saver_obj[kk] = set(place_saver_obj[kk])
    return place_saver_obj
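The trailing loop in this example exists because JSON has no set type: sets must be dumped as lists and converted back after loading. A minimal sketch of the matching round trip (save_sets and load_sets are hypothetical names, not from the source):

import ujson

def save_sets(obj, path):
    # JSON cannot represent sets, so convert them to lists before dumping
    serializable = {k: (list(v) if isinstance(v, set) else v) for k, v in obj.items()}
    with open(path, "w") as fh:
        ujson.dump(serializable, fh)

def load_sets(path, set_fields):
    # Restore the named fields to sets after loading, as load_place_savers does
    with open(path) as fh:
        obj = ujson.load(fh)
    for k in set_fields:
        obj[k] = set(obj.get(k, []))
    return obj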
Example 6: setUp
def setUp(self):
    with open("tests/data/square.geojson") as f:
        self.square_geojson = json.load(f)
    with open("tests/data/square.topojson") as f:
        self.square_topojson = json.load(f)
    with open("tests/data/multipolygons_spherical.geojson") as f:
        self.ref = json.load(f)
Example 7: test_orderbook
def test_orderbook():
    variable_order_book = Book()
    control_order_book = Book()
    with open('testdata/messages.json') as messages_json_file:
        messages = json.load(messages_json_file)
    with open('testdata/beginning_level_3.json') as begin_json_file:
        beginning_level_3 = json.load(begin_json_file)
    with open('testdata/ending_level_3.json') as end_json_file:
        ending_level_3 = json.load(end_json_file)
    try:
        assert beginning_level_3['sequence'] + 1 == messages[0]['sequence']
        assert ending_level_3['sequence'] == messages[-1]['sequence']
    except AssertionError:
        print("Problem with sample data sequences")
    variable_order_book.get_level3(beginning_level_3)
    start = time.time()
    for message in messages:
        variable_order_book.process_message(message)
    end = time.time()
    print('messages per sec: {0}'.format(int(len(messages) / (end - start))))
    control_order_book.get_level3(ending_level_3)
    dict_compare(variable_order_book.asks.price_map, control_order_book.asks.price_map, price_map=True)
    dict_compare(variable_order_book.asks.order_map, control_order_book.asks.order_map, order_map=True)
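For micro-benchmarks like the throughput measurement above, time.perf_counter() (Python 3.3+) is generally preferable to time.time(), since it uses the highest-resolution monotonic clock available. A drop-in variant of the timed section:

start = time.perf_counter()
for message in messages:
    variable_order_book.process_message(message)
elapsed = time.perf_counter() - start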
Example 8: _load
def _load(logger, tests_root, manifest, types=None, meta_filters=None, allow_cached=True):
    # "manifest" is a path or file-like object.
    manifest_path = (manifest if isinstance(manifest, string_types)
                     else manifest.name)
    if allow_cached and manifest_path in __load_cache:
        return __load_cache[manifest_path]
    if isinstance(manifest, string_types):
        if os.path.exists(manifest):
            logger.debug("Opening manifest at %s" % manifest)
        else:
            logger.debug("Creating new manifest at %s" % manifest)
        try:
            with open(manifest) as f:
                rv = Manifest.from_json(tests_root,
                                        fast_json.load(f),
                                        types=types,
                                        meta_filters=meta_filters)
        except IOError:
            return None
        except ValueError:
            logger.warning("%r may be corrupted", manifest)
            return None
    else:
        rv = Manifest.from_json(tests_root,
                                fast_json.load(manifest),
                                types=types,
                                meta_filters=meta_filters)
    if allow_cached:
        __load_cache[manifest_path] = rv
    return rv
Example 9: __init__
def __init__(self, path, writer_queue=None):
    """Initialize using path to file and optional thread-safe queue.

    Queue is used for json-serializable data to be written to file when
    self.write_queued() is called.
    If the file at 'path' doesn't exist, it will be created.
    """
    self.path = os.path.realpath(os.path.expanduser(path))
    if not os.path.exists(self.path):
        print("Persistence file %s does not exist yet, creating it..." % self.path)
        json.dump({}, open(self.path, 'w'))
    else:
        # check for json-ness
        try:
            json.load(open(self.path))
            LOG.debug("Loaded existing persistence file %s.",
                      os.path.relpath(self.path))
        except ValueError as err:
            raise ValueError("The persistence file -> %s is not "
                             "a valid json file. | %s"
                             % (os.path.relpath(self.path), err))
    if writer_queue and not isinstance(writer_queue, Queue.Queue):
        raise TypeError('writer_queue should be a Queue.Queue.')
    elif writer_queue:
        self.synq = writer_queue
        self.synq._persisted = set()
    else:
        self.synq = None
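Note that Queue.Queue is the Python 2 spelling; Python 3 renamed the module to lowercase queue. A common compatibility import (a porting assumption, not from the source):

try:
    import queue as Queue  # Python 3
except ImportError:
    import Queue           # Python 2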
Example 10: combine_dicts
def combine_dicts():
    with open('title10to100000.json') as tag200, open('title100000plus.json') as tag1500:
        tag200dict = ujson.load(tag200)
        tag500dict = ujson.load(tag1500)
        newdict = dict(chain(tag200dict.items(), tag500dict.items()))
        with open('titletagwords.json', 'w') as write:
            ujson.dump(newdict, write)
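With dict(chain(a.items(), b.items())), keys present in both files take their value from the second dict. On Python 3.5+ the same merge is usually written with unpacking:

newdict = {**tag200dict, **tag500dict}  # the later dict wins on duplicate keys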
Example 11: reading_vqa_data
def reading_vqa_data(vqa_dir, section):
    ans = 'mscoco_%s2014_annotations.json' % section
    with (vqa_dir / ans).open() as file_:
        ans_data = json.load(file_)
    image_by_id = {}
    answers_by_id = {}
    for answer in ans_data['annotations']:
        image = str(answer['image_id'])
        mca = answer['multiple_choice_answer']
        img = '0' * (12 - len(image)) + image  # zero-pad the image id to 12 digits
        s = '/data/%s/images' % section
        s = s + '/COCO_%s2014_' % section + img + '.jpg'
        image_by_id[answer['question_id']] = s
        answers_by_id[answer['question_id']] = mca
    filename = ('MultipleChoice_mscoco_'
                '%s2014_questions.json' % section)
    with (vqa_dir / filename).open() as file_:
        ques_data = json.load(file_)
    for question in ques_data['questions']:
        text = question['question']
        ques_id = question['question_id']
        options = question['multiple_choices']
        image_path = image_by_id[ques_id]
        image = Image.open(image_path)
        if min(image.size) < IMAGE_SIZE:
            # fall back to the previous usable image; note that prev_image is
            # unbound until a large-enough image with answer 'yes' has been seen
            image_path = prev_image
            image_by_id[ques_id] = image_path
        elif answers_by_id[ques_id] == 'yes':
            prev_image = image_path
        yield ques_id, image_by_id[ques_id], text, options, answers_by_id[ques_id]
Example 12: addin_dubbed_video_mappings
def addin_dubbed_video_mappings(node_data, lang=en_lang_code):
    # Get the dubbed videos from the spreadsheet and substitute them
    # for the video and topic attributes of the returned data struct.
    build_path = os.path.join(os.getcwd(), "build")
    # Create dubbed_video_mappings.json in the build folder.
    if os.path.exists(os.path.join(build_path, "dubbed_video_mappings.json")):
        logging.info("Dubbed videos json already exists at %s" % DUBBED_VIDEOS_MAPPING_FILEPATH)
    else:
        main()
    # Get the list of video ids from the dubbed video mappings
    lang_code = get_lang_name(lang).lower()
    dubbed_videos_path = os.path.join(build_path, "dubbed_video_mappings.json")
    with open(dubbed_videos_path, "r") as f:
        dubbed_videos_load = ujson.load(f)
    dubbed_videos_list = dubbed_videos_load.get(lang_code)
    # If dubbed_videos_list is None, the language code is not available in the dubbed video mappings.
    if not dubbed_videos_list:
        return node_data
    # Get the current youtube_ids and topic_paths from the khan api node data.
    youtube_ids = []
    topic_paths = []
    for node in node_data:
        node_kind = node.get("kind")
        if node_kind == NodeType.video:
            youtube_ids.append(node.get("youtube_id"))
        if node_kind == NodeType.topic:
            topic_paths.append(node.get("path"))
    en_nodes_path = os.path.join(build_path, "en_nodes.json")
    with open(en_nodes_path, "r") as f:
        en_node_load = ujson.load(f)
    en_node_list = []
    # en_nodes.json must have the same data structure as the node_data variable from the khan api.
    for node in en_node_load:
        node_kind = node.get("kind")
        if node_kind == NodeType.video:
            youtube_id = node["youtube_id"]
            if youtube_id not in youtube_ids:
                if youtube_id in dubbed_videos_list:
                    node["youtube_id"] = dubbed_videos_list[youtube_id]
                    node["translated_youtube_lang"] = lang
                en_node_list.append(node)
                youtube_ids.append(youtube_id)
        # Append every topic that is not already in the topic_paths list.
        if node_kind == NodeType.topic:
            if node["path"] not in topic_paths:
                en_node_list.append(node)
                topic_paths.append(node["path"])
    node_data += en_node_list
    return node_data
Example 13: main
def main():
    parser = argparse.ArgumentParser(description="Analysis scripts for LexNorm in W-NUT 2015")
    parser.add_argument("--pred", required=True, help="A JSON file: your predictions over the test data, formatted in JSON like the training data")
    parser.add_argument("--oracle", required=True, help="A JSON file: the oracle annotations of the test data, formatted in JSON like the training data")
    args = parser.parse_args()
    predicates = json.load(open(args.pred))
    training_list = json.load(open(args.oracle))  # the oracle annotations
    oov_detection_performance(training_list, predicates)
Example 14: LoadData
def LoadData(self):
    fp = gzip.open('data/dictbase/word_pos.txt.gz')
    self.word_pos = json.load(fp)
    fp.close()
    fp = gzip.open('data/dictbase/word_pos_max.txt.gz')
    self.word_pos_max = json.load(fp)
    fp.close()
    fp = gzip.open('data/dictbase/word_trans.txt.gz')
    self.word_tran = json.load(fp)
    fp.close()
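The repeated open/load/close triples above can be collapsed with context managers and a loop. A minimal equivalent sketch (assumes Python 3 for gzip's 'rt' text mode, and that json here is ujson imported under that name):

import gzip
import ujson as json

def LoadData(self):
    # Map each attribute to its gzipped JSON file and load them in turn
    files = {
        'word_pos': 'data/dictbase/word_pos.txt.gz',
        'word_pos_max': 'data/dictbase/word_pos_max.txt.gz',
        'word_tran': 'data/dictbase/word_trans.txt.gz',
    }
    for attr, path in files.items():
        with gzip.open(path, 'rt') as fp:
            setattr(self, attr, json.load(fp))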
Example 15: demo
def demo(config):
    with open(config.word_emb_file, "r") as fh:
        word_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.char_emb_file, "r") as fh:
        char_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.test_meta, "r") as fh:
        meta = json.load(fh)
    model = Model(config, None, word_mat, char_mat, trainable=False, demo=True)
    demo = Demo(model, config)