本文整理汇总了Python中msgpack.load函数的典型用法代码示例。如果您正苦于以下问题:Python load函数的具体用法?Python load怎么用?Python load使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了load函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: convert_msg_to_txt_file
def convert_msg_to_txt_file(self):
    """Convert each set of msgpack data files (t/x/y/I) into one CSV-style text file.

    For every index in ``self.msg_t_file_list`` the matching time, x, y and
    intensity msgpack files are loaded into the instance buffers, then written
    out one sample per line to ``<parent.parent>/txt/<parent>/beads<NN>_<MM>.txt``.
    The bead and file numbers are parsed from the t-file name (``tNN-MM.dat``).
    """
    pat = re.compile('t(\d{2})-(\d{2})\.dat')
    for _file_num in range(len(self.msg_t_file_list)):
        # Load the four parallel msgpack streams for this file index.
        for file_list, target in ((self.msg_t_file_list, self.ttt),
                                  (self.msg_x_file_list, self.x),
                                  (self.msg_y_file_list, self.y),
                                  (self.msg_I_file_list, self.I)):
            with open(file_list[_file_num].get_path(), 'rb') as f:
                target.extend(msgpack.load(f))
        print(123, self.msg_t_file_list[_file_num].get_path())
        print(234, len(self.ttt), len(self.x), len(self.y), len(self.I))
        _cur_res = re.search(pat, self.msg_t_file_list[_file_num].name)
        beads_num = _cur_res.group(1)
        file_num = _cur_res.group(2)
        _cur_dir_path = (self.msg_t_file_list[_file_num].parent.parent.get_path()
                         + '/txt/' + self.msg_t_file_list[_file_num].parent.name)
        if not os.path.exists(_cur_dir_path):
            os.makedirs(_cur_dir_path)
        _cur_file_path = _cur_dir_path + '/beads{}_{}.txt'.format(beads_num, file_num)
        print(555, len(self.ttt))
        with open(_cur_file_path, 'w+') as f:
            f.write('time(sec),x(nm),y(nm),total intensity(V)\r\n')
            for i in range(len(self.ttt)):
                f.write('{},{},{},{}\r\n'.format(self.ttt[i], self.x[i],
                                                 self.y[i], self.I[i]))
                if i % 10**6 == 0:
                    print(i)  # progress marker every million samples
        # NOTE(review): buffers are cleared after each file set so every output
        # file is self-contained — confirm against original (flattened) source.
        self.ttt, self.x, self.y, self.I = [], [], [], []
示例2: read_msg_file_to_tx2
def read_msg_file_to_tx2(self, _file_num=None):
    """Load the t and x msgpack files at index *_file_num* into the buffers.

    Extends ``self.ttt`` and ``self.x`` with the deserialized contents.
    If *_file_num* is None, prints a warning and returns without loading.
    """
    if _file_num is None:
        print('please assign file_number')
        # Bug fix: the original fell through and crashed with
        # TypeError when indexing the file lists with None.
        return
    with open(self.msg_t_file_list[_file_num].get_path(), 'rb') as f:
        self.ttt.extend(msgpack.load(f))
    with open(self.msg_x_file_list[_file_num].get_path(), 'rb') as f:
        self.x.extend(msgpack.load(f))
    print(222, self.msg_t_file_list[_file_num].get_path())
    print(len(self.ttt))
示例3: __init__
def __init__(self, ensemble_clf_model):
    """Load a serialized classifier ensemble from *ensemble_clf_model*.

    The msgpack file holds a pair: the raw classifier parameters and the
    manual feature list used by the manual-feature GBDT.
    """
    with open(ensemble_clf_model, "rb") as fh:
        self.classifiers, self.manual_feature_list = msgpack.load(fh)
    # interp GBDT x 3: x, v, a | manual GBDT | Output: LR
    self.interp_gbdts = [
        CustomGradientBoostingClassifier(params)
        for params in self.classifiers[:3]
    ]
    self.manual_gbdt = CustomGradientBoostingClassifier(self.classifiers[3])
    self.lr = CustomLogisticRegression(self.classifiers[4])
示例4: get_msgpack_object_ref
def get_msgpack_object_ref(path):
    """Get object-id ref for object in messagepack-encoded file.
    Args:
        (str) path: Full path to file.
    Returns:
        (str) reference, in form A/B/C e.g. '93/111124/2'
    Raises:
        IOError if the file cannot be opened.
        ValueError if the data in the file cannot be decoded.
        KeyError if the reference field is not found in the data.
    """
    import msgpack
    try:
        # 'rb': msgpack is a binary format; text mode breaks decoding on py3.
        f = open(path, 'rb')
    except IOError as err:
        raise IOError('Cannot open file for reading: {}'.format(path))
    try:
        t = msgpack.load(f)
    except Exception as err:
        raise ValueError('Cannot decode messagepack data in path "{}": {}'
                         ''.format(path, err))
    finally:
        # Bug fix: the original leaked the file handle.
        f.close()
    try:
        ref = t['ref']
    except KeyError:
        raise KeyError('Field "ref" not found in object at "{}"'.format(path))
    return ref
示例5: parseFromFile
def parseFromFile(self, fname):
    """
    Overwritten to read Msgpack files.

    Returns the first msgpack object deserialized from *fname*.
    """
    import msgpack
    # 'rb': msgpack is binary; the original "r" text mode fails on py3 and
    # also leaked the file handle.
    with open(fname, "rb") as f:
        return msgpack.load(f)
示例6: get_or_build
def get_or_build(path, build_fn, *args, **kwargs):
    """
    Load from serialized form or build an object, saving the built
    object.
    Remaining arguments are provided to `build_fn`.
    """
    obj = None
    must_save = False
    # NOTE(review): `encoding=` was removed in msgpack>=1.0 (use raw=False) —
    # this call assumes the pre-1.0 API the original relied on.
    if path is not None and os.path.isfile(path):
        with open(path, 'rb') as fh:
            obj = msgpack.load(fh, use_list=False, encoding='utf-8')
    else:
        must_save = True
    if obj is None:
        obj = build_fn(*args, **kwargs)
    if must_save and path is not None:
        with open(path, 'wb') as fh:
            msgpack.dump(obj, fh)
    return obj
示例7: load_check_result_url
def load_check_result_url(dic_file, check_url_file):
    """Classify checked picture URLs per person into confirmed-right/-wrong sets.

    Reads tab-separated check results from *check_url_file*, merges them into
    the (optionally pre-existing) per-person result dict loaded from
    *dic_file*, and dumps the merged dict to 'person_result_dic.p'.
    """
    person_result_dic = {}  # {person: (right_list, wrong_list)} — confidently right / confidently wrong pics
    right_url_count = wrong_url_count = error_format_count = no_baike_count = no_meaning_count = 0
    if os.path.exists(dic_file):
        with open(dic_file, 'rb') as f_dic:
            person_result_dic = msgpack.load(f_dic)
    with open(check_url_file) as f_check:
        for line in f_check:
            tmp = line.rstrip().split('\t')
            # [person_name, pic_index, pic_url, baike_name, baike_sim, newbaike_sim, guess_info]
            person_name = tmp[0]
            right_list, wrong_list = person_result_dic.get(person_name, ([], []))
            if len(tmp) != 7:
                error_format_count += 1
                continue
            if tmp[3] in no_meaning_list:
                no_meaning_count += 1
                continue
            if tmp[3] == no_find_baike:
                no_baike_count += 1
                continue
            if get_newbaike_sim(tmp[4]) <= sim_threshold:
                # Below the confidence threshold the result is not trusted
                # and the picture needs manual annotation.
                no_baike_count += 1
                continue
            if tmp[0] == tmp[3]:
                right_list.append(tmp[1])
                right_url_count += 1
            else:
                wrong_url_count += 1
                wrong_list.append(tmp[1])
            person_result_dic[person_name] = (right_list, wrong_list)
    print(right_url_count, wrong_url_count, no_baike_count, no_meaning_count,
          error_format_count)
    # Bug fix: msgpack output is binary — must be 'wb', not 'w'; also close
    # the handle deterministically.
    with open('person_result_dic.p', 'wb') as f_out:
        msgpack.dump(person_result_dic, f_out)
示例8: load_msgpack
def load_msgpack(blob, **kwargs):
    """
    Load a dict packed with msgpack into kwargs for
    a Trimesh constructor
    Parameters
    ----------
    blob : bytes
        msgpack packed dict containing
        keys 'vertices' and 'faces'
    Returns
    ----------
    loaded : dict
        Keyword args for Trimesh constructor, aka
        mesh=trimesh.Trimesh(**loaded)
    """
    import msgpack
    # Pick the stream- or bytes-based decoder depending on what we were given.
    unpacker = msgpack.load if hasattr(blob, 'read') else msgpack.loads
    return load_dict(unpacker(blob))
示例9: parse
def parse(self, stream, media_type=None, parser_context=None):
    """Deserialize a msgpack request *stream* into native Python data.

    Raises ParseError if the stream cannot be decoded.
    """
    try:
        return msgpack.load(stream,
                            use_list=True,
                            object_hook=MessagePackDecoder().decode)
    # Bug fix: `except Exception, exc` / `unicode()` are py2-only syntax
    # and a SyntaxError/NameError on py3.
    except Exception as exc:
        raise ParseError('MessagePack parse error - %s' % exc)
示例10: move_pic
def move_pic():
    """Sort downloaded face pictures using the per-person check results.

    Pictures whose index is in the confirmed-right set are copied to the
    filtered folder; confirmed-wrong ones are skipped; everything else is
    copied to the manual-annotation folder.
    """
    pic_folder = '/data/pictures_face/'
    right_pic_folder = '/data/pictures_face_baidu_filter/'
    need_annotate_folder = '/data/pictures_face_need_annotate/'
    # Bug fix: msgpack data is binary — open 'rb', not 'r'; also close the
    # handle deterministically.
    with open('person_result_dic.p', 'rb') as f:
        person_result_dic = msgpack.load(f)
    person_list = os.listdir(pic_folder)
    for person in person_list:
        old_person_path = os.path.join(pic_folder, person)
        right_person_path = os.path.join(right_pic_folder, person)
        annotate_person_path = os.path.join(need_annotate_folder, person)
        # NOTE(review): .decode('gbk').encode('utf-8') assumes py2 byte-string
        # directory names — confirm before running on py3.
        right_index_list, wrong_index_list = person_result_dic.get(
            person.decode('gbk').encode('utf-8'), ([], []))
        right_index_list = set(right_index_list)
        wrong_index_list = set(wrong_index_list)
        old_pic_list = os.listdir(old_person_path)
        for pic in old_pic_list:
            pic_index = pic.replace('.png', '').replace('0.jpg', '').replace('_', '')
            if pic_index in right_index_list:
                if not os.path.exists(right_person_path):
                    os.makedirs(right_person_path)
                shutil.copyfile(os.path.join(old_person_path, pic),
                                os.path.join(right_person_path, pic))
            elif pic_index in wrong_index_list:
                continue  # confirmed wrong: drop the picture
            else:
                if not os.path.exists(annotate_person_path):
                    os.makedirs(annotate_person_path)
                shutil.copyfile(os.path.join(old_person_path, pic),
                                os.path.join(annotate_person_path, pic))
示例11: redis_store
def redis_store(input_dir, name, server, port, **kw):
    """Load per-timestamp sensor/image files from *input_dir* into Redis.

    File names are millisecond timestamps; .jpg files are indexed as images,
    everything else is parsed as msgpack sensor dumps whose samples are added
    to per-sensor sorted sets under the user *name*.
    """
    import redis
    r = redis.StrictRedis(server, port)
    times = set()
    sensor_types = {}
    fn_to_time = lambda x: int(x.rsplit('/', 1)[-1].split('.', 1)[0])
    r.sadd('users', name)
    for fn in sorted(glob.glob(input_dir + '/*'), key=fn_to_time):
        fn_time = fn_to_time(fn) / 1000.
        if fn.endswith('.jpg'):
            # Bug fix: the original referenced the undefined `sample` here;
            # the image's own timestamp is the value being recorded.
            times.add(fn_time)
            r.zadd(name + ':images', fn_time, os.path.basename(fn))
        else:
            try:
                # 'rb' + with: msgpack is binary and the original leaked the handle.
                with open(fn, 'rb') as f:
                    data = msgpack.load(f)
            except ValueError:
                print('Could not parse [%s]' % fn)
                continue
            print(data)
            for sensor_name, type_num in data[2].items():
                sensor_types[sensor_name] = msgpack.dumps(type_num)
            for sensor_name, samples in data[3].items():
                for sample in samples:
                    times.add(sample[1])
                    r.zadd(name + ':sensor:' + sensor_name, sample[1],
                           msgpack.dumps(sample))
    r.hmset(name + ':sensors', sensor_types)
    r.zadd(name + ':times', **{msgpack.dumps(x): x for x in times})
示例12: targets
def targets(tgt, tgt_type='glob', **kwargs):  # pylint: disable=W0613
    '''
    Return the roster target for *tgt* from the cached minion data
    (msgpack-encoded data.p under the master cache dir); empty dict when
    no cache or no usable IPv4 address exists.
    '''
    cache = os.path.join(syspaths.CACHE_DIR, 'master', 'minions', tgt, 'data.p')
    if not os.path.exists(cache):
        return {}
    roster_order = __opts__.get('roster_order', (
        'public', 'private', 'local'
    ))
    # Bug fix: the cache is msgpack (binary) — open 'rb', not 'r', so the
    # bytes are not mangled by text-mode decoding on py3.
    with salt.utils.fopen(cache, 'rb') as fh_:
        cache_data = msgpack.load(fh_)
    ipv4 = cache_data.get('grains', {}).get('ipv4', [])
    preferred_ip = extract_ipv4(roster_order, ipv4)
    if preferred_ip is None:
        return {}
    return {
        tgt: {
            'host': preferred_ip,
        }
    }
示例13: load_file
def load_file(self, fp):
    """Deserialize one msgpack object from the open file *fp*.

    Returns the decoded object, or None (with a warning logged) when
    decoding fails for any reason.
    """
    try:
        obj = msgpack.load(fp)
    except Exception as ex:
        log.warn('Unable to load object from file: %s', ex, exc_info=True)
        return None
    return obj
示例14: main
def main(path):
    """Plot normalized pupil radius and ambient light over time.

    Reads every *.msgpack sensor dump under *path*, extracts the pupil
    radius and light-sensor samples keyed by timestamp, normalizes each
    series to [0, 1], downsamples to at most one point per 250 ms, and
    saves the plot to pupil_light_plot.png.
    """
    sensor_time_values = {"Pupil Eyetracker": {}, "LTR-506ALS Light sensor": {}}
    for fn in glob.glob(path + "/*.msgpack"):
        # 'rb' + with: msgpack is binary and the original leaked the handle.
        with open(fn, 'rb') as f:
            s = msgpack.load(f)
        for k, vs in s[3].items():
            for v in vs:
                if k == "Pupil Eyetracker":
                    sensor_time_values[k][v[1]] = v[0][2]
                elif k == "LTR-506ALS Light sensor":
                    sensor_time_values[k][v[1]] = v[0][0]
    mp.ion()
    mp.show()
    for k, vs in sensor_time_values.items():
        if k == "Pupil Eyetracker":
            c = [0, 1, 0]
        elif k == "LTR-506ALS Light sensor":
            c = [1, 0, 0]
        # Bug fix: np.array(dict.values()) yields a useless 0-d object array
        # on py3; materialize the view as a list first.
        va = np.array(list(vs.values()))
        M = np.max(va)
        m = np.min(va)
        s = 1.0 / (M - m)
        prev_x = 0
        xs = []
        ys = []
        for x, y in sorted(vs.items()):
            if x - prev_x < 0.25:
                # downsample: keep at most one point per 250 ms
                continue
            xs.append(x)
            ys.append(y)
            prev_x = x
        mp.plot(np.array(xs) - xs[0], (np.array(ys) - m) * s, c=c, label=k)
    mp.title("Pupil Radius and Ambient Light over Time")
    mp.legend()
    mp.draw()
    mp.savefig("pupil_light_plot.png")
示例15: split_all_url
def split_all_url():
    '''
    Split the URL list into one file per person, so the data can then be
    crawled with hadoop. (original: 将url列表以人分成多个文件,然后用hadoop爬数据)
    '''
    result_folder = 'person_url_check'
    if not os.path.exists(result_folder):
        os.makedirs(result_folder)
    # with-block fix: the original leaked the msgpack file handle.
    with open('pic_face_index_url_dic.p', 'rb') as f_dic:
        pic_face_index_dic = msgpack.load(f_dic)
    person_count = 0
    url_count = 0
    person_index = 0
    for person in pic_face_index_dic:
        start = time()
        person_index += 1
        with open(os.path.join(result_folder, str(person_index)), 'w') as f_result:
            try:
                need_check_url_index_list = pic_face_index_dic.get(person)
                for index, pic_url in need_check_url_index_list:
                    write_content = [person, index, pic_url]
                    f_result.write('\t'.join(map(str, write_content)) + '\n')
                    url_count += 1
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate; malformed entries are logged and skipped.
            except Exception:
                traceback.print_exc()
                continue
        person_count += 1
        print(person, person_count, url_count, time() - start)