This article collects typical usage examples of the joblib.load method in Python. If you have been wondering what exactly joblib.load does, how to call it, or want to see it in real code, the curated examples below may help. You can also explore further usage examples of the joblib module that this method belongs to.
The following shows 15 code examples of joblib.load, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: load_variables
# Required import: import joblib
# Or: from joblib import load
def load_variables(load_path, variables=None, sess=None):
    sess = sess or get_session()
    variables = variables or tf.trainable_variables()
    loaded_params = joblib.load(os.path.expanduser(load_path))
    restores = []
    if isinstance(loaded_params, list):
        assert len(loaded_params) == len(variables), 'number of variables loaded mismatches len(variables)'
        for d, v in zip(loaded_params, variables):
            restores.append(v.assign(d))
    else:
        for v in variables:
            restores.append(v.assign(loaded_params[v.name]))
    sess.run(restores)
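For context, the checkpoint consumed by load_variables is typically written by a matching dump routine. A minimal sketch, assuming the same get_session helper and module-level imports as above:

def save_variables(save_path, variables=None, sess=None):
    # Hypothetical counterpart: snapshot current TF variable values into a
    # dict keyed by variable name, then serialize it with joblib.dump.
    sess = sess or get_session()
    variables = variables or tf.trainable_variables()
    values = sess.run(variables)
    save_dict = {v.name: value for v, value in zip(variables, values)}
    dirname = os.path.dirname(save_path)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    joblib.dump(save_dict, save_path)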
Example 2: main
# Required import: import joblib
# Or: from joblib import load
def main():
    test_args = parse_args()
    args = joblib.load('models/%s/args.pkl' % test_args.name)
    folds = []
    losses = []
    scores = []
    for fold in range(args.n_splits):
        log_path = 'models/%s/log_%d.csv' % (args.name, fold + 1)
        if not os.path.exists(log_path):
            continue
        log = pd.read_csv(log_path)
        loss, score = log.loc[log['val_loss'].values.argmin(), ['val_loss', 'val_score']].values
        print(loss, score)
        folds.append(str(fold + 1))
        losses.append(loss)
        scores.append(score)
    results = pd.DataFrame({
        'fold': folds + ['mean'],
        'loss': losses + [np.mean(losses)],
        'score': scores + [np.mean(scores)],
    })
    print(results)
    results.to_csv('models/%s/results.csv' % args.name, index=False)
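The args.pkl file reloaded above would have been written by the training script, presumably with a single joblib.dump call along these lines (hypothetical snippet; args is the parsed argparse.Namespace):

# Hypothetical counterpart in the training script: persist the parsed
# arguments so later evaluation runs can reload the exact configuration.
joblib.dump(args, 'models/%s/args.pkl' % args.name)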
Example 3: load_from_disk
# Required import: import joblib
# Or: from joblib import load
def load_from_disk(filename):
    """Load a dataset from file."""
    name = filename
    if os.path.splitext(name)[1] == ".gz":
        name = os.path.splitext(name)[0]
    extension = os.path.splitext(name)[1]
    if extension == ".pkl":
        return load_pickle_from_disk(filename)
    elif extension == ".joblib":
        return joblib.load(filename)
    elif extension == ".csv":
        # First line of user-specified CSV *must* be header.
        df = pd.read_csv(filename, header=0)
        df = df.replace(np.nan, str(""), regex=True)
        return df
    elif extension == ".npy":
        return np.load(filename, allow_pickle=True)
    else:
        raise ValueError("Unrecognized filetype for %s" % filename)
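load_pickle_from_disk is assumed to be defined elsewhere in the same module. One plausible implementation, consistent with the .gz handling above:

import gzip
import pickle

def load_pickle_from_disk(filename):
    # Hypothetical helper: unpickle a file, transparently handling
    # gzip-compressed pickles such as dataset.pkl.gz.
    if filename.endswith(".gz"):
        with gzip.open(filename, "rb") as f:
            return pickle.load(f)
    with open(filename, "rb") as f:
        return pickle.load(f)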
Example 4: load_trajectories
# Required import: import joblib
# Or: from joblib import load
def load_trajectories(filenames, max_steps=None):
    assert len(filenames) > 0
    paths = []
    for filename in filenames:
        paths.append(joblib.load(filename))

    def get_obs_and_act(path):
        obses = path['obs'][:-1]
        next_obses = path['obs'][1:]
        actions = path['act'][:-1]
        if max_steps is not None:
            return obses[:max_steps], next_obses[:max_steps], actions[:max_steps - 1]
        else:
            return obses, next_obses, actions

    for i, path in enumerate(paths):
        if i == 0:
            obses, next_obses, acts = get_obs_and_act(path)
        else:
            obs, next_obs, act = get_obs_and_act(path)
            obses = np.vstack((obs, obses))
            next_obses = np.vstack((next_obs, next_obses))
            acts = np.vstack((act, acts))
    return {'obses': obses, 'next_obses': next_obses, 'acts': acts}
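A typical call site might look as follows (hypothetical usage, assuming each file holds one rollout dict with 'obs' and 'act' arrays saved via joblib.dump):

import glob

# Gather every saved rollout and cap each trajectory at 1000 transitions.
filenames = sorted(glob.glob('rollouts/*.pkl'))
data = load_trajectories(filenames, max_steps=1000)
print(data['obses'].shape, data['acts'].shape)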
Example 5: load_from_disk
# Required import: import joblib
# Or: from joblib import load
def load_from_disk(filename):
    """Load a dataset from file."""
    name = filename
    if os.path.splitext(name)[1] == ".gz":
        name = os.path.splitext(name)[0]
    if os.path.splitext(name)[1] == ".pkl":
        return load_pickle_from_disk(filename)
    elif os.path.splitext(name)[1] == ".joblib":
        try:
            return joblib.load(filename)
        except KeyError:
            # Try older joblib version for legacy files.
            return old_joblib.load(filename)
        except ValueError:
            return old_joblib.load(filename)
    elif os.path.splitext(name)[1] == ".csv":
        # First line of user-specified CSV *must* be header.
        df = pd.read_csv(filename, header=0)
        df = df.replace(np.nan, str(""), regex=True)
        return df
    else:
        raise ValueError("Unrecognized filetype for %s" % filename)
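old_joblib is assumed to be an alias for a legacy joblib used to read files pickled by older versions, presumably imported along these lines (hypothetical; older scikit-learn releases bundled their own joblib copy):

import joblib
from sklearn.externals import joblib as old_joblib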
Example 6: load_cv_dataset_from_disk
# Required import: import joblib
# Or: from joblib import load
def load_cv_dataset_from_disk(save_dir, fold_num):
    assert fold_num > 1
    loaded = False
    train_data = []
    valid_data = []
    for i in range(fold_num):
        fold_dir = os.path.join(save_dir, "fold" + str(i + 1))
        train_dir = os.path.join(fold_dir, "train_dir")
        valid_dir = os.path.join(fold_dir, "valid_dir")
        if not os.path.exists(train_dir) or not os.path.exists(valid_dir):
            return False, None, list()
        train = dcCustom.data.DiskDataset(train_dir)
        valid = dcCustom.data.DiskDataset(valid_dir)
        train_data.append(train)
        valid_data.append(valid)
    loaded = True
    with open(os.path.join(save_dir, "transformers.pkl"), 'rb') as f:
        transformers = pickle.load(f)
    return loaded, list(zip(train_data, valid_data)), transformers
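Usage sketch (hypothetical; assumes the fold directories and transformers.pkl were written earlier by a matching save routine):

# Reload a 5-fold split; `loaded` is False if any fold directory is missing.
loaded, cv_pairs, transformers = load_cv_dataset_from_disk('./cv_data', 5)
if loaded:
    for fold_idx, (train, valid) in enumerate(cv_pairs):
        print("fold", fold_idx + 1, train, valid)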
Example 7: fit_fold_parallel
# Required import: import joblib
# Or: from joblib import load
def fit_fold_parallel(*args, **kwargs):
    verbose = args[-1]
    data_path = '{}/{}'.format(args[4], str(uuid.uuid4()))
    cmd = "python3 models/apex/fit_fold.py \
        --data_path='{}' \
        --args='{}' \
        --kwargs='{}'".format(data_path, json.dumps(args), json.dumps(kwargs))
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
    for line in iter(p.stdout.readline, ''):
        if verbose:
            logger.info(line.strip())
        else:
            sys.stdout.write('\r{0: <140}'.format(line.strip())[:140])
            sys.stdout.flush()
    sys.stdout.write('\r{0: <140}'.format(''))
    sys.stdout.flush()
    retval = p.wait()
    time.sleep(10)
    return joblib.load(data_path)
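The child script is expected to deposit its result at data_path before the parent calls joblib.load. Its final lines presumably look roughly like this (hypothetical sketch of models/apex/fit_fold.py; fit_fold, args, kwargs and data_path would be parsed from the command line shown above):

# Train the fold, then hand the result back to the parent process
# through the agreed-on file.
result = fit_fold(*args, **kwargs)
joblib.dump(result, data_path)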
Example 8: load_tf_policy
# Required import: import joblib
# Or: from joblib import load
def load_tf_policy(fpath, itr, deterministic=False):
    """Load a tensorflow policy saved with Spinning Up Logger."""
    fname = osp.join(fpath, 'tf1_save' + itr)
    print('\n\nLoading from %s.\n\n' % fname)
    # load the things!
    sess = tf.Session()
    model = restore_tf_graph(sess, fname)
    # get the correct op for executing actions
    if deterministic and 'mu' in model.keys():
        # 'deterministic' is only a valid option for SAC policies
        print('Using deterministic action op.')
        action_op = model['mu']
    else:
        print('Using default action op.')
        action_op = model['pi']
    # make function for producing an action given a single state
    get_action = lambda x: sess.run(action_op, feed_dict={model['x']: x[None, :]})[0]
    return get_action
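The returned closure maps a single observation to an action. A minimal evaluation loop, assuming a classic Gym-style env with the four-tuple step API:

# Hypothetical rollout: feed observations through the loaded policy.
obs = env.reset()
for _ in range(1000):
    obs, reward, done, _ = env.step(get_action(obs))
    if done:
        obs = env.reset()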
Example 9: load_pytorch_policy
# Required import: import joblib
# Or: from joblib import load
def load_pytorch_policy(fpath, itr, deterministic=False):
    """Load a pytorch policy saved with Spinning Up Logger."""
    fname = osp.join(fpath, 'pyt_save', 'model' + itr + '.pt')
    print('\n\nLoading from %s.\n\n' % fname)
    model = torch.load(fname)

    # make function for producing an action given a single state
    def get_action(x):
        with torch.no_grad():
            x = torch.as_tensor(x, dtype=torch.float32)
            action = model.act(x)
        return action

    return get_action
Example 10: loadmodel
# Required import: import joblib
# Or: from joblib import load
def loadmodel(self, nameprefix):
    """Load the classification model together with the topic model.

    :param nameprefix: prefix of the paths of the model files
    :return: None
    :type nameprefix: str
    """
    self.topicmodeler.loadmodel(nameprefix)
    self.classifier = joblib.load(nameprefix + '.pkl')
    # for backward compatibility: shorttext < 1.0.0 does not have _classlabels.txt
    if os.path.exists(nameprefix + '_classlabels.txt'):
        labelfile = open(nameprefix + '_classlabels.txt', 'r')
        self.classlabels = [s.strip() for s in labelfile.readlines()]
        labelfile.close()
    else:
        self.classlabels = self.topicmodeler.classlabels
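The matching savemodel presumably mirrors this layout; a hypothetical sketch:

def savemodel(self, nameprefix):
    # Persist the topic model, the classifier, and the class labels
    # under a shared filename prefix, mirroring loadmodel above.
    self.topicmodeler.savemodel(nameprefix)
    joblib.dump(self.classifier, nameprefix + '.pkl')
    with open(nameprefix + '_classlabels.txt', 'w') as labelfile:
        labelfile.write('\n'.join(self.classlabels))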
Example 11: test_calculate_illumination_raster
# Required import: import joblib
# Or: from joblib import load
def test_calculate_illumination_raster(monkeypatch):
    # The generate latlon array function is massively time-consuming.
    # This replaces it with precomputed data.
    def mock_latlon(foo, bar, baz):
        lat = joblib.load("test_data/lat_array_indo")
        lon = joblib.load("test_data/lon_array_indo")
        return lat, lon

    monkeypatch.setattr(terrain_correction, "_generate_latlon_arrays", mock_latlon)
    os.chdir(pathlib.Path(__file__).parent)
    dem_path = "test_data/dem_test_indonesia.tif"
    raster_timezone = pytz.timezone("Asia/Jakarta")
    raster_datetime = dt.datetime(2019, 6, 1, 12, 0, 0, tzinfo=raster_timezone)
    out_path = "test_outputs/illumination_indonesia.tif"
    terrain_correction.calculate_illumination_condition_array(dem_path, raster_datetime, out_path)
Example 12: load_local_or_remote_file
# Required import: import joblib
# Or: from joblib import load
def load_local_or_remote_file(filepath, file_type=None):
    local_path = local_path_from_s3_or_local_path(filepath)
    if local_path is None:
        return None
    # Only infer the type when the caller did not specify one, so an
    # explicit JOBLIB request reaches the joblib.load branch below.
    if file_type is None:
        extension = local_path.split('.')[-1]
        if extension == 'npy':
            file_type = NUMPY
        else:
            file_type = PICKLE
    if file_type == NUMPY:
        obj = np.load(open(local_path, "rb"))
    elif file_type == JOBLIB:
        obj = joblib.load(local_path)
    else:
        obj = pickle.load(open(local_path, "rb"))
    print("loaded", local_path)
    return obj
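NUMPY, PICKLE and JOBLIB are tags defined elsewhere in the module, presumably simple string constants such as (hypothetical):

# Hypothetical constants matching the file_type tags used above.
NUMPY = 'numpy'
PICKLE = 'pickle'
JOBLIB = 'joblib'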
Example 13: main
# Required import: import joblib
# Or: from joblib import load
def main():
    cap = cv2.VideoCapture(0)
    face_recogniser = joblib.load(MODEL_PATH)
    preprocess = preprocessing.ExifOrientationNormalize()
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        img = Image.fromarray(frame)
        faces = face_recogniser(preprocess(img))
        if faces is not None:
            draw_bb_on_img(faces, img)
        # Display the resulting frame
        cv2.imshow('video', np.array(img))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Example 14: _copy_model
# Required import: import joblib
# Or: from joblib import load
def _copy_model(self, dest_dir):
    """Copies the files needed to recreate a DeepChem NN model from the current model
    directory to a destination directory.

    Args:
        dest_dir (str): The destination directory for the model files
    """
    chkpt_file = os.path.join(self.model_dir, 'checkpoint')
    with open(chkpt_file, 'r') as chkpt_in:
        # The checkpoint index is plain YAML, so safe_load suffices.
        chkpt_dict = yaml.safe_load(chkpt_in.read())
    chkpt_prefix = chkpt_dict['model_checkpoint_path']
    files = [chkpt_file]
    files.append(os.path.join(self.model_dir, 'model.pickle'))
    files.append(os.path.join(self.model_dir, '%s.index' % chkpt_prefix))
    files.append(os.path.join(self.model_dir, '%s.meta' % chkpt_prefix))
    files = files + glob.glob(os.path.join(self.model_dir, '%s.data-*' % chkpt_prefix))
    self._clean_up_excess_files(dest_dir)
    for file in files:
        shutil.copy2(file, dest_dir)
    self.log.info("Saved model files to '%s'" % dest_dir)
Example 15: save
# Required import: import joblib
# Or: from joblib import load
def save(self, filename, ensure_compatibility=True):
    """
    Pickle a class instance, e.g. corex.save('saved.pkl').

    When ensure_compatibility is True, self.words is reset before pickling to
    avoid the Unicode issues usually seen when loading the pickle from a
    Python 2 implementation. Set it to False if you know the model will only
    be loaded under Python 3, since self.words is required for fetching
    topics via get_topics().
    """
    # Avoid saving words with the object.
    # TODO: figure out why Unicode sometimes causes an issue with loading after pickling
    temp_words = self.words
    if ensure_compatibility and (self.words is not None):
        self.words = None

    # Save the CorEx object.
    import pickle
    if path.dirname(filename) and not path.exists(path.dirname(filename)):
        makedirs(path.dirname(filename))
    pickle.dump(self, open(filename, 'wb'), protocol=-1)

    # Restore words to the CorEx object.
    self.words = temp_words
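A matching loader is the natural complement. A minimal sketch, assuming nothing beyond standard pickling:

import pickle

def load(filename):
    # Hypothetical counterpart to save(): restore a pickled CorEx
    # instance from disk.
    with open(filename, 'rb') as f:
        return pickle.load(f)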