This article collects typical usage examples of the numpy.savez method in Python. If you are unsure what numpy.savez does, how to call it, or how it is used in practice, the hand-picked code samples below should help. You can also browse further usage examples of other functions in the numpy module.

Fifteen code examples of numpy.savez are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
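Before the examples, here is a minimal, self-contained sketch of the basic numpy.savez workflow (the file name and array names below are arbitrary): several arrays are saved under keyword names into one .npz archive and read back with numpy.load.

import numpy as np

# Save two arrays into a single .npz archive; the keyword names become the keys.
a = np.arange(6).reshape(2, 3)
b = np.linspace(0.0, 1.0, 5)
np.savez('savez_demo.npz', features=a, weights=b)

# np.load returns a lazily-read NpzFile; index it with the names used above.
with np.load('savez_demo.npz') as archive:
    print(archive.files)        # the keyword names, e.g. ['features', 'weights']
    print(archive['features'])  # the 2x3 array saved above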
Example 1: test_consistency
# Required import: import numpy [as alias]
# Or: from numpy import savez [as alias]
def test_consistency(dump=False):
    shape = (299, 299)
    _get_model()
    _get_data(shape)
    if dump:
        _dump_images(shape)
        gt = None
    else:
        gt = {n: mx.nd.array(a) for n, a in np.load('data/inception-v3-dump.npz').items()}
    data = np.load('data/test_images_%d_%d.npy' % shape)
    sym, arg_params, aux_params = mx.model.load_checkpoint('model/Inception-7', 1)
    arg_params['data'] = data
    arg_params['softmax_label'] = np.random.randint(low=1, high=1000, size=(data.shape[0],))
    ctx_list = [{'ctx': mx.gpu(0), 'data': data.shape, 'type_dict': {'data': data.dtype}},
                {'ctx': mx.cpu(0), 'data': data.shape, 'type_dict': {'data': data.dtype}}]
    gt = check_consistency(sym, ctx_list, arg_params=arg_params, aux_params=aux_params,
                           tol=1e-3, grad_req='null', raise_on_err=False, ground_truth=gt)
    if dump:
        np.savez('data/inception-v3-dump.npz', **{n: a.asnumpy() for n, a in gt.items()})
Example 2: _flush
# Required import: import numpy [as alias]
# Or: from numpy import savez [as alias]
def _flush(self):
    """
    Flush the internal state to disk.
    """
    t1, t2 = str(time.time()).split(".")
    state_path = os.path.join(self.ep_directory, "state_{}_{}.npz".format(t1, t2))
    if hasattr(self.env, "unwrapped"):
        env_name = self.env.unwrapped.__class__.__name__
    else:
        env_name = self.env.__class__.__name__
    np.savez(
        state_path,
        states=np.array(self.states),
        action_infos=self.action_infos,
        env=env_name,
    )
    self.states = []
    self.action_infos = []
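Example 2 passes a plain Python list of per-step info dicts (action_infos) and a string (env) to numpy.savez, which wraps such non-array values in object or string arrays. Reading the object array back therefore typically needs allow_pickle=True. A minimal sketch of loading such a file (the path is hypothetical):

import numpy as np

# Hypothetical file written by _flush() above.
with np.load('episodes/state_1600000000_123.npz', allow_pickle=True) as f:
    states = f['states']              # array of recorded states
    action_infos = f['action_infos']  # object array wrapping the original list of dicts
    env_name = str(f['env'])          # 0-d string array -> plain str
print(env_name, states.shape, len(action_infos))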
Example 3: process_tab
# Required import: import numpy [as alias]
# Or: from numpy import savez [as alias]
def process_tab(fname, min_trans=MIN_TRANSCRIPTS):
    X, cells, genes = load_tab(fname)

    gt_idx = [i for i, s in enumerate(np.sum(X != 0, axis=1))
              if s >= min_trans]
    X = X[gt_idx, :]
    cells = cells[gt_idx]
    if len(gt_idx) == 0:
        print('Warning: 0 cells passed QC in {}'.format(fname))

    if fname.endswith('.txt'):
        cache_prefix = '.'.join(fname.split('.')[:-1])
    elif fname.endswith('.txt.gz'):
        cache_prefix = '.'.join(fname.split('.')[:-2])
    elif fname.endswith('.tsv'):
        cache_prefix = '.'.join(fname.split('.')[:-1])
    elif fname.endswith('.tsv.gz'):
        cache_prefix = '.'.join(fname.split('.')[:-2])
    else:
        sys.stderr.write('Tab files should end with ".txt" or ".tsv"\n')
        exit(1)

    cache_fname = cache_prefix + '.npz'
    np.savez(cache_fname, X=X, genes=genes)

    return X, cells, genes
Example 4: save_load_means
# Required import: import numpy [as alias]
# Or: from numpy import savez [as alias]
def save_load_means(means_filename, image_filenames, recalculate=False):
    '''
    Calculate and save the means of the RGB channels of an image dataset if the means file does not exist;
    otherwise read the means directly from the means file.
    means_filename: npz filename for the image channel means
    image_filenames: list of image filenames
    recalculate: recalculate the image channel means regardless of whether the means file exists
    '''
    if (not os.path.isfile(means_filename)) or recalculate:
        print('Calculating pixel means for each channel of images...')
        channel_means = image_channel_means(image_filenames=image_filenames)
        np.savez(means_filename, channel_means=channel_means)
    else:
        channel_means = np.load(means_filename)['channel_means']

    return channel_means
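The function above is essentially an .npz-based cache for an expensive computation. A self-contained toy variant of the same pattern (the file name and the stand-in mean computation are made up for illustration):

import os
import numpy as np

def cached_channel_means(cache_file, images):
    # Compute per-channel means once, then reuse the cached .npz on later calls.
    if os.path.isfile(cache_file):
        return np.load(cache_file)['channel_means']
    means = np.stack(images).mean(axis=(0, 1, 2))  # toy stand-in for image_channel_means()
    np.savez(cache_file, channel_means=means)
    return means

images = [np.random.rand(4, 4, 3) for _ in range(10)]  # fake RGB "images"
print(cached_channel_means('channel_means_demo.npz', images))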
Example 5: test_savez_filename_clashes
# Required import: import numpy [as alias]
# Or: from numpy import savez [as alias]
def test_savez_filename_clashes(self):
    # Test that issue #852 is fixed
    # and that savez works in a multithreaded environment
    def writer(error_list):
        with temppath(suffix='.npz') as tmp:
            arr = np.random.randn(500, 500)
            try:
                np.savez(tmp, arr=arr)
            except OSError as err:
                error_list.append(err)

    errors = []
    threads = [threading.Thread(target=writer, args=(errors,))
               for j in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    if errors:
        raise AssertionError(errors)
Example 6: test_closing_fid
# Required import: import numpy [as alias]
# Or: from numpy import savez [as alias]
def test_closing_fid(self):
    # Test that issue #1517 (too many opened files) remains closed.
    # It might be a "weak" test since it failed to get triggered on
    # e.g. Debian sid of 2012 Jul 05, but was reported to
    # trigger the failure on Ubuntu 10.04:
    # http://projects.scipy.org/numpy/ticket/1517#comment:2
    with temppath(suffix='.npz') as tmp:
        np.savez(tmp, data='LOVELY LOAD')
        # We need to check if the garbage collector can properly close
        # the numpy npz file returned by np.load when its reference count
        # goes to zero.  Python 3 running in debug mode raises a
        # ResourceWarning when file closing is left to the garbage
        # collector, so we catch the warnings.  Because ResourceWarning
        # is unknown in Python < 3.x, we take the easy way out and
        # catch all warnings.
        with suppress_warnings() as sup:
            sup.filter(Warning)  # TODO: specify exact message
            for i in range(1, 1025):
                try:
                    np.load(tmp)["data"]
                except Exception as e:
                    msg = "Failed to load data from a file: %s" % e
                    raise AssertionError(msg)
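The test above deliberately leaves file closing to the garbage collector. In ordinary user code it is cleaner to close the NpzFile explicitly; it supports the context-manager protocol, as this short sketch shows:

import numpy as np
from io import BytesIO

buf = BytesIO()
np.savez(buf, data='LOVELY LOAD')
buf.seek(0)

# The `with` block closes the underlying file handle deterministically.
with np.load(buf) as npz:
    print(npz['data'])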
Example 7: test_npzfile_dict
# Required import: import numpy [as alias]
# Or: from numpy import savez [as alias]
def test_npzfile_dict():
    s = BytesIO()
    x = np.zeros((3, 3))
    y = np.zeros((3, 3))
    np.savez(s, x=x, y=y)
    s.seek(0)

    z = np.load(s)

    assert_('x' in z)
    assert_('y' in z)
    assert_('x' in z.keys())
    assert_('y' in z.keys())

    for f, a in z.items():
        assert_(f in ['x', 'y'])
        assert_equal(a.shape, (3, 3))

    assert_(len(z.items()) == 2)

    for f in z:
        assert_(f in ['x', 'y'])

    assert_('x' in z.keys())
Example 8: test_large_archive
# Required import: import numpy [as alias]
# Or: from numpy import savez [as alias]
def test_large_archive():
    # Regression test for saving an array whose number of elements
    # does not fit in an int32.  See gh-7598 for details.
    try:
        a = np.empty((2**30, 2), dtype=np.uint8)
    except MemoryError:
        pytest.skip("Could not create large file")

    fname = os.path.join(tempdir, "large_archive")

    with open(fname, "wb") as f:
        np.savez(f, arr=a)

    with open(fname, "rb") as f:
        new_a = np.load(f)["arr"]

    assert_(a.shape == new_a.shape)
Example 9: save
# Required import: import numpy [as alias]
# Or: from numpy import savez [as alias]
def save(self, save_dir):
    """Save trajectories to save_dir in NumPy compressed-array format, per-agent.

    Our format consists of a dictionary with keys -- e.g. 'observations', 'actions'
    and 'rewards' -- containing lists of NumPy arrays, one for each episode.

    :param save_dir: (str) path to save trajectories; will create directory if needed.
    :return: list of paths to the saved per-agent files.
    """
    os.makedirs(save_dir, exist_ok=True)
    save_paths = []
    for dict_idx, agent_idx in enumerate(self.agent_indices):
        agent_dicts = self.full_traj_dicts[dict_idx]
        dump_dict = {k: np.asarray(v) for k, v in agent_dicts.items()}

        save_path = os.path.join(save_dir, f"agent_{agent_idx}.npz")
        np.savez(save_path, **dump_dict)
        save_paths.append(save_path)
    return save_paths
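Because each value saved above is np.asarray() of a list of per-episode arrays, episodes of different lengths end up in object arrays, so loading them back typically requires allow_pickle=True. A sketch of reading one of these per-agent files (the path is hypothetical):

import numpy as np

with np.load('trajectories/agent_0.npz', allow_pickle=True) as traj:
    for key in traj.files:    # e.g. 'observations', 'actions', 'rewards'
        episodes = traj[key]  # object array if episode lengths differ
        print(key, len(episodes), 'episodes')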
Example 10: test_load_refcount
# Required import: import numpy [as alias]
# Or: from numpy import savez [as alias]
def test_load_refcount():
    # Check that objects returned by np.load are directly freed based on
    # their refcount, rather than needing the gc to collect them.
    f = BytesIO()
    np.savez(f, [1, 2, 3])
    f.seek(0)

    assert_(gc.isenabled())
    gc.disable()
    try:
        gc.collect()
        np.load(f)
        # gc.collect returns the number of unreachable objects in cycles that
        # were found -- we are checking that no cycles were created by np.load
        n_objects_in_cycles = gc.collect()
    finally:
        gc.enable()
    assert_equal(n_objects_in_cycles, 0)
Example 11: test_large_archive
# Required import: import numpy [as alias]
# Or: from numpy import savez [as alias]
def test_large_archive():
    # Regression test for saving an array whose number of elements
    # does not fit in an int32.  See gh-7598 for details.
    try:
        a = np.empty((2**30, 2), dtype=np.uint8)
    except MemoryError:
        raise SkipTest("Could not create large file")

    fname = os.path.join(tempdir, "large_archive")

    with open(fname, "wb") as f:
        np.savez(f, arr=a)

    with open(fname, "rb") as f:
        new_a = np.load(f)["arr"]

    assert_(a.shape == new_a.shape)
Example 12: savez_compressed
# Required import: import numpy [as alias]
# Or: from numpy import savez [as alias]
def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into a single file in compressed ``.npz`` format.

    If keyword arguments are given, then the stored names are taken from the
    keywords.  If arguments are passed in with no keywords, then the stored
    names are arr_0, arr_1, etc.

    Parameters
    ----------
    file : str
        File name of the ``.npz`` file.
    args : Arguments
        Function arguments.
    kwds : Keyword arguments
        Keywords.

    See Also
    --------
    numpy.savez : Save several arrays into an uncompressed ``.npz`` file format.
    numpy.load : Load the files created by savez_compressed.
    """
    _savez(file, args, kwds, True)
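As the docstring notes, keyword arrays keep their names while positional arrays are stored as arr_0, arr_1, and so on. A short illustration of that naming rule using NumPy's own np.savez_compressed (the file name is arbitrary):

import numpy as np

a = np.ones((2, 2))
b = np.zeros(3)
np.savez_compressed('mixed_demo.npz', a, weights=b)  # one positional, one keyword

with np.load('mixed_demo.npz') as f:
    print(sorted(f.files))  # ['arr_0', 'weights']
    print(f['arr_0'])
    print(f['weights'])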
Example 13: extract_features_wrapper
# Required import: import numpy [as alias]
# Or: from numpy import savez [as alias]
def extract_features_wrapper(paths, path2gt, model='vggish', save_as=False):
    """Wrapper function for extracting features (MusiCNN, VGGish or OpenL3) per batch.
       If a save_as string argument is passed, the features will be saved in
       the specified file.
    """
    if model == 'vggish':
        feature_extractor = extract_vggish_features
    elif model == 'openl3' or model == 'musicnn':
        feature_extractor = extract_other_features
    else:
        raise NotImplementedError('Current implementation only supports MusiCNN, VGGish and OpenL3 features')

    batch_size = config['batch_size']
    first_batch = True
    for batch_id in tqdm(range(ceil(len(paths)/batch_size))):
        batch_paths = paths[(batch_id)*batch_size:(batch_id+1)*batch_size]
        [x, y, refs] = feature_extractor(batch_paths, path2gt, model)
        if first_batch:
            [X, Y, IDS] = [x, y, refs]
            first_batch = False
        else:
            X = np.concatenate((X, x), axis=0)
            Y = np.concatenate((Y, y), axis=0)
            IDS = np.concatenate((IDS, refs), axis=0)

    if save_as:  # save data to file
        # create a directory in which to store the extracted training features
        audio_representations_folder = DATA_FOLDER + 'audio_representations/'
        if not os.path.exists(audio_representations_folder):
            os.makedirs(audio_representations_folder)
        np.savez(audio_representations_folder + save_as, X=X, Y=Y, IDS=IDS)
        print('Audio features stored: ', save_as)

    return [X, Y, IDS]
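Reading the stored features back is just a matter of indexing the archive by the same keys. The file name below is hypothetical, and allow_pickle=True is only needed if IDS ended up as an object array:

import numpy as np

feats = np.load('audio_representations/train_vggish_features.npz', allow_pickle=True)
X, Y, IDS = feats['X'], feats['Y'], feats['IDS']
print(X.shape, Y.shape, IDS.shape)
feats.close()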
Example 14: load_dataset
# Required import: import numpy [as alias]
# Or: from numpy import savez [as alias]
def load_dataset(path):
    download_dataset(path)

    # training data
    data = [np.load(os.path.join(path, 'cifar-10-batches-py',
                                 'data_batch_%d' % (i + 1))) for i in range(5)]
    X_train = np.vstack([d['data'] for d in data])
    y_train = np.hstack([np.asarray(d['labels'], np.int8) for d in data])

    # test data
    data = np.load(os.path.join(path, 'cifar-10-batches-py', 'test_batch'))
    X_test = data['data']
    y_test = np.asarray(data['labels'], np.int8)

    # reshape
    X_train = X_train.reshape(-1, 3, 32, 32)
    X_test = X_test.reshape(-1, 3, 32, 32)

    # normalize: load cached channel statistics, or compute and cache them
    try:
        mean_std = np.load(os.path.join(path, 'cifar-10-mean_std.npz'))
        mean = mean_std['mean']
        std = mean_std['std']
    except IOError:
        mean = X_train.mean(axis=(0, 2, 3), keepdims=True).astype(np.float32)
        std = X_train.std(axis=(0, 2, 3), keepdims=True).astype(np.float32)
        np.savez(os.path.join(path, 'cifar-10-mean_std.npz'),
                 mean=mean, std=std)
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std

    return X_train, y_train, X_test, y_test
Example 15: runner
# Required import: import numpy [as alias]
# Or: from numpy import savez [as alias]
def runner(env, policy_func, load_model_path, timesteps_per_batch, number_trajs,
           stochastic_policy, save=False, reuse=False):

    # Setup network
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func("pi", ob_space, ac_space, reuse=reuse)
    U.initialize()
    # Prepare for rollouts
    # ----------------------------------------
    U.load_state(load_model_path)

    obs_list = []
    acs_list = []
    len_list = []
    ret_list = []
    for _ in tqdm(range(number_trajs)):
        traj = traj_1_generator(pi, env, timesteps_per_batch, stochastic=stochastic_policy)
        obs, acs, ep_len, ep_ret = traj['ob'], traj['ac'], traj['ep_len'], traj['ep_ret']
        obs_list.append(obs)
        acs_list.append(acs)
        len_list.append(ep_len)
        ret_list.append(ep_ret)
    if stochastic_policy:
        print('stochastic policy:')
    else:
        print('deterministic policy:')
    if save:
        filename = load_model_path.split('/')[-1] + '.' + env.spec.id
        np.savez(filename, obs=np.array(obs_list), acs=np.array(acs_list),
                 lens=np.array(len_list), rets=np.array(ret_list))
    avg_len = sum(len_list) / len(len_list)
    avg_ret = sum(ret_list) / len(ret_list)
    print("Average length:", avg_len)
    print("Average return:", avg_ret)
    return avg_len, avg_ret

# Sample one trajectory (until trajectory end)