This article collects typical usage examples of the Python method numpy.save. If you have been wondering what exactly numpy.save does, how to call it, or where to see it used in practice, the curated code examples below may help. You can also explore the other methods of the numpy
module.
The following section shows 15 code examples of numpy.save, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
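For orientation before the examples, here is the basic np.save / np.load round trip as a minimal sketch (the file name is illustrative):

import numpy as np

arr = np.arange(6).reshape(2, 3)
np.save('arr.npy', arr)      # np.save appends '.npy' if the path lacks it
loaded = np.load('arr.npy')  # dtype and shape round-trip exactly
assert np.array_equal(arr, loaded)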
Example 1: _serialize_data
# Required import: import numpy [as alias]
# Or: from numpy import save [as alias]
def _serialize_data(self, data):
    # Default to raw bytes
    type_ = _BYTES
    if isinstance(data, np.ndarray):
        # When the data is a numpy array, use the more compact native
        # numpy format.
        buf = io.BytesIO()
        np.save(buf, data)
        data = buf.getvalue()
        type_ = _NUMPY
    elif not isinstance(data, (bytearray, bytes)):
        # Everything else except byte data is serialized in pickle format.
        data = pickle.dumps(data)
        type_ = _PICKLE
    if self.compress:
        # Optional compression
        data = lz4.frame.compress(data)
    return type_, data
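The matching deserializer is not part of this snippet; a minimal sketch of the reverse path, assuming the _BYTES/_NUMPY/_PICKLE tags and the lz4 compression used above, might look like this:

import io
import pickle

import lz4.frame
import numpy as np

_BYTES, _NUMPY, _PICKLE = 0, 1, 2  # hypothetical tag values

def _deserialize_data(type_, data, compress=True):
    # Reverse of _serialize_data: decompress first, then dispatch on the tag.
    if compress:
        data = lz4.frame.decompress(data)
    if type_ == _NUMPY:
        return np.load(io.BytesIO(data))
    if type_ == _PICKLE:
        return pickle.loads(data)
    return data  # raw bytes pass through unchanged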
Example 2: visualize
# Required import: import numpy [as alias]
# Or: from numpy import save [as alias]
def visualize(m, story_buckets, wordlist, answerlist, output_format, outputdir,
              batch_size=1, seq_len=5, debugmode=False, snap=False):
    cur_bucket = random.choice(story_buckets)
    sampled_batch = sample_batch(cur_bucket, batch_size, len(answerlist), output_format)
    part_sampled_batch = sampled_batch[:3]
    with open(os.path.join(outputdir, 'stories.txt'), 'w') as f:
        ggtnn_graph_parse.print_batch(part_sampled_batch, wordlist, answerlist, file=f)
    with open(os.path.join(outputdir, 'answer_list.txt'), 'w') as f:
        f.write('\n'.join(answerlist) + '\n')
    if debugmode:
        args = sampled_batch
        fn = m.debug_test_fn
    else:
        args = part_sampled_batch[:2] + ((seq_len,) if output_format == model.ModelOutputFormat.sequence else ())
        fn = m.snap_test_fn if snap else m.fuzzy_test_fn
    results = fn(*args)
    for i, result in enumerate(results):
        np.save(os.path.join(outputdir, 'result_{}.npy'.format(i)), result)
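The arrays written at the end can be reloaded later for inspection; a minimal sketch, assuming outputdir is the same directory as above:

import glob
import os

import numpy as np

results = [np.load(p)
           for p in sorted(glob.glob(os.path.join(outputdir, 'result_*.npy')))]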
Example 3: serialize_ndarray_npy
# Required import: import numpy [as alias]
# Or: from numpy import save [as alias]
def serialize_ndarray_npy(o):
    """
    Serializes a :obj:`numpy.ndarray` using numpy's built-in :obj:`save` function.
    This produces totally unreadable (and very un-JSON-like) results (in "npy"
    format), but it's basically guaranteed to work in 100% of cases.

    Args:
        o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    with io.BytesIO() as f:
        np.save(f, o)
        f.seek(0)
        serialized = json.dumps(f.read().decode('latin-1'))
    return dict(
        _type='np.ndarray',
        npy=serialized)
Example 4: deserialize_ndarray_npy
# Required import: import numpy [as alias]
# Or: from numpy import save [as alias]
def deserialize_ndarray_npy(d):
    """
    Deserializes a JSONified :obj:`numpy.ndarray` that was created using numpy's
    :obj:`save` function.

    Args:
        d (:obj:`dict`): A dictionary representation of an :obj:`ndarray` object,
            created using :obj:`numpy.save`.

    Returns:
        An :obj:`ndarray` object.
    """
    with io.BytesIO() as f:
        f.write(json.loads(d['npy']).encode('latin-1'))
        f.seek(0)
        return np.load(f)
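Examples 3 and 4 together give a JSON-safe round trip; a quick usage sketch, assuming both functions are in scope:

import json

import numpy as np

arr = np.random.rand(3, 4).astype(np.float32)
payload = json.dumps(serialize_ndarray_npy(arr))         # plain JSON string
restored = deserialize_ndarray_npy(json.loads(payload))
assert np.array_equal(arr, restored)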
Example 5: extract_mnist_data
# Required import: import numpy [as alias]
# Or: from numpy import save [as alias]
def extract_mnist_data(filename, num_images, image_size, pixel_depth):
    """
    Extract the images into a 4D tensor [image index, y, x, channels].
    Values are rescaled from [0, 255] down to [-0.5, 0.5].
    """
    if not tf.gfile.Exists(filename + ".npy"):
        with gzip.open(filename) as bytestream:
            bytestream.read(16)  # skip the 16-byte IDX header
            buf = bytestream.read(image_size * image_size * num_images)
            data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
            data = (data - (pixel_depth / 2.0)) / pixel_depth
            data = data.reshape(num_images, image_size, image_size, 1)
            np.save(filename, data)  # np.save appends ".npy" to the path
            return data
    else:
        # .npy files are binary, so the cached file must be opened in 'rb' mode.
        with tf.gfile.Open(filename + ".npy", mode='rb') as file_obj:
            return np.load(file_obj)
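A typical call for the standard MNIST training set looks like the following (the file name and the 60000/28/255 constants are the usual MNIST values, assumed here):

train_data = extract_mnist_data('train-images-idx3-ubyte.gz', 60000, 28, 255)
print(train_data.shape)  # (60000, 28, 28, 1)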
Example 6: extractMeanDataStats
# Required import: import numpy [as alias]
# Or: from numpy import save [as alias]
def extractMeanDataStats(size=[200, 200, 100],
                         postfix='_200x200x100orig',
                         main_folder_path='../../Data/MS2017b/',
                         ):
    scan_folders = glob.glob(main_folder_path + 'scans/*')
    img_path = 'pre/FLAIR' + postfix + '.nii.gz'
    segm_path = 'wmh' + postfix + '.nii.gz'
    shape_ = [len(scan_folders), size[0], size[1], size[2]]
    arr = np.zeros(shape_)
    for i, sf in enumerate(scan_folders):
        arr[i, :, :, :] = numpyFromScan(os.path.join(sf, img_path)).squeeze()
    arr /= len(scan_folders)
    means = np.mean(arr)
    stds = np.std(arr, axis=0)
    np.save(main_folder_path + 'extra_data/std' + postfix, stds)
    np.save(main_folder_path + 'extra_data/mean' + postfix, means)
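The saved statistics can be reloaded later for normalization; a minimal sketch (the paths mirror the save calls above, and np.save has appended the '.npy' extension):

import numpy as np

postfix = '_200x200x100orig'
main_folder_path = '../../Data/MS2017b/'
mean = np.load(main_folder_path + 'extra_data/mean' + postfix + '.npy')
std = np.load(main_folder_path + 'extra_data/std' + postfix + '.npy')
# e.g. normalized = (scan - mean) / (std + 1e-8)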
Example 7: generateImgSlicesFolder
# Required import: import numpy [as alias]
# Or: from numpy import save [as alias]
def generateImgSlicesFolder(data_folder='../Data/MS2017a/scans/'):
    scan_folders = glob.glob(data_folder + '*')
    for sf in scan_folders:
        slice_dir_path = os.path.join(sf, 'slices/')
        if not os.path.exists(slice_dir_path):
            print('Creating directory at:', slice_dir_path)
            os.makedirs(slice_dir_path)
        img = nib.load(os.path.join(sf, 'pre/FLAIR.nii.gz'))
        img_np = img.get_data()
        img_affine = img.affine
        print(sf)
        print('The img shape', img_np.shape[2])
        for i in range(img_np.shape[2]):
            # Save each axial slice as its own single-slice NIfTI file.
            slice_img_np = img_np[:, :, i]
            nft_img = nib.Nifti1Image(slice_img_np, img_affine)
            nib.save(nft_img, slice_dir_path + 'FLAIR_' + str(i) + '.nii.gz')
            if os.path.basename(sf) == '0':
                slice_img = nib.load(slice_dir_path + 'FLAIR_' + str(i) + '.nii.gz').get_data() / 5
                print('Writing to', str(i) + '.jpg')
Example 8: generateGTSlicesFolder
# Required import: import numpy [as alias]
# Or: from numpy import save [as alias]
def generateGTSlicesFolder(data_folder='../Data/MS2017a/scans/'):
    scan_folders = glob.glob(data_folder + '*')
    for sf in scan_folders:
        slice_dir_path = os.path.join(sf, 'gt_slices/')
        if not os.path.exists(slice_dir_path):
            print('Creating directory at:', slice_dir_path)
            os.makedirs(slice_dir_path)
        img = nib.load(os.path.join(sf, 'wmh.nii.gz'))
        img_np = img.get_data()
        img_affine = img.affine
        print(sf)
        print('The img shape', img_np.shape[2])
        for i in range(img_np.shape[2]):
            slice_img_np = img_np[:, :, i]
            nft_img = nib.Nifti1Image(slice_img_np, img_affine)
            nib.save(nft_img, slice_dir_path + 'wmh_' + str(i) + '.nii.gz')
            if os.path.basename(sf) == '0':
                slice_img = nib.load(slice_dir_path + 'wmh_' + str(i) + '.nii.gz').get_data() * 256
                # cv2.imwrite('temp/' + str(i) + '.jpg', slice_img)
Example 9: generateTrainValFile_Slices
# Required import: import numpy [as alias]
# Or: from numpy import save [as alias]
def generateTrainValFile_Slices(train_fraction, main_folder='../Data/MS2017a/'):
    train_folders, val_folders = splitTrainVal_Slices(train_fraction)
    train_folder_names = [train_folders[i].split(main_folder)[1] for i in range(len(train_folders))]
    val_folder_names = [val_folders[i].split(main_folder)[1] for i in range(len(val_folders))]
    f_train = open(main_folder + 'train_slices.txt', 'w+')
    f_val = open(main_folder + 'val_slices.txt', 'w+')
    for fn in train_folder_names:
        f_train.write(fn + '\n')
    for fn in val_folder_names:
        f_val.write(fn + '\n')
    f_train.close()
    f_val.close()

# Use this to save the images quickly (for testing purposes)
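The split files written above are consumed by reading one relative path per line; a minimal sketch, assuming the same main_folder:

main_folder = '../Data/MS2017a/'
with open(main_folder + 'train_slices.txt') as f:
    train_names = [line.strip() for line in f if line.strip()]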
Example 10: next_batch
# Required import: import numpy [as alias]
# Or: from numpy import save [as alias]
def next_batch(self, whichSet='train'):
    if whichSet == 'train':
        self.trainBatchCnt += 1
        assert self.trainBatchCnt < self.trainMaxBatch
        return self.train[self.trainBatchCnt * self.batch_size: (self.trainBatchCnt + 1) * self.batch_size]
    elif whichSet == 'validation':
        self.validationBatchCnt += 1
        assert self.validationBatchCnt < self.validationMaxBatch
        return self.validation[self.validationBatchCnt * self.batch_size: (self.validationBatchCnt + 1) * self.batch_size]
    elif whichSet == 'test':
        self.testBatchCnt += 1
        assert self.testBatchCnt < self.testMaxBatch
        return self.test[self.testBatchCnt * self.batch_size: (self.testBatchCnt + 1) * self.batch_size]
    else:
        msg = ('Wrong set name!\n'
               'Should be train / validation / test.')
        raise Exception(msg)

# The following code was copied from:
# https://stackoverflow.com/questions/17219481/save-to-file-and-load-an-instance-of-a-python-class-with-its-attributes
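A hedged usage sketch for next_batch (the loader object is hypothetical, and the counters are assumed to start at -1 so that the first call returns batch 0):

loader.trainBatchCnt = -1  # rewind before each epoch (assumed convention)
for _ in range(loader.trainMaxBatch):
    batch = loader.next_batch('train')  # consecutive batch_size-sized slices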
Example 11: pred_test_fold
# Required import: import numpy [as alias]
# Or: from numpy import save [as alias]
def pred_test_fold(predictor, fold, test_data):
    fold_prediction_dir = PREDICTION_DIR / f'fold_{fold}' / 'test'
    fold_prediction_dir.mkdir(parents=True, exist_ok=True)
    fname_lst, images_lst = test_data
    pred_lst = []
    for fname, image in zip(fname_lst, images_lst):
        pred = predictor.predict(image)
        pred_path = fold_prediction_dir / f'{fname}.npy'
        np.save(pred_path, pred)
        pred = pred.mean(axis=0)
        pred_lst.append(pred)
    preds = np.stack(pred_lst, axis=0)
    subm_df = pd.DataFrame(data=preds,
                           index=fname_lst,
                           columns=config.classes)
    subm_df.index.name = 'fname'
    subm_df.to_csv(fold_prediction_dir / 'probs.csv')
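Once every fold has been predicted, the per-fold probs.csv files can be averaged into a simple ensemble; a hedged sketch (the fold count and output path are assumptions, and PREDICTION_DIR is the same constant as above):

import pandas as pd

n_folds = 5  # assumed number of folds
dfs = [pd.read_csv(PREDICTION_DIR / f'fold_{fold}' / 'test' / 'probs.csv',
                   index_col='fname')
       for fold in range(n_folds)]
mean_probs = sum(dfs) / n_folds  # element-wise mean over aligned indices
mean_probs.to_csv(PREDICTION_DIR / 'mean_probs.csv')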
Example 12: calculate_weigths_labels
# Required import: import numpy [as alias]
# Or: from numpy import save [as alias]
def calculate_weigths_labels(dataset, dataloader, num_classes):
    # Accumulate per-class pixel counts over the whole data loader
    z = np.zeros((num_classes,))
    # Initialize tqdm
    tqdm_batch = tqdm(dataloader)
    print('Calculating classes weights')
    for sample in tqdm_batch:
        y = sample['label']
        y = y.detach().cpu().numpy()
        mask = (y >= 0) & (y < num_classes)
        labels = y[mask].astype(np.uint8)
        count_l = np.bincount(labels, minlength=num_classes)
        z += count_l
    tqdm_batch.close()
    total_frequency = np.sum(z)
    class_weights = []
    for frequency in z:
        class_weight = 1 / (np.log(1.02 + (frequency / total_frequency)))
        class_weights.append(class_weight)
    ret = np.array(class_weights)
    classes_weights_path = os.path.join(Path.db_root_dir(dataset), dataset + '_classes_weights.npy')
    np.save(classes_weights_path, ret)
    return ret
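Because the weights are cached with np.save, callers typically try the cached file first; a minimal sketch of that pattern, mirroring the path construction above (Path.db_root_dir and calculate_weigths_labels are assumed in scope):

import os

import numpy as np

def get_class_weights(dataset, dataloader, num_classes):
    path = os.path.join(Path.db_root_dir(dataset), dataset + '_classes_weights.npy')
    if os.path.isfile(path):
        return np.load(path)  # reuse the cached weights
    return calculate_weigths_labels(dataset, dataloader, num_classes)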
Example 13: MakeBaseIntegrals
# Required import: import numpy [as alias]
# Or: from numpy import save [as alias]
def MakeBaseIntegrals(self, Smh=True, MakeS=False):
    """Invoke bfint to calculate CoreEnergy (scalar), CoreH (nOrb x nOrb),
    Int2e_Frs (nFit x nOrb x nOrb), and overlap matrix (nOrb x nOrb)"""
    # assemble arguments to integral generation program
    Args = []
    if Smh:
        Args.append("--orb-trafo=Smh")
        # ^- calculate integrals in symmetrically orthogonalized AO basis
    Outputs = []
    Outputs.append(("--save-coreh", "INT1E"))
    Outputs.append(("--save-fint2e", "INT2E"))
    Outputs.append(("--save-overlap", "OVERLAP"))
    CoreH, Int2e, Overlap = self._InvokeBfint(Args, Outputs)
    nOrb = CoreH.shape[0]
    Int2e = Int2e.reshape((Int2e.shape[0], nOrb, nOrb))
    CoreEnergy = self.Atoms.fCoreRepulsion()
    if MakeS:
        return CoreEnergy, CoreH, Int2e, Overlap
    else:
        return CoreEnergy, CoreH, Int2e
Example 14: MakeOverlap
# Required import: import numpy [as alias]
# Or: from numpy import save [as alias]
def MakeOverlap(self, OrbBasis2=None):
    """calculate overlap within the main orbital basis and, optionally, between
    the main orbital basis and a second basis, as described by OrbBasis2.
    Returns <1|1>, <1|2>, and <2|2> matrices."""
    Args = []
    Outputs = []
    Outputs.append(("--save-overlap", "OVERLAP_1"))
    if OrbBasis2 is not None:
        MoreBases = {'--basis-orb-2': OrbBasis2}
        Outputs.append(("--save-overlap-12", "OVERLAP_12"))
        Outputs.append(("--save-overlap-2", "OVERLAP_2"))
        return self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
    else:
        MoreBases = None
        Overlap, = self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
        return Overlap
Example 15: MakeRaw2eIntegrals
# Required import: import numpy [as alias]
# Or: from numpy import save [as alias]
def MakeRaw2eIntegrals(self, Smh=True, Kernel2e="coulomb"):
    """compute Int2e_Frs (nFit x nOrb x nOrb) and fitting metric Int2e_FG (nFit x nFit),
    where the fitting metric is *not* absorbed into the 2e integrals."""
    # assemble arguments to integral generation program
    Args = []
    if Smh:
        Args.append("--orb-trafo=Smh")
        # ^- calculate integrals in symmetrically orthogonalized AO basis
    Args.append("--kernel2e='%s'" % Kernel2e)
    Args.append("--solve-fitting-eq=false")
    Outputs = []
    Outputs.append(("--save-fint2e", "INT2E_3IX"))
    Outputs.append(("--save-fitting-metric", "INT2E_METRIC"))
    Int2e_Frs, Int2e_FG = self._InvokeBfint(Args, Outputs)
    nOrb = int(Int2e_Frs.shape[1]**.5 + .5)
    assert(nOrb**2 == Int2e_Frs.shape[1])
    Int2e_Frs = Int2e_Frs.reshape((Int2e_Frs.shape[0], nOrb, nOrb))
    assert(Int2e_Frs.shape[0] == Int2e_FG.shape[0])
    assert(Int2e_FG.shape[0] == Int2e_FG.shape[1])
    return Int2e_FG, Int2e_Frs