This article collects typical code examples of the Python method sklearn.cross_decomposition.PLSRegression.predict. If you have been wondering how PLSRegression.predict works, how to call it, or what real uses of it look like, the curated examples below should help. You can also explore further usage examples
of its containing class, sklearn.cross_decomposition.PLSRegression.
The following presents 15 code examples of PLSRegression.predict, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
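Before the numbered examples, here is a minimal, self-contained sketch of the basic fit/predict cycle (the synthetic data and the choice of n_components=2 are purely illustrative):
import numpy as np
from sklearn.cross_decomposition import PLSRegression

X = np.random.normal(size=(100, 5))        # 100 samples, 5 predictors
y = X[:, 0] - 2 * X[:, 1] + np.random.normal(size=100)
pls = PLSRegression(n_components=2)
pls.fit(X, y)
y_hat = pls.predict(X)                     # returns an (n_samples, 1) array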
Example 1: __init__
# Module to import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Alternatively: from sklearn.cross_decomposition.PLSRegression import predict [as alias]
class PLSPredictor:
    def __init__(self):
        self.pls2 = PLSRegression(n_components=2,
                                  scale=True,
                                  max_iter=500,
                                  tol=1e-06,
                                  copy=True)

    def predict(self, values):
        return self.pls2.predict(values)

    def train(self, measured_values, screen_points):
        self.pls2.fit(measured_values, screen_points)
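A hypothetical usage of the wrapper above (the arrays here are invented for illustration; the original calibration data is not shown):
predictor = PLSPredictor()
measured_values = np.random.normal(size=(50, 4))   # e.g. raw sensor readings
screen_points = np.random.normal(size=(50, 2))     # e.g. target screen coordinates
predictor.train(measured_values, screen_points)
points = predictor.predict(measured_values[:5])    # predictions for five new samples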
Example 2: PLSCrossValidation
# Module to import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Alternatively: from sklearn.cross_decomposition.PLSRegression import predict [as alias]
def PLSCrossValidation(n_components, trainSet, validationSet):
    pls = PLSRegression(n_components=n_components)
    pls.fit(trainSet[predictorList], trainSet['Apps'])
    predictPls = pls.predict(validationSet[predictorList])
    different = predictPls.ravel() - validationSet['Apps']
    error_rate = np.mean(different ** 2)
    return error_rate
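A hypothetical driver for the function above, sweeping the number of components (it assumes trainSet, validationSet and predictorList exist as in the original script):
errors = [PLSCrossValidation(n, trainSet, validationSet) for n in range(1, 11)]
best_n = 1 + int(np.argmin(errors))   # component count with the lowest validation MSE
print("best n_components:", best_n)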
Example 3: hacerPLS
# Module to import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Alternatively: from sklearn.cross_decomposition.PLSRegression import predict [as alias]
def hacerPLS(X, Y):
    pls_wild_b = PLSRegression(n_components=9)
    pls_wild_b.fit(X, Y)
    Z = pls_wild_b.transform(X)
    scores = list()
    scores_std = list()
    n_features = np.shape(X)[1]
    X, X_test_tot, Y, Y_test_tot = cross_validation.train_test_split(X, Y, test_size=0.5, random_state=0)
    N = np.shape(X)[0]
    for num_comp in range(n_features):
        kf = KFold(N, n_folds=10)
        aux_scores = list()
        for train, test in kf:
            X_train, X_test, y_train, y_test = X[train], X[test], Y[train], Y[test]
            if num_comp == 0:
                # baseline: predict the mean of the test fold
                y_pred = np.mean(y_test)
                y_pred = y_pred * np.ones(np.shape(y_test))
                aux_scores.append(metrics.mean_squared_error(y_test, y_pred))
            else:
                pls_foo = PLSRegression(n_components=num_comp)
                pls_foo.fit(X_train, y_train)
                y_pred = pls_foo.predict(X_test)
                # obtain the CV score for this fold
                this_score = metrics.mean_squared_error(y_test, y_pred)
                aux_scores.append(this_score)
        scores.append(np.mean(aux_scores))
        scores_std.append(np.std(aux_scores))
    plt.plot(scores)
    plt.xlabel('Componentes')
    plt.ylabel("$MSE$")
    plt.title("Animales PLS")
    plt.show()
    num_comp = np.argmin(scores)
    # fit the final model with the CV-selected number of components
    pls_pred = PLSRegression(n_components=max(num_comp, 1))
    pls_pred.fit(X, Y)
    y_pred_test = pls_pred.predict(X_test_tot)
    print("MSE test = " + str(metrics.mean_squared_error(Y_test_tot, y_pred_test)))
Example 4: Training
# Module to import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Alternatively: from sklearn.cross_decomposition.PLSRegression import predict [as alias]
def Training(df, seed, yratio, xratio, index=1):
    snp_matrix = np.array(df.values)
    xdim, ydim = snp_matrix.shape
    ydimlist = list(range(ydim))
    xdimlist = list(range(xdim))
    random.seed(seed)
    random.shuffle(ydimlist)  # shuffle the individuals
    random.shuffle(xdimlist)  # shuffle the SNPs
    accuracy = 0
    snp_matrix_shuffle = np.copy(snp_matrix[:, ydimlist])
    snp_matrix_shuffle = snp_matrix_shuffle[xdimlist, :]
    snp_matrix_train = snp_matrix_shuffle[:, 0:int(ydim * yratio)]
    snp_matrix_test = snp_matrix_shuffle[:, int(ydim * yratio):]
    snp_matrix_train_x = snp_matrix_train[0:int(xdim * xratio), :]
    snp_matrix_test_x = snp_matrix_test[0:int(xdim * xratio), :]
    for i in range(int(xdim * xratio), xdim):
        snp_matrix_train_y = snp_matrix_train[i, :]
        snp_matrix_test_y = snp_matrix_test[i, :]
        if index != 7:
            if index == 1:
                clf = AdaBoostClassifier(n_estimators=100)
            elif index == 2:
                clf = RandomForestClassifier(n_estimators=100)
            elif index == 3:
                clf = linear_model.LogisticRegression(C=1e5)
            elif index == 4:
                clf = svm.SVC(kernel='rbf')
            elif index == 5:
                clf = svm.SVC(kernel='poly')
            else:
                clf = svm.SVC(kernel='linear')
            clf = clf.fit(snp_matrix_train_x.T, snp_matrix_train_y)
            Y_pred = clf.predict(snp_matrix_test_x.T)
            prediction = snp_matrix_test_y - Y_pred
            wrong = np.count_nonzero(prediction)
            tmp = 1 - (wrong + 0.0) / len(prediction)
            print(tmp)
            accuracy += tmp
    accuracy = accuracy / (xdim - int(xdim * xratio))
    if index == 7:
        pls2 = PLSRegression(n_components=50, scale=False, max_iter=1000)
        snp_matrix_train_y = snp_matrix_train[int(xdim * xratio):, :]
        pls2.fit(snp_matrix_train_x.T, snp_matrix_train_y.T)
        snp_matrix_test_x = snp_matrix_test[0:int(xdim * xratio), :]
        snp_matrix_test_y = snp_matrix_test[int(xdim * xratio):, :]
        Y_pred = transform(pls2.predict(snp_matrix_test_x.T))  # transform() is a helper from the original script
        prediction = snp_matrix_test_y - Y_pred.T
        xdim, ydim = prediction.shape
        wrong = np.count_nonzero(prediction)
        accuracy = 1 - wrong / (xdim * ydim + 0.0)
    return accuracy
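A hypothetical call of Training (df, the SNP data frame, is not shown in the snippet; index=7 selects the PLS branch):
accuracy = Training(df, seed=42, yratio=0.8, xratio=0.9, index=7)
print("held-out accuracy:", accuracy)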
Example 5: PLSRegression
# Module to import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Alternatively: from sklearn.cross_decomposition.PLSRegression import predict [as alias]
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
# #############################################################################
# CCA (PLS mode B with symmetric deflation)
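The snippet breaks off at the CCA heading; in the scikit-learn documentation this section continues with a CCA example roughly along these lines (a sketch, not the verbatim continuation):
from sklearn.cross_decomposition import CCA

n = 500
X = np.random.normal(size=(n, 4))
Y = X[:, :2] + np.random.normal(size=(n, 2))  # Y correlates with the first two columns of X
cca = CCA(n_components=2)
cca.fit(X, Y)
X_c, Y_c = cca.transform(X, Y)                # paired projections onto the canonical variates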
Example 6: train_test_split
# Module to import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Alternatively: from sklearn.cross_decomposition.PLSRegression import predict [as alias]
targets = pd.get_dummies(train.target)
train.drop('target', axis=1, inplace=True)
train = train.apply(np.log1p)
test = pd.read_csv('test.csv', index_col='id')
test = test.apply(np.log1p)
Xt, Xv, yt, yv = train_test_split(train, targets, test_size=0.2, random_state=27)
best = 10.
for n in range(5, 16):
    clf = PLSRegression(n_components=n)
    clf.fit(Xt, yt)
    y_pred = clf.predict(Xv)
    loss = multiclass_log_loss(np.argmax(y_pred, axis=1), y_pred)
    if loss < best:
        n_best = n
        best = loss
        postfix = '(*)'
    else:
        postfix = ''
    print('comps: {:02d}\tLoss:{:5.4f} {}'.format(n, loss, postfix))
clf = PLSRegression(n_components=n_best)
clf.fit(train,targets)
y_pred = clf.predict(test)
Example 7: zip
# Module to import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Alternatively: from sklearn.cross_decomposition.PLSRegression import predict [as alias]
y_levelOne = []
level0Classifier = []
for tid, Xp, yp in zip(subjId_train, X_train, y_train):
    print("Predicting subject ", vid, "from subject ", tid)
    y0 = np.zeros(yp.shape)
    y1 = np.ones(Xt.shape[0])
    X = np.vstack([Xp, Xt])
    yd = np.concatenate([y0, y1])
    pls = PLSRegression(n_components)
    Xp_t, Xp_v, yp_t, yp_v = tts(Xp.copy(), yp.copy(), train_size=0.9)
    yp_t = yp_t.astype(bool)
    # encode the boolean target as two indicator columns for PLS
    yp_t_not = np.vstack((yp_t, ~yp_t)).T
    #print("yp_t_not ", yp_t_not.shape)
    pls.fit(Xp_t, yp_t_not.astype(int))
    yp_new = pls.predict(Xp_t, copy=True)
    yp_pred = (yp_new[:, 0] > yp_new[:, 1]).astype(int)
    yp_t = yp_t.astype(int)
    #print(y_new, y_pred, y_t)
    error = ((yp_t - yp_pred) ** 2).sum()
    print("PLS Training error ", float(error) / yp_t.shape[0])
    yp_new = pls.predict(Xp_v, copy=True)
    yp_pred = (yp_new[:, 0] > yp_new[:, 1]).astype(int)
    #print(y_new, y_pred, y_v)
    #print(((y_v - y_pred) ** 2).sum(), y_v.shape[0])
    error = ((yp_v - yp_pred) ** 2).sum()
    print("PLS Validation error ", float(error) / yp_v.shape[0])
    X_new = pls.transform(X)
    rf = RandomForestClassifier(n_estimators=500, max_depth=None,
                                max_features=int(math.sqrt(n_components)),
                                min_samples_split=100, random_state=144, n_jobs=4)
    #print("shapes ", X_new.shape, y.shape)
Example 8: PLSRegression
# Module to import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Alternatively: from sklearn.cross_decomposition.PLSRegression import predict [as alias]
for i in np.arange(1, 17):
    plsregr = PLSRegression(n_components=i, scale=False)
    plsregr.fit(X_train_scaled, y_train)
    score = -1 * cross_validation.cross_val_score(plsregr, X_train_scaled, y_train, cv=kf_10, scoring='mean_squared_error').mean()
    mse.append(score)
plt.plot(np.arange(1, 17), np.array(mse), '-v')
plt.title("PLS: MSE vs. Principal Components")
plt.xlabel('Number of principal components in PLS regression')
plt.ylabel('MSE')
plt.xlim((-0.2, 17.2))
# Based on the plot, 12 components minimize the MSE
plsregr_test = PLSRegression(n_components=12, scale=False)
plsregr_test.fit(X_train_scaled, y_train)
MSE_PLS = np.mean((plsregr_test.predict(X_test_scaled) - y_test) ** 2)
# print("Mean Squared Error: ", MSE_PLS)
# Compare the results from above. We use R^2 for all models
Test_avg = np.mean(y_test)
LS_R2 = 1 - MSE_LS / (np.mean((Test_avg - y_test)**2))
R_R2 = 1 - MSE_R / (np.mean((Test_avg - y_test)**2))
LA_R2 = 1 - MSE_LA / (np.mean((Test_avg - y_test)**2))
PCA_R2 = 1 - MSE_PCA / (np.mean((Test_avg - y_test)**2))
PLS_R2 = 1 - MSE_PLS / (np.mean((Test_avg - y_test)**2))
print("Least Squares Regression R^2: ", LS_R2)
print("Ridge Regression R^2: ", R_R2)
print("Lasso Regression R^2: ", LA_R2)
print("Principal Component Analysis Regression R^2: ", PCA_R2)
Author: Johnwang2461 | Project: Introduction_Statistical_Learning | Code lines: 33 | Source file: Linear+Model+Selection+and+Regularization+Applied.py
Example 9: print(
# Module to import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Alternatively: from sklearn.cross_decomposition.PLSRegression import predict [as alias]
print(clf.coef_)
yvalid_scaled = clf.predict(xvalid_scaled)
err1= MAPE(y, scalery.inverse_transform(clf.predict(x_scaled)).reshape(-1,1))
err = MAPE(yvalid, scalery.inverse_transform(yvalid_scaled).reshape(-1,1))
'''
General Linear Model -- Elastic Net
'''
from sklearn.cross_decomposition import PLSRegression
pls = PLSRegression(n_components=20)
pls.fit(x_scaled, y_scaled)
print(pls.coef_)
yvalid_scaled = pls.predict(xvalid_scaled)
err1= MAPE(y, scalery.inverse_transform(pls.predict(x_scaled)).reshape(-1,1))
err = MAPE(yvalid, scalery.inverse_transform(yvalid_scaled).reshape(-1,1))
from sklearn.decomposition import PCA
reduced_data = PCA(n_components=2).fit_transform(xtrain_minmax)
pca = PCA(n_components=2)
pca.fit(xtrain_minmax)
print(pca.explained_variance_ratio_)
data_trainO.head(10)
Example 10: loadData
# Module to import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Alternatively: from sklearn.cross_decomposition.PLSRegression import predict [as alias]
(Xtrain, ytrain) = loadData(xtrainpath, ytrainpath)
(Xtest, ytest) = loadData(xtestpath, ytestpath)
#trim off background and scale
ytrain=ytrain[:,1:]
#ytrain=scale(ytrain)
Xtrain=standardize(Xtrain)
#trim off background and scale
ytest = ytest[:,1:]
#ytest = scale(ytest)
Xtest = standardize(Xtest)
pls = PLSRegression(n_components=10)
pls.fit(Xtrain, ytrain)
y_pls = pls.predict(Xtest)
print(1 + pls.score(Xtest, ytest))
pls_rmse=[]
pls_rmse.append(sqrt(mean_squared_error(ytest[:,0], y_pls[:,0])))
pls_rmse.append(sqrt(mean_squared_error(ytest[:,1], y_pls[:,1])))
pls_rmse.append(sqrt(mean_squared_error(ytest[:,2], y_pls[:,2])))
pls_rmse.append(sqrt(mean_squared_error(ytest[:,3], y_pls[:,3])))
fig = plt.figure(figsize=(20,10))
ax1 = fig.add_subplot(241)
ax1.plot(y_pls[:,0], c='r', label='PLS Fit')
ax1.plot(ytest[:,0], c='grey', label='Target')
ax1.set_xlabel('Time')
Example 11: print(
# Module to import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Alternatively: from sklearn.cross_decomposition.PLSRegression import predict [as alias]
print "\n"
SVRr2.append(optSVR.score(XTest, yTest))
SVRmse.append( metrics.mean_squared_error(yTest,SVRpreds))
SVRrmse.append(math.sqrt(SVRmse[metcount]))
print ("Support Vector Regression prediction statistics for fold %d are; MSE = %5.2f RMSE = %5.2f R2 = %5.2f\n\n" % (metcount+1, SVRmse[metcount], SVRrmse[metcount],SVRr2[metcount]))
with open(train_name,'a') as ftrain :
ftrain.write("Support Vector Regression prediction statistics for fold %d are, MSE =, %5.2f, RMSE =, %5.2f, R2 =, %5.2f,\n\n" % (metcount+1, SVRmse[metcount], SVRrmse[metcount],SVRr2[metcount]))
ftrain.close()
# Train partial least squares and predict with optimised parameters
print("\n\n------------------- Starting optimised PLS training -------------------")
optPLS = PLSRegression(n_components=nc)
optPLS.fit(XTrain, yTrain)  # Train the model
print("Training R2 = %5.2f" % optPLS.score(XTrain, yTrain))
print("Starting optimised PLS prediction")
PLSpreds = optPLS.predict(XTest)
print("The predicted values now follow :")
PLSpredsdim = PLSpreds.shape[0]
i = 0
if PLSpredsdim % 5 == 0:
    while i < PLSpredsdim:
        print(round(float(PLSpreds[i]), 2), '\t', round(float(PLSpreds[i+1]), 2), '\t', round(float(PLSpreds[i+2]), 2), '\t', round(float(PLSpreds[i+3]), 2), '\t', round(float(PLSpreds[i+4]), 2))
        i += 5
elif PLSpredsdim % 4 == 0:
    while i < PLSpredsdim:
        print(round(float(PLSpreds[i]), 2), '\t', round(float(PLSpreds[i+1]), 2), '\t', round(float(PLSpreds[i+2]), 2), '\t', round(float(PLSpreds[i+3]), 2))
        i += 4
elif PLSpredsdim % 3 == 0:
    while i < PLSpredsdim:
        print(round(float(PLSpreds[i]), 2), '\t', round(float(PLSpreds[i+1]), 2), '\t', round(float(PLSpreds[i+2]), 2))
        i += 3
Example 12: pls_train
# Module to import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Alternatively: from sklearn.cross_decomposition.PLSRegression import predict [as alias]
def pls_train(groups, varname='valence', arrayname='norm', scale=True,
              ncomps=2, cv_folds=None, cv_repeats=None, skip_cv=False,
              xmin=-np.inf, xmax=np.inf, _larch=None, **kws):
    """use a list of data groups to train a Partial Least Squares model

    Arguments
    ---------
      groups      list of groups to use as components
      varname     name of characteristic value to model ['valence']
      arrayname   string of array name to be fit (see Note 3) ['norm']
      xmin        x-value for start of fit range [-inf]
      xmax        x-value for end of fit range [+inf]
      scale       bool to scale data [True]
      cv_folds    None or number of Cross-Validation folds (see Note 4) [None]
      cv_repeats  None or number of Cross-Validation repeats (see Note 4) [None]
      skip_cv     bool to skip doing Cross-Validation [False]
      ncomps      number of independent components (see Note 5) [2]

    Returns
    -------
      group with trained PLSRegression, to be used with pls_predict

    Notes
    -----
     1. The group members for the components must match each other
        in data content and array names.
     2. all groups must have an attribute (scalar value) for `varname`
     3. arrayname can be one of `norm` or `dmude`
     4. Cross-Validation: if cv_folds is None, sqrt(len(groups)) will be used
        (rounded to integer). if cv_repeats is None, sqrt(len(groups))-1
        will be used (rounded).
     5. The optimal number of components may be best found from PCA. If set to None,
        a search will be done for ncomps that gives the lowest RMSE_CV.
    """
    xdat, spectra = groups2matrix(groups, arrayname, xmin=xmin, xmax=xmax)
    groupnames = []
    ydat = []
    for g in groups:
        groupnames.append(getattr(g, 'filename',
                                  getattr(g, 'groupname', repr(g))))
        val = getattr(g, varname, None)
        if val is None:
            raise ValueError("group '%s' does not have attribute '%s'" % (g, varname))
        ydat.append(val)
    ydat = np.array(ydat)
    nvals = len(groups)
    kws['scale'] = scale
    kws['n_components'] = ncomps
    model = PLSRegression(**kws)
    rmse_cv = None
    if not skip_cv:
        if cv_folds is None:
            cv_folds = int(round(np.sqrt(nvals)))
        if cv_repeats is None:
            cv_repeats = int(round(np.sqrt(nvals)) - 1)
        resid = []
        cv = RepeatedKFold(n_splits=cv_folds, n_repeats=cv_repeats)
        for ctrain, ctest in cv.split(range(nvals)):
            model.fit(spectra[ctrain, :], ydat[ctrain])
            ypred = model.predict(spectra[ctest, :])[:, 0]
            resid.extend((ypred - ydat[ctest]).tolist())
        resid = np.array(resid)
        rmse_cv = np.sqrt((resid**2).mean())
    # final fit without cross-validation
    model = PLSRegression(**kws)
    out = model.fit(spectra, ydat)
    ypred = model.predict(spectra)[:, 0]
    rmse = np.sqrt(((ydat - ypred)**2).mean())
    return Group(x=xdat, spectra=spectra, ydat=ydat, ypred=ypred,
                 coefs=model.x_weights_, loadings=model.x_loadings_,
                 cv_folds=cv_folds, cv_repeats=cv_repeats, rmse_cv=rmse_cv,
                 rmse=rmse, model=model, varname=varname,
                 arrayname=arrayname, scale=scale, groupnames=groupnames,
                 keywords=kws)
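A hypothetical call of pls_train (it assumes `groups` is a list of Larch data groups, each carrying a scalar .valence attribute and a .norm array, as the docstring requires):
result = pls_train(groups, varname='valence', arrayname='norm',
                   ncomps=2, cv_folds=5, cv_repeats=3)
print(result.rmse, result.rmse_cv)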
Example 13: calculator
# Module to import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Alternatively: from sklearn.cross_decomposition.PLSRegression import predict [as alias]
from rdkit.Chem import Descriptors
from rdkit.ML.Descriptors import MoleculeDescriptors
nms = [ x[0] for x in Descriptors._descList ]
def calculator(mols):
    calc = MoleculeDescriptors.MolecularDescriptorCalculator(nms)
    res = [calc.CalcDescriptors(mol) for mol in mols]
    return res
trainMols = [ mol for mol in Chem.SDMolSupplier("solubility.train.sdf") ]
testMols = [ mol for mol in Chem.SDMolSupplier("solubility.test.sdf") ]
trainDescrs = calculator( trainMols )
testDescrs = calculator( testMols )
trainActs = np.array([ float( mol.GetProp('SOL') ) for mol in trainMols ])
testActs = np.array([ float( mol.GetProp('SOL') ) for mol in testMols ])
pls2 = PLSRegression( n_components = 15 )
pls2.fit( trainDescrs, trainActs )
sol_pred = pls2.predict( testDescrs )
print(type(sol_pred))
print(type(trainActs))
print(metrics.r2_score(testActs, sol_pred))
"""
for i in range(len(sol_pred)):
print testActs[i], sol_pred[i]
"""
Example 14: print(
# Module to import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Alternatively: from sklearn.cross_decomposition.PLSRegression import predict [as alias]
#Xpls = pls.x_scores_
#Ypls = pls.y_scores_
#CorrCoef = np.corrcoef(Xpls,Ypls,rowvar=0)
#print('')
#print('Correlation between the two datasets in component 1: {:.3}'.format(CorrCoef[2,0]))
#print('Correlation between the two datasets in component 2: {:.3}'.format(CorrCoef[1,3]))
### Determine cross-validation scores using k-folds repeated n_iter times with a new random sorting
cvPLS = cross_validation.StratifiedShuffleSplit(y, n_iter=10, test_size=0.2, random_state=None) # Stratified k-folds of 1/test_size or 5 typically
### Find CV scores using root mean squared error for PLS to help determine the appropriate number of components
print('')
predPLS = np.array(pls.predict(Data), dtype='int')
msepPLS = mean_squared_error(predPLS,y)
print('PLS MSEP with {:} PLS components: {:.2e}'.format(nPLS, msepPLS))
msePLSScores = cross_validation.cross_val_score(
pls, Data, y, cv=cvPLS, scoring='mean_squared_error') # bug- returns negative values
print('k-folds PLS MSEP: {:.2e}'.format(abs(np.mean(msePLSScores))))
### Perform classification then transform PLS data to LDA basis
nLDA = 2
clfLDA = lda.LDA(n_components = nLDA)
Xlda = clfLDA.fit_transform(TrnsfrmPls[0],ExampleClasses)
# Predict and calculate misclassification rate
Example 15: PLSRegression
# Module to import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Alternatively: from sklearn.cross_decomposition.PLSRegression import predict [as alias]
regression_params = pandas.DataFrame(0, index=norm.columns, columns=concepts)
predicted_nii1 = pandas.DataFrame(0, index=norm.columns, columns=["nii"])
predicted_nii2 = pandas.DataFrame(0, index=norm.columns, columns=["nii"])
print "Training voxels and building predicted images..."
for voxel in norm.columns:
train = [x for x in X.index if x not in [image1_holdout, image2_holdout] and x in norm.index]
Y = norm.loc[train, voxel].tolist()
Xtrain = X.loc[train, :]
# Use pls instead of regularized regression
clf = PLSRegression(n_components=number_components)
clf.fit(Xtrain, Y)
# Need to find where regression/intercept params are in this model
regression_params.loc[voxel, :] = [x[0] for x in clf.coef_]
predicted_nii1.loc[voxel, "nii"] = clf.predict(holdout1Y.reshape(1, -1))[0][0]
predicted_nii2.loc[voxel, "nii"] = clf.predict(holdout2Y.reshape(1, -1))[0][0]
predicted_nii1 = predicted_nii1["nii"].tolist()
predicted_nii2 = predicted_nii2["nii"].tolist()
# Turn into nifti images
nii1 = numpy.zeros(standard_mask.shape)
nii2 = numpy.zeros(standard_mask.shape)
nii1[standard_mask.get_data() != 0] = predicted_nii1
nii2[standard_mask.get_data() != 0] = predicted_nii2
nii1 = nibabel.Nifti1Image(nii1, affine=standard_mask.get_affine())
nii2 = nibabel.Nifti1Image(nii2, affine=standard_mask.get_affine())
# Turn the holdout image data back into nifti