本文整理汇总了Java中water.fvec.Frame.add方法的典型用法代码示例。如果您正苦于以下问题:Java Frame.add方法的具体用法?Java Frame.add怎么用?Java Frame.add使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类water.fvec.Frame
的用法示例。
在下文中一共展示了Frame.add方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: unifyFrame
import water.fvec.Frame; //导入方法依赖的package包/类
/**
 * Locates the response column chosen by {@code prep}, records its name on the
 * DRF parameters, and — for classification — swaps it for a categorical copy.
 *
 * @return the removed original response {@link Vec} when a categorical copy
 *         replaced it (caller must clean it up), otherwise {@code null}.
 */
static Vec unifyFrame(DRFModel.DRFParameters drf, Frame fr, PrepData prep, boolean classification) {
  int col = prep.prep(fr);
  if (col < 0) col = ~col;  // a negative result encodes the index as its bitwise complement
  String responseName = fr._names[col];
  drf._response_column = fr.names()[col];
  Vec response = fr.vecs()[col];
  if (!classification) {
    // Regression: re-attach the same vector under the same name; nothing to return.
    fr.remove(col);
    fr.add(responseName, response);
    return null;
  }
  // Classification: replace the response with a categorical copy and hand the
  // original back to the caller.
  Vec removed = fr.remove(col);
  fr.add(responseName, response.toCategoricalVec());
  return removed;
}
示例2: unifyFrame
import water.fvec.Frame; //导入方法依赖的package包/类
/**
 * Locates the response column chosen by {@code prep}, records its name on the
 * Deep Learning parameters, and — for classification — swaps it for a
 * categorical copy.
 *
 * @return the removed original response {@link Vec} when a categorical copy
 *         replaced it (caller must clean it up), otherwise {@code null}.
 */
static Vec unifyFrame(DeepLearningParameters drf, Frame fr, PrepData prep, boolean classification) {
  int col = prep.prep(fr);
  if (col < 0) col = ~col;  // a negative result encodes the index as its bitwise complement
  String responseName = fr._names[col];
  drf._response_column = fr.names()[col];
  Vec response = fr.vecs()[col];
  if (!classification) {
    // Regression: re-attach the same vector under the same name; nothing to return.
    fr.remove(col);
    fr.add(responseName, response);
    return null;
  }
  // Classification: replace the response with a categorical copy and hand the
  // original back to the caller.
  Vec removed = fr.remove(col);
  fr.add(responseName, response.toCategoricalVec());
  return removed;
}
示例3: transformImpl
import water.fvec.Frame; //导入方法依赖的package包/类
/**
 * Evaluates the stored AST expression against frame {@code f} and splices the
 * resulting column(s) back into {@code f} in place.
 *
 * A multi-column result is appended column by column (original dropped when
 * {@code _inplace}); a single-column result either replaces the original
 * column ({@code _inplace}) or is appended under a uniquified name. The
 * mutated frame is re-published to the DKV and returned.
 */
@Override protected Frame transformImpl(Frame f) {
// Rebind the AST's frame argument so the expression runs over f.
((ASTExec)_ast._asts[1])._asts[1] = AST.newASTFrame(f);
// Execute the expression; the result frame holds the new column(s).
Frame fr = Exec.execute(_ast).getFrame();
_newCol = _newNames==null?new String[fr.numCols()]:_newNames;
_newColTypes = toJavaPrimitive(fr.anyVec().get_type_str());
if( (_multiColReturn=fr.numCols() > 1) ) {
for(int i=0;i<_newCol.length;i++) {
// Generate non-clashing names when the caller supplied none; later names
// are uniquified relative to the previously generated one.
if(_newNames==null) _newCol[i] = f.uniquify(i > 0 ? _newCol[i - 1] : _oldCol);
f.add(_newCol[i], fr.vec(i));
}
// In-place mode: drop the source column from f and delete its Vec.
if( _inplace ) f.remove(f.find(_oldCol)).remove();
} else {
_newCol = new String[]{_inplace ? _oldCol : f.uniquify(_oldCol)};
// Either swap the old Vec out (deleting it) or append the new one alongside.
if( _inplace ) f.replace(f.find(_oldCol), fr.anyVec()).remove();
else f.add(_newNames==null?_newCol[0]:_newNames[0], fr.anyVec());
}
DKV.put(f);
return f;
}
示例4: testModelAdaptMissing
import water.fvec.Frame; //导入方法依赖的package包/类
/**
 * Adapting a test frame whose single categorical column is entirely NA against
 * matching training metadata must succeed without producing any warnings.
 */
@Test public void testModelAdaptMissing() {
  AModel.AParms parms = new AModel.AParms();
  AModel.AOutput output = new AModel.AOutput();
  Vec cat = vec(new String[]{"A","B"},0,1,0,1);
  Frame train = new Frame();
  train.add("cat",cat);
  output._names = train.names();
  output._domains = train.domains();
  train.remove();
  AModel model = new AModel(Key.make(),parms,output);
  Frame test = new Frame();
  test.add("cat", cat.makeCon(Double.NaN)); // All NAN/missing column
  Frame adapted = new Frame(test);
  String[] warnings = model.adaptTestForTrain(adapted,true, true);
  Assert.assertTrue(warnings.length == 0); // No errors during adaption
  Model.cleanup_adapt( adapted, test );
  test.remove();
}
示例5: testModelAdaptConvert
import water.fvec.Frame; //导入方法依赖的package包/类
/**
 * Adapting a numeric test column against a categorical training column must
 * fail with an IllegalArgumentException rather than silently converting.
 */
@Test public void testModelAdaptConvert() {
  AModel.AParms parms = new AModel.AParms();
  AModel.AOutput output = new AModel.AOutput();
  Frame train = new Frame();
  train.add("dog",vec(new String[]{"A","B"},0,1,0,1));
  output._names = train.names();
  output._domains = train.domains();
  train.remove();
  AModel model = new AModel(Key.make(),parms,output);
  Frame test = new Frame();
  test.add("dog",vec(2, 3, 2, 3));
  Frame adapted = new Frame(test);
  boolean sawExpectedException = false;
  try {
    model.adaptTestForTrain(adapted, true, true);
  } catch( IllegalArgumentException iae ) {
    sawExpectedException = true;
  }
  Assert.assertTrue(sawExpectedException);
  Model.cleanup_adapt( adapted, test );
  test.remove();
}
示例6: buildModelOutput
import water.fvec.Frame; //导入方法依赖的package包/类
/**
 * Materializes the trained word2vec embeddings as an H2O Frame keyed by
 * {@code _w2vKey}: one "Word" column (shared with the vocabulary frame) plus
 * one numeric column V0..V{vecSize-1} per embedding dimension, with row i
 * holding the embedding of vocabulary word i from {@code _modelInfo._syn0}.
 */
public void buildModelOutput() {
  final int vecSize = _parms._vecSize;
  Futures fs = new Futures();
  String[] colNames = new String[vecSize];
  Vec[] vecs = new Vec[vecSize];
  Key[] keys = Vec.VectorGroup.VG_LEN1.addVecs(vecs.length);
  // allocate: one appendable vector (with a single chunk) per embedding dimension
  NewChunk[] cs = new NewChunk[vecs.length];
  AppendableVec[] avs = new AppendableVec[vecs.length];
  for (int i = 0; i < vecs.length; i++) {
    avs[i] = new AppendableVec(keys[i], Vec.T_NUM);
    cs[i] = new NewChunk(avs[i], 0);
  }
  // fill in vector values: syn0 is laid out row-major, vocabSize x vecSize
  for (int i = 0; i < _modelInfo._vocabSize; i++) {
    for (int j = 0; j < vecSize; j++) {
      cs[j].addNum(_modelInfo._syn0[i * vecSize + j]);
    }
  }
  // finalize vectors
  final int rowLayout = avs[0].compute_rowLayout();
  for (int i = 0; i < vecs.length; i++) {
    colNames[i] = "V" + i; // fixed: dropped redundant `new String(...)` constructor
    cs[i].close(0, fs);
    vecs[i] = avs[i].close(rowLayout, fs);
  }
  fs.blockForPending();
  Frame fr = new Frame(_w2vKey = Key.make("w2v"));
  //FIXME this ties the word count frame to this one which means
  //FIXME one can't be deleted without destroying the other
  fr.add("Word", (_parms._vocabKey.get()).vec(0));
  fr.add(colNames, vecs);
  DKV.put(_w2vKey, fr);
}
示例7: testCollisionOfDRFParamsChecksum
import water.fvec.Frame; //导入方法依赖的package包/类
/**
 * Regression test for a checksum collision: two DRFParameters objects that
 * differ in several fields (mtries, max_depth, ntrees) must not hash to the
 * same checksum value.
 */
@Test
public void testCollisionOfDRFParamsChecksum() {
  Frame fr = null;
  try {
    fr = parse_test_file("smalldata/junit/cars.csv");
    fr.remove("name").remove(); // drop the unique-id column and delete its Vec
    Vec old = fr.remove("economy (mpg)");
    fr.add("economy (mpg)", old); // response to last column
    DKV.put(fr);
    // {"_model_id":null,"_train":{"name":"_83da9e0754c5eb9f6b812fe17e7945e5","type":"Key"},"_valid":null,"_nfolds":0,"_keep_cross_validation_predictions":false,"_fold_assignment":"AUTO","_distribution":"AUTO","_tweedie_power":1.5,"_ignored_columns":null,"_ignore_const_cols":true,"_weights_column":null,"_offset_column":null,"_fold_column":null,"_score_each_iteration":false,"_response_column":"economy (mpg)","_balance_classes":false,"_max_after_balance_size":5.0,"_class_sampling_factors":null,"_max_hit_ratio_k":10,"_max_confusion_matrix_size":20,"_checkpoint":null,"_ntrees":9,"_max_depth":15,"_min_rows":1.0,"_nbins":20,"_nbins_cats":1024,"_r2_stopping":0.999999,"_seed":-4522296119273841674,"_nbins_top_level":1024,"_build_tree_one_node":false,"_initial_score_interval":4000,"_score_interval":4000,"_mtries":3,"_sample_rate":0.6499997,"_binomial_double_trees":false}
    DRFModel.DRFParameters params1 = new DRFModel.DRFParameters();
    params1._train = fr._key;
    params1._response_column = "economy (mpg)";
    params1._seed = -4522296119273841674L;
    params1._mtries = 3;
    params1._max_depth = 15;
    params1._ntrees = 9;
    params1._sample_rate = 0.6499997f;
    // {"_model_id":null,"_train":{"name":"_83da9e0754c5eb9f6b812fe17e7945e5","type":"Key"},"_valid":null,"_nfolds":0,"_keep_cross_validation_predictions":false,"_fold_assignment":"AUTO","_distribution":"AUTO","_tweedie_power":1.5,"_ignored_columns":null,"_ignore_const_cols":true,"_weights_column":null,"_offset_column":null,"_fold_column":null,"_score_each_iteration":false,"_response_column":"economy (mpg)","_balance_classes":false,"_max_after_balance_size":5.0,"_class_sampling_factors":null,"_max_hit_ratio_k":10,"_max_confusion_matrix_size":20,"_checkpoint":null,"_ntrees":13,"_max_depth":1,"_min_rows":1.0,"_nbins":20,"_nbins_cats":1024,"_r2_stopping":0.999999,"_seed":-4522296119273841674,"_nbins_top_level":1024,"_build_tree_one_node":false,"_initial_score_interval":4000,"_score_interval":4000,"_mtries":1,"_sample_rate":0.6499997,"_binomial_double_trees":false}
    DRFModel.DRFParameters params2 = new DRFModel.DRFParameters();
    params2._train = fr._key;
    params2._response_column = "economy (mpg)";
    params2._seed = -4522296119273841674L;
    params2._mtries = 1;
    params2._max_depth = 1;
    params2._ntrees = 13;
    params2._sample_rate = 0.6499997f;
    long csum1 = params1.checksum();
    long csum2 = params2.checksum();
    // fixed: corrected "shoudl" typo in the assertion message
    Assert.assertNotEquals("Checksums should be different", csum1, csum2);
  } finally {
    if (fr != null) {
      fr.remove();
    }
  }
}
示例8: exec_check
import water.fvec.Frame; //导入方法依赖的package包/类
/**
 * Evaluates {@code ast}; when the result is a frame, verifies that its shape
 * matches the test frame {@code tst} and appends its columns to {@code xfr}.
 *
 * @return the evaluated value, unchanged
 * @throws IllegalArgumentException when a frame result's dimensions differ
 *         from the test frame's
 */
Val exec_check( Env env, Env.StackHelp stk, Frame tst, AST ast, Frame xfr ) {
  Val result = ast.exec(env);
  if( !result.isFrame() ) return result;
  Frame branch = stk.track(result).getFrame();
  boolean sameShape = tst.numCols() == branch.numCols() && tst.numRows() == branch.numRows();
  if( !sameShape )
    throw new IllegalArgumentException("ifelse test frame and other frames must match dimensions, found "+tst+" and "+branch);
  xfr.add(branch);
  return result;
}
示例9: randSubIterInPlace
import water.fvec.Frame; //导入方法依赖的package包/类
/**
 * Randomized subspace iteration for SVD: initializes Y = A*G with Gaussian G,
 * then alternates Ytilde = A'Q (small QR in-memory via JAMA) and Y = A*Qtilde
 * with an in-place QR of Y, until {@code _max_iterations} or job cancellation.
 *
 * @return the frame whose storage alternates between Y and the orthonormal Q
 *         (holds Q after the final in-place QR); caller owns its cleanup
 */
private Frame randSubIterInPlace(DataInfo dinfo, SVDModel model) {
DataInfo yinfo = null;
Frame yqfrm = null;
try {
// 1) Initialize Y = AG where G ~ N(0,1) and compute Y = QR factorization
update(1, "Initializing random subspace of training data Y");
double[][] gt = ArrayUtils.gaussianArray(_parms._nv, _ncolExp, _parms._seed);
RandSubInit rtsk = new RandSubInit(self(), dinfo, gt);
rtsk.doAll_numericResult(_parms._nv, dinfo._adaptedFrame);
yqfrm = rtsk.outputFrame(Key.make(), null, null); // Alternates between Y and Q from Y = QR
// Make input frame [A,Q] where A = read-only training data, Y = A \tilde{Q}, Q from Y = QR factorization
// Note: If A is n by p (p = num cols with categoricals expanded), then \tilde{Q} is p by k and Q is n by k
// Q frame is used to save both intermediate Y calculation and final orthonormal Q matrix
Frame aqfrm = new Frame(dinfo._adaptedFrame);
aqfrm.add(yqfrm);
// Calculate Cholesky of Y Gram to get R' = L matrix
update(1, "Computing QR factorization of Y");
yinfo = new DataInfo(Key.make(), yqfrm, null, true, DataInfo.TransformType.NONE, true, false, false);
DKV.put(yinfo._key, yinfo);
LinearAlgebraUtils.computeQInPlace(self(), yinfo);
model._output._iterations = 0;
while (model._output._iterations < _parms._max_iterations) {
// Bail out promptly if the job was cancelled between iterations.
if(!isRunning()) break;
update(1, "Iteration " + String.valueOf(model._output._iterations+1) + " of randomized subspace iteration");
// 2) Form \tilde{Y}_j = A'Q_{j-1} and compute \tilde{Y}_j = \tilde{Q}_j \tilde{R}_j factorization
SMulTask stsk = new SMulTask(dinfo, _parms._nv);
stsk.doAll(aqfrm);
// The p-by-k product is small enough to factor locally with JAMA.
Matrix ysmall = new Matrix(stsk._atq);
QRDecomposition ysmall_qr = new QRDecomposition(ysmall);
double[][] qtilde = ysmall_qr.getQ().getArray();
// 3) [A,Q_{j-1}] -> [A,Y_j]: Form Y_j = A\tilde{Q}_j and compute Y_j = Q_jR_j factorization
BMulInPlaceTask tsk = new BMulInPlaceTask(dinfo, ArrayUtils.transpose(qtilde));
tsk.doAll(aqfrm);
LinearAlgebraUtils.computeQInPlace(self(), yinfo);
model._output._iterations++;
// Persist iteration progress so monitors see the updated model state.
model.update(self());
}
} catch( Throwable t ) {
Job thisJob = DKV.getGet(_key);
if (thisJob._state == JobState.CANCELLED) {
Log.info("Job cancelled by user.");
} else {
t.printStackTrace();
failed(t);
throw t;
}
} finally {
// Only the DataInfo wrapper is removed here; yqfrm is returned to the caller.
if( yinfo != null ) yinfo.remove();
}
return yqfrm;
}
示例10: testDuplicatesCarsGrid
import water.fvec.Frame; //导入方法依赖的package包/类
/**
 * A GBM grid search whose hyper-parameter lists contain only duplicated points
 * must collapse to a single model rather than constructing duplicates.
 */
@Test
public void testDuplicatesCarsGrid() {
  Grid carGrid = null;
  Frame cars = null;
  Vec response = null;
  try {
    cars = parse_test_file("smalldata/junit/cars_20mpg.csv");
    cars.remove("name").remove(); // Remove unique id
    response = cars.remove("economy");
    cars.add("economy", response); // response to last column
    DKV.put(cars);
    // Every hyper-parameter lists the same value twice, so all grid points
    // are identical.
    HashMap<String, Object[]> hyperParms = new HashMap<>();
    hyperParms.put("_distribution", new Distribution.Family[]{Distribution.Family.gaussian});
    hyperParms.put("_ntrees", new Integer[]{5, 5});
    hyperParms.put("_max_depth", new Integer[]{2, 2});
    hyperParms.put("_learn_rate", new Float[]{.1f, .1f});
    // Fire off a grid search
    GBMModel.GBMParameters params = new GBMModel.GBMParameters();
    params._train = cars._key;
    params._response_column = "economy";
    GridSearch gs = GridSearch.startGridSearch(params, hyperParms, GBM_MODEL_FACTORY);
    carGrid = (Grid) gs.get();
    // Check that duplicate model have not been constructed
    Model[] models = carGrid.getModels();
    assertTrue("Number of returned models has to be > 0", models.length > 0);
    // But all off them should be same
    Key<Model> firstModelKey = models[0]._key;
    for (Model m : models) {
      assertTrue("Number of constructed models has to be equal to 1", firstModelKey == m._key);
    }
  } finally {
    if (response != null) response.remove();
    if (cars != null) cars.remove();
    if (carGrid != null) carGrid.remove();
  }
}
示例11: testCheckpointReconstruction4BinomialPUBDEV1829
import water.fvec.Frame; //导入方法依赖的package包/类
/**
 * PUBDEV-1829 (currently @Ignore'd): a 10-tree GBM built by restarting from a
 * 5-tree checkpoint must produce the same trees as a 10-tree GBM trained from
 * scratch with identical parameters and seed.
 */
@Ignore("PUBDEV-1829")
public void testCheckpointReconstruction4BinomialPUBDEV1829() {
Frame tr = parse_test_file("smalldata/jira/gbm_checkpoint_train.csv");
Frame val = parse_test_file("smalldata/jira/gbm_checkpoint_valid.csv");
Vec old = null;
// Drop identifier and unused columns from both frames.
tr.remove("name").remove();
tr.remove("economy").remove();
val.remove("name").remove();
val.remove("economy").remove();
// Move the response column to the last position in each frame.
old = tr.remove("economy_20mpg");
tr.add("economy_20mpg", old);
DKV.put(tr);
old = val.remove("economy_20mpg");
val.add("economy_20mpg", old);
DKV.put(val);
GBMModel model = null;
GBMModel modelFromCheckpoint = null;
GBMModel modelFinal = null;
try {
// Initial 5-tree model that will serve as the checkpoint.
GBMModel.GBMParameters gbmParams = new GBMModel.GBMParameters();
gbmParams._model_id = Key.make("Initial model");
gbmParams._train = tr._key;
gbmParams._valid = val._key;
gbmParams._response_column = "economy_20mpg";
gbmParams._ntrees = 5;
gbmParams._max_depth = 5;
gbmParams._min_rows = 10;
gbmParams._score_each_iteration = true;
gbmParams._seed = 42;
model = new GBM(gbmParams).trainModel().get();
// Continue from the checkpoint up to 10 trees total.
GBMModel.GBMParameters gbmFromCheckpointParams = new GBMModel.GBMParameters();
gbmFromCheckpointParams._model_id = Key.make("Model from checkpoint");
gbmFromCheckpointParams._train = tr._key;
gbmFromCheckpointParams._valid = val._key;
gbmFromCheckpointParams._response_column = "economy_20mpg";
gbmFromCheckpointParams._ntrees = 10;
gbmFromCheckpointParams._checkpoint = model._key;
gbmFromCheckpointParams._score_each_iteration = true;
gbmFromCheckpointParams._max_depth = 5;
gbmFromCheckpointParams._min_rows = 10;
gbmFromCheckpointParams._seed = 42;
modelFromCheckpoint = new GBM(gbmFromCheckpointParams).trainModel().get();
// Compute a separated model containing the same number of trees as a model built from checkpoint
GBMModel.GBMParameters gbmFinalParams = new GBMModel.GBMParameters();
gbmFinalParams._model_id = Key.make("Validation model");
gbmFinalParams._train = tr._key;
gbmFinalParams._valid = val._key;
gbmFinalParams._response_column = "economy_20mpg";
gbmFinalParams._ntrees = 10;
gbmFinalParams._score_each_iteration = true;
gbmFinalParams._max_depth = 5;
gbmFinalParams._min_rows = 10;
gbmFinalParams._seed = 42;
modelFinal = new GBM(gbmFinalParams).trainModel().get();
// The checkpoint-continued model and the from-scratch model must agree tree-for-tree.
CompressedTree[][] treesFromCheckpoint = getTrees(modelFromCheckpoint);
CompressedTree[][] treesFromFinalModel = getTrees(modelFinal);
assertTreeEquals("The model created from checkpoint and corresponding model created from scratch should have the same trees!",
treesFromCheckpoint, treesFromFinalModel, true);
} finally {
// Clean up frames, the moved response vec, and all three models.
if (tr!=null) tr.delete();
if (val!=null) val.delete();
if (old != null) old.remove();
if (model!=null) model.delete();
if (modelFromCheckpoint!=null) modelFromCheckpoint.delete();
if (modelFinal!=null) modelFinal.delete();
}
}
示例12: testNfoldsConsecutiveModelsSame
import water.fvec.Frame; //导入方法依赖的package包/类
/**
 * Reproducibility check: training the same 3-fold-CV DRF twice with a fixed
 * seed must yield identical cross-validation metrics (AUC, MSE, R2, logloss).
 */
@Test
public void testNfoldsConsecutiveModelsSame() {
Frame tfr = null;
Vec old = null;
DRFModel drf1 = null;
DRFModel drf2 = null;
Scope.enter();
try {
tfr = parse_test_file("smalldata/junit/cars_20mpg.csv");
tfr.remove("name").remove(); // Remove unique id
tfr.remove("economy").remove();
// Convert the response to categorical so the run is binomial.
old = tfr.remove("economy_20mpg");
tfr.add("economy_20mpg", old.toCategoricalVec()); // response to last column
DKV.put(tfr);
DRFModel.DRFParameters parms = new DRFModel.DRFParameters();
parms._train = tfr._key;
parms._response_column = "economy_20mpg";
parms._min_rows = 2;
parms._max_depth = 2;
parms._nfolds = 3;
parms._ntrees = 3;
parms._seed = 77777;
// Train the identical model twice with the same parameters and seed.
DRF job1 = new DRF(parms);
drf1 = job1.trainModel().get();
DRF job2 = new DRF(parms);
drf2 = job2.trainModel().get();
ModelMetricsBinomial mm1 = (ModelMetricsBinomial)drf1._output._cross_validation_metrics;
ModelMetricsBinomial mm2 = (ModelMetricsBinomial)drf2._output._cross_validation_metrics;
// Both runs must agree on every cross-validation metric to tight tolerance.
assertEquals(mm1.auc()._auc, mm2.auc()._auc, 1e-12);
assertEquals(mm1.mse(), mm2.mse(), 1e-12);
assertEquals(mm1.r2(), mm2.r2(), 1e-12);
assertEquals(mm1.logloss(), mm2.logloss(), 1e-12);
job1.remove();
job2.remove();
} finally {
// Clean up frames, the converted response vec, and both models (incl. CV sub-models).
if (tfr != null) tfr.remove();
if (old != null) old.remove();
if (drf1 != null) {
drf1.deleteCrossValidationModels();
drf1.delete();
}
if (drf2 != null) {
drf2.deleteCrossValidationModels();
drf2.delete();
}
Scope.exit();
}
}
示例13: testMTrys
import water.fvec.Frame; //导入方法依赖的package包/类
/**
 * A 3-fold-CV DRF must produce a non-null AUC for every legal mtries value
 * from 1 through 6.
 */
@Test
public void testMTrys() {
  Frame cars = null;
  Vec response = null;
  DRFModel model = null;
  for (int mtries = 1; mtries <= 6; ++mtries) {
    Scope.enter();
    try {
      cars = parse_test_file("smalldata/junit/cars_20mpg.csv");
      cars.remove("name").remove(); // Remove unique id
      cars.remove("economy").remove();
      response = cars.remove("economy_20mpg");
      cars.add("economy_20mpg", response.toCategoricalVec()); // response to last column
      DKV.put(cars);
      DRFModel.DRFParameters parms = new DRFModel.DRFParameters();
      parms._train = cars._key;
      parms._response_column = "economy_20mpg";
      parms._min_rows = 2;
      parms._ntrees = 5;
      parms._max_depth = 5;
      parms._nfolds = 3;
      parms._mtries = mtries;
      DRF job = new DRF(parms);
      model = job.trainModel().get();
      ModelMetricsBinomial mm = (ModelMetricsBinomial) model._output._cross_validation_metrics;
      Assert.assertTrue(mm._auc != null);
      job.remove();
    } finally {
      // Per-iteration cleanup of the frame, response vec, and model.
      if (cars != null) cars.remove();
      if (response != null) response.remove();
      if (model != null) {
        model.deleteCrossValidationModels();
        model.delete();
      }
      Scope.exit();
    }
  }
}
示例14: testDuplicatesCarsGrid
import water.fvec.Frame; //导入方法依赖的package包/类
/**
 * A DRF grid search whose hyper-parameter lists contain only duplicated points
 * must collapse to a single model rather than constructing duplicates.
 */
@Test
public void testDuplicatesCarsGrid() {
  Grid carGrid = null;
  Frame cars = null;
  Vec response = null;
  try {
    cars = parse_test_file("smalldata/junit/cars_20mpg.csv");
    cars.remove("name").remove(); // Remove unique id
    response = cars.remove("economy");
    cars.add("economy", response); // response to last column
    DKV.put(cars);
    // Every hyper-parameter lists the same value twice, so all grid points
    // are identical.
    HashMap<String, Object[]> hyperParms = new HashMap<>();
    hyperParms.put("_ntrees", new Integer[]{5, 5});
    hyperParms.put("_max_depth", new Integer[]{2, 2});
    hyperParms.put("_mtries", new Integer[]{-1, -1});
    hyperParms.put("_sample_rate", new Float[]{.1f, .1f});
    // Fire off a grid search
    DRFModel.DRFParameters params = new DRFModel.DRFParameters();
    params._train = cars._key;
    params._response_column = "economy";
    // Get the Grid for this modeling class and frame
    GridSearch gs = GridSearch.startGridSearch(params, hyperParms, DRF_MODEL_FACTORY);
    carGrid = (Grid) gs.get();
    // Check that duplicate model have not been constructed
    Model[] models = carGrid.getModels();
    assertTrue("Number of returned models has to be > 0", models.length > 0);
    // But all off them should be same
    Key<Model> firstModelKey = models[0]._key;
    for (Model m : models) {
      assertTrue("Number of constructed models has to be equal to 1", firstModelKey == m._key);
    }
  } finally {
    if (response != null) response.remove();
    if (cars != null) cars.remove();
    if (carGrid != null) carGrid.remove();
  }
}
示例15: elasticAveragingTrivial
import water.fvec.Frame; //导入方法依赖的package包/类
/**
 * With a single M/R iteration (epochs=1, train_samples_per_iteration=-1) the
 * elastic-averaging code path must be a no-op: models trained with and without
 * elastic averaging must reach the same training MSE.
 */
@Test public void elasticAveragingTrivial() {
DeepLearningParameters dl;
Frame frTrain;
int N = 2;
DeepLearningModel [] models = new DeepLearningModel[N];
dl = new DeepLearningParameters();
Scope.enter();
try {
// Run i==0 without elastic averaging, i==1 with (no-op) elastic averaging.
for (int i = 0; i < N; ++i) {
frTrain = parse_test_file("./smalldata/covtype/covtype.20k.data");
// Replace the last column with a categorical copy to make this classification.
Vec resp = frTrain.lastVec().toCategoricalVec();
frTrain.remove(frTrain.vecs().length - 1).remove();
frTrain.add("Response", resp);
DKV.put(frTrain);
dl._train = frTrain._key;
dl._response_column = ((Frame) DKV.getGet(dl._train)).lastVecName();
dl._export_weights_and_biases = true;
dl._hidden = new int[]{17, 11};
dl._quiet_mode = false;
// make it reproducible
dl._seed = 1234;
dl._reproducible = true;
// only do one M/R iteration, and there's no elastic average yet - so the two paths below should be identical
dl._epochs = 1;
dl._train_samples_per_iteration = -1;
if (i == 0) {
// no elastic averaging
dl._elastic_averaging = false;
dl._elastic_averaging_moving_rate = 0.5; //ignored
dl._elastic_averaging_regularization = 0.9; //ignored
} else {
// no-op elastic averaging
dl._elastic_averaging = true; //go different path, but don't really do anything because of epochs=1 and train_samples_per_iteration=-1
dl._elastic_averaging_moving_rate = 0.5; //doesn't matter, it's not used since we only do one M/R iteration and there's no time average
dl._elastic_averaging_regularization = 0.1; //doesn't matter, since elastic average isn't yet available in first iteration
}
// Invoke DL and block till the end
DeepLearning job = null;
try {
job = new DeepLearning(dl);
// Get the model
models[i] = job.trainModel().get();
} finally {
if (job != null) job.remove();
}
frTrain.remove();
}
// Both runs must reach the same training MSE (compared to tolerance 1e-6).
for (int i = 0; i < N; ++i) {
Log.info(models[i]._output._training_metrics.cm().table().toString());
Assert.assertEquals(models[i]._output._training_metrics._MSE, models[0]._output._training_metrics._MSE, 1e-6);
}
}finally{
for (int i=0; i<N; ++i)
if (models[i] != null)
models[i].delete();
Scope.exit();
}
}