This article collects typical usage examples of the Java class ncsa.hdf.object.Dataset. If you are wondering what the Dataset class does, how to use it, or what real-world usage looks like, the selected code examples below may help.
The Dataset class belongs to the ncsa.hdf.object package. Ten code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: _writeArray
import ncsa.hdf.object.Dataset; // import the required package/class
protected Dataset _writeArray(String name, Group parent, H5Datatype type,
                              long[] dims,
                              Object data)
    throws Exception
{
    boolean chunked = ArrayUtil.product(dims) > 0;
    /* chunking and/or compression are broken for empty arrays */
    log.debug("Creating {} with dims=[{}] size=[{}] chunks=[{}]...",
              name, xJoined(dims), "", chunked ? xJoined(dims) : "");
    Dataset ds = this.output.createScalarDS(name, parent, type,
                                            dims,
                                            chunked ? dims.clone() : null,
                                            chunked ? dims.clone() : null,
                                            chunked ? compression_level : 0,
                                            data);
    log.info("Created {} with dims=[{}] size=[{}] chunks=[{}]",
             name, xJoined(dims), "", chunked ? xJoined(dims) : "");
    return ds;
}
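The helpers used above (ArrayUtil.product, xJoined, compression_level, this.output) belong to the surrounding writer class and are not part of this snippet. A minimal sketch of a direct call, assuming an H5Datatype named int_t for 32-bit integers is defined elsewhere in the class:

// Hypothetical call site, not part of the original example: write a flattened
// 2 x 3 int matrix as a chunked, compressed scalar dataset under `parent`.
long[] dims = {2, 3};
int[] flat = {1, 2, 3, 4, 5, 6};
Dataset counts = _writeArray("counts", parent, int_t, dims, flat);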
Example 2: writeSpeciesVector
import ncsa.hdf.object.Dataset; // import the required package/class
protected void writeSpeciesVector(String name, String title,
                                  Group parent, String[] species, int[] which)
    throws Exception
{
    final String[] specout;
    if (which == null)
        specout = species;
    else {
        specout = new String[which.length];
        for (int i = 0; i < which.length; i++)
            specout[i] = species[which[i]];
    }
    Dataset ds = writeVector(name, parent, specout);
    setAttribute(ds, "TITLE", title);
    setAttribute(ds, "LAYOUT", "[nspecies]");
    setAttribute(ds, "UNITS", "text");
}
Example 3: _writeGrid
import ncsa.hdf.object.Dataset; // import the required package/class
protected void _writeGrid(VolumeGrid vgrid, double startTime, IGridCalc source)
    throws Exception
{
    Trial t = this.getTrial(source.trial());
    t.writeSimulationData(source);
    /* Only write stuff for the first trial to save time and space */
    if (source.trial() > 0)
        return;
    Group model = this.model();
    t._writeGrid(vgrid, startTime, source);
    writeSpeciesVector("species", "names of all species", model, species, null);
    t.writeRegionLabels(model, source);
    t.writeStimulationData(model, source);
    t.writeReactionData(model, source);
    t.writeEventData(model, source);
    {
        Group output_info = this.output.createGroup("output", model);
        setAttribute(output_info, "TITLE", "output species");
        t.writeOutputInfo(output_info);
    }
    {
        String s = source.getSource().serialize();
        Dataset ds = writeVector("serialized_config", model, s);
        setAttribute(ds, "TITLE", "serialized config");
        setAttribute(ds, "LAYOUT", "XML");
    }
}
Example 4: writeRegionLabels
import ncsa.hdf.object.Dataset; // import the required package/class
protected void writeRegionLabels(Group parent, IGridCalc source)
    throws Exception
{
    String[] regions = source.getSource().getVolumeGrid().getRegionLabels();
    Dataset ds = writeVector("regions", parent, regions);
    setAttribute(ds, "TITLE", "names of regions");
    setAttribute(ds, "LAYOUT", "[nregions]");
    setAttribute(ds, "UNITS", "text");
}
Example 5: writeOutputInfo
import ncsa.hdf.object.Dataset; // import the required package/class
protected void writeOutputInfo(Group parent, String identifier,
                               int[] which, int[] elements)
    throws Exception
{
    Group group = output.createGroup(identifier, parent);
    writeSpeciesVector("species", "names of output species", group, species, which);
    Dataset ds = writeVector("elements", group, elements);
    setAttribute(ds, "TITLE", "indices of output elements");
    setAttribute(ds, "LAYOUT", "[nelements]");
    setAttribute(ds, "UNITS", "indices");
}
Example 6: getSomething
import ncsa.hdf.object.Dataset; // import the required package/class
/***********************************************************************
 ***********************    Model loading    ***************************
 ***********************************************************************/

private static <T> T getSomething(H5File h5, String path)
    throws Exception
{
    Dataset obj = (Dataset) h5.get(path);
    if (obj == null) {
        log.error("Failed to retrieve \"{}\"", path);
        throw new Exception("Path \"" + path + "\" not found");
    }
    return (T) obj.getData();
}
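Because the return value is cast to the generic type T, call sites assign the raw HDF-Java array directly, as Example 9 below does for the times vector; a type mismatch only surfaces as a ClassCastException at run time. A hedged usage sketch with illustrative paths:

// Illustrative paths only, not taken from the original source.
double[] times = getSomething(h5, "/trial0/output/all/times");
String[] species = getSomething(h5, "/model/species");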
Example 7: writeArray
import ncsa.hdf.object.Dataset; // import the required package/class
protected Dataset writeArray(String name, Group parent, double[][] items)
    throws Exception
{
    int maxlength = ArrayUtil.maxLength(items);
    long[] dims = {items.length, maxlength};
    double[] flat = ArrayUtil.flatten(items, maxlength);
    return _writeArray(name, parent, double_t, dims, flat);
}
Example 8: writeVector
import ncsa.hdf.object.Dataset; // import the required package/class
protected Dataset writeVector(String name, Group parent, String... items)
    throws Exception
{
    /* fixed-length string type: 4 bytes per character of the longest item, at least 1 byte */
    int maxlength = Math.max(ArrayUtil.maxLength(items) * 4, 1);
    long[] dims = {items.length};
    H5Datatype string_t = new H5Datatype(Datatype.CLASS_STRING, maxlength,
                                         Datatype.NATIVE, Datatype.NATIVE);
    return _writeArray(name, parent, string_t, dims, items);
}
Example 9: loadPopulationFromTime
import ncsa.hdf.object.Dataset; // import the required package/class
private static int[][] loadPopulationFromTime(H5File h5,
                                              int trial,
                                              String output_set,
                                              double pop_from_time)
    throws Exception
{
    String path = "/trial" + trial + "/output/" + output_set;

    final int index;
    {
        double[] times = getSomething(h5, path + "/times");
        if (pop_from_time == -1)
            index = times.length - 1;
        else if (pop_from_time < 0)
            throw new Exception("Time must be nonnegative or -1");
        else {
            index = Arrays.binarySearch(times, pop_from_time);
            if (index < 0)
                throw new Exception("time=" + pop_from_time + " not found in "
                                    + path + "/times");
        }
    }

    String poppath = path + "/population";
    Dataset obj = (Dataset) h5.get(poppath);
    if (obj == null) {
        log.error("Failed to retrieve \"{}\"", poppath);
        throw new Exception("Path \"" + poppath + "\" not found");
    }

    /* This is necessary to retrieve dimensions */
    obj.init();
    int rank = obj.getRank();
    long[] dims = obj.getDims();
    long[] start = obj.getStartDims();
    long[] selected = obj.getSelectedDims();
    int[] selectedIndex = obj.getSelectedIndex();

    log.info("Retrieving population from {}:{} row {}", h5, poppath, index);
    log.debug("pristine rank={} dims={} start={} selected={} selectedIndex={}",
              rank, dims, start, selected, selectedIndex);

    /* select a single row (time point) and the full element x species plane */
    start[0] = index;
    selected[0] = 1;
    selected[1] = dims[1];
    selected[2] = dims[2];
    log.debug("selected rank={} dims={} start={} selected={} selectedIndex={}",
              rank, dims, start, selected, selectedIndex);

    int[] data = (int[]) obj.getData();
    int[][] pop = ArrayUtil.reshape(data, (int) dims[1], (int) dims[2]);
    // log.debug("{}", (Object) pop);
    return pop;
}
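A hedged usage sketch; the file name, trial number, and output-set name are illustrative, and the call is assumed to sit inside a method that declares throws Exception:

// Illustrative only: read the final population snapshot of trial 0 from an
// output set named "all" (pop_from_time == -1 selects the last time point).
H5File h5 = new H5File("model.h5", FileFormat.READ);
h5.open();
int[][] pop = loadPopulationFromTime(h5, 0, "all", -1);
h5.close();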
Example 10: createExtensibleArray
import ncsa.hdf.object.Dataset; // import the required package/class
protected H5ScalarDS createExtensibleArray(String name, Group parent, Datatype type,
                                           String TITLE, String LAYOUT, String UNITS,
                                           long... dims)
    throws Exception
{
    long[] maxdims = dims.clone();
    maxdims[0] = H5F_UNLIMITED;
    long[] chunks = dims.clone();

    /* avoid too small chunks */
    chunks[0] = 1;
    if (ArrayUtil.product(chunks) == 0)
        throw new RuntimeException("Empty chunks: " + xJoined(chunks));
    while (ArrayUtil.product(chunks) < 1024)
        chunks[0] *= 2;

    /* do not write any data in the beginning */
    dims[0] = 0;

    /* Create dataspace */
    int filespace_id = H5.H5Screate_simple(dims.length, dims, maxdims);

    /* Create the dataset creation property list, add the shuffle filter
     * and the gzip compression filter. The order in which the filters
     * are added here is significant - we will see much greater results
     * when the shuffle is applied first. The order in which the filters
     * are added to the property list is the order in which they will be
     * invoked when writing data. */
    int dcpl_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_CREATE);
    H5.H5Pset_shuffle(dcpl_id);
    H5.H5Pset_deflate(dcpl_id, compression_level);
    H5.H5Pset_chunk(dcpl_id, dims.length, chunks);

    /* Create the dataset */
    final String path = parent.getFullName() + "/" + name;
    H5.H5Dcreate(this.output.getFID(), path,
                 type.toNative(), filespace_id,
                 HDF5Constants.H5P_DEFAULT, dcpl_id, HDF5Constants.H5P_DEFAULT);

    Dataset ds = new H5ScalarDS(this.output, path, "/");
    ds.init();

    log.info("Created {} with dims=[{}] size=[{}] chunks=[{}]",
             name, xJoined(dims), xJoined(maxdims), xJoined(chunks));

    setAttribute(ds, "TITLE", TITLE);
    setAttribute(ds, "LAYOUT", LAYOUT);
    setAttribute(ds, "UNITS", UNITS);
    return (H5ScalarDS) ds;
}
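A hedged sketch of a call; the trial group, the double_t datatype constant, and the dimension variables nel and nspecies are assumptions, not part of the original listing. Passing 0 for the first dimension is harmless, since the method resets dims[0] to 0 anyway and the dataset grows along that axis later:

// Illustrative only: a per-snapshot concentrations array that is extensible
// along the first (snapshot) axis, with fixed element and species axes.
H5ScalarDS concs = createExtensibleArray("concentrations", trialGroup, double_t,
                                         "concentrations of species",
                                         "[snapshot x nel x nspecies]",
                                         "count",
                                         0, nel, nspecies);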