This article collects typical usage examples of the Java method org.deidentifier.arx.DataDefinition.getHierarchy. If you are wondering what DataDefinition.getHierarchy does, how to call it, or what real-world usages look like, the curated code examples below may help. You can also explore further usage examples of the containing class, org.deidentifier.arx.DataDefinition.
Four code examples of the DataDefinition.getHierarchy method are shown below, sorted by popularity by default.
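Before the examples, here is a minimal, self-contained sketch of the typical workflow: a generalization hierarchy is attached to an attribute of a Data object, and DataDefinition.getHierarchy then returns that hierarchy as a String[rows][levels] array. The attribute names and hierarchy values are illustrative assumptions, not taken from the examples that follow.

import org.deidentifier.arx.AttributeType.Hierarchy;
import org.deidentifier.arx.AttributeType.Hierarchy.DefaultHierarchy;
import org.deidentifier.arx.Data;
import org.deidentifier.arx.Data.DefaultData;
import org.deidentifier.arx.DataDefinition;

public class GetHierarchyDemo {
    public static void main(String[] args) {
        // Hypothetical input data with two columns
        DefaultData data = Data.create();
        data.add("age", "zipcode");
        data.add("34", "81667");
        data.add("45", "81675");

        // Hypothetical generalization hierarchy for "age":
        // level 0 = original value, level 1 = decade, level 2 = suppressed
        DefaultHierarchy age = Hierarchy.create();
        age.add("34", "30-39", "*");
        age.add("45", "40-49", "*");
        data.getDefinition().setAttributeType("age", age);

        // getHierarchy returns the hierarchy as String[rows][levels]
        DataDefinition definition = data.getDefinition();
        String[][] hierarchy = definition.getHierarchy("age");
        System.out.println("Levels for 'age': " + hierarchy[0].length); // 3
    }
}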
Example 1: getSolutionSpaceSize
import org.deidentifier.arx.DataDefinition; // import the package/class the method depends on
/**
 * Returns the size of the solution space for the given dataset.
 * @param dataset the benchmark dataset
 * @return the number of transformations in the solution space
 * @throws IOException
 */
private static int getSolutionSpaceSize(BenchmarkDataset dataset) throws IOException {
    int size = 1;
    DataDefinition definition = BenchmarkSetup.getData(dataset).getDefinition();
    for (String qi : definition.getQuasiIdentifiersWithGeneralization()) {
        size *= definition.getHierarchy(qi)[0].length;
    }
    return size;
}
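Since getHierarchy(qi) returns the hierarchy as a String[rows][levels] array, getHierarchy(qi)[0].length is the number of generalization levels of that quasi-identifier (including level 0, the original values). Assuming, for illustration, three quasi-identifiers with 2, 3 and 4 levels, the solution space would contain 2 × 3 × 4 = 24 transformations.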
Example 2: computeUpperBounds
import org.deidentifier.arx.DataDefinition; // import the package/class the method depends on
/**
 * Computes the upper bounds for the utility measures.
 * @param dataset the benchmark dataset
 * @throws IOException
 */
private void computeUpperBounds(BenchmarkDataset dataset) throws IOException {

    // Prepare
    Data data = BenchmarkSetup.getData(dataset);
    DataDefinition definition = data.getDefinition();
    DataHandle inputHandle = data.getHandle();

    // Convert to completely suppressed output data
    DataConverter converter = new DataConverter();
    String[][] input = converter.toArray(inputHandle);
    String[][] output = new String[inputHandle.getNumRows()][inputHandle.getNumColumns()];
    for (int i = 0; i < inputHandle.getNumRows(); i++) {
        Arrays.fill(output[i], "*");
    }
    Map<String, String[][]> hierarchies = converter.toMap(definition);
    String[] header = converter.getHeader(inputHandle);
    int[] transformation = new int[definition.getQuasiIdentifyingAttributes().size()];
    for (String attr : definition.getQuasiIdentifyingAttributes()) {
        int maxLevel = definition.getHierarchy(attr)[0].length - 1;
        transformation[inputHandle.getColumnIndexOf(attr)] = maxLevel;
    }

    // Compute metrics
    double outputLoss = new UtilityMeasureLoss<Double>(header, hierarchies, AggregateFunction.GEOMETRIC_MEAN).evaluate(output).getUtility();
    double outputEntropy = new UtilityMeasureNonUniformEntropyWithLowerBoundNormalized<Double>(header, input, hierarchies).evaluate(output, transformation).getUtility();

    // Store results
    if (!upper.containsKey(dataset)) {
        upper.put(dataset, new HashMap<BenchmarkUtilityMeasure, Double>());
    }
    upper.get(dataset).put(BenchmarkUtilityMeasure.LOSS, outputLoss);
    upper.get(dataset).put(BenchmarkUtilityMeasure.ENTROPY, outputEntropy);
}
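A short note on the transformation built above: since level 0 of each hierarchy contains the original values, getHierarchy(attr)[0].length - 1 is the index of an attribute's topmost generalization level, so the fully suppressed output is evaluated against the transformation that generalizes every quasi-identifier to its maximum level.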
Example 3: initializeInternal
import org.deidentifier.arx.DataDefinition; // import the package/class the method depends on
@Override
protected void initializeInternal(final DataManager manager,
                                  final DataDefinition definition,
                                  final Data input,
                                  final GeneralizationHierarchy[] hierarchies,
                                  final ARXConfiguration config) {

    // Prepare weights
    super.initializeInternal(manager, definition, input, hierarchies, config);

    // Compute domain shares
    this.max = 1d;
    this.shares = new DomainShare[hierarchies.length];
    for (int i = 0; i < shares.length; i++) {

        // Extract info
        String attribute = input.getHeader()[i];
        String[][] hierarchy = definition.getHierarchy(attribute);
        this.shares[i] = new DomainShareMaterialized(hierarchy,
                                                     input.getDictionary().getMapping()[i],
                                                     hierarchies[i].getArray());
        this.max *= hierarchy.length;
    }

    // Determine total number of tuples
    this.tuples = (double) super.getNumRecords(config, input);
    this.max *= this.tuples;
}
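Note the two different dimensions of the returned array in this example: hierarchy.length is the number of rows, i.e. the size of the attribute's domain, while hierarchy[0].length would be its number of generalization levels. As far as this snippet shows, the upper bound max is therefore the product of the domain sizes of all attributes multiplied by the number of records.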
Example 4: toXML
import org.deidentifier.arx.DataDefinition; // import the package/class the method depends on
/**
 * Returns an XML representation of the data definition.
 *
 * @param config the model configuration
 * @param handle the data handle
 * @param definition the data definition
 * @return the XML representation as a string
 * @throws IOException
 */
private String toXML(final ModelConfiguration config,
                     final DataHandle handle,
                     final DataDefinition definition) throws IOException {

    XMLWriter writer = new XMLWriter();
    writer.indent(vocabulary.getDefinition());
    for (int i = 0; i < handle.getNumColumns(); i++) {
        final String attr = handle.getAttributeName(i);
        AttributeType t = definition.getAttributeType(attr);
        DataType<?> dt = definition.getDataType(attr);
        if (t == null) t = AttributeType.IDENTIFYING_ATTRIBUTE;
        if (dt == null) dt = DataType.STRING;

        writer.indent(vocabulary.getAssigment());
        writer.write(vocabulary.getName(), attr);
        writer.write(vocabulary.getType(), t.toString());
        writer.write(vocabulary.getDatatype(), dt.getDescription().getLabel());
        if (dt.getDescription().hasFormat()) {
            String format = ((DataTypeWithFormat) dt).getFormat();
            if (format != null) {
                writer.write(vocabulary.getFormat(), format);
            }
            Locale locale = ((DataTypeWithFormat) dt).getLocale();
            if (locale != null) {
                writer.write(vocabulary.getLocale(), locale.getLanguage().toUpperCase());
            }
        }

        // Do we have a hierarchy
        if (definition.getHierarchy(attr) != null && definition.getHierarchy(attr).length != 0 &&
            definition.getHierarchy(attr)[0].length != 0) {
            writer.write(vocabulary.getRef(), "hierarchies/" + toFileName(attr) + ".csv"); //$NON-NLS-1$ //$NON-NLS-2$
            Integer min = config.getMinimumGeneralization(attr);
            Integer max = config.getMaximumGeneralization(attr);
            writer.write(vocabulary.getMin(), min == null ? "All" : String.valueOf(min)); //$NON-NLS-1$
            writer.write(vocabulary.getMax(), max == null ? "All" : String.valueOf(max)); //$NON-NLS-1$
        }

        // Do we have a microaggregate function
        if (definition.getMicroAggregationFunction(attr) != null) {
            writer.write(vocabulary.getMicroAggregationFunction(), config.getMicroAggregationFunction(attr).getLabel());
            writer.write(vocabulary.getMicroAggregationIgnoreMissingData(), config.getMicroAggregationIgnoreMissingData(attr));
        }
        writer.unindent();
    }
    writer.unindent();
    return writer.toString();
}
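In this last example getHierarchy is used defensively: the hierarchy file reference and the configured minimum and maximum generalization levels are only written if the attribute actually has a non-empty hierarchy, i.e. at least one row with at least one level.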