本文整理汇总了Java中gnu.trove.TIntHashSet.addAll方法的典型用法代码示例。如果您正苦于以下问题:Java TIntHashSet.addAll方法的具体用法?Java TIntHashSet.addAll怎么用?Java TIntHashSet.addAll使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类gnu.trove.TIntHashSet
的用法示例。
在下文中一共展示了TIntHashSet.addAll方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getFragmentStartingLines
import gnu.trove.TIntHashSet; //导入方法依赖的package包/类
/**
 * Returns the sorted, de-duplicated base-side starting line numbers of all
 * non-applied change blocks from both branches of the merge, or an empty
 * array when no merge list is attached.
 */
public int[] getFragmentStartingLines() {
  final TIntHashSet startLines = new TIntHashSet();
  if (myMergeList != null) {
    for (int side = 0; side < 2; side++) {
      final FragmentSide branchSide = FragmentSide.fromIndex(side);
      startLines.addAll(
          myMergeList.getChanges(branchSide)
              .getNonAppliedLineBlocks()
              .getBeginnings(MergeList.BASE_SIDE));
    }
  }
  final int[] sorted = startLines.toArray();
  Arrays.sort(sorted);
  return sorted;
}
示例2: dirichletMultinomialLikelihoodRatio
import gnu.trove.TIntHashSet; //导入方法依赖的package包/类
/** What is the probability that these two observations were drawn from
 * the same multinomial with symmetric Dirichlet prior alpha, relative
 * to the probability that they were drawn from different multinomials
 * both drawn from this Dirichlet?
 *
 * @param countsX  observation counts for the first sample, keyed by outcome id
 * @param countsY  observation counts for the second sample, keyed by outcome id
 * @param alpha    symmetric Dirichlet concentration per dimension
 * @param alphaSum alpha times the number of dimensions
 * @return the log likelihood ratio (log of same-source / different-source)
 */
public static double dirichletMultinomialLikelihoodRatio(TIntIntHashMap countsX,
                                                         TIntIntHashMap countsY,
                                                         double alpha, double alphaSum) {
    // The likelihood for one DCM is
    //  Gamma( alpha_sum )        prod Gamma( alpha + N_i )
    //  prod Gamma ( alpha )      Gamma ( alpha_sum + N )
    // When we divide this by the product of two other DCMs with the same
    //  alpha parameter, the first term in the numerator cancels with the
    //  first term in the denominator. Then moving the remaining alpha-only
    //  term to the numerator, we get
    //   prod Gamma(alpha)     prod Gamma( alpha + X_i + Y_i )
    //   Gamma (alpha_sum)     Gamma( alpha_sum + X_sum + Y_sum )
    //  ----------------------------------------------------------
    //   prod Gamma(alpha + X_i)  prod Gamma(alpha + Y_i)
    //   Gamma( alpha_sum + X_sum )  Gamma( alpha_sum + Y_sum )
    double logLikelihood = 0.0;
    // Hoisted loop invariant: logGamma(alpha) is the same for every key.
    // (The original computed this but then re-evaluated logGamma(alpha)
    // on every loop iteration, leaving this variable unused.)
    double logGammaAlpha = logGamma(alpha);
    int totalX = 0;
    int totalY = 0;
    int key, x, y;
    // Iterate over the union of outcome ids seen in either sample.
    TIntHashSet distinctKeys = new TIntHashSet();
    distinctKeys.addAll(countsX.keys());
    distinctKeys.addAll(countsY.keys());
    TIntIterator iterator = distinctKeys.iterator();
    while (iterator.hasNext()) {
        key = iterator.next();
        // Missing keys contribute a count of zero.
        x = 0;
        if (countsX.containsKey(key)) {
            x = countsX.get(key);
        }
        y = 0;
        if (countsY.containsKey(key)) {
            y = countsY.get(key);
        }
        totalX += x;
        totalY += y;
        // Per-dimension term of the ratio, in log space.
        logLikelihood += logGammaAlpha + logGamma(alpha + x + y)
                - logGamma(alpha + x) - logGamma(alpha + y);
    }
    // Normalization terms involving the total counts.
    logLikelihood += logGamma(alphaSum + totalX) + logGamma(alphaSum + totalY)
            - logGamma(alphaSum) - logGamma(alphaSum + totalX + totalY);
    return logLikelihood;
}
示例3: mean
import gnu.trove.TIntHashSet; //导入方法依赖的package包/类
/**
 * Returns a <CODE>SparseVector</CODE> whose entries (taken from the union of
 * those in the instances) are the expected values of those in the
 * <CODE>InstanceList</CODE>. This implies the returned vector will not have
 * binary values.
 */
public static SparseVector mean(InstanceList instances) {
  if (instances == null || instances.size() == 0) {
    return null;
  }
  // Union of every index used by any instance, plus the largest index seen
  // in sparse and dense representations respectively.
  TIntHashSet unionIndices = new TIntHashSet(instances.getDataAlphabet().size());
  int maxSparseIndex = -1;
  int maxDenseIndex = -1;
  Iterator<Instance> it = instances.iterator();
  while (it.hasNext()) {
    Instance inst = (Instance) it.next();
    SparseVector vec = (SparseVector) (inst.getData());
    int[] idx = vec.getIndices();
    if (idx == null) {
      // Dense vector: its indices are implicitly 0..numLocations()-1.
      if (vec.numLocations() > maxDenseIndex) {
        maxDenseIndex = vec.numLocations() - 1;
      }
    } else {
      unionIndices.addAll(idx);
      // Indices are assumed sorted, so the last entry is the maximum.
      int last = idx[idx.length - 1];
      if (last > maxSparseIndex) {
        maxSparseIndex = last;
      }
    }
  }
  if (maxDenseIndex > -1) { // dense vectors were present
    if (maxSparseIndex > maxDenseIndex) {
      // Sparse vectors reached beyond the dense range, so the result must be
      // sparse; make sure every dense position is represented in the union.
      for (int i = 0; i <= maxDenseIndex; i++) {
        unionIndices.add(i);
      }
    } else {
      // No sparse index exceeds the dense range: a dense mean covers
      // everything, so delegate to the dense-size overload.
      return mean(instances, maxDenseIndex + 1);
    }
  }
  // Reaching this statement implies we can create a sparse vector.
  return mean(instances, unionIndices.toArray());
}
示例4: ClassIDs
import gnu.trove.TIntHashSet; //导入方法依赖的package包/类
/**
 * Given a path to search for the id file, loads the mappings from the file if found.
 *
 * @param classIDsFile the class-id mapping file to load
 * @throws IOException if the file does not exist or cannot be read
 * @throws RuntimeException if the file maps the same numeric id twice (corrupt cache)
 */
public ClassIDs(File classIDsFile) throws IOException {
    // Load the class ids file for this trace.
    this.classIDFile = classIDsFile;
    if (!classIDFile.isFile()) {
        throw new IOException("Couldn't find file " + classIDsFile);
    }
    // try-with-resources: the original leaked the reader if ClassID parsing
    // or the corruption check below threw before reader.close() was reached.
    // NOTE(review): FileReader uses the platform default charset — confirm
    // the file is written with the same charset before switching to UTF-8.
    try (BufferedReader reader = new BufferedReader(new FileReader(classIDFile))) {
        if (classIDFile.length() > 0) {
            String entry;
            while ((entry = reader.readLine()) != null) {
                ClassID id = new ClassID(entry);
                // Duplicate numeric ids mean the cache is inconsistent; fail loudly.
                if (classesByID.containsKey(id.id)) throw new RuntimeException("This classid cache is corrupt! " + classesByID.get(id.id) + " is already assigned id " + id);
                classesByName.put(id.name, id);
                classesByID.put(id.id, id);
                // Track the highest id so new classes get fresh ids.
                if (id.id > nextClassID) nextClassID = id.id;
            }
            nextClassID++;
        }
    }
    // Seed the set of classes that produce textual output with the known roots
    // and every recorded subclass of each root.
    subclassesOfTextualOutputProducers = new TIntHashSet();
    subclassesOfTextualOutputProducers.add(QualifiedClassName.STRING_BUILDER.getID());
    subclassesOfTextualOutputProducers.addAll(getSubclassesOf(QualifiedClassName.STRING_BUILDER).toArray());
    subclassesOfTextualOutputProducers.add(QualifiedClassName.OUTPUT_STREAM.getID());
    subclassesOfTextualOutputProducers.addAll(getSubclassesOf(QualifiedClassName.OUTPUT_STREAM).toArray());
    subclassesOfTextualOutputProducers.add(QualifiedClassName.WRITER.getID());
    subclassesOfTextualOutputProducers.addAll(getSubclassesOf(QualifiedClassName.WRITER).toArray());
}