This article collects typical usage examples of the Java method gnu.trove.set.hash.TIntHashSet.addAll. If you are wondering what TIntHashSet.addAll does, how to call it, or what it looks like in real code, the curated samples below should help. You can also browse further usage examples of the enclosing class, gnu.trove.set.hash.TIntHashSet.
The sections below show 15 code examples of TIntHashSet.addAll, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Java examples.
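Before the project-specific examples, here is a minimal, self-contained sketch (not taken from any project below) of the three addAll overloads that TIntHashSet accepts: a primitive int[], another Trove collection, and a java.util Collection of Integer.
import java.util.Arrays;
import gnu.trove.list.array.TIntArrayList;
import gnu.trove.set.hash.TIntHashSet;

public class AddAllDemo {
    public static void main(String[] args) {
        TIntHashSet set = new TIntHashSet();

        // addAll(int[]): bulk-add primitive ints without boxing
        set.addAll(new int[] { 1, 2, 3 });

        // addAll(TIntCollection): merge another Trove collection
        TIntArrayList more = new TIntArrayList(new int[] { 3, 4, 5 });
        set.addAll(more);

        // addAll(Collection<? extends Integer>): interoperate with java.util collections
        set.addAll(Arrays.asList(6, 7));

        System.out.println(set.size()); // 7, duplicates are ignored
    }
}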
Example 1: TargetRegisterClass
import gnu.trove.set.hash.TIntHashSet; // import the package/class the method depends on
protected TargetRegisterClass(int id,
String name,
EVT[] vts,
TargetRegisterClass[] subcs,
TargetRegisterClass[] supercs,
TargetRegisterClass[] subregcs,
TargetRegisterClass[] superregcs,
int regsz, int regAlign,
int copyCost,
int[] regs)
{
this.id = id;
this.name = name;
this.vts = vts;
subClasses = subcs;
superClasses = supercs;
subRegClasses = subregcs;
superRegClasses = superregcs;
regSize = regsz;
this.regAlign = regAlign;
this.regs = regs;
this.copyCost = copyCost;
regSet = new TIntHashSet();
regSet.addAll(regs);
}
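In the constructor above, regSet is filled once from the regs array so that later register-membership queries run in constant time instead of scanning the int[]. A reduced sketch of that idiom, with made-up register numbers:
import gnu.trove.set.hash.TIntHashSet;

public class RegSetDemo {
    public static void main(String[] args) {
        int[] regs = { 16, 17, 18, 19 };          // hypothetical register numbers
        TIntHashSet regSet = new TIntHashSet();
        regSet.addAll(regs);

        System.out.println(regSet.contains(17));  // true, O(1) membership test
        System.out.println(regSet.contains(42));  // false
    }
}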
Example 2: jaccardIndex
import gnu.trove.set.hash.TIntHashSet; // import the package/class the method depends on
public static double jaccardIndex(TIntHashSet a, TIntHashSet b) {
TIntHashSet union = new TIntHashSet(a);
union.addAll(b);
// count up intersection:
AtomicInteger count = new AtomicInteger();
a.forEach(x -> {
if(b.contains(x)) {
count.incrementAndGet();
}
return true;
});
double unionSize = union.size();
double isectSize = count.get();
return isectSize / unionSize;
}
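A quick usage fragment, assuming the static jaccardIndex method above is in scope (for example via a static import); the two hypothetical sets share two of their four distinct elements:
import gnu.trove.set.hash.TIntHashSet;

TIntHashSet a = new TIntHashSet(new int[] { 1, 2, 3 });
TIntHashSet b = new TIntHashSet(new int[] { 2, 3, 4 });
double sim = jaccardIndex(a, b);  // |{2, 3}| / |{1, 2, 3, 4}| = 2 / 4 = 0.5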
Example 3: getNeighborsAtRadius
import gnu.trove.set.hash.TIntHashSet; // import the package/class the method depends on
public TIntHashSet getNeighborsAtRadius(final int placeIndex, final int radius, final int xSize,
final int ySize, final boolean isTorus) {
final TIntHashSet currentNeigh = new TIntHashSet();
currentNeigh.add(placeIndex);
for ( int i = 0; i < radius; i++ ) {
final TIntHashSet newNeigh = new TIntHashSet();
TIntIterator it = currentNeigh.iterator();
while (it.hasNext()) {
newNeigh.addAll(getNeighborsAtRadius1(it.next(), xSize, ySize, isTorus));
}
currentNeigh.addAll(newNeigh);
}
currentNeigh.remove(placeIndex);
return currentNeigh;
}
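The helper getNeighborsAtRadius1 is not shown above, so the following self-contained sketch reproduces only the grow-then-addAll loop, substituting a hypothetical 4-neighbourhood helper on a non-torus grid:
import gnu.trove.iterator.TIntIterator;
import gnu.trove.set.hash.TIntHashSet;

public class RadiusDemo {
    // hypothetical stand-in for getNeighborsAtRadius1: the 4-neighbourhood
    // of a cell index on a non-torus xSize * ySize grid
    static TIntHashSet neighborsAtRadius1(int index, int xSize, int ySize) {
        TIntHashSet n = new TIntHashSet();
        int x = index % xSize, y = index / xSize;
        if (x > 0)         n.add(index - 1);
        if (x < xSize - 1) n.add(index + 1);
        if (y > 0)         n.add(index - xSize);
        if (y < ySize - 1) n.add(index + xSize);
        return n;
    }

    public static void main(String[] args) {
        int xSize = 5, ySize = 5, start = 12, radius = 2;
        TIntHashSet current = new TIntHashSet();
        current.add(start);
        for (int i = 0; i < radius; i++) {
            TIntHashSet grown = new TIntHashSet();
            TIntIterator it = current.iterator();
            while (it.hasNext()) {
                grown.addAll(neighborsAtRadius1(it.next(), xSize, ySize));
            }
            current.addAll(grown);
        }
        current.remove(start);
        System.out.println(current.size()); // 12 cells within Manhattan distance 2 of the center
    }
}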
Example 4: testUniquenessAndDeterminance
import gnu.trove.set.hash.TIntHashSet; // import the package/class the method depends on
@Test
public void testUniquenessAndDeterminance() {
Random r = new FastRandom(42);
TIntHashSet set = new TIntHashSet();
TIntHashSet knownExpectedRepeats = new TIntHashSet();
knownExpectedRepeats.addAll(new int[] { 9368, 149368, 193310, 194072, 202906, 241908, 249466, 266101, 276853, 289339, 293737 } );
for(int i = 0;i < 300000;i++) {
int rndInt = r.nextInt();
if(set.contains(rndInt) && !knownExpectedRepeats.contains(i)) {
fail();
}
set.add(rndInt);
}
}
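A side note on the contains-then-add pattern in that loop: TIntHashSet.add returns false when the value is already present, so repeat detection can be written without the separate contains call. A sketch of the idea only (plain java.util.Random replaces the test's FastRandom, which is a project-specific class):
import java.util.Random;
import gnu.trove.set.hash.TIntHashSet;

public class DuplicateProbe {
    public static void main(String[] args) {
        Random r = new Random(42);
        TIntHashSet seen = new TIntHashSet();
        for (int i = 0; i < 300_000; i++) {
            int value = r.nextInt();
            if (!seen.add(value)) {   // add(...) returns false on a repeat
                System.out.println("repeat of " + value + " at iteration " + i);
            }
        }
    }
}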
Example 5: getLinks
import gnu.trove.set.hash.TIntHashSet; // import the package/class the method depends on
/**
* Returns all the links where the process with the given ID is either
* provider or recipient.
*/
public List<ProcessLink> getLinks(long processId) {
// because this method could be called quite often in graphical
// presentations of product systems and there can be a lot of links
// in some kind of product systems (e.g. from IO-databases) we do
// not just merge the incoming and outgoing links here
TIntHashSet intSet = new TIntHashSet(Constants.DEFAULT_CAPACITY,
Constants.DEFAULT_LOAD_FACTOR, -1);
TIntArrayList list = providerIndex.get(processId);
if (list != null)
intSet.addAll(list);
list = recipientIndex.get(processId);
if (list != null)
intSet.addAll(list);
return getLinks(intSet.iterator());
}
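A reduced sketch of the de-duplicating merge performed above, with two hypothetical TIntArrayLists standing in for the providerIndex/recipientIndex lookups; Constants.DEFAULT_CAPACITY and Constants.DEFAULT_LOAD_FACTOR belong to the surrounding project, so plain literals are used instead:
import gnu.trove.list.array.TIntArrayList;
import gnu.trove.set.hash.TIntHashSet;

public class LinkMergeDemo {
    public static void main(String[] args) {
        TIntArrayList asProvider = new TIntArrayList(new int[] { 3, 5, 8 });
        TIntArrayList asRecipient = new TIntArrayList(new int[] { 5, 9 });

        // capacity, load factor, and the "no entry" sentinel (-1), as in the example
        TIntHashSet linkIndices = new TIntHashSet(16, 0.75f, -1);
        linkIndices.addAll(asProvider);
        linkIndices.addAll(asRecipient);

        System.out.println(linkIndices.size()); // 4, the shared index 5 is stored only once
    }
}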
Example 6: analyzeTrainingFeatures
import gnu.trove.set.hash.TIntHashSet; // import the package/class the method depends on
private void analyzeTrainingFeatures(Map<Integer, Float> ratings) {
selectedFeatures = new TIntHashSet();
for (int id : ratings.keySet()) {
if (map_item_intFeatures.containsKey(id)) {
selectedFeatures.addAll(map_item_intFeatures.get(id).keySet());
}
}
// System.out.println(u + " train features " + selectedFeatures.size());
}
Example 7: cmpCosineSim
import gnu.trove.set.hash.TIntHashSet; // import the package/class the method depends on
private float cmpCosineSim(TIntFloatHashMap v1, TIntFloatHashMap v2) {
TIntHashSet inters = new TIntHashSet();
inters.addAll(v1.keySet());
inters.retainAll(v2.keySet());
if (inters.size() == 0)
return 0;
else {
int i = 0;
TIntIterator it = inters.iterator();
float num = 0;
float norm_v1 = 0;
float norm_v2 = 0;
while (it.hasNext()) {
i = it.next();
num += v1.get(i) * v2.get(i);
}
for (int k1 : v1.keys())
norm_v1 += (v1.get(k1) * v1.get(k1));
for (int k2 : v2.keys())
norm_v2 += (v2.get(k2) * v2.get(k2));
return num / (float) (Math.sqrt(norm_v1) * Math.sqrt(norm_v2));
}
}
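A usage fragment, assuming cmpCosineSim above is callable (it is declared private, so in practice the call would sit in the same class); the two hypothetical sparse vectors overlap only on key 1:
import gnu.trove.map.hash.TIntFloatHashMap;

TIntFloatHashMap v1 = new TIntFloatHashMap();
v1.put(1, 1.0f);
v1.put(2, 2.0f);

TIntFloatHashMap v2 = new TIntFloatHashMap();
v2.put(1, 3.0f);
v2.put(5, 4.0f);

float sim = cmpCosineSim(v1, v2);
// numerator 1*3 = 3, norms sqrt(5) and sqrt(25), so sim = 3 / (sqrt(5) * 5) ≈ 0.27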
Example 8: search
import gnu.trove.set.hash.TIntHashSet; // import the package/class the method depends on
/**
* Search for a similar data item in the underlying tables and return all
* matches
*
* @param data
* the point
* @return matched ids
*/
public TIntHashSet search(OBJECT data) {
final TIntHashSet pl = new TIntHashSet();
for (final Table<OBJECT> table : tables) {
final TIntArrayList result = table.searchPoint(data);
if (result != null)
pl.addAll(result);
}
return pl;
}
Example 9: getLinks
import gnu.trove.set.hash.TIntHashSet; // import the package/class the method depends on
/**
* Returns all the links where the process with the given ID is either
* provider or connected with a provider.
*/
public List<ProcessLink> getLinks(long processId) {
// because this method could be called quite often in graphical
// presentations of product systems and there can be a lot of links
// in some kind of product systems (e.g. from IO-databases) we do
// not just merge the incoming and outgoing links here
TIntHashSet intSet = new TIntHashSet(Constants.DEFAULT_CAPACITY, Constants.DEFAULT_LOAD_FACTOR, -1);
TIntArrayList list = providerIndex.get(processId);
if (list != null)
intSet.addAll(list);
list = connectionIndex.get(processId);
if (list != null)
intSet.addAll(list);
return getLinks(intSet.iterator());
}
Example 10: dirichletMultinomialLikelihoodRatio
import gnu.trove.set.hash.TIntHashSet; // import the package/class the method depends on
/** What is the probability that these two observations were drawn from
* the same multinomial with symmetric Dirichlet prior alpha, relative
* to the probability that they were drawn from different multinomials
* both drawn from this Dirichlet?
*/
public static double dirichletMultinomialLikelihoodRatio(TIntIntHashMap countsX,
TIntIntHashMap countsY,
double alpha, double alphaSum) {
// The likelihood for one DCM is
// Gamma( alpha_sum ) prod Gamma( alpha + N_i )
// prod Gamma ( alpha ) Gamma ( alpha_sum + N )
// When we divide this by the product of two other DCMs with the same
// alpha parameter, the first term in the numerator cancels with the
// first term in the denominator. Then moving the remaining alpha-only
// term to the numerator, we get
// prod Gamma(alpha) prod Gamma( alpha + X_i + Y_i )
// Gamma (alpha_sum) Gamma( alpha_sum + X_sum + Y_sum )
// ----------------------------------------------------------
// prod Gamma(alpha + X_i) prod Gamma(alpha + Y_i)
// Gamma( alpha_sum + X_sum ) Gamma( alpha_sum + Y_sum )
double logLikelihood = 0.0;
double logGammaAlpha = logGamma(alpha);
int totalX = 0;
int totalY = 0;
int key, x, y;
TIntHashSet distinctKeys = new TIntHashSet();
distinctKeys.addAll(countsX.keys());
distinctKeys.addAll(countsY.keys());
TIntIterator iterator = distinctKeys.iterator();
while (iterator.hasNext()) {
key = iterator.next();
x = 0;
if (countsX.containsKey(key)) {
x = countsX.get(key);
}
y = 0;
if (countsY.containsKey(key)) {
y = countsY.get(key);
}
totalX += x;
totalY += y;
logLikelihood += logGamma(alpha) + logGamma(alpha + x + y)
- logGamma(alpha + x) - logGamma(alpha + y);
}
logLikelihood += logGamma(alphaSum + totalX) + logGamma(alphaSum + totalY)
- logGamma(alphaSum) - logGamma(alphaSum + totalX + totalY);
return logLikelihood;
}
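A usage fragment, assuming the static method above (and whatever logGamma implementation it relies on) is in scope; the two hypothetical count histograms are over token ids 0..2:
import gnu.trove.map.hash.TIntIntHashMap;

TIntIntHashMap countsX = new TIntIntHashMap();
countsX.put(0, 4);
countsX.put(1, 1);

TIntIntHashMap countsY = new TIntIntHashMap();
countsY.put(0, 3);
countsY.put(2, 2);

// symmetric Dirichlet prior: alpha per dimension, alphaSum = alpha * number of dimensions
double logRatio = dirichletMultinomialLikelihoodRatio(countsX, countsY, 0.1, 0.3);
// roughly: logRatio > 0 favours "same multinomial", logRatio < 0 favours "different multinomials"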
Example 11: mean
import gnu.trove.set.hash.TIntHashSet; // import the package/class the method depends on
/**
* Returns a <CODE>SparseVector</CODE> whose entries (taken from the union of
* those in the instances) are the expected values of those in the
* <CODE>InstanceList</CODE>. This implies the returned vector will not have
* binary values.
*/
public static SparseVector mean(InstanceList instances) {
if (instances == null || instances.size() == 0)
return null;
Iterator<Instance> instanceItr = instances.iterator();
SparseVector v;
Instance instance;
int indices[];
int maxSparseIndex = -1;
int maxDenseIndex = -1;
// First, we find the union of all the indices used in the instances
TIntHashSet hIndices = new TIntHashSet(instances.getDataAlphabet().size());
while (instanceItr.hasNext()) {
instance = (Instance) instanceItr.next();
v = (SparseVector) (instance.getData());
indices = v.getIndices();
if (indices != null) {
hIndices.addAll(indices);
if (indices[indices.length - 1] > maxSparseIndex)
maxSparseIndex = indices[indices.length - 1];
} else // dense
if (v.numLocations() > maxDenseIndex)
maxDenseIndex = v.numLocations() - 1;
}
if (maxDenseIndex > -1) // dense vectors were present
{
if (maxSparseIndex > maxDenseIndex)
// sparse vectors were present and they had greater indices than
// the dense vectors
{
// therefore, we create sparse vectors and
// add all the dense indices
for (int i = 0; i <= maxDenseIndex; i++)
hIndices.add(i);
} else
// sparse indices may have been present, but we don't care
// since they never had indices that exceeded those of the
// dense vectors
{
return mean(instances, maxDenseIndex + 1);
}
}
// reaching this statement implies we can create a sparse vector
return mean(instances, hIndices.toArray());
}
Example 12: testMapPotential2D
import gnu.trove.set.hash.TIntHashSet; // import the package/class the method depends on
@Test
public void testMapPotential2D() {
setupParameters();
parameters.setInputDimensions(new int[] { 6, 12 });
parameters.setColumnDimensions(new int[] { 2, 4 });
parameters.setPotentialRadius(1);
parameters.setPotentialPct(1);
initSP();
//Test without wrapAround
int[] mask = sp.mapPotential(mem, 0, false);
TIntHashSet trueIndices = new TIntHashSet(new int[] { 0, 1, 2, 12, 13, 14, 24, 25, 26 });
TIntHashSet maskSet = new TIntHashSet(mask);
assertTrue(trueIndices.equals(maskSet));
trueIndices.clear();
maskSet.clear();
trueIndices.addAll(new int[] { 6, 7, 8, 18, 19, 20, 30, 31, 32 });
mask = sp.mapPotential(mem, 2, false);
maskSet.addAll(mask);
assertTrue(trueIndices.equals(maskSet));
//Test with wrapAround
trueIndices.clear();
maskSet.clear();
parameters.setPotentialRadius(2);
initSP();
trueIndices.addAll(
new int[] { 0, 1, 2, 3, 11,
12, 13, 14, 15, 23,
24, 25, 26, 27, 35,
36, 37, 38, 39, 47,
60, 61, 62, 63, 71 });
mask = sp.mapPotential(mem, 0, true);
maskSet.addAll(mask);
assertTrue(trueIndices.equals(maskSet));
trueIndices.clear();
maskSet.clear();
trueIndices.addAll(
new int[] { 0, 8, 9, 10, 11,
12, 20, 21, 22, 23,
24, 32, 33, 34, 35,
36, 44, 45, 46, 47,
60, 68, 69, 70, 71 });
mask = sp.mapPotential(mem, 3, true);
maskSet.addAll(mask);
assertTrue(trueIndices.equals(maskSet));
}
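Two details carry this test: TIntHashSet(int[]) is shorthand for creating an empty set and calling addAll, and equals on two TIntHashSets compares contents regardless of insertion order. A minimal sketch of that assertion pattern, with made-up indices:
import gnu.trove.set.hash.TIntHashSet;

public class SetEqualityDemo {
    public static void main(String[] args) {
        TIntHashSet expected = new TIntHashSet(new int[] { 0, 1, 2, 12, 13, 14 });

        TIntHashSet actual = new TIntHashSet();
        actual.addAll(new int[] { 14, 13, 12, 2, 1, 0 });   // different order, same contents

        System.out.println(expected.equals(actual));         // true
    }
}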
Example 13: setCacheableResponseCodes
import gnu.trove.set.hash.TIntHashSet; // import the package/class the method depends on
public MemcachedStorageConfigBuilder setCacheableResponseCodes(int... statusCodes) {
cacheableResponseCodes = new TIntHashSet(statusCodes.length,1.0f);
cacheableResponseCodes.addAll(statusCodes);
return this;
}
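The builder keeps the status codes in a TIntHashSet so that a later "is this response cacheable?" check is a single contains call. A reduced sketch of just that idiom with hypothetical status codes (the rest of the builder API is not shown here and is not reproduced):
import gnu.trove.set.hash.TIntHashSet;

public class CacheableCodesDemo {
    static TIntHashSet cacheableResponseCodes(int... statusCodes) {
        TIntHashSet codes = new TIntHashSet(statusCodes.length, 1.0f);
        codes.addAll(statusCodes);
        return codes;
    }

    public static void main(String[] args) {
        TIntHashSet cacheable = cacheableResponseCodes(200, 203, 300, 301, 410);
        System.out.println(cacheable.contains(200));  // true
        System.out.println(cacheable.contains(500));  // false
    }
}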
Example 14: dirichletMultinomialLikelihoodRatio
import gnu.trove.set.hash.TIntHashSet; // import the package/class the method depends on
/** What is the probability that these two observations were drawn from
* the same multinomial with symmetric Dirichlet prior alpha, relative
* to the probability that they were drawn from different multinomials
* both drawn from this Dirichlet?
*/
public static double dirichletMultinomialLikelihoodRatio(TIntIntHashMap countsX,
TIntIntHashMap countsY,
double alpha, double alphaSum) {
// The likelihood for one DCM is
// Gamma( alpha_sum ) prod Gamma( alpha + N_i )
// prod Gamma ( alpha ) Gamma ( alpha_sum + N )
// When we divide this by the product of two other DCMs with the same
// alpha parameter, the first term in the numerator cancels with the
// first term in the denominator. Then moving the remaining alpha-only
// term to the numerator, we get
// prod Gamma(alpha) prod Gamma( alpha + X_i + Y_i )
// Gamma (alpha_sum) Gamma( alpha_sum + X_sum + Y_sum )
// ----------------------------------------------------------
// prod Gamma(alpha + X_i) prod Gamma(alpha + Y_i)
// Gamma( alpha_sum + X_sum ) Gamma( alpha_sum + Y_sum )
double logLikelihood = 0.0;
//double logGammaAlpha = logGamma(alpha);
int totalX = 0;
int totalY = 0;
int key, x, y;
TIntHashSet distinctKeys = new TIntHashSet();
distinctKeys.addAll(countsX.keys());
distinctKeys.addAll(countsY.keys());
TIntIterator iterator = distinctKeys.iterator();
while (iterator.hasNext()) {
key = iterator.next();
x = 0;
if (countsX.containsKey(key)) {
x = countsX.get(key);
}
y = 0;
if (countsY.containsKey(key)) {
y = countsY.get(key);
}
totalX += x;
totalY += y;
logLikelihood += logGamma(alpha) + logGamma(alpha + x + y)
- logGamma(alpha + x) - logGamma(alpha + y);
}
logLikelihood += logGamma(alphaSum + totalX) + logGamma(alphaSum + totalY)
- logGamma(alphaSum) - logGamma(alphaSum + totalX + totalY);
return logLikelihood;
}
Example 15: dirichletMultinomialLikelihoodRatio
import gnu.trove.set.hash.TIntHashSet; // import the package/class the method depends on
/**
* What is the probability that these two observations were drawn from the
* same multinomial with symmetric Dirichlet prior alpha, relative to the
* probability that they were drawn from different multinomials both drawn
* from this Dirichlet?
*/
public static double dirichletMultinomialLikelihoodRatio(TIntIntHashMap countsX,
TIntIntHashMap countsY,
double alpha, double alphaSum) {
// The likelihood for one DCM is
// Gamma( alpha_sum ) prod Gamma( alpha + N_i )
// prod Gamma ( alpha ) Gamma ( alpha_sum + N )
// When we divide this by the product of two other DCMs with the same
// alpha parameter, the first term in the numerator cancels with the
// first term in the denominator. Then moving the remaining alpha-only
// term to the numerator, we get
// prod Gamma(alpha) prod Gamma( alpha + X_i + Y_i )
// Gamma (alpha_sum) Gamma( alpha_sum + X_sum + Y_sum )
// ----------------------------------------------------------
// prod Gamma(alpha + X_i) prod Gamma(alpha + Y_i)
// Gamma( alpha_sum + X_sum ) Gamma( alpha_sum + Y_sum )
double logLikelihood = 0.0;
double logGammaAlpha = logGamma(alpha);
int totalX = 0;
int totalY = 0;
int key, x, y;
TIntHashSet distinctKeys = new TIntHashSet();
distinctKeys.addAll(countsX.keys());
distinctKeys.addAll(countsY.keys());
TIntIterator iterator = distinctKeys.iterator();
while (iterator.hasNext()) {
key = iterator.next();
x = 0;
if (countsX.containsKey(key)) {
x = countsX.get(key);
}
y = 0;
if (countsY.containsKey(key)) {
y = countsY.get(key);
}
totalX += x;
totalY += y;
logLikelihood += logGamma(alpha) + logGamma(alpha + x + y)
- logGamma(alpha + x) - logGamma(alpha + y);
}
logLikelihood += logGamma(alphaSum + totalX) + logGamma(alphaSum + totalY)
- logGamma(alphaSum) - logGamma(alphaSum + totalX + totalY);
return logLikelihood;
}