

Java HashMultiset.add Method Code Examples

This article collects typical usage examples of the Java method com.google.common.collect.HashMultiset.add, gathered from open source projects. If you are wondering what HashMultiset.add does, how to call it, or what real-world usage looks like, the curated snippets below should help. You can also explore further examples of the enclosing class, com.google.common.collect.HashMultiset.


The following presents 14 code examples of the HashMultiset.add method, drawn from open source projects and ordered roughly by popularity.
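
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the basic semantics of HashMultiset.add: add(E) increments an element's count and always returns true, while add(E, int) adds several occurrences at once and returns the element's count before the addition.

import com.google.common.collect.HashMultiset;

public class HashMultisetAddDemo {
    public static void main(String[] args) {
        HashMultiset<String> words = HashMultiset.create();
        words.add("apple");                 // count("apple") == 1; always returns true
        words.add("apple");                 // count("apple") == 2
        int before = words.add("pear", 3);  // adds 3 occurrences; returns the old count
        System.out.println(words.count("apple")); // 2
        System.out.println(words.count("pear"));  // 3
        System.out.println(before);               // 0
    }
}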

Example 1: update

import com.google.common.collect.HashMultiset; // import the required package/class
private void update() {
    ArrayList<UUID> onlinePlayers = new ArrayList<UUID>();
    for (Object obj : FMLCommonHandler.instance().getMinecraftServerInstance().getPlayerList().getPlayerList()) {
        EntityPlayerMP player = (EntityPlayerMP) obj;
        UUID uuid = player.getUniqueID();

        onlinePlayers.add(uuid);
        timeOnCount.add(uuid);

        //Kick players who are on too long
        if ((maxTimeOn.containsKey(uuid) && timeOnCount.count(uuid) > maxTimeOn.get(uuid)) || (maxTimeOnGlobal != 0 && timeOnCount.count(uuid) > maxTimeOnGlobal)) {
            rejoinTime.put(uuid, System.currentTimeMillis() + (breakTime.containsKey(uuid) ? breakTime.get(uuid) * 50 : breakTimeGlobal * 50));
            kickPlayerForTime(player);
            timeOnCount.remove(uuid, timeOnCount.count(uuid));
        }
    }

    //Decrease timeOnCount time for players that aren't online
    HashMultiset<UUID> uuids = HashMultiset.create();
    for (UUID entry : timeOnCount.elementSet()) {
        if (!onlinePlayers.contains(entry)) {
            uuids.add(entry);
        }
    }
    Multisets.removeOccurrences(timeOnCount, uuids);
}
 
Developer: kihira, Project: BeProductive, Lines: 27, Source: BeProductive.java
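
Example 1 relies on Multisets.removeOccurrences to decrement every offline player's time counter by one. Here is a minimal standalone sketch of that decrement pattern (names are illustrative; the Multiset overload of removeOccurrences requires Guava 18+):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multisets;

public class RemoveOccurrencesDemo {
    public static void main(String[] args) {
        HashMultiset<String> timeOn = HashMultiset.create();
        timeOn.add("alice", 5);
        timeOn.add("bob", 2);

        // One occurrence per offline player to subtract from the counter
        HashMultiset<String> decrement = HashMultiset.create();
        decrement.add("alice");
        decrement.add("bob");

        Multisets.removeOccurrences(timeOn, decrement); // subtracts counts element-wise
        System.out.println(timeOn.count("alice")); // 4
        System.out.println(timeOn.count("bob"));   // 1
    }
}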

Example 2: prepareNGramDictionary

import com.google.common.collect.HashMultiset; // import the required package/class
private String[] prepareNGramDictionary(QGram qgram) throws IOException {
    final HashMultiset<String> set = HashMultiset.create();
    try (BufferedReader reader = new BufferedReader(new FileReader(
            inputFilePath))) {

        String line;
        while ((line = reader.readLine()) != null) {
            if (line.isEmpty()) {
                continue;
            }

            String[] split = SPLIT_PATTERN.split(line);
            String tkn = cleanToken(split[0]);
            Map<String, Integer> profile = qgram.getProfile(tkn);
            for (Map.Entry<String, Integer> entry : profile.entrySet()) {
                //noinspection ResultOfMethodCallIgnored
                set.add(entry.getKey(), entry.getValue());
            }
        }
    }

    // do some naive word statistics cut-off
    return set.entrySet()
            .stream()
            .filter(e -> e.getCount() > MIN_CHAR_NGRAM_OCCURRENCE)
            .map(Multiset.Entry::getElement)
            .sorted()
            .toArray(String[]::new);
}
 
Developer: thomasjungblut, Project: ner-sequencelearning, Lines: 30, Source: VectorizerMain.java
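
Example 2 shows a common pattern: merge per-line frequency profiles into one multiset with add(element, occurrences), then keep only elements above a count threshold. A reduced sketch of the same aggregate-and-filter idea (the n-grams and threshold here are illustrative, not from the project):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;
import java.util.Arrays;

public class NGramCutoffDemo {
    public static void main(String[] args) {
        HashMultiset<String> counts = HashMultiset.create();
        // Hypothetical per-token profiles being merged
        counts.add("th", 4);
        counts.add("he", 4);
        counts.add("xq", 1);

        int minOccurrence = 2; // illustrative cut-off
        String[] dictionary = counts.entrySet().stream()
                .filter(e -> e.getCount() > minOccurrence)
                .map(Multiset.Entry::getElement)
                .sorted()
                .toArray(String[]::new);
        System.out.println(Arrays.toString(dictionary)); // [he, th]; "xq" is cut off
    }
}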

Example 3: getSyntaxElements

import com.google.common.collect.HashMultiset; // import the required package/class
private Multiset<String> getSyntaxElements() {
    HashMultiset<String> result = HashMultiset.create();
    for (Method method : ClassesThat.class.getMethods()) {
        result.add(method.getName());
    }
    return result;
}
 
Developer: TNG, Project: ArchUnit, Lines: 8, Source: ClassesThatTestsExistTest.java
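
Example 3 collects method names into a multiset rather than a set, so overloaded methods are counted once per overload instead of being collapsed into one entry. A compact sketch of that distinction, using java.util.List as an arbitrary class with overloads:

import com.google.common.collect.HashMultiset;
import java.lang.reflect.Method;

public class MethodNameCountDemo {
    public static void main(String[] args) {
        HashMultiset<String> names = HashMultiset.create();
        for (Method m : java.util.List.class.getMethods()) {
            names.add(m.getName());
        }
        // "remove" is overloaded (remove(int), remove(Object)), so its count is at least 2
        System.out.println(names.count("remove"));
    }
}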

Example 4: multiNodeCluster2

import com.google.common.collect.HashMultiset; // import the required package/class
@Test
public void multiNodeCluster2() throws Exception {
  final Wrapper wrapper = newWrapper(200, 1, 20,
      ImmutableList.of(
          new EndpointAffinity(N1_EP2, 0.15, true, 50),
          new EndpointAffinity(N2_EP2, 0.15, true, 50),
          new EndpointAffinity(N3_EP1, 0.10, true, 50),
          new EndpointAffinity(N4_EP2, 0.20, true, 50),
          new EndpointAffinity(N1_EP1, 0.20, true, 50)
      ));
  INSTANCE.parallelizeFragment(wrapper, newParameters(1, 5, 20), null);

  // Expect the fragment parallelization to be 20 because:
  // 1. the cost (200) is above the threshold (SLICE_TARGET_DEFAULT) (which gives 200/1=200 width) and
  // 2. Number of mandatory node assignments are 5 (current width 200 satisfies the requirement)
  // 3. max fragment width is 20 which limits the width
  assertEquals(20, wrapper.getWidth());

  final List<NodeEndpoint> assignedEps = wrapper.getAssignedEndpoints();
  assertEquals(20, assignedEps.size());
  final HashMultiset<NodeEndpoint> counts = HashMultiset.create();
  for(final NodeEndpoint ep : assignedEps) {
    counts.add(ep);
  }
  // Each node gets at max 5.
  assertTrue(counts.count(N1_EP2) <= 5);
  assertTrue(counts.count(N2_EP2) <= 5);
  assertTrue(counts.count(N3_EP1) <= 5);
  assertTrue(counts.count(N4_EP2) <= 5);
  assertTrue(counts.count(N1_EP1) <= 5);
}
 
Developer: dremio, Project: dremio-oss, Lines: 32, Source: TestHardAffinityFragmentParallelizer.java
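
A side note on the counting loop in Example 4: HashMultiset.create(Iterable) builds the same counts in one call and can replace the explicit for loop. A minimal sketch (the endpoint names are placeholders):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.ImmutableList;
import java.util.List;

public class CreateFromIterableDemo {
    public static void main(String[] args) {
        List<String> assigned = ImmutableList.of("n1", "n2", "n1", "n3", "n1");
        HashMultiset<String> counts = HashMultiset.create(assigned); // counts each element
        System.out.println(counts.count("n1")); // 3
        System.out.println(counts.count("n2")); // 1
    }
}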

Example 5: produceBagOfWords_Token

import com.google.common.collect.HashMultiset; // import the required package/class
/**
 * Loads a document from a file and transforms it into a token multiset using the Stanford PTBTokenizer.
 * @param documentPath path of the document to tokenize
 * @return a multiset of token strings with their occurrence counts
 * @throws IOException if the document cannot be read
 */
public HashMultiset<String> produceBagOfWords_Token(String documentPath) throws IOException {
    HashMultiset<String> tokenMultiset = HashMultiset.create();
    PTBTokenizer<CoreLabel> ptbt = new PTBTokenizer<>(new FileReader(documentPath),
            new CoreLabelTokenFactory(), "");
    while (ptbt.hasNext()) {
        CoreLabel label = ptbt.next();
        tokenMultiset.add(label.toString());
    }
    return tokenMultiset;
}
 
Developer: JULIELab, Project: JEmAS, Lines: 20, Source: File2BagOfWords_Processor.java
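
To consume the bag of words returned by Example 5, entrySet() yields each distinct token once, together with its count. A small standalone sketch:

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class BagOfWordsPrintDemo {
    public static void main(String[] args) {
        HashMultiset<String> tokens = HashMultiset.create();
        tokens.add("the", 2);
        tokens.add("cat");
        // entrySet() visits each distinct element once, with its count
        for (Multiset.Entry<String> e : tokens.entrySet()) {
            System.out.println(e.getElement() + "\t" + e.getCount());
        }
    }
}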

Example 6: produceBagOfWords_Lemma

import com.google.common.collect.HashMultiset; // import the required package/class
/**
 * Loads the file at the given path into a string, processes it with the Stanford lemmatizer,
 * and returns the lemmas as a multiset.
 * @param documentPath path of the document to lemmatize
 * @return a multiset of lemmas with their occurrence counts
 * @throws IOException if the document cannot be read
 */
public HashMultiset<String> produceBagOfWords_Lemma(String documentPath) throws IOException {
    HashMultiset<String> lemmaMultiset = HashMultiset.create();
    String doc = Util.readfile2String(documentPath);
    // the lemmatizer is constructed in the class constructor
    List<String> lemmas = this.lemmatizer.lemmatize(doc);
    for (String lemma : lemmas) {
        lemmaMultiset.add(lemma);
    }
    return lemmaMultiset;
}
 
Developer: JULIELab, Project: JEmAS, Lines: 19, Source: File2BagOfWords_Processor.java

Example 7: logRead

import com.google.common.collect.HashMultiset; // import the required package/class
/**
 * Read a token produced by the given producer
 * 
 * @param buffer
 *            the buffer where the token has been read
 * @param producer
 *            the token producer
 */
public void logRead(Buffer buffer, ProfiledStep producer) {
	// get the set of tokens already produced by the given producer
	HashMultiset<Buffer> producedTokens = tokensProducers.get(producer);
	if (producedTokens == null) {
		producedTokens = HashMultiset.create();
		tokensProducers.put(producer, producedTokens);
	}
	producedTokens.add(buffer);

	// log the read
	consumedTokens.add(buffer);
}
 
Developer: turnus, Project: turnus, Lines: 21, Source: StepDataBox.java
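
The null check in Example 7 is the classic get-or-create pattern for a map of multisets; on Java 8+ the same logic can be written with Map.computeIfAbsent. A minimal sketch where plain strings stand in for the project's ProfiledStep and Buffer types:

import com.google.common.collect.HashMultiset;
import java.util.HashMap;
import java.util.Map;

public class GetOrCreateMultisetDemo {
    public static void main(String[] args) {
        // Placeholder key/value types standing in for ProfiledStep and Buffer
        Map<String, HashMultiset<String>> tokensProducers = new HashMap<>();
        String producer = "step-1";
        String buffer = "buf-A";

        // Creates the per-producer multiset on first use, then records the token
        tokensProducers.computeIfAbsent(producer, k -> HashMultiset.create()).add(buffer);
        System.out.println(tokensProducers.get(producer).count(buffer)); // 1
    }
}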

Example 8: multiNodeCluster2

import com.google.common.collect.HashMultiset; // import the required package/class
@Test
public void multiNodeCluster2() throws Exception {
  final Wrapper wrapper = newWrapper(200, 1, 20,
      ImmutableList.of(
          new EndpointAffinity(N1_EP2, 0.15, true, MAX_VALUE),
          new EndpointAffinity(N2_EP2, 0.15, true, MAX_VALUE),
          new EndpointAffinity(N3_EP1, 0.10, true, MAX_VALUE),
          new EndpointAffinity(N4_EP2, 0.20, true, MAX_VALUE),
          new EndpointAffinity(N1_EP1, 0.20, true, MAX_VALUE)
      ));
  INSTANCE.parallelizeFragment(wrapper, newParameters(1, 5, 20), null);

  // Expect the fragment parallelization to be 20 because:
  // 1. the cost (200) is above the threshold (SLICE_TARGET_DEFAULT) (which gives 200/1=200 width) and
  // 2. Number of mandatory node assignments are 5 (current width 200 satisfies the requirement)
  // 3. max fragment width is 20 which limits the width
  assertEquals(20, wrapper.getWidth());

  final List<DrillbitEndpoint> assignedEps = wrapper.getAssignedEndpoints();
  assertEquals(20, assignedEps.size());
  final HashMultiset<DrillbitEndpoint> counts = HashMultiset.create();
  for(final DrillbitEndpoint ep : assignedEps) {
    counts.add(ep);
  }
  // Each node gets at max 5.
  assertTrue(counts.count(N1_EP2) <= 5);
  assertTrue(counts.count(N2_EP2) <= 5);
  assertTrue(counts.count(N3_EP1) <= 5);
  assertTrue(counts.count(N4_EP2) <= 5);
  assertTrue(counts.count(N1_EP1) <= 5);
}
 
Developer: axbaretto, Project: drill, Lines: 32, Source: TestHardAffinityFragmentParallelizer.java

Example 9: setUp

import com.google.common.collect.HashMultiset; // import the required package/class
@BeforeExperiment
void setUp() {
  Random random = new Random();
  multisets.clear();
  for (int i = 0; i < ARRAY_SIZE; i++) {
    HashMultiset<Integer> multiset = HashMultiset.<Integer>create();
    multisets.add(multiset);
    queries[i] = random.nextInt();
    multiset.add(queries[i]);
  }
}
 
Developer: sander120786, Project: guava-libraries, Lines: 12, Source: HashMultisetAddPresentBenchmark.java
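
Unlike HashSet.add, Multiset.add(E) returns true even when the element is already present (it simply increments the count), which is the "add when present" case this benchmark sets up. A tiny sketch of the difference:

import com.google.common.collect.HashMultiset;
import java.util.HashSet;

public class AddPresentDemo {
    public static void main(String[] args) {
        HashSet<Integer> set = new HashSet<>();
        HashMultiset<Integer> multiset = HashMultiset.create();

        set.add(42);
        multiset.add(42);

        System.out.println(set.add(42));        // false: already present in a Set
        System.out.println(multiset.add(42));   // true: count goes from 1 to 2
        System.out.println(multiset.count(42)); // 2
    }
}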

Example 10: getMostUsedArticleCasing

import com.google.common.collect.HashMultiset; // import the required package/class
public String getMostUsedArticleCasing() {
    HashMultiset<String> articleNames = HashMultiset.create();

    for (Writable writable : super.get()) {
        LinkWritable link = (LinkWritable) writable;
        articleNames.add(link.getArticle().toString());
    }

    // copyHighestCountFirst iterates elements in descending count order,
    // so the first element is the most frequently used casing
    ImmutableMultiset<String> sorted = Multisets.copyHighestCountFirst(articleNames);
    return sorted.elementSet().iterator().next();
}
 
Developer: rossf7, Project: wikireverse, Lines: 15, Source: LinkArrayWritable.java
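
Examples 10 and 13 both use Multisets.copyHighestCountFirst to pick the most frequent element: the returned ImmutableMultiset iterates in descending count order, so the first element (or entry) is the winner. A minimal sketch:

import com.google.common.collect.HashMultiset;
import com.google.common.collect.ImmutableMultiset;
import com.google.common.collect.Multisets;

public class HighestCountFirstDemo {
    public static void main(String[] args) {
        HashMultiset<String> names = HashMultiset.create();
        names.add("WikiPedia");
        names.add("Wikipedia", 3);
        names.add("wikipedia", 2);

        ImmutableMultiset<String> sorted = Multisets.copyHighestCountFirst(names);
        // Elements iterate from highest to lowest count
        String mostUsed = sorted.elementSet().iterator().next();
        System.out.println(mostUsed); // Wikipedia
    }
}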

Example 11: generateHashMultiset

import com.google.common.collect.HashMultiset; // import the required package/class
@Generates private static <E> HashMultiset<E> generateHashMultiset(E freshElement) {
  HashMultiset<E> multiset = HashMultiset.create();
  multiset.add(freshElement);
  return multiset;
}
 
Developer: zugzug90, Project: guava-mock, Lines: 6, Source: FreshValueGenerator.java

Example 12: testNodeInputSplit

import com.google.common.collect.HashMultiset; // import the required package/class
@Test
public void testNodeInputSplit() throws IOException, InterruptedException {
  // Regression test for MAPREDUCE-4892. There are 2 nodes with all blocks on 
  // both nodes. The grouping ensures that both nodes get splits instead of 
  // just the first node
  DummyInputFormat inFormat = new DummyInputFormat();
  int numBlocks = 12;
  long totLength = 0;
  long blockSize = 100;
  long maxSize = 200;
  long minSizeNode = 50;
  long minSizeRack = 50;
  String[] locations = { "h1", "h2" };
  String[] racks = new String[0];
  Path path = new Path("hdfs://file");
  
  OneBlockInfo[] blocks = new OneBlockInfo[numBlocks];
  for(int i=0; i<numBlocks; ++i) {
    blocks[i] = new OneBlockInfo(path, i*blockSize, blockSize, locations, racks);
    totLength += blockSize;
  }
  
  List<InputSplit> splits = new ArrayList<InputSplit>();
  HashMap<String, Set<String>> rackToNodes = 
                            new HashMap<String, Set<String>>();
  HashMap<String, List<OneBlockInfo>> rackToBlocks = 
                            new HashMap<String, List<OneBlockInfo>>();
  HashMap<OneBlockInfo, String[]> blockToNodes = 
                            new HashMap<OneBlockInfo, String[]>();
  HashMap<String, Set<OneBlockInfo>> nodeToBlocks = 
                            new HashMap<String, Set<OneBlockInfo>>();
  
  OneFileInfo.populateBlockInfo(blocks, rackToBlocks, blockToNodes, 
                           nodeToBlocks, rackToNodes);
  
  inFormat.createSplits(nodeToBlocks, blockToNodes, rackToBlocks, totLength,  
                        maxSize, minSizeNode, minSizeRack, splits);
  
  int expectedSplitCount = (int)(totLength/maxSize);
  assertEquals(expectedSplitCount, splits.size());
  HashMultiset<String> nodeSplits = HashMultiset.create();
  for(int i=0; i<expectedSplitCount; ++i) {
    InputSplit inSplit = splits.get(i);
    assertEquals(maxSize, inSplit.getLength());
    assertEquals(1, inSplit.getLocations().length);
    nodeSplits.add(inSplit.getLocations()[0]);
  }
  assertEquals(3, nodeSplits.count(locations[0]));
  assertEquals(3, nodeSplits.count(locations[1]));
}
 
Developer: naver, Project: hadoop, Lines: 51, Source: TestCombineFileInputFormat.java

Example 13: run

import com.google.common.collect.HashMultiset; // import the required package/class
public void run() throws Exception {
    HashMultimap<String, String> typemap = HashMultimap.create();
    BufferedReader in = new BufferedReader(new FileReader(counts_file));
    String line = null;
    while ((line = in.readLine()) != null) {
        String[] typedata = line.split(" ");
        typemap.put(typedata[1], typedata[0]);
    }
    in.close();

    in = new BufferedReader(new FileReader(qrels_file));
    QueryParser qps = new QueryParser(FreebaseTools.FIELD_NAME_SUBJECT, tools.getIndexAnalyzer());
    IndexSearcher searcher = tools.getIndexSearcher();
    IndexReader reader = tools.getIndexReader();
    Joiner.MapJoiner joiner = Joiner.on(", ").withKeyValueSeparator(" = ");

    int count = 0;
    int correct = 0;
    while ((line = in.readLine()) != null) {
        count++;
        String[] fields = line.split("\t");
        System.out.println("# [Query: " + fields[0] + "] [KBid: " + fields[1] + "] [type: " + fields[2] + "]");
        String lookup = "f_" + fields[1];
        String actual_type = fields[2];

        // execute a Lucene query for the entity, get back 10 docs
        int docid = tools.getSubjectDocID(lookup);
        if (docid == -1) {
            System.out.println("# kbid not found: " + lookup);
            continue;
        }
        Document d = tools.getDocumentInMode(docid);
        String[] types = d.getValues("r_type");
        HashMultiset<String> typecount = HashMultiset.create(4);
        for (String t : types) {
            if (typemap.containsKey(t))
                for (String tt : typemap.get(t))
                    typecount.add(tt);
        }
        if (typecount.size() > 0) {
            String guess_type = Multisets.copyHighestCountFirst(typecount).entrySet().asList().get(0).getElement();
            System.out.print(actual_type + ", guessing " + guess_type + " [");
            for (Multiset.Entry<String> me : typecount.entrySet()) {
                System.out.print(me.getElement() + " = " + me.getCount() + " ");
            }
            System.out.println("]");

            if (actual_type.equals(guess_type))
                correct++;
        }
    }

    System.out.println(correct + " correct out of " + count + " = " + (float)correct/count);
}
 
Developer: isoboroff, Project: basekb-search, Lines: 55, Source: PredictType.java

Example 14: testNodeInputSplit

import com.google.common.collect.HashMultiset; // import the required package/class
public void testNodeInputSplit() throws IOException, InterruptedException {
  // Regression test for MAPREDUCE-4892. There are 2 nodes with all blocks on 
  // both nodes. The grouping ensures that both nodes get splits instead of 
  // just the first node
  DummyInputFormat inFormat = new DummyInputFormat();
  int numBlocks = 12;
  long totLength = 0;
  long blockSize = 100;
  long maxSize = 200;
  long minSizeNode = 50;
  long minSizeRack = 50;
  String[] locations = { "h1", "h2" };
  String[] racks = new String[0];
  Path path = new Path("hdfs://file");
  
  OneBlockInfo[] blocks = new OneBlockInfo[numBlocks];
  for(int i=0; i<numBlocks; ++i) {
    blocks[i] = new OneBlockInfo(path, i*blockSize, blockSize, locations, racks);
    totLength += blockSize;
  }
  
  List<InputSplit> splits = new ArrayList<InputSplit>();
  HashMap<String, Set<String>> rackToNodes = 
                            new HashMap<String, Set<String>>();
  HashMap<String, List<OneBlockInfo>> rackToBlocks = 
                            new HashMap<String, List<OneBlockInfo>>();
  HashMap<OneBlockInfo, String[]> blockToNodes = 
                            new HashMap<OneBlockInfo, String[]>();
  HashMap<String, Set<OneBlockInfo>> nodeToBlocks = 
                            new HashMap<String, Set<OneBlockInfo>>();
  
  OneFileInfo.populateBlockInfo(blocks, rackToBlocks, blockToNodes, 
                           nodeToBlocks, rackToNodes);
  
  inFormat.createSplits(nodeToBlocks, blockToNodes, rackToBlocks, totLength,  
                        maxSize, minSizeNode, minSizeRack, splits);
  
  int expectedSplitCount = (int)(totLength/maxSize);
  Assert.assertEquals(expectedSplitCount, splits.size());
  HashMultiset<String> nodeSplits = HashMultiset.create();
  for(int i=0; i<expectedSplitCount; ++i) {
    InputSplit inSplit = splits.get(i);
    Assert.assertEquals(maxSize, inSplit.getLength());
    Assert.assertEquals(1, inSplit.getLocations().length);
    nodeSplits.add(inSplit.getLocations()[0]);
  }
  Assert.assertEquals(3, nodeSplits.count(locations[0]));
  Assert.assertEquals(3, nodeSplits.count(locations[1]));
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 50, Source: TestCombineFileInputFormat.java


Note: The com.google.common.collect.HashMultiset.add examples in this article were compiled by 纯净天空 from open source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open source projects; copyright remains with the original authors. Consult each project's license before using or redistributing the code, and do not republish without permission.