

C++ Tokens::clear Method Code Examples

This article collects typical usage examples of the C++ Tokens::clear method drawn from open-source code. If you are wondering what Tokens::clear does, how to call it, or want to see it used in real code, the examples selected below should help. You can also explore further usage examples of the Tokens class to which this method belongs.


Below are 2 code examples of the Tokens::clear method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
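
Before the full examples, the sketch below shows the pattern both of them rely on: fill a Tokens container, store a copy, then call clear() so the same object can be reused for the next group. Treating Tokens as an alias for std::vector<std::string> and the sample input are assumptions made for illustration, not details taken from either project.

#include <iostream>
#include <string>
#include <vector>

// Assumption: in the projects below, Tokens is (or behaves like) a vector of strings.
using Tokens = std::vector<std::string>;

int main()
{
	std::vector<Tokens> groups;
	Tokens current;
	const Tokens input = {"a", "b", "|", "c", "d"};

	// Accumulate tokens, store a copy at each "|" separator, then clear()
	// so the same container can be reused for the next group.
	for (const std::string& word : input) {
		if (word == "|") {
			groups.push_back(current);
			current.clear();
		} else {
			current.push_back(word);
		}
	}
	groups.push_back(current);	// flush the trailing group

	std::cout << "groups: " << groups.size() << '\n';	// prints "groups: 2"
	return 0;
}

Calling clear() removes all elements but normally keeps the allocated capacity, which is why reusing one container across loop iterations is cheaper than constructing a new one each time.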

Example 1: loadPacket

bool FileWorker::loadPacket(std::vector<std::pair<Token,Tokens>>& formats, const Token& fileName)
{
	xml_document doc;
	xml_parse_result result = doc.load_file(fileName.c_str());	
	if (result) {
		Tokens packet;
		for (xml_node pkt = doc.child("packet"); pkt; pkt = pkt.next_sibling()) {
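			// The value of the first attribute becomes the format name (the pair's key);
			// the remaining attribute values are collected as the packet's fields.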
			for (xml_attribute_iterator attribute = ++pkt.attributes_begin(); 
				attribute!=pkt.attributes_end(); ++attribute)
				packet.push_back(attribute->value());						
			formats.push_back(make_pair(pkt.attributes_begin()->value(),packet));		
			packet.clear();
		}
		return true;
	}
	else if (result.status == pugi::status_file_not_found) {
		setGlobalError("Packets file: file not found");
		DbgMsg(__FILE__, __LINE__, 
			"Device::loadPacket() load_file() ERROR: file not found\n");		
		return false;
	}
	else {
		setGlobalError("Packets file: XML parsed with errors");
		DbgMsg(__FILE__, __LINE__, 
			"Device::loadPacket() load_file() ERROR: file parsed with errors:\n");	
		DbgMsg(__FILE__, __LINE__, 
			"Description: %s\n", result.description());	
		DbgMsg(__FILE__, __LINE__, 
			"Error offset: %s\n", result.offset);	
		return false;
	}	
}
Developer ID: 3ka5-cat, Project: PacketGenerator, Lines of code: 32, Source file: FileWorker.cpp
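
For context, a call site for loadPacket might look like the sketch below. The header name, FileWorker's default constructor, and Token being an alias for std::string are assumptions made for illustration, not details confirmed by the PacketGenerator project.

#include <iostream>
#include <string>
#include <utility>
#include <vector>
#include "FileWorker.h"	// assumed project header declaring FileWorker, Token and Tokens

int main()
{
	FileWorker worker;	// assumed to be default-constructible
	std::vector<std::pair<Token, Tokens>> formats;

	// Each entry pairs a format name (the first attribute of a <packet> node)
	// with the remaining attribute values of that node.
	if (worker.loadPacket(formats, "packets.xml")) {
		for (const auto& format : formats)
			std::cout << "packet: " << format.first
				<< " (" << format.second.size() << " fields)" << std::endl;
	}
	return 0;
}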

Example 2: main

int main(int argc, char const *argv[]) {
  // If there are not enough args, return -1
  if (argc < 8) {
    std::cerr << "Usage: P7 <corpus> <sentence> <dictionary> <n> <threshold> <delta> <model>" << '\n';
    return -1;
  }

  // Otherwise, collect the function parameters
  string corpusFileName = argv[1];
  string sentenceFileName = argv[2];
  string dictionaryFileName = argv[3];
  unsigned int n = stoi(argv[4]);
  unsigned int threshold = stoi(argv[5]);
  double delta = stod(argv[6]);
  bool model = stoi(argv[7]);



  // Capture all tokens
  Tokens corpusTokens;
  Tokens sentenceTokens;
  Tokens dictionaryTokens;
  read_tokens(corpusFileName, corpusTokens, false);
  read_tokens(sentenceFileName, sentenceTokens, true);
  read_tokens(dictionaryFileName, dictionaryTokens, false);


  if (corpusTokens.size() < n) {
    std::cerr << "\nInput file '" << corpusFileName << "' is too small to create any nGrams of size " << n;
    return -1;
  }

  if (sentenceTokens.size() < n) {
    std::cerr << "\nInput file '" << sentenceFileName << "' is too small to create any nGrams of size " << n;
    return -1;
  }


  unordered_map <string, int> vocabulary;
  unordered_map <string, int> dictionary;
  vector<Corpus> corpus = getCorpusList(corpusTokens, n);

  for (auto &word : corpusTokens) {
    if (vocabulary.count(word) == 0)
      vocabulary[word] = 1;
  }

  for (auto &word : dictionaryTokens) {
    if (dictionary.count(word) == 0)
      dictionary[word] = 1;
  }

  vector<double> probs;

  int V = vocabulary.size() + 1;
  double N = corpusTokens.size();

  // Collect sentences
  vector<Tokens> sentences;
  Tokens sentence;
  for (auto &word : sentenceTokens) {
    if (word == EOS) {
      sentences.push_back(sentence);
      sentence.clear();
    } else {
      sentence.push_back(word);
    }
  }

  // Proof sentences
  for (auto &sentence : sentences) {
    std::cout << "Sentence:\t";
    for (auto &word : sentence)
      std::cout << word << ' ';
    std::cout << '\n';
    // Check against all words within reasonable distance
    vector<Tokens> candidateWords;
    for (auto &word : sentence) {
      Tokens candidates;
      for (auto &candidate : dictionary)
        if (uiLevenshteinDistance(word, candidate.first) <= 1)
          candidates.push_back(candidate.first);

      candidateWords.push_back(candidates);
    }

    // Check that the produced sentences from the candidate words makes semantic sense
    vector<Tokens> candidateSentences;

    // for (auto &words : candidateWords) {
    //   for (auto &word : words) {
    //     Tokens temp = sentence;
    //     temp
    //     candidateSentences.push_back(temp)
    //   }
    // }

    for (int i = 0; i < candidateWords.size(); i++) {
      for (auto &word : candidateWords[i]) {
        Tokens temp = sentence;
//......... part of the code is omitted here .........
Developer ID: MartinKilonzo, Project: School-Work, Lines of code: 101, Source file: P7.cpp
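
The spell-checking loop above depends on a helper called uiLevenshteinDistance that is defined elsewhere in the project. For reference only, a minimal sketch of such a helper (the standard dynamic-programming edit distance, not necessarily the project's own implementation) could look like this:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Minimal sketch of an edit-distance helper with the same call shape as
// uiLevenshteinDistance above; this is the standard dynamic-programming
// formulation and is not necessarily the implementation used in the project.
unsigned int uiLevenshteinDistance(const std::string &a, const std::string &b) {
  std::vector<unsigned int> prev(b.size() + 1), curr(b.size() + 1);
  for (unsigned int j = 0; j <= b.size(); ++j)
    prev[j] = j;                                  // cost of building b[0..j) from an empty string
  for (unsigned int i = 1; i <= a.size(); ++i) {
    curr[0] = i;                                  // cost of deleting a[0..i)
    for (unsigned int j = 1; j <= b.size(); ++j) {
      unsigned int cost = (a[i - 1] == b[j - 1]) ? 0 : 1;
      curr[j] = std::min({prev[j] + 1,            // deletion
                          curr[j - 1] + 1,        // insertion
                          prev[j - 1] + cost});   // substitution (or match)
    }
    std::swap(prev, curr);
  }
  return prev[b.size()];                          // after the swap, prev holds the last row
}

int main() {
  std::cout << uiLevenshteinDistance("cat", "cart") << '\n';   // prints 1
}

Keeping only two rows of the DP table reduces memory from O(|a|*|b|) to O(|b|) while producing the same distance.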


Note: The Tokens::clear method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and the copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code; do not reproduce this article without permission.