This article collects typical usage examples of the C++ UTIL_THROW_IF2 macro. If you are wondering what UTIL_THROW_IF2 does, how to call it, or what real uses of it look like, the hand-picked code examples here should help.
The following shows 15 code examples of UTIL_THROW_IF2, sorted by popularity by default.
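All of the examples share the same calling pattern: UTIL_THROW_IF2(condition, message) throws an exception when the condition is true, and the message argument is a stream expression, so values can be chained with <<. Below is a minimal sketch of that pattern, assuming the macro is pulled in from Moses' util/exception.hh; CheckTable and its parameters are made up purely for illustration and do not appear in the Moses source.
#include "util/exception.hh" // assumed location of UTIL_THROW_IF2 in the Moses util library
#include <string>

// Hypothetical helper, used only to illustrate the calling pattern.
void CheckTable(const std::string &path, size_t numScores, size_t expectedScores)
{
  // Throws when the condition is true; the message is built with operator<<.
  UTIL_THROW_IF2(path.empty(), "No phrase-table path given");
  UTIL_THROW_IF2(numScores != expectedScores,
                 "Table " << path << " has " << numScores
                 << " scores. The ini file specified " << expectedScores << " scores");
}
The real examples below follow exactly this shape, typically validating file contents, configuration values, or invariants while loading models.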
Example 1: ReduceCache
void PhraseDictionaryOnDisk::InitializeForInput(InputType const& source)
{
const StaticData &staticData = StaticData::Instance();
ReduceCache();
OnDiskPt::OnDiskWrapper *obj = new OnDiskPt::OnDiskWrapper();
obj->BeginLoad(m_filePath);
UTIL_THROW_IF2(obj->GetMisc("Version") != OnDiskPt::OnDiskWrapper::VERSION_NUM,
"On-disk phrase table is version " << obj->GetMisc("Version")
<< ". It is not compatible with version " << OnDiskPt::OnDiskWrapper::VERSION_NUM);
UTIL_THROW_IF2(obj->GetMisc("NumSourceFactors") != m_input.size(),
"On-disk phrase table has " << obj->GetMisc("NumSourceFactors") << " source factors."
<< " The ini file specified " << m_input.size() << " source factors");
UTIL_THROW_IF2(obj->GetMisc("NumTargetFactors") != m_output.size(),
"On-disk phrase table has " << obj->GetMisc("NumTargetFactors") << " target factors."
<< " The ini file specified " << m_output.size() << " target factors");
UTIL_THROW_IF2(obj->GetMisc("NumScores") != m_numScoreComponents,
"On-disk phrase table has " << obj->GetMisc("NumScores") << " scores."
<< " The ini file specified " << m_numScoreComponents << " scores");
m_implementation.reset(obj);
}
Example 2: getline
bool Vocab::Load(FileHandler* vcbin, const FactorDirection& direction,
const FactorList& factors, bool closed)
{
// load vocab id -> word mapping
m_words2ids.clear(); // reset mapping
m_ids2words.clear();
std::string line, word_str;
wordID_t id = 0;
std::istream &ret = getline(*vcbin, line);
UTIL_THROW_IF2(!ret, "Couldn't read file");
std::istringstream first(line.c_str());
uint32_t vcbsize(0);
first >> vcbsize;
uint32_t loadedsize = 0;
while (loadedsize++ < vcbsize && getline(*vcbin, line)) {
std::istringstream entry(line.c_str());
entry >> word_str;
Word word;
word.CreateFromString( direction, factors, word_str, false); // TODO: set isNonTerminal correctly
entry >> id;
// may be no id (i.e. file may just be a word list)
if (id == 0 && word != GetkOOVWord())
id = m_ids2words.size() + 1; // assign ids sequentially starting from 1
UTIL_THROW_IF2(m_ids2words.count(id) != 0 || m_words2ids.count(word) != 0,
"Duplicate vocabulary entry for id " << id << " (" << word_str << ")");
m_ids2words[id] = word;
m_words2ids[word] = id;
}
m_closed = closed; // once loaded fix vocab ?
std::cerr << "Loaded vocab with " << m_ids2words.size() << " words." << std::endl;
return true;
}
Example 3: inFile
void Model1Vocabulary::Load(const std::string& fileName)
{
InputFileStream inFile(fileName);
FactorCollection &factorCollection = FactorCollection::Instance();
std::string line;
unsigned i = 0;
if ( getline(inFile, line) ) { // the first line of MGIZA vocabulary files seems to be special ("1 UNK 0") -- skip it if so
++i;
std::vector<std::string> tokens = Tokenize(line);
UTIL_THROW_IF2(tokens.size()!=3, "Line " << i << " in " << fileName << " has wrong number of tokens.");
unsigned id = atoll( tokens[0].c_str() );
if (! ( (id == 1) && (tokens[1] == "UNK") )) {
const Factor* factor = factorCollection.AddFactor(tokens[1],false); // TODO: can we assume that the vocabulary is known and filter the model on loading?
bool stored = Store(factor, id);
UTIL_THROW_IF2(!stored, "Line " << i << " in " << fileName << " overwrites existing vocabulary entry.");
}
}
while ( getline(inFile, line) ) {
++i;
std::vector<std::string> tokens = Tokenize(line);
UTIL_THROW_IF2(tokens.size()!=3, "Line " << i << " in " << fileName << " has wrong number of tokens.");
unsigned id = atoll( tokens[0].c_str() );
const Factor* factor = factorCollection.AddFactor(tokens[1],false); // TODO: can we assume that the vocabulary is known and filter the model on loading?
bool stored = Store(factor, id);
UTIL_THROW_IF2(!stored, "Line " << i << " in " << fileName << " overwrites existing vocabulary entry.");
}
inFile.Close();
}
Example 4: UTIL_THROW_IF2
/***
* print surface factor only for the given phrase
*/
void BaseManager::OutputSurface(std::ostream &out, const Phrase &phrase,
const std::vector<FactorType> &outputFactorOrder,
bool reportAllFactors) const
{
UTIL_THROW_IF2(outputFactorOrder.size() == 0,
"Output factor order must not be empty");
if (reportAllFactors) {
out << phrase;
} else {
size_t size = phrase.GetSize();
for (size_t pos = 0 ; pos < size ; pos++) {
const Factor *factor = phrase.GetFactor(pos, outputFactorOrder[0]);
// check the factor exists before dereferencing it
UTIL_THROW_IF2(factor == NULL,
"Empty factor 0 at position " << pos);
out << *factor;
for (size_t i = 1 ; i < outputFactorOrder.size() ; i++) {
const Factor *factor = phrase.GetFactor(pos, outputFactorOrder[i]);
UTIL_THROW_IF2(factor == NULL,
"Empty factor " << i << " at position " << pos);
out << "|" << *factor;
}
out << " ";
}
}
}
Example 5: if
void WordTranslationFeature::Load(AllOptions::ptr const& opts)
{
m_options = opts;
// load word list for restricted feature set
if (m_filePathSource.empty()) {
return;
}
FEATUREVERBOSE(1, "Loading word translation word lists from " << m_filePathSource << " and " << m_filePathTarget << std::endl);
if (m_domainTrigger) {
// domain trigger terms for each input document
ifstream inFileSource(m_filePathSource.c_str());
UTIL_THROW_IF2(!inFileSource, "could not open file " << m_filePathSource);
std::string line;
while (getline(inFileSource, line)) {
m_vocabDomain.resize(m_vocabDomain.size() + 1);
vector<string> termVector;
boost::split(termVector, line, boost::is_any_of("\t "));
for (size_t i=0; i < termVector.size(); ++i)
m_vocabDomain.back().insert(termVector[i]);
}
inFileSource.close();
} else if (!m_filePathSource.empty() || !m_filePathTarget.empty()) {
return; // NOTE: this early return makes the restricted-vocabulary loading below unreachable
// restricted source word vocabulary
ifstream inFileSource(m_filePathSource.c_str());
UTIL_THROW_IF2(!inFileSource, "could not open file " << m_filePathSource);
std::string line;
while (getline(inFileSource, line)) {
m_vocabSource.insert(line);
}
inFileSource.close();
// restricted target word vocabulary
ifstream inFileTarget(m_filePathTarget.c_str());
UTIL_THROW_IF2(!inFileTarget, "could not open file " << m_filePathTarget);
while (getline(inFileTarget, line)) {
m_vocabTarget.insert(line);
}
inFileTarget.close();
m_unrestricted = false;
}
}
Example 6: inFileSource
void PhrasePairFeature::Load()
{
if (m_domainTrigger) {
// domain trigger terms for each input document
ifstream inFileSource(m_filePathSource.c_str());
UTIL_THROW_IF2(!inFileSource, "could not open file " << m_filePathSource);
std::string line;
while (getline(inFileSource, line)) {
std::set<std::string> terms;
vector<string> termVector;
boost::split(termVector, line, boost::is_any_of("\t "));
for (size_t i=0; i < termVector.size(); ++i)
terms.insert(termVector[i]);
// add term set for current document
m_vocabDomain.push_back(terms);
}
inFileSource.close();
} else {
// restricted source word vocabulary
ifstream inFileSource(m_filePathSource.c_str());
UTIL_THROW_IF2(!inFileSource, "could not open file " << m_filePathSource);
std::string line;
while (getline(inFileSource, line)) {
m_vocabSource.insert(line);
}
inFileSource.close();
/* // restricted target word vocabulary
ifstream inFileTarget(filePathTarget.c_str());
if (!inFileTarget)
{
cerr << "could not open file " << filePathTarget << endl;
return false;
}
while (getline(inFileTarget, line)) {
m_vocabTarget.insert(line);
}
inFileTarget.close();*/
m_unrestricted = false;
}
}
Example 7: ReformatHieroRule
void ReformatHieroRule(int sourceTarget, string &phrase, map<size_t, pair<size_t, size_t> > &ntAlign)
{
vector<string> toks;
Tokenize(toks, phrase, " ");
for (size_t i = 0; i < toks.size(); ++i) {
string &tok = toks[i];
size_t tokLen = tok.size();
if (tok.substr(0, 1) == "[" && tok.substr(tokLen - 1, 1) == "]") {
// non-terminal
vector<string> split = Tokenize(tok, ",");
UTIL_THROW_IF2(split.size() != 2,
"Incorrectly formatted non-terminal: " << tok);
tok = "[X]" + split[0] + "]";
size_t coIndex = Scan<size_t>(split[1]);
pair<size_t, size_t> &alignPoint = ntAlign[coIndex];
if (sourceTarget == 0) {
alignPoint.first = i;
} else {
alignPoint.second = i;
}
}
}
phrase = Join(" ", toks) + " [X]";
}
Example 8: inStream
bool SoftMatchingFeature::Load(const std::string& filePath)
{
StaticData &SD = StaticData::InstanceNonConst();
InputFileStream inStream(filePath);
std::string line;
while(getline(inStream, line)) {
std::vector<std::string> tokens = Tokenize(line);
UTIL_THROW_IF2(tokens.size() != 2, "Error: wrong format of SoftMatching file: must have two nonterminals per line");
// no soft matching necessary if LHS and RHS are the same
if (tokens[0] == tokens[1]) {
continue;
}
Word LHS, RHS;
LHS.CreateFromString(Output, SD.options()->output.factor_order, tokens[0], true);
RHS.CreateFromString(Output, SD.options()->output.factor_order, tokens[1], true);
m_softMatches[RHS[0]->GetId()].push_back(LHS);
GetOrSetFeatureName(RHS, LHS);
}
SD.SetSoftMatches(m_softMatches);
return true;
}
Example 9: UTIL_THROW_IF2
std::vector<float> ControlRecombination::DefaultWeights() const
{
UTIL_THROW_IF2(m_numScoreComponents,
"ControlRecombination should not have any scores");
vector<float> ret(0);
return ret;
}
Example 10: dash
void TargetPhrase::SetAlignmentInfo(const StringPiece &alignString)
{
AlignmentInfo::CollType alignTerm, alignNonTerm;
for (util::TokenIter<util::AnyCharacter, true> token(alignString, util::AnyCharacter(" \t")); token; ++token) {
util::TokenIter<util::SingleCharacter, false> dash(*token, util::SingleCharacter('-'));
char *endptr;
size_t sourcePos = strtoul(dash->data(), &endptr, 10);
UTIL_THROW_IF(endptr != dash->data() + dash->size(), util::ErrnoException, "Error parsing alignment " << *dash);
++dash;
size_t targetPos = strtoul(dash->data(), &endptr, 10);
UTIL_THROW_IF(endptr != dash->data() + dash->size(), util::ErrnoException, "Error parsing alignment " << *dash);
UTIL_THROW_IF2(++dash, "Extra gunk in alignment " << *token);
if (GetWord(targetPos).IsNonTerminal()) {
alignNonTerm.insert(std::pair<size_t,size_t>(sourcePos, targetPos));
} else {
alignTerm.insert(std::pair<size_t,size_t>(sourcePos, targetPos));
}
}
SetAlignTerm(alignTerm);
SetAlignNonTerm(alignNonTerm);
}
Example 11: Tokenize
void TargetPhraseImpl::SetAlignmentInfo(const std::string &alignString)
{
AlignmentInfo::CollType alignTerm, alignNonTerm;
vector<string> toks = Tokenize(alignString);
for (size_t i = 0; i < toks.size(); ++i) {
vector<size_t> alignPair = Tokenize<size_t>(toks[i], "-");
UTIL_THROW_IF2(alignPair.size() != 2, "Wrong alignment format");
size_t sourcePos = alignPair[0];
size_t targetPos = alignPair[1];
if ((*this)[targetPos].isNonTerminal) {
alignNonTerm.insert(std::pair<size_t,size_t>(sourcePos, targetPos));
} else {
alignTerm.insert(std::pair<size_t,size_t>(sourcePos, targetPos));
}
}
SetAlignTerm(alignTerm);
SetAlignNonTerm(alignNonTerm);
// cerr << "TargetPhrase::SetAlignmentInfo(const StringPiece &alignString) this:|" << *this << "|\n";
//cerr << "alignTerm=" << alignTerm.size() << endl;
//cerr << "alignNonTerm=" << alignNonTerm.size() << endl;
}
Example 12: UTIL_THROW_IF2
ChartHypothesis *RuleCubeItem::ReleaseHypothesis()
{
UTIL_THROW_IF2(m_hypothesis == NULL, "Hypothesis is NULL");
ChartHypothesis *hypo = m_hypothesis;
m_hypothesis = NULL;
return hypo;
}
Example 13: UTIL_THROW_IF2
void ChartParser::CreateInputPaths(const InputType &input)
{
size_t size = input.GetSize();
m_inputPathMatrix.resize(size);
UTIL_THROW_IF2(input.GetType() != SentenceInput && input.GetType() != TreeInputType,
"Input must be a sentence or a tree, not a lattice or confusion network");
for (size_t phaseSize = 1; phaseSize <= size; ++phaseSize) {
for (size_t startPos = 0; startPos < size - phaseSize + 1; ++startPos) {
size_t endPos = startPos + phaseSize -1;
vector<InputPath*> &vec = m_inputPathMatrix[startPos];
WordsRange range(startPos, endPos);
Phrase subphrase(input.GetSubString(range));
const NonTerminalSet &labels = input.GetLabelSet(startPos, endPos);
InputPath *node;
if (range.GetNumWordsCovered() == 1) {
node = new InputPath(subphrase, labels, range, NULL, NULL);
vec.push_back(node);
} else {
const InputPath &prevNode = GetInputPath(startPos, endPos - 1);
node = new InputPath(subphrase, labels, range, &prevNode, NULL);
vec.push_back(node);
}
//m_inputPathQueue.push_back(node);
}
}
}
Example 14: UTIL_THROW_IF2
const PhraseDictionaryNodeMemory &PhraseDictionaryFuzzyMatch::GetRootNode(long translationId) const
{
std::map<long, PhraseDictionaryNodeMemory>::const_iterator iter = m_collection.find(translationId);
UTIL_THROW_IF2(iter == m_collection.end(),
"Couldn't find root node for input: " << translationId);
return iter->second;
}
Example 15: outStream
std::vector<TargetPhrase*> PhraseDictionaryTransliteration::CreateTargetPhrases(const Phrase &sourcePhrase, const string &outDir) const
{
std::vector<TargetPhrase*> ret;
string outPath = outDir + "/out.txt";
ifstream outStream(outPath.c_str());
string line;
while (getline(outStream, line)) {
vector<string> toks;
Tokenize(toks, line, "\t");
UTIL_THROW_IF2(toks.size() != 2, "Error in transliteration output file. Expecting word\tscore");
TargetPhrase *tp = new TargetPhrase();
Word &word = tp->AddWord();
word.CreateFromString(Output, m_output, toks[0], false);
float score = Scan<float>(toks[1]);
tp->GetScoreBreakdown().PlusEquals(this, score);
// evaluate the scores of all other feature functions for this rule as it is loaded
tp->Evaluate(sourcePhrase, GetFeaturesToApply());
ret.push_back(tp);
}
outStream.close();
return ret;
}