本文整理汇总了C++中Tokenizer类的典型用法代码示例。如果您正苦于以下问题:C++ Tokenizer类的具体用法?C++ Tokenizer怎么用?C++ Tokenizer使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Tokenizer类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: removeFirstPostingsFromDocument
// Removes the first posting of every term produced by `tokens` from `doc`,
// mirroring the term generation done at indexing time: capitalised tokens are
// additionally R-prefixed, and depending on `mode` the unstemmed form, the
// stemmed form, or both are removed (stemming only if `language` is set).
void XapianIndex::removeFirstPostingsFromDocument(Tokenizer &tokens, Xapian::Document &doc,
    const string &prefix, const string &language, StemmingMode mode) const
{
    Xapian::TermIterator termListIter = doc.termlist_begin();
    string term;

    // Keep the stemmer on the stack instead of new/delete: the original code
    // leaked the heap-allocated Xapian::Stem if anything below threw.
    // A default-constructed Xapian::Stem is a no-op; `haveStemmer` records
    // whether a language-specific stemmer was actually configured.
    Xapian::Stem stemmer;
    bool haveStemmer = false;
    if (!language.empty())
    {
        stemmer = Xapian::Stem(StringManip::toLowerCase(language));
        haveStemmer = true;
    }

    // Terms starting with a capital letter are R-prefixed, unless a prefix
    // is already defined
    string upperCasePrefix(prefix.empty() ? "R" : prefix);

    // Get the terms and remove the first posting for each
    while (tokens.nextToken(term))
    {
        if (term.empty())
        {
            continue;
        }
        // Does it start with a capital letter ?
        // (cast through unsigned char: passing a negative plain char to
        // isupper() is undefined behaviour)
        if (isupper((unsigned char)term[0]) != 0)
        {
            removeFirstPosting(doc, termListIter, upperCasePrefix + term);
        }
        // Lower case the term
        term = StringManip::toLowerCase(term);
        // Remove the unstemmed form when not stemming, or no stemmer is set
        if ((mode == STORE_UNSTEM) || !haveStemmer)
        {
            removeFirstPosting(doc, termListIter, prefix + XapianDatabase::limitTermLength(term));
        }
        else if (mode == STORE_STEM)
        {
#if XAPIAN_MAJOR_VERSION==0
            string stemmedTerm(stemmer.stem_word(term));
#else
            string stemmedTerm(stemmer(term));
#endif
            removeFirstPosting(doc, termListIter, prefix + XapianDatabase::limitTermLength(stemmedTerm));
        }
        else if (mode == STORE_BOTH)
        {
#if XAPIAN_MAJOR_VERSION==0
            string stemmedTerm(stemmer.stem_word(term));
#else
            string stemmedTerm(stemmer(term));
#endif
            // Remove both the unstemmed and, if different, the stemmed form
            removeFirstPosting(doc, termListIter, prefix + XapianDatabase::limitTermLength(term));
            if (stemmedTerm != term)
            {
                removeFirstPosting(doc, termListIter, prefix + XapianDatabase::limitTermLength(stemmedTerm));
            }
        }
    }
}
示例2: exception
// Reads a pedigree file whose columns were previously declared via
// putColNames(): locates the "individual", "sire" and "dam" columns,
// parses every record into a PNode, stores it in this map keyed by the
// individual id, and finally sequences/codes the pedigree.
// Exits the process if the file cannot be opened; throws on malformed
// records or missing column declarations.
void RPedigree::inputPed(string fileString){
// Authors: Rohan L. Fernando
// (2005)
// Contributors:
const char* fname = fileString.c_str();
cout << "reading pedigree file \n";
// colName must have been populated by putColNames() before calling this
if (colName.size() < 3) {
cerr << "RPedigree::input(): colName.size() < 3 \n";
cerr << "Did you forget the putColNames method? \n";
// NOTE(review): std::exception has no (const char*) constructor in standard
// C++ -- this compiles only as an MSVC extension; consider runtime_error
throw exception("Error in RPedigree::input()");
}
// Resolve the three mandatory column positions (-1 means missing)
int indIndex = colName.getIndex("individual");
if (indIndex == -1){
cerr << "RPedigree::input(): individual column is missing in colName \n";
throw exception("Error in RPedigree::input()");
}
int sireIndex = colName.getIndex("sire");
if (sireIndex == -1){
cerr << "RPedigree::input(): sire column is missing in colName \n";
throw exception("Error in RPedigree::input()");
}
int damIndex = colName.getIndex("dam");
if (damIndex == -1){
cerr << "RPedigree::input(): dam column is missing in colName \n";
throw exception("Error in RPedigree::input()");
}
unsigned numCol = colName.size();
// rec counts records since the last progress print; rec1 accumulates the
// totals already printed, so rec+rec1 is the overall record number
double rec = 0, rec1 = 0;
string indstr, sirestr, damstr;
ifstream datafile(fname);
if(!datafile){
cout<< "Cannot open pedigree file: " << fname << endl;
exit(1);
}
datafile.setf(ios::skipws);
PNode *ptr;
// Accepted field separators: space, tab, comma, newline, carriage return
std::string sep(" \t,\n\r");
std::string inputStr;
Tokenizer colData;
unsigned COUNT = 0;
while ( getline(datafile,inputStr) ){
// Split the record into columns and pull out the three id fields
colData.getTokens(inputStr,sep);
indstr = colData[indIndex];
sirestr = colData[sireIndex];
damstr = colData[damIndex];
rec++;
// Print a progress counter every 1000 records ('\r' overwrites in place)
if(rec==1000){
cout<<rec+rec1<<"\r";
cout.flush();
rec1 += rec;
rec = 0;
}
// Every record must have exactly the declared number of columns
if (colData.size() != numCol){
cerr << " Record " << rec1 + rec << " has " << colData.size() << " columns \n";
cerr << " Expected " << numCol << endl;
throw exception("Error in RPedigree::input()");
}
ptr = new PNode(indstr, sirestr, damstr);
// For an already-ordered pedigree, assign sequence numbers as we read
if (orderedPed) ptr->ind = ++COUNT;
(*this)[indstr] = ptr;
}
datafile.close();
// Ordered pedigrees only need sequencing; otherwise synthesize missing
// parent entries and (re)code the pedigree first
if(orderedPed){
seqnPed();
}
else {
generateEntriesforParents();
codePed();
}
makePedVector();
calc_inbreeding();
}
示例3: removeUnusedFlats
void ArchiveOperations::removeUnusedFlats(Archive* archive)
{
// Check archive was given
if (!archive)
return;
// --- Build list of used flats ---
TexUsedMap used_textures;
int total_maps = 0;
// Get all SECTORS entries
Archive::SearchOptions opt;
opt.match_type = EntryType::fromId("map_sectors");
auto sectors = archive->findAll(opt);
total_maps += sectors.size();
// Go through and add used flats to list
DoomMapFormat::Sector sec;
wxString tex_floor, tex_ceil;
for (auto& sector : sectors)
{
int nsec = sector->size() / 26;
sector->seek(0, SEEK_SET);
for (int s = 0; s < nsec; s++)
{
// Read sector data
sector->read(&sec, 26);
// Get textures
tex_floor = wxString::FromAscii(sec.f_tex, 8);
tex_ceil = wxString::FromAscii(sec.c_tex, 8);
// Add to used textures list
used_textures[tex_floor].used = true;
used_textures[tex_ceil].used = true;
}
}
// Get all TEXTMAP entries
opt.match_name = "TEXTMAP";
opt.match_type = EntryType::fromId("udmf_textmap");
auto udmfmaps = archive->findAll(opt);
total_maps += udmfmaps.size();
// Go through and add used flats to list
Tokenizer tz;
tz.setSpecialCharacters("{};=");
for (auto& udmfmap : udmfmaps)
{
// Open in tokenizer
tz.openMem(udmfmap->data(), "UDMF TEXTMAP");
// Go through text tokens
wxString token = tz.getToken();
while (!token.IsEmpty())
{
// Check for sector definition
if (token == "sector")
{
tz.getToken(); // Skip {
token = tz.getToken();
while (token != "}")
{
// Check for texture property
if (token == "texturefloor" || token == "textureceiling")
{
tz.getToken(); // Skip =
used_textures[tz.getToken()].used = true;
}
token = tz.getToken();
}
}
// Next token
token = tz.getToken();
}
}
// Check if any maps were found
if (total_maps == 0)
return;
// Find all flats
opt.match_name = "";
opt.match_namespace = "flats";
opt.match_type = nullptr;
auto flats = archive->findAll(opt);
// Create list of all unused flats
wxArrayString unused_tex;
bool anim = false;
for (auto& flat : flats)
{
// Skip markers
if (flat->size() == 0)
continue;
// Check for animation start
//.........这里部分代码省略.........
示例4: read_pair_timestep
//
// reads the next timestep from the opened file,
// using the current quantization settings in the global
// state object
//
//
// Reads the next timestep from the opened file, using the current
// quantization settings in the global state object.
//
// Returns false when the file is exhausted (or was never opened); on
// success fills `ts` with the quantized epoch, the set of vertices seen,
// and the deduplicated, self-loop-free edge set. A line belonging to the
// NEXT timestep is kept in the static buffer for the following call.
//
bool read_pair_timestep(TIMESTEP &ts) {
	static Tokenizer tok;
	// Carry-over buffer: holds the line that revealed the start of the
	// next timestep, which could not be consumed by the previous call.
	static char buf[G_MAX_INPUT_LINE] = "";

	if(!state.fpData)
		return false;

	ts.edges.clear();
	ts.vertices.clear();

	// do we have a left-over line from the last timestep, or do we need
	// to read another line from the file?
	if(!buf[0])
		if(!fgets(buf, G_MAX_INPUT_LINE, state.fpData)) {
			// no lines left in file -- we're done reading the whole file
			buf[0] = 0;
			return false;
		}

	// read a new timestep
	ts.epoch = -1;
	do {
		tok.tokenize(buf);
		if(tok.num_tokens() >= 1) { // skip blank lines
			// get the timestep label (parsed with strtod so fractional
			// labels are accepted; the value is truncated to int)
			char *endptr;
			errno = 0;
			int tsnum = strtod(tok.token(0), &endptr);
			if(errno || endptr == tok.token(0)) {
				// encountered a non-numeric timestamp
				fprintf(stderr, "Error: encountered a non-numeric timestep label '%s' in input file.\n", tok.token(0));
				exit(-5);
			}
			int quant = quantize(tsnum);
			if(ts.epoch == -1)
				ts.epoch = quant;
			if(ts.epoch != quant) // new timestep? keep line in buf for next call
				break;
			// add this observation to the timestep
			if(tok.num_tokens() >= 2) {
				// have at least one vertex
				int vid1 = map_vertex(tok.token(1));
				ts.vertices.insert(vid1);
				if(tok.num_tokens() >= 3) {
					// have an edge
					int vid2 = map_vertex(tok.token(2));
					// skip self-edges, i.e., (v, v)
					if(vid1 != vid2) {
						ts.vertices.insert(vid2);
						// make sure vid1 < vid2, so if the graph is undirected then
						// we never insert a duplicate edge with the endpoints flipped
						if(!state.directed && vid2 < vid1)
							std::swap(vid1, vid2);
						// FIX: the original used make_pair<int,int>(vid1, vid2);
						// explicit template arguments make the parameters rvalue
						// references in C++11, so passing lvalues fails to compile.
						ts.edges.insert(std::make_pair(vid1, vid2));
					}
				}
			}
		}
		// read the next line
		if(!fgets(buf, G_MAX_INPUT_LINE, state.fpData)) {
			buf[0] = 0;
			break;
		}
	} while(1);

	// update state
	state.ne += ts.edges.size();
	state.nt++;

	// sanity check: never more edges than the complete (di)graph allows
	assert(ts.edges.size() <= ts.vertices.size()*(ts.vertices.size()-1)/(state.directed?1:2) );
	return true;
}
示例5: parse
/* CTexture::parse
* Parses a TEXTURES format texture definition
*******************************************************************/
/* CTexture::parse
 * Parses a TEXTURES format texture definition
 *******************************************************************/
bool CTexture::parse(Tokenizer& tz, string type)
{
	// A leading "optional" keyword may precede the texture name
	if (S_CMPNOCASE(tz.peekToken(), "optional"))
	{
		tz.getToken(); // Skip it
		optional = true;
	}

	// Basic info: <name> , <width> , <height>
	this->type = type;
	this->extended = true;
	this->defined = false;
	name = tz.getToken().Upper();
	tz.getToken(); // Skip ,
	width = tz.getInteger();
	tz.getToken(); // Skip ,
	height = tz.getInteger();

	// Without an opening '{' there is no extended definition block
	if (tz.peekToken() != "{")
		return true;
	tz.getToken(); // Skip {

	// Read properties until the closing '}'
	for (string prop = tz.getToken(); prop != "}"; prop = tz.getToken())
	{
		// Running out of text before '}' is a parse error
		if (prop.IsEmpty())
		{
			wxLogMessage("Error parsing texture %s: End of text found, missing } perhaps?", name);
			return false;
		}

		if (S_CMPNOCASE(prop, "XScale"))
			scale_x = tz.getFloat();
		else if (S_CMPNOCASE(prop, "YScale"))
			scale_y = tz.getFloat();
		else if (S_CMPNOCASE(prop, "Offset"))
		{
			offset_x = tz.getInteger();
			tz.getToken(); // Skip ,
			offset_y = tz.getInteger();
		}
		else if (S_CMPNOCASE(prop, "WorldPanning"))
			world_panning = true;
		else if (S_CMPNOCASE(prop, "NoDecals"))
			no_decals = true;
		else if (S_CMPNOCASE(prop, "NullTexture"))
			null_texture = true;
		else if (S_CMPNOCASE(prop, "Patch"))
		{
			CTPatchEx* patch = new CTPatchEx();
			patch->parse(tz);
			patches.push_back(patch);
		}
		else if (S_CMPNOCASE(prop, "Graphic"))
		{
			CTPatchEx* patch = new CTPatchEx();
			patch->parse(tz, PTYPE_GRAPHIC);
			patches.push_back(patch);
		}
	}

	return true;
}
示例6: readConfigFile
// -----------------------------------------------------------------------------
// Reads and parses the SLADE configuration file
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// Reads and parses the SLADE configuration file
// -----------------------------------------------------------------------------
// Walks slade3.cfg token-by-token; each recognised section keyword is
// followed by a '{ ... }' block whose entries are dispatched to the
// appropriate subsystem (cvars, resource paths, recent files, keybinds,
// nodebuilder/executable paths, window info). Unknown tokens are skipped.
// NOTE(review): advIf(name, 2) appears to advance past the section name and
// its opening '{' when matched -- confirm against Tokenizer's API.
void readConfigFile()
{
// Open SLADE.cfg
Tokenizer tz;
if (!tz.openFile(App::path("slade3.cfg", App::Dir::User)))
return;
// Go through the file with the tokenizer
while (!tz.atEnd())
{
// If we come across a 'cvars' token, read in the cvars section
if (tz.advIf("cvars", 2))
{
// Keep reading name/value pairs until we hit the ending '}'
while (!tz.checkOrEnd("}"))
{
// current() is the cvar name, peek() its value; step over both
CVar::set(tz.current().text, tz.peek().text);
tz.adv(2);
}
tz.adv(); // Skip ending }
}
// Read base resource archive paths
if (tz.advIf("base_resource_paths", 2))
{
// One path per token until the ending '}'
while (!tz.checkOrEnd("}"))
{
archive_manager.addBaseResourcePath(tz.current().text);
tz.adv();
}
tz.adv(); // Skip ending }
}
// Read recent files list
if (tz.advIf("recent_files", 2))
{
// One file path per token until the ending '}'
while (!tz.checkOrEnd("}"))
{
archive_manager.addRecentFile(tz.current().text);
tz.adv();
}
tz.adv(); // Skip ending }
}
// Read keybinds (KeyBind consumes its own block)
if (tz.advIf("keys", 2))
KeyBind::readBinds(tz);
// Read nodebuilder paths
if (tz.advIf("nodebuilder_paths", 2))
{
// Pairs of builder-name / path tokens
while (!tz.checkOrEnd("}"))
{
NodeBuilders::addBuilderPath(tz.current().text, tz.peek().text);
tz.adv(2);
}
tz.adv(); // Skip ending }
}
// Read game exe paths
if (tz.advIf("executable_paths", 2))
{
// Pairs of game-id / executable-path tokens
while (!tz.checkOrEnd("}"))
{
Executables::setGameExePath(tz.current().text, tz.peek().text);
tz.adv(2);
}
tz.adv(); // Skip ending }
}
// Read window size/position info (Misc consumes its own block)
if (tz.advIf("window_info", 2))
Misc::readWindowInfo(tz);
// Next token
tz.adv();
}
}
示例7: RunInstallScripts
//=================================================================================================
bool RunInstallScripts()
{
Info("Reading install scripts.");
WIN32_FIND_DATA data;
HANDLE find = FindFirstFile(Format("%s/install/*.txt", g_system_dir.c_str()), &data);
if(find == INVALID_HANDLE_VALUE)
return true;
vector<InstallScript> scripts;
Tokenizer t;
t.AddKeyword("install", 0);
t.AddKeyword("version", 1);
t.AddKeyword("remove", 2);
do
{
int major, minor, patch;
// read file to find version info
try
{
if(t.FromFile(Format("%s/install/%s", g_system_dir.c_str(), data.cFileName)))
{
t.Next();
if(t.MustGetKeywordId() == 2)
{
// old install script
if(sscanf_s(data.cFileName, "%d.%d.%d.txt", &major, &minor, &patch) != 3)
{
if(sscanf_s(data.cFileName, "%d.%d.txt", &major, &minor) == 2)
patch = 0;
else
{
// unknown version
major = 0;
minor = 0;
patch = 0;
}
}
}
else
{
t.AssertKeyword(0);
t.Next();
if(t.MustGetInt() != 1)
t.Throw(Format("Unknown install script version '%d'.", t.MustGetInt()));
t.Next();
t.AssertKeyword(1);
t.Next();
major = t.MustGetInt();
t.Next();
minor = t.MustGetInt();
t.Next();
patch = t.MustGetInt();
}
InstallScript& s = Add1(scripts);
s.filename = data.cFileName;
s.version = (((major & 0xFF) << 16) | ((minor & 0xFF) << 8) | (patch & 0xFF));
}
}
catch(const Tokenizer::Exception& e)
{
Warn("Unknown install script '%s': %s", data.cFileName, e.ToString());
}
} while(FindNextFile(find, &data));
FindClose(find);
if(scripts.empty())
return true;
std::sort(scripts.begin(), scripts.end());
GetModuleFileName(nullptr, BUF, 256);
char buf[512], buf2[512];
char* filename;
GetFullPathName(BUF, 512, buf, &filename);
*filename = 0;
DWORD len = strlen(buf);
LocalString s, s2;
for(vector<InstallScript>::iterator it = scripts.begin(), end = scripts.end(); it != end; ++it)
{
cstring path = Format("%s/install/%s", g_system_dir.c_str(), it->filename.c_str());
try
{
if(!t.FromFile(path))
{
Error("Failed to load install script '%s'.", it->filename.c_str());
continue;
}
Info("Using install script %s.", it->filename.c_str());
t.Next();
t.AssertKeyword();
//.........这里部分代码省略.........
示例8: parseTokens
void CheckUnusedFunctions::parseTokens(const Tokenizer &tokenizer)
{
// Function declarations..
for (const Token *tok = tokenizer.tokens(); tok; tok = tok->next()) {
if (tok->fileIndex() != 0)
continue;
// token contains a ':' => skip to next ; or {
if (tok->str().find(":") != std::string::npos) {
while (tok && tok->str().find_first_of(";{"))
tok = tok->next();
if (tok)
continue;
break;
}
// If this is a template function, skip it
if (tok->previous() && tok->previous()->str() == ">")
continue;
const Token *funcname = 0;
if (Token::Match(tok, "%type% %var% ("))
funcname = tok->tokAt(1);
else if (Token::Match(tok, "%type% * %var% ("))
funcname = tok->tokAt(2);
else if (Token::Match(tok, "%type% :: %var% (") && !Token::Match(tok, tok->strAt(2).c_str()))
funcname = tok->tokAt(2);
// Don't assume throw as a function name: void foo() throw () {}
if (Token::Match(tok->previous(), ")|const"))
funcname = 0;
// Check that ") {" is found..
for (const Token *tok2 = funcname; tok2; tok2 = tok2->next()) {
if (tok2->str() == ")") {
if (! Token::simpleMatch(tok2, ") {") &&
! Token::simpleMatch(tok2, ") const {") &&
! Token::simpleMatch(tok2, ") const throw ( ) {") &&
! Token::simpleMatch(tok2, ") throw ( ) {"))
funcname = 0;
break;
}
}
if (funcname) {
FunctionUsage &func = _functions[ funcname->str()];
if (!func.lineNumber)
func.lineNumber = funcname->linenr();
// No filename set yet..
if (func.filename.empty()) {
func.filename = tokenizer.getFiles()->at(0);
}
// Multiple files => filename = "+"
else if (func.filename != tokenizer.getFiles()->at(0)) {
//func.filename = "+";
func.usedOtherFile |= func.usedSameFile;
}
}
}
// Function usage..
for (const Token *tok = tokenizer.tokens(); tok; tok = tok->next()) {
const Token *funcname = 0;
if (Token::Match(tok->next(), "%var% (")) {
funcname = tok->next();
}
else if (Token::Match(tok, "[;{}.,()[=+-/&|!?:] %var% [(),;:}]"))
funcname = tok->next();
else if (Token::Match(tok, "[=(,] & %var% :: %var% [,);]"))
funcname = tok->tokAt(4);
else
continue;
// funcname ( => Assert that the end parenthesis isn't followed by {
if (Token::Match(funcname, "%var% (")) {
int parlevel = 0;
for (const Token *tok2 = funcname; tok2; tok2 = tok2->next()) {
if (tok2->str() == "(")
++parlevel;
else if (tok2->str() == ")") {
--parlevel;
if (parlevel == 0 && (Token::Match(tok2, ") const|{")))
funcname = NULL;
if (parlevel <= 0)
break;
}
}
}
if (funcname) {
FunctionUsage &func = _functions[ funcname->str()];
//.........这里部分代码省略.........
示例9: main
int main()
{
Tokenizer t;
//adding the tokens for the tokenizer
t.add("\\+|-",PLUSMINUS);
t.add("\\*|/",MULTDIV);
t.add("\\^",RAISED);
t.add("!",FACULTY);
//regexp matching is greedy; try sinh/cosh/tanh first
t.add("sinh|cosh|tanh",FUNCTION);
t.add("asin|acos|atan",FUNCTION);
t.add("sin|cos|tan|sqrt",FUNCTION);
t.add("log\\[[[[:digit:]]+(\\.)?[[:digit:]]*\\]|log",FUNCTION);
t.add("\\(",OPEN_BRACKET);
t.add("\\)",CLOSE_BRACKET);
t.add("[[:digit:]]+(\\.)?[[:digit:]]*",NUMBER);
string str;
while(1) {
cout << "=> ";
if(!getline(cin,str)) {
cout << endl;
return 1;
}
str.erase(remove_if(str.begin(),str.end(), ::isspace),str.end());
if(str.empty())
continue;
if(str == "quit")
break;
try{
vector<token> a = t.tokenize(str);
Parser p(a);
Expression* exp = p.parse();
cout << "\t\t== " << exp->evaluate() << endl;;
} catch (runtime_error& e) {
cout << "\t\tSyntax error: " << e.what() << endl;;
}
}
return 0;
}
示例10: HandleError
// Handles an error pragma: forwards the pragma's remaining text, as returned
// by the tokenizer, to the global error reporter.
void ppPragma::HandleError(Tokenizer &tk)
{
Errors::Error(tk.GetString());
}
示例11: executeRules
void CppCheck::executeRules(const std::string &tokenlist, const Tokenizer &tokenizer)
{
(void)tokenlist;
(void)tokenizer;
#ifdef HAVE_RULES
// Are there rules to execute?
bool isrule = false;
for (std::list<Settings::Rule>::const_iterator it = mSettings.rules.begin(); it != mSettings.rules.end(); ++it) {
if (it->tokenlist == tokenlist)
isrule = true;
}
// There is no rule to execute
if (isrule == false)
return;
// Write all tokens in a string that can be parsed by pcre
std::ostringstream ostr;
for (const Token *tok = tokenizer.tokens(); tok; tok = tok->next())
ostr << " " << tok->str();
const std::string str(ostr.str());
for (std::list<Settings::Rule>::const_iterator it = mSettings.rules.begin(); it != mSettings.rules.end(); ++it) {
const Settings::Rule &rule = *it;
if (rule.pattern.empty() || rule.id.empty() || rule.severity == Severity::none || rule.tokenlist != tokenlist)
continue;
const char *pcreCompileErrorStr = nullptr;
int erroffset = 0;
pcre * const re = pcre_compile(rule.pattern.c_str(),0,&pcreCompileErrorStr,&erroffset,nullptr);
if (!re) {
if (pcreCompileErrorStr) {
const std::string msg = "pcre_compile failed: " + std::string(pcreCompileErrorStr);
const ErrorLogger::ErrorMessage errmsg(std::list<ErrorLogger::ErrorMessage::FileLocation>(),
emptyString,
Severity::error,
msg,
"pcre_compile",
false);
reportErr(errmsg);
}
continue;
}
// Optimize the regex, but only if PCRE_CONFIG_JIT is available
#ifdef PCRE_CONFIG_JIT
const char *pcreStudyErrorStr = nullptr;
pcre_extra * const pcreExtra = pcre_study(re, PCRE_STUDY_JIT_COMPILE, &pcreStudyErrorStr);
// pcre_study() returns NULL for both errors and when it can not optimize the regex.
// The last argument is how one checks for errors.
// It is NULL if everything works, and points to an error string otherwise.
if (pcreStudyErrorStr) {
const std::string msg = "pcre_study failed: " + std::string(pcreStudyErrorStr);
const ErrorLogger::ErrorMessage errmsg(std::list<ErrorLogger::ErrorMessage::FileLocation>(),
emptyString,
Severity::error,
msg,
"pcre_study",
false);
reportErr(errmsg);
// pcre_compile() worked, but pcre_study() returned an error. Free the resources allocated by pcre_compile().
pcre_free(re);
continue;
}
#else
const pcre_extra * const pcreExtra = nullptr;
#endif
int pos = 0;
int ovector[30]= {0};
while (pos < (int)str.size()) {
const int pcreExecRet = pcre_exec(re, pcreExtra, str.c_str(), (int)str.size(), pos, 0, ovector, 30);
if (pcreExecRet < 0) {
const std::string errorMessage = pcreErrorCodeToString(pcreExecRet);
if (!errorMessage.empty()) {
const ErrorLogger::ErrorMessage errmsg(std::list<ErrorLogger::ErrorMessage::FileLocation>(),
emptyString,
Severity::error,
std::string("pcre_exec failed: ") + errorMessage,
"pcre_exec",
false);
reportErr(errmsg);
}
break;
}
const unsigned int pos1 = (unsigned int)ovector[0];
const unsigned int pos2 = (unsigned int)ovector[1];
// jump to the end of the match for the next pcre_exec
pos = (int)pos2;
// determine location..
ErrorLogger::ErrorMessage::FileLocation loc;
loc.setfile(tokenizer.list.getSourceFilePath());
loc.line = 0;
//.........这里部分代码省略.........
示例12: strlen
// Feeds a chunk of (possibly undecoded) document bytes to the frame's
// tokenizer. Raw-data tokenizers receive the bytes untouched; otherwise a
// TextResourceDecoder is lazily created (choosing an encoding from user
// choice, HTTP header, or same-origin parent frame) and the decoded text is
// written to the tokenizer. `len == -1` means `str` is NUL-terminated;
// `flush` forces the decoder to emit any buffered partial sequences.
void DocumentWriter::addData(const char* str, int len, bool flush)
{
if (len == 0 && !flush)
return;
if (len == -1)
len = strlen(str);
Tokenizer* tokenizer = m_frame->document()->tokenizer();
// Raw-data tokenizers (e.g. for non-text content) bypass text decoding
if (tokenizer && tokenizer->wantsRawData()) {
if (len > 0)
tokenizer->writeRawData(str, len);
return;
}
// Lazily create the decoder on the first decoded chunk
if (!m_decoder) {
if (Settings* settings = m_frame->settings()) {
m_decoder = TextResourceDecoder::create(m_mimeType,
settings->defaultTextEncodingName(),
settings->usesEncodingDetector());
Frame* parentFrame = m_frame->tree()->parent();
// Set the hint encoding to the parent frame encoding only if
// the parent and the current frames share the security origin.
// We impose this condition because somebody can make a child frame
// containing a carefully crafted html/javascript in one encoding
// that can be mistaken for hintEncoding (or related encoding) by
// an auto detector. When interpreted in the latter, it could be
// an attack vector.
// FIXME: This might be too cautious for non-7bit-encodings and
// we may consider relaxing this later after testing.
if (canReferToParentFrameEncoding(m_frame, parentFrame))
m_decoder->setHintEncoding(parentFrame->document()->decoder());
} else
m_decoder = TextResourceDecoder::create(m_mimeType, String());
Frame* parentFrame = m_frame->tree()->parent();
// No explicit encoding: inherit from a same-origin parent frame;
// otherwise honour the user-chosen or HTTP-header encoding
if (m_encoding.isEmpty()) {
if (canReferToParentFrameEncoding(m_frame, parentFrame))
m_decoder->setEncoding(parentFrame->document()->inputEncoding(), TextResourceDecoder::EncodingFromParentFrame);
} else {
m_decoder->setEncoding(m_encoding,
m_encodingWasChosenByUser ? TextResourceDecoder::UserChosenEncoding : TextResourceDecoder::EncodingFromHTTPHeader);
}
m_frame->document()->setDecoder(m_decoder.get());
}
String decoded = m_decoder->decode(str, len);
// On flush, also drain any bytes the decoder buffered internally
if (flush)
decoded += m_decoder->flush();
if (decoded.isEmpty())
return;
// One-time setup on first decoded data: visual ordering + style recalc
if (!m_receivedData) {
m_receivedData = true;
if (m_decoder->encoding().usesVisualOrdering())
m_frame->document()->setVisuallyOrdered();
m_frame->document()->recalcStyle(Node::Force);
}
if (tokenizer) {
ASSERT(!tokenizer->wantsRawData());
tokenizer->write(decoded, true);
}
}
示例13: wxLogMessage
/* Console::execute
* Attempts to execute the command line given
*******************************************************************/
void Console::execute(string command)
{
wxLogMessage("> %s", command);
// Don't bother doing anything else with an empty command
if (command.size() == 0)
return;
// Add the command to the log
cmd_log.insert(cmd_log.begin(), command);
// Announce that a command has been executed
MemChunk mc;
announce("console_execute", mc);
// Tokenize the command string
Tokenizer tz;
tz.openString(command);
// Get the command name
string cmd_name = tz.getToken();
// Get all args
string arg = tz.getToken();
vector<string> args;
while (arg != "")
{
args.push_back(arg);
arg = tz.getToken();
}
// Check that it is a valid command
for (size_t a = 0; a < commands.size(); a++)
{
// Found it, execute and return
if (commands[a].getName() == cmd_name)
{
commands[a].execute(args);
return;
}
}
// Check if it is a cvar
CVar* cvar = get_cvar(cmd_name);
if (cvar)
{
// Arg(s) given, set cvar value
if (args.size() > 0)
{
if (cvar->type == CVAR_BOOLEAN)
{
if (args[0] == "0" || args[0] == "false")
*((CBoolCVar*)cvar) = false;
else
*((CBoolCVar*)cvar) = true;
}
else if (cvar->type == CVAR_INTEGER)
*((CIntCVar*)cvar) = atoi(CHR(args[0]));
else if (cvar->type == CVAR_FLOAT)
*((CFloatCVar*)cvar) = (float)atof(CHR(args[0]));
else if (cvar->type == CVAR_STRING)
*((CStringCVar*)cvar) = args[0];
}
// Print cvar value
string value = "";
if (cvar->type == CVAR_BOOLEAN)
{
if (cvar->GetValue().Bool)
value = "true";
else
value = "false";
}
else if (cvar->type == CVAR_INTEGER)
value = S_FMT("%d", cvar->GetValue().Int);
else if (cvar->type == CVAR_FLOAT)
value = S_FMT("%1.4f", cvar->GetValue().Float);
else
value = ((CStringCVar*)cvar)->value;
logMessage(S_FMT("\"%s\" = \"%s\"", cmd_name, value));
if (cmd_name == "log_verbosity")
Global::log_verbosity = cvar->GetValue().Int;
return;
}
// Toggle global debug mode
if (cmd_name == "debug")
{
Global::debug = !Global::debug;
if (Global::debug)
logMessage("Debugging stuff enabled");
else
logMessage("Debugging stuff disabled");
//.........这里部分代码省略.........
示例14: CHECKERR
// Parses a function call at the current tokenizer position: resolves the
// function by name via the Registrar, splits the parenthesized argument list
// on top-level commas, validates the argument count, and parses each
// argument as its own Line before registering the call object.
// Errors are reported through the CHECKERR macro into `errs`.
void Line::process_function(Tokenizer& tokens, string& errs)
/*****************************************************************************/
{
//create a function call object
string temp = tokens.token().value();
FunctionCall* funcCall = Registrar::generateCall(temp);
CHECKERR((funcCall == NULL), func_err(temp))
tokens.nextToken(); //move past funcname
tokens.nextToken(); //move past open paren
//put tokens into the argument lists.. We loop until we have seen the paren
//that terminates this function call or until we have run out of tokens on
//this line
list< vector<Token> > args;
vector<Token> currArg;
// depth tracks nesting of parentheses so that only commas at depth 0
// (i.e. belonging to THIS call) split arguments
int depth = 0;
while(!((tokens.token().value() == ")" && depth == 0) || tokens.eol())) {
//if we see a comma at paren depth zero, we have just reached the end of an
//argument
if (tokens.token().type() == COMMA && depth == 0) {
assert(!currArg.empty());
args.push_back(currArg);
currArg.clear();
}
else {
currArg.push_back(tokens.token());
if (tokens.token() == Token(OPERATOR, "(", 0))
++depth;
if (tokens.token() == Token(OPERATOR, ")", 0))
--depth;
}
tokens.nextToken();
}
// flush the final argument (no trailing comma after it)
if (!currArg.empty())
args.push_back(currArg);
// the loop must have stopped on the closing ')', not end-of-line
CHECKERR(
tokens.eol() || tokens.token().value() != ")",
arg_err(temp)
)
// variadic functions need at least getNumArgs() arguments; fixed-arity
// functions need exactly that many
if (funcCall->hasVariableArgs()) {
CHECKERR (
args.size() < funcCall->getNumArgs(),
arg_err(temp)
)
} else {
CHECKERR(
args.size() != funcCall->getNumArgs(),
arg_err(temp)
)
}
//Construct a Line for each argument
// NOTE(review): ownership of `arg` appears to transfer to funcCall via
// fillArg() -- confirm FunctionCall frees its argument Lines
list< vector<Token> >::iterator arg_itr = args.begin();
for ( ; arg_itr != args.end(); ++arg_itr) {
CHECKERR (((*arg_itr).size() == 0), arg_err(temp))
Tokenizer tempToken(*arg_itr);
Line* arg = new Line(tempToken, _parent, errs, true);
funcCall->fillArg(arg);
}
addNewObject(funcCall);
}
示例15: main
int main() {
ifstream sourceFile;
TokenList tokens;
Tokenizer tokenizer;
//Read in a file line-by-line and tokenize each line
sourceFile.open("test.vhd");
if (!sourceFile.is_open())
{
cout << "Failed to open file" << endl;
return 1;
}
while(!sourceFile.eof())
{
string lineA, lineB;
getline(sourceFile, lineA);
//while the current line ends with a line-continuation \
//append the next line to the current line
while(lineA.length() > 0 && lineA[lineA.length()-1] == '\\')
{
lineA.erase(lineA.length()-1, 1);
getline(sourceFile, lineB);
lineA += lineB;
}
tokenizer.setString(&lineA);
while(!tokenizer.isComplete())
{
tokens.append(tokenizer.getNextToken());
}
//Re-insert newline that was removed by the getline function
tokens.append("\n");
}
Token *b = tokens.getFirst();
while(b)
{
cout << b->getStringRep() << " ";// << endl; // remove endl for submission
b = b->getNext();
}
cout << endl << "----------------------------------------------"<< endl<< endl;
removeComments(tokens);
/*Test your tokenization of the file by traversing the tokens list and printing out the tokens*/
Token *t = tokens.getFirst();
while(t)
{
cout << t->getStringRep() << " ";// << endl; // remove endl for submission
t = t->getNext();
}
return 0;
}