本文整理汇总了C++中prt_error函数的典型用法代码示例。如果您正苦于以下问题:C++ prt_error函数的具体用法?C++ prt_error怎么用?C++ prt_error使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了prt_error函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: spellcheck_suggest
/**
 * Ask the aspell back-end for spelling suggestions for `word`.
 *
 * @param chk   Opaque checker handle (a struct linkgrammar_aspell *).
 * @param sug   Out-parameter; on success receives a malloc'ed array of
 *              strdup'ed suggestion strings. Caller owns array and strings.
 * @param word  The (possibly misspelled) word to look up.
 * @return the number of suggestions stored in *sug, or 0 on error or
 *         when there are no suggestions (*sug is left untouched then).
 */
int spellcheck_suggest(void * chk, char ***sug, const char * word)
{
	struct linkgrammar_aspell *aspell = (struct linkgrammar_aspell *)chk;

	if (!sug) {
		prt_error("Error: Aspell. Corrupt pointer.\n");
		return 0;
	}

	if (aspell && aspell->speller) {
		const AspellWordList *list = NULL;
		AspellStringEnumeration *elem = NULL;
		const char *aword = NULL;
		unsigned int size, i;
		char **array = NULL;

		list = aspell_speller_suggest(aspell->speller, word, -1);
		elem = aspell_word_list_elements(list);
		size = aspell_word_list_size(list);

		/* Nothing to suggest. Bail out before malloc(0), whose NULL
		 * return would otherwise be misreported as out-of-memory. */
		if (0 == size) {
			delete_aspell_string_enumeration(elem);
			return 0;
		}

		/* Allocate an array of char* for returning back to link-parser. */
		array = (char **)malloc(sizeof(char *) * size);
		if (!array) {
			prt_error("Error: Aspell. Out of memory.\n");
			delete_aspell_string_enumeration(elem);
			return 0;
		}

		i = 0;
		while ((i < size) &&
		       ((aword = aspell_string_enumeration_next(elem)) != NULL)) {
			array[i++] = strdup(aword);
		}
		delete_aspell_string_enumeration(elem);

		*sug = array;
		/* Return the number of entries actually filled in; the list
		 * could enumerate fewer items than its reported size, and the
		 * caller must not read uninitialized slots. */
		return i;
	}
	return 0;
}
示例2: upcase_utf8_str
/**
 * Upcase the first letter of the word.
 * XXX FIXME This works 'most of the time', but is not technically correct.
 * This is because towlower() and towupper() are locale dependent, and also
 * because the byte-counts might not match up, e.g. German ß and SS.
 * The correct long-term fix is to use ICU or glib g_utf8_strup(), etc.
 *
 * @param to    Destination buffer of at least `usize` bytes (may equal
 *              `from` for an in-place upcase).
 * @param from  NUL-terminated multi-byte (UTF-8) source string.
 * @param usize Size of the destination buffer, in bytes.
 */
void upcase_utf8_str(char *to, const char * from, size_t usize)
{
	wchar_t c;
	int i, nbl, nbh;
	char low[MB_LEN_MAX];
	mbstate_t mbs;

	memset(&mbs, 0, sizeof(mbs));

	/* Decode the first character; nbh is its encoded length in bytes.
	 * mbrtowc() returns (size_t)-1/-2 on bad input, negative as int. */
	nbh = mbrtowc (&c, from, MB_CUR_MAX, &mbs);
	if (nbh < 0)
	{
		prt_error("Error: Invalid multi-byte string!");
		return;
	}
	c = towupper(c);
	nbl = wctomb_check(low, c);
	if (nbl < 0)
	{
		/* The upcased character cannot be re-encoded. Without this
		 * guard, the negative nbl would make usize-nbl (size_t
		 * arithmetic) huge in safe_strcpy() below. */
		prt_error("Error: can't upcase multi-byte string!");
		return;
	}

	/* Check for error on an in-place copy: if the upcased encoding is
	 * longer than the original, copying in place would clobber the tail
	 * of the string before it is moved. */
	if ((nbh < nbl) && (to == from))
	{
		/* I'm too lazy to fix this */
		prt_error("Error: can't upcase multi-byte string!");
		return;
	}

	/* Upcase: copy the re-encoded first character. */
	for (i=0; i<nbl; i++) { to[i] = low[i]; }

	/* In-place with equal byte counts: the tail is already in place. */
	if ((nbh == nbl) && (to == from)) return;

	/* Copy the rest of the word. */
	from += nbh;
	to += nbl;
	safe_strcpy(to, from, usize-nbl);
}
示例3: win32_getlocale
static char *
win32_getlocale (void)
{
char lbuf[10];
char locale[32];
LCID lcid = GetThreadLocale();
if (0 >= GetLocaleInfoA(lcid, LOCALE_SISO639LANGNAME, lbuf, sizeof(lbuf)))
{
prt_error("Error: GetLocaleInfoA LOCALE_SENGLISHLANGUAGENAME LCID=%d: "
"Error %d\n", (int)lcid, (int)GetLastError());
return NULL;
}
strcpy(locale, lbuf);
strcat(locale, "-");
if (0 >= GetLocaleInfoA(lcid, LOCALE_SISO3166CTRYNAME, lbuf, sizeof(lbuf)))
{
prt_error("Error: GetLocaleInfoA LOCALE_SISO3166CTRYNAME LCID=%d: "
"Error %d\n", (int)lcid, (int)GetLastError());
return NULL;
}
strcat(locale, lbuf);
return strdup(locale);
}
示例4: set_utf8_program_locale
/**
 * Ensure that the program's locale has a UTF-8 codeset.
 * No-op on Windows, and on POSIX systems whose LC_CTYPE codeset is
 * already UTF-8. Otherwise tries en_US.UTF-8, then C.UTF-8, warning
 * if neither can be set.
 */
void set_utf8_program_locale(void)
{
#ifndef _WIN32
	/* The LG library doesn't use mbrtowc_l(), since it doesn't exist in
	 * the dynamic glibc (2.22). mbsrtowcs_l() could also be used, but for
	 * some reason it exists only in the static glibc.
	 * In order that mbrtowc() will work for any UTF-8 character, UTF-8
	 * codeset is ensured. */
	const char *codeset = nl_langinfo(CODESET);

	if (!strstr(codeset, "UTF") && !strstr(codeset, "utf"))
	{
		const char *locale = setlocale(LC_CTYPE, NULL);

		/* Avoid an initial spurious message. Also guard against a NULL
		 * return from the locale query, which would crash strcmp(). */
		if ((NULL != locale) &&
		    (0 != strcmp(locale, "C")) && (0 != strcmp(locale, "POSIX")))
		{
			prt_error("Warning: Program locale \"%s\" (codeset %s) was not UTF-8; "
			          "force-setting to en_US.UTF-8\n", locale, codeset);
		}

		locale = setlocale(LC_CTYPE, "en_US.UTF-8");
		if (NULL == locale)
		{
			prt_error("Warning: Program locale en_US.UTF-8 could not be set; "
			          "force-setting to C.UTF-8\n");
			locale = setlocale(LC_CTYPE, "C.UTF-8");
			if (NULL == locale)
			{
				prt_error("Warning: Could not set a UTF-8 program locale; "
				          "program may malfunction\n");
			}
		}
	}
#endif /* !_WIN32 */
}
示例5: prepare_to_parse
/**
 * Prepare the sentence for parsing: build disjuncts from the expression
 * lists, drop duplicate disjuncts, and set up the connectors.
 * Assumes that the sentence expression lists have been generated.
 * Returns early if parse resources are exhausted mid-way.
 */
void prepare_to_parse(Sentence sent, Parse_Options opts)
{
	size_t w;

	build_sentence_disjuncts(sent, opts->disjunct_cost, opts);
	if (verbosity_level(D_PREP))
	{
		prt_error("Debug: After expanding expressions into disjuncts:\n");
		print_disjunct_counts(sent);
	}
	print_time(opts, "Built disjuncts");

	for (w = 0; w < sent->length; w++)
	{
		sent->word[w].d = eliminate_duplicate_disjuncts(sent->word[w].d);

		/* Some long Russian sentences can really blow up, here. */
		if (resources_exhausted(opts->resources)) return;
	}
	print_time(opts, "Eliminated duplicate disjuncts");

	if (verbosity_level(D_PREP))
	{
		prt_error("Debug: After expression pruning and duplicate elimination:\n");
		print_disjunct_counts(sent);
	}

	setup_connectors(sent);
}
示例6: read_starting_link_table
/**
 * Read table of [link, domain type].
 * This tells us what domain type each link belongs to.
 * This lookup table *must* be defined in the knowledge file;
 * its absence (or an odd token count) is a fatal error.
 */
static void read_starting_link_table(pp_knowledge *k)
{
	const char label[] = "STARTING_LINK_TYPE_TABLE";
	const char *tok;
	int n_tokens, li;

	if (!pp_lexer_set_label(k->lt, label))
	{
		prt_error("Fatal error: post_process: Couldn't find starting link table %s",label);
		exit(1);
	}

	/* Tokens come in [link, domain] pairs, so the count must be even. */
	n_tokens = pp_lexer_count_tokens_of_label(k->lt);
	if (n_tokens % 2)
	{
		prt_error("Fatal error: post_process: Link table must have format [<link> <domain name>]+");
		exit(1);
	}

	k->nStartingLinks = n_tokens / 2;
	k->starting_link_lookup_table = (StartingLinkAndDomain*)
		xalloc((1+k->nStartingLinks)*sizeof(StartingLinkAndDomain));

	for (li = 0; li < k->nStartingLinks; li++)
	{
		StartingLinkAndDomain *entry = &k->starting_link_lookup_table[li];

		/* The starting link itself comes first... */
		entry->starting_link =
			string_set_add(pp_lexer_get_next_token_of_label(k->lt), k->string_set);

		/* ...followed by the domain type of that link. */
		tok = pp_lexer_get_next_token_of_label(k->lt);
		check_domain_is_legal(tok);
		entry->domain = (int) tok[0];
	}

	/* end sentinel */
	k->starting_link_lookup_table[k->nStartingLinks].domain = -1;
}
示例7: sentence_parse
/**
 * Parse the sentence, splitting it first if the caller has not already
 * done so. Returns the number of valid linkages found, -1 if splitting
 * failed, or -2 if the sentence exceeds MAX_SENTENCE words.
 */
int sentence_parse(Sentence sent, Parse_Options opts)
{
	sent->num_valid_linkages = 0;

	/* If the sentence has not yet been split, do so now.
	 * This is for backwards compatibility, for existing programs
	 * that do not explicitly call the splitter. */
	if (sent->length == 0)
	{
		if (sentence_split(sent, opts)) return -1;
	}
	else
	{
		/* During a panic parse, we enter here a second time, with leftover
		 * garbage. Free it. We really should make the code that is panicking
		 * do this free, but right now, they have no API for it, so we do it
		 * as a favor. XXX FIXME someday. */
		free_sentence_disjuncts(sent);
	}

	/* Check for bad sentence length */
	if (sent->length >= MAX_SENTENCE)
	{
		prt_error("Error: sentence too long, contains more than %d words\n",
			MAX_SENTENCE);
		return -2;
	}

	resources_reset(opts->resources);

	/* Expressions were set up during the tokenize stage.
	 * Prune them, and then parse. */
	expression_prune(sent, opts);
	print_time(opts, "Finished expression pruning");

	if (opts->use_sat_solver)
		sat_parse(sent, opts);
	else
		classic_parse(sent, opts);
	print_time(opts, "Finished parse");

	if ((verbosity > 0) &&
	    (PARSE_NUM_OVERFLOW < sent->num_linkages_found))
	{
		prt_error("Warning: Combinatorial explosion! nulls=%zu cnt=%d\n"
			"Consider retrying the parse with the max allowed disjunct cost set lower.\n"
			"At the command line, use !cost-max\n",
			sent->null_count, sent->num_linkages_found);
	}

	return sent->num_valid_linkages;
}
示例8: sentence_parse
/*
 * Parse the sentence, splitting it first if the caller has not already
 * done so. Copies the verbosity/debug/test settings from `opts` into
 * file-scope globals as a side effect.
 * Returns the number of valid linkages found, -1 if splitting failed,
 * or -2 if the sentence exceeds MAX_SENTENCE words.
 */
int sentence_parse(Sentence sent, Parse_Options opts)
{
int rc;
/* Publish the per-call options into globals used elsewhere. */
verbosity = opts->verbosity;
debug = opts->debug;
test = opts->test;
sent->num_valid_linkages = 0;
/* If the sentence has not yet been split, do so now.
* This is for backwards compatibility, for existing programs
* that do not explicitly call the splitter.
*/
if (0 == sent->length)
{
rc = sentence_split(sent, opts);
if (rc) return -1;
}
/* Check for bad sentence length */
if (MAX_SENTENCE <= sent->length)
{
prt_error("Error: sentence too long, contains more than %d words\n",
MAX_SENTENCE);
return -2;
}
/* Initialize/free any leftover garbage */
free_sentence_disjuncts(sent); /* Is this really needed ??? */
resources_reset_space(opts->resources);
if (resources_exhausted(opts->resources))
return 0;
/* Expressions were previously set up during the tokenize stage. */
expression_prune(sent);
print_time(opts, "Finished expression pruning");
if (opts->use_sat_solver)
{
sat_parse(sent, opts);
}
else
{
chart_parse(sent, opts);
}
print_time(opts, "Finished parse");
/* Warn when the linkage count overflowed the counter. */
if ((verbosity > 0) &&
(PARSE_NUM_OVERFLOW < sent->num_linkages_found))
{
prt_error("WARNING: Combinatorial explosion! nulls=%zu cnt=%d\n"
"Consider retrying the parse with the max allowed disjunct cost set lower.\n"
"At the command line, use !cost-max\n",
sent->null_count, sent->num_linkages_found);
}
return sent->num_valid_linkages;
}
示例9: dictopen
/**
 * Read in the whole stinkin file. This routine returns
 * malloced memory, which should be freed as soon as possible.
 *
 * @param dict_name Name of the dictionary file, resolved via dictopen().
 * @return a NUL-terminated malloc'ed buffer with the file contents,
 *         or NULL on open/stat/alloc/read failure.
 */
char *get_file_contents(const char * dict_name)
{
	int fd;
	size_t tot_size;
	size_t tot_read = 0;
	struct stat buf;
	char * contents;

	/* On Windows, 'b' (binary mode) is mandatory, otherwise fstat file length
	 * is confused by crlf counted as one byte. POSIX systems just ignore it. */
	FILE *fp = dictopen(dict_name, "rb");
	if (fp == NULL)
		return NULL;

	/* Get the file size, in bytes. */
	fd = fileno(fp);
	if (fstat(fd, &buf) < 0)
	{
		prt_error("Error: %s: Cannot stat file\n", dict_name);
		fclose(fp);
		return NULL;
	}
	tot_size = buf.st_size;

	contents = (char *) malloc(sizeof(char) * (tot_size+7));
	if (NULL == contents)
	{
		fclose(fp);
		return NULL;
	}

	/* Now, read the whole file.
	 * Normally, a single fread() call below reads the whole file. */
	while (1)
	{
		/* Resume at tot_read: the old code restarted at offset 0 on a
		 * short read, overwriting the data already read. */
		size_t read_size = fread(contents + tot_read, 1,
		                         tot_size + 7 - tot_read, fp);

		if (0 == read_size)
		{
			bool err = (0 != ferror(fp));

			if (err)
			{
				char errbuf[64];

				strerror_r(errno, errbuf, sizeof(errbuf));
				fclose(fp);
				prt_error("Error: %s: Read error (%s)\n", dict_name, errbuf);
				free(contents);
				return NULL;
			}
			fclose(fp);
			break;
		}
		tot_read += read_size;
	}

	/* The file grew past the fstat'ed size while we were reading it. */
	if (tot_read > tot_size+6)
	{
		prt_error("Error: %s: File size is insane (%zu)!\n", dict_name, tot_size);
		free(contents);
		return NULL;
	}

	contents[tot_read] = '\0';
	return contents;
}
示例10: strcpy
// Report terminals that are inconsistent between the .lex and .lgr files:
// first those listed in the .lex file but never defined in the .lgr file,
// then those used in the .lgr file but not listed in the .lex file.
// NOTE(review): relies heavily on file-scope globals (gft, grmfid, gdn,
// gfn, input_start/input_end, line_ptr, term_* arrays) that are swapped
// to point error reporting at the right file -- confirm against callers.
void LGCheckGrammar::P_UNDEFINED ()
{
int t, n = n_errors; // n captures the error count; not read below in this view
if (n_terms > max_char_set+1)
{
// Point error reporting at the .lex file: build its path in grmfid
// and switch the lexer input/line globals to the .lex buffers.
strcpy (gft, ".lex");
strcpy (grmfid, gdn);
strcat (grmfid, gfn);
strcat (grmfid, gft);
// Save the current input globals so they can be restored afterwards.
char* Input_start = input_start;
char* Input_end = input_end;
char** Line_ptr = line_ptr;
input_start = lex_input_start;
input_end = lex_input_end;
line_ptr = lex_line_ptr;
for (t = max_char_set+1; t < n_terms; t++) // Skip <eof>
{
// Terminals flagged LEXFILE came from the .lex file; complain if so.
if (term_type[t] & LEXFILE)
prt_error ("'%s' is listed in the .lex file, but not defined in the .lgr file", term_name[t], 0, term_line[t]);
}
// Now point error reporting at the .lgr file and restore the
// saved input globals.
strcpy (gft, ".lgr");
strcpy (grmfid, gdn);
strcat (grmfid, gfn);
strcat (grmfid, gft);
input_start = Input_start;
input_end = Input_end;
line_ptr = Line_ptr;
for (t = max_char_set+1; t < n_terms; t++) // Skip <eof>
{
if (!(term_type[t] & LEXFILE))
{
// Digit- or quote-initial names cannot be predefined symbols:
// list the predefined set and terminate.
char code = charcode[*term_name[t]];
if ( code == DIGIT || code == QUOTE)
{
prt_error ("'%s' is not a predefined symbol", term_name[t], 0, term_line[t]);
prt_log ("Predefined symbols are:\n");
prt_log (" 0..31\n");
for (int i = 32; i < 127; i++) prt_log (" %s\n", term_name[i]);
prt_log (" 127..255\n");
LG::Terminate(95);
}
else
{
prt_error ("'%s' is not defined in the lexical grammar (.lgr file)", term_name[t], 0, term_line[t]);
}
}
}
}
// Release the terminal-type table; it is no longer needed.
FREE (term_type, n_terms);
}
示例11: lg_corpus_senses
/**
 * Look up all word senses recorded for (inflected_word, disjunct) in the
 * corpus database.
 *
 * @param corp           Corpus handle holding the prepared sense_query.
 * @param inflected_word Dictionary word (kept by reference in the result).
 * @param disjunct       Disjunct string (kept by reference in the result).
 * @param wrd            Word index, stored in each Sense.
 * @return a malloc'ed linked list of Sense nodes (caller frees), or NULL
 *         if the bind failed or no rows matched.
 */
static Sense * lg_corpus_senses(Corpus *corp,
                                const char * inflected_word,
                                const char * disjunct,
                                int wrd)
{
	double log_prob;
	const unsigned char *sense;
	Sense *sns, *head = NULL;
	int rc;

	/* Look up the disjunct in the database */
	rc = sqlite3_bind_text(corp->sense_query, 1,
		inflected_word, -1, SQLITE_STATIC);
	if (rc != SQLITE_OK)
	{
		prt_error("Error: SQLite can't bind word in sense query: rc=%d \n", rc);
		return NULL;
	}

	rc = sqlite3_bind_text(corp->sense_query, 2,
		disjunct, -1, SQLITE_STATIC);
	if (rc != SQLITE_OK)
	{
		prt_error("Error: SQLite can't bind disjunct in sense query: rc=%d \n", rc);
		return NULL;
	}

	rc = sqlite3_step(corp->sense_query);
	while (SQLITE_ROW == rc)
	{
		sense = sqlite3_column_text(corp->sense_query, 0);
		log_prob = sqlite3_column_double(corp->sense_query, 1);

		sns = (Sense *) malloc(sizeof(Sense));
		if (NULL == sns) break;  /* OOM: return the senses built so far */
		sns->next = head;
		head = sns;
		sns->inflected_word = inflected_word;
		sns->disjunct = disjunct;
		/* sqlite3_column_text returns unsigned char*; cast for strdup. */
		sns->sense = strdup((const char *) sense);
		sns->score = log_prob;
		sns->word = wrd;

		/* Get the next row, if any */
		rc = sqlite3_step(corp->sense_query);
	}

	/* Failure to do both a reset *and* a clear will cause subsequent
	 * binds to fail. */
	sqlite3_reset(corp->sense_query);
	sqlite3_clear_bindings(corp->sense_query);

	return head;
}
示例12: get_disjunct_score
/**
 * get_disjunct_score -- get log probability of observing disjunct.
 *
 * Given an "inflected" word and a disjunct, this routine returns the
 * -log_2 conditional probability prob(d|w) of seeing the disjunct 'd'
 * given that the word 'w' was observed. Here, "inflected word" means
 * the link-grammar dictionary entry, complete with its trailing period
 * and tag -- e.g. run.v or running.g -- everything after the dot is the
 * "inflection". Returns LOW_SCORE when the pair is not in the database
 * or any SQLite step fails.
 */
static double get_disjunct_score(Corpus *corp,
                                 const char * inflected_word,
                                 const char * disjunct)
{
	double val;
	int rc;

	/* Look up the disjunct in the database */
	rc = sqlite3_bind_text(corp->rank_query, 1,
		inflected_word, -1, SQLITE_STATIC);
	if (rc != SQLITE_OK)
	{
		const char *errmsg = sqlite3_errmsg(corp->dbconn);
		prt_error("Error: SQLite can't bind word: rc=%d %s\n", rc, errmsg);
		return LOW_SCORE;
	}

	rc = sqlite3_bind_text(corp->rank_query, 2,
		disjunct, -1, SQLITE_STATIC);
	if (rc != SQLITE_OK)
	{
		const char *errmsg = sqlite3_errmsg(corp->dbconn);
		prt_error("Error: SQLite can't bind disjunct: rc=%d %s\n", rc, errmsg);
		return LOW_SCORE;
	}

	rc = sqlite3_step(corp->rank_query);
	if (rc != SQLITE_ROW)
	{
		val = LOW_SCORE;
#ifdef DEBUG
		printf ("Word=%s dj=%s not found in dict, assume score=%f\n",
			inflected_word, disjunct, val);
#endif
		/* Codes below SQLITE_ROW indicate a real error, not just
		 * "no rows". */
		if (rc < SQLITE_ROW)
		{
			const char *errmsg = sqlite3_errmsg(corp->dbconn);
			/* Fixed message typo: was "can't ifind word". */
			prt_error("Error: SQLite can't find word: rc=%d %s\n", rc, errmsg);
		}
	}
	else
	{
		val = sqlite3_column_double(corp->rank_query, 0);
		/* Clamp: never report a score better than LOW_SCORE's bound. */
		if (LOW_SCORE < val) val = LOW_SCORE;
#ifdef DEBUG
		printf ("Word=%s dj=%s score=%f\n", inflected_word, disjunct, val);
#endif
	}

	/* Failure to do both a reset *and* a clear will cause subsequent
	 * binds to fail. */
	sqlite3_reset(corp->rank_query);
	sqlite3_clear_bindings(corp->rank_query);

	return val;
}
示例13: read_contains_rules
/**
 * Read the 'contains_one_rules' or the 'contains_none_rules' (selected
 * by `label`) from the knowledge file into a freshly xalloc'ed array.
 * Each rule is three token groups: selector link, link set, message.
 * A missing label is only a warning (zero rules); malformed syntax is
 * a fatal error.
 */
static void read_contains_rules(pp_knowledge *k, const char *label,
                                pp_rule **rules, int *nRules)
{
	int n_group, i, r;
	const char **group;

	if (!pp_lexer_set_label(k->lt, label)) {
		*nRules = 0;
		if (verbosity>0) printf("PP warning: Not using any %s rules\n", label);
	}
	else {
		int n_commas = pp_lexer_count_commas_of_label(k->lt);
		*nRules = (n_commas + 1)/3;
	}
	*rules = (pp_rule*) xalloc ((1+*nRules)*sizeof(pp_rule));

	for (r = 0; r < *nRules; r++)
	{
		pp_rule *rule = &(*rules)[r];

		/* First group: the selector link (exactly one token). */
		group = pp_lexer_get_next_group_of_tokens_of_label(k->lt, &n_group);
		if (n_group > 1)
		{
			prt_error("Fatal Error: post_process: Invalid syntax in %s (rule %i)",label,r+1);
			exit(1);
		}
		rule->selector = string_set_add(group[0], k->string_set);

		/* Second group: the set of links the rule applies to. */
		group = pp_lexer_get_next_group_of_tokens_of_label(k->lt, &n_group);
		rule->link_set = pp_linkset_open(n_group);
		rule->link_set_size = n_group;
		rule->link_array = (const char **) xalloc((1+n_group)*sizeof(const char*));
		for (i = 0; i < n_group; i++)
		{
			const char *lnk = string_set_add(group[i], k->string_set);
			pp_linkset_add(rule->link_set, lnk);
			rule->link_array[i] = lnk;
		}
		rule->link_array[i] = 0; /* NULL-terminator */

		/* Third group: the message reported when the rule fires. */
		group = pp_lexer_get_next_group_of_tokens_of_label(k->lt, &n_group);
		if (n_group > 1)
		{
			prt_error("Fatal Error: post_process: Invalid syntax in %s (rule %i)",label,r+1);
			exit(1);
		}
		rule->msg = string_set_add(group[0], k->string_set);
	}

	/* sentinel entry */
	(*rules)[*nRules].msg = 0;
}
示例14: read_form_a_cycle_rules
/**
 * Read the 'form a cycle' rules (selected by `label`) from the knowledge
 * file into k->form_a_cycle_rules. Each rule is two token groups: a link
 * set and a single-token message. A missing label just yields zero rules.
 *
 * @return true on success, false on a syntax error.
 */
static bool read_form_a_cycle_rules(pp_knowledge *k, const char *label)
{
	size_t n_commas, n_tokens;
	size_t r, i;
	pp_linkset *lsHandle;
	const char **tokens;

	if (!pp_lexer_set_label(k->lt, label)) {
		k->n_form_a_cycle_rules = 0;
		if (verbosity_level(+D_PPK))
			prt_error("Warning: File %s: Not using any 'form a cycle' rules\n",
			          k->path);
	}
	else {
		n_commas = pp_lexer_count_commas_of_label(k->lt);
		k->n_form_a_cycle_rules = (n_commas + 1)/2;
	}
	k->form_a_cycle_rules =
		(pp_rule*) malloc ((1+k->n_form_a_cycle_rules)*sizeof(pp_rule));

	for (r=0; r<k->n_form_a_cycle_rules; r++)
	{
		/* read link set */
		tokens = pp_lexer_get_next_group_of_tokens_of_label(k->lt, &n_tokens);
		/* n_tokens is size_t (unsigned), so "<= 0" was really "== 0". */
		if (0 == n_tokens)
		{
			prt_error("Error: File %s: Syntax error\n", k->path);
			return false;
		}
		lsHandle = pp_linkset_open(n_tokens);
		for (i=0; i<n_tokens; i++)
			pp_linkset_add(lsHandle,string_set_add(tokens[i], k->string_set));
		k->form_a_cycle_rules[r].link_set = lsHandle;

		/* read error message */
		tokens = pp_lexer_get_next_group_of_tokens_of_label(k->lt, &n_tokens);
		/* Require exactly one token: "> 1" alone let an empty group
		 * through to an out-of-bounds tokens[0] read below. */
		if (1 != n_tokens)
		{
			prt_error("Error: File %s: Invalid syntax (rule %zu of %s)\n",
			          k->path, r+1,label);
			return false;
		}
		k->form_a_cycle_rules[r].msg = string_set_add(tokens[0], k->string_set);
		k->form_a_cycle_rules[r].use_count = 0;
	}

	/* sentinel entry */
	k->form_a_cycle_rules[k->n_form_a_cycle_rules].msg = 0;
	k->form_a_cycle_rules[k->n_form_a_cycle_rules].use_count = 0;

	return true;
}
示例15: prt_regerror
/*
 * Print a regex-compilation error via prt_error, using whichever regex
 * backend this build uses (PCRE2 or POSIX regex) to obtain the error text.
 *
 * msg       - context message prefix.
 * re        - the Regex_node whose pattern/name are reported.
 * rc        - backend error code passed to the error-message lookup.
 * erroffset - offset of the error in the pattern, or -1 to omit it.
 */
static void prt_regerror(const char *msg, const Regex_node *re, int rc,
int erroffset)
{
#if HAVE_PCRE2_H
PCRE2_UCHAR errbuf[ERRBUFFLEN];
pcre2_get_error_message(rc, errbuf, ERRBUFFLEN);
#else
char errbuf[ERRBUFFLEN];
regerror(rc, re->re, errbuf, ERRBUFFLEN);
#endif /* HAVE_PCRE2_H */
/* Emitted in three pieces so the offset clause is optional. */
prt_error("Error: %s: \"%s\" (%s", msg, re->pattern, re->name);
if (-1 != erroffset) prt_error(" at %d", erroffset);
prt_error("): %s (%d)\n", errbuf, rc);
}