本文整理汇总了C++中UnicodeSet::isEmpty方法的典型用法代码示例。如果您正苦于以下问题:C++ UnicodeSet::isEmpty方法的具体用法?C++ UnicodeSet::isEmpty怎么用?C++ UnicodeSet::isEmpty使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类UnicodeSet
的用法示例。
在下文中一共展示了UnicodeSet::isEmpty方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: handleGetSourceSet
/**
 * Implement Transliterator framework:
 * compute the source set of this compound transliterator.
 */
void CompoundTransliterator::handleGetSourceSet(UnicodeSet& result) const {
    UnicodeSet memberSource;
    result.clear();
    for (int32_t idx = 0; idx < count; ++idx) {
        result.addAll(trans[idx]->getSourceSet(memberSource));
        // Take the example of Hiragana-Latin. This is really
        // Hiragana-Katakana; Katakana-Latin. The source set of
        // these two is roughly [:Hiragana:] and [:Katakana:].
        // But the source set for the entire transliterator is
        // actually [:Hiragana:] ONLY -- that is, the first
        // non-empty source set.
        // This is a heuristic, and not 100% reliable.
        if (result.isEmpty()) {
            continue;  // keep looking for the first non-empty source set
        }
        break;
    }
}
示例2: addChineseIndexCharacters
// Adds the Pinyin index labels (contractions anchored at BASE[0]) to
// initialLabels_. Returns FALSE if the collator defines no such
// contractions (or on error); otherwise TRUE. If any label ends in an
// ASCII capital letter, the full A-Z label range is added as well.
UBool AlphabeticIndex::addChineseIndexCharacters(UErrorCode &errorCode) {
    UnicodeSet pinyinLabels;
    collatorPrimaryOnly_->internalAddContractions(BASE[0], pinyinLabels, errorCode);
    if (U_FAILURE(errorCode) || pinyinLabels.isEmpty()) {
        return FALSE;
    }
    initialLabels_->addAll(pinyinLabels);
    for (UnicodeSetIterator labelIter(pinyinLabels); labelIter.next();) {
        const UnicodeString &label = labelIter.getString();
        U_ASSERT (label.startsWith(BASE, BASE_LENGTH));
        UChar lastChar = label.charAt(label.length() - 1);
        if (lastChar >= 0x41 && lastChar <= 0x5A) { // A-Z
            // There are Pinyin labels, add ASCII A-Z labels as well.
            initialLabels_->add(0x41, 0x5A); // A-Z
            break;
        }
    }
    return TRUE;
}
示例3: TestUnicodeSetErrors
// Verifies UnicodeSet error behavior: a default-constructed set is empty,
// and applying/constructing from a malformed pattern reports failure.
void TransliteratorErrorTest::TestUnicodeSetErrors() {
    UErrorCode status = U_ZERO_ERROR;
    UnicodeString badPattern="[[:L:]-[0x0300-0x0400]";  // deliberately unbalanced
    UnicodeString result;
    UnicodeSet set;

    if (!set.isEmpty()) {
        errln("FAIL: The default ctor of UnicodeSet created a non-empty object.");
    }

    // Applying the malformed pattern must set an error code.
    set.applyPattern(badPattern, status);
    if (U_SUCCESS(status)) {
        errln("FAIL: Applied a bad pattern to the UnicodeSet object okay.");
    }

    // Constructing from the malformed pattern must also fail.
    status = U_ZERO_ERROR;
    UnicodeSet *set1 = new UnicodeSet(badPattern, status);
    if (U_SUCCESS(status)) {
        errln("FAIL: Created a UnicodeSet based on bad patterns.");
    }
    delete set1;
}
示例4: fprintf
// Parses a space-separated list of script property-value names from s
// into the set scx. Reports U_PARSE_ERROR (with a message to stderr) on
// an unknown script code, a duplicate code, or an empty list.
void
PreparsedUCD::parseScriptExtensions(const char *s, UnicodeSet &scx, UErrorCode &errorCode) {
if(U_FAILURE(errorCode)) { return; }
scx.clear();
// Scratch buffer; each token is NUL-terminated here before lookup.
CharString scString;
for(;;) {
const char *scs;
// scLimit points at the space after the current token, or NULL for the last token.
const char *scLimit=strchr(s, ' ');
if(scLimit!=NULL) {
// Copy the token so it is NUL-terminated; scs aliases scString's
// internal buffer and is only valid until the next clear()/append().
scs=scString.clear().append(s, (int32_t)(scLimit-s), errorCode).data();
if(U_FAILURE(errorCode)) { return; }
} else {
// Last token: s itself is already NUL-terminated.
scs=s;
}
int32_t script=pnames->getPropertyValueEnum(UCHAR_SCRIPT, scs);
if(script==UCHAR_INVALID_CODE) {
fprintf(stderr,
"error in preparsed UCD: '%s' is not a valid script code on line %ld\n",
scs, (long)lineNumber);
errorCode=U_PARSE_ERROR;
return;
} else if(scx.contains(script)) {
// Each script code may appear at most once in an scx= value.
fprintf(stderr,
"error in preparsed UCD: scx has duplicate '%s' codes on line %ld\n",
scs, (long)lineNumber);
errorCode=U_PARSE_ERROR;
return;
} else {
scx.add(script);
}
if(scLimit!=NULL) {
// Advance past the space to the next token.
s=scLimit+1;
} else {
break;
}
}
// An scx= line with no script codes at all is malformed.
if(scx.isEmpty()) {
fprintf(stderr, "error in preparsed UCD: empty scx= on line %ld\n", (long)lineNumber);
errorCode=U_PARSE_ERROR;
}
}
示例5: iter
/**
 * Returns a new UVector of UnicodeStrings: the script-first-primary
 * boundary strings defined in the root collator (contractions starting
 * with U+FDD1), keeping only those whose sample character is a Letter or
 * an unassigned (Cn) code point. The caller owns the returned vector;
 * its elements are owned by the vector. Returns NULL and sets status on
 * failure (U_UNSUPPORTED_ERROR if no such contractions exist).
 */
UVector *AlphabeticIndex::firstStringsInScript(UErrorCode &status) {
    if (U_FAILURE(status)) {
        return NULL;
    }
    LocalPointer<UVector> dest(new UVector(status), status);
    if (U_FAILURE(status)) {
        return NULL;
    }
    dest->setDeleter(uprv_deleteUObject);
    // Fetch the script-first-primary contractions which are defined in the root collator.
    // They all start with U+FDD1.
    UnicodeSet set;
    collatorPrimaryOnly_->internalAddContractions(0xFDD1, set, status);
    if (U_FAILURE(status)) {
        return NULL;
    }
    if (set.isEmpty()) {
        status = U_UNSUPPORTED_ERROR;
        return NULL;
    }
    UnicodeSetIterator iter(set);
    while (iter.next()) {
        const UnicodeString &boundary = iter.getString();
        uint32_t gcMask = U_GET_GC_MASK(boundary.char32At(1));
        if ((gcMask & (U_GC_L_MASK | U_GC_CN_MASK)) == 0) {
            // Ignore boundaries for the special reordering groups.
            // Take only those for "real scripts" (where the sample character is a Letter),
            // and the one for unassigned implicit weights (Cn).
            continue;
        }
        UnicodeString *s = new UnicodeString(boundary);
        if (s == NULL) {
            status = U_MEMORY_ALLOCATION_ERROR;
            return NULL;
        }
        dest->addElement(s, status);
        if (U_FAILURE(status)) {
            // BUG FIX: UVector::addElement() does not adopt the element when it
            // fails, so s would have leaked here. Delete it before bailing out.
            delete s;
            return NULL;
        }
    }
    return dest.orphan();
}
示例6: buildIndex
void AlphabeticIndex::buildIndex(UErrorCode &status) {
if (U_FAILURE(status)) {
return;
}
if (!indexBuildRequired_) {
return;
}
// Discard any already-built data.
// This is important when the user builds and uses an index, then subsequently modifies it,
// necessitating a rebuild.
bucketList_->removeAllElements();
labels_->removeAllElements();
uhash_removeAll(alreadyIn_);
noDistinctSorting_->clear();
notAlphabetic_->clear();
// first sort the incoming Labels, with a "best" ordering among items
// that are the same according to the collator
UVector preferenceSorting(status); // Vector of UnicodeStrings; owned by the vector.
preferenceSorting.setDeleter(uprv_deleteUObject);
appendUnicodeSetToUVector(preferenceSorting, *initialLabels_, status);
preferenceSorting.sortWithUComparator(PreferenceComparator, &status, status);
// We now make a set of Labels.
// Some of the input may, however, be redundant.
// That is, we might have c, ch, d, where "ch" sorts just like "c", "h"
// So we make a pass through, filtering out those cases.
// TODO: filtering these out would seem to be at odds with the eventual goal
// of being able to split buckets that contain too many items.
UnicodeSet labelSet;
for (int32_t psIndex=0; psIndex<preferenceSorting.size(); psIndex++) {
UnicodeString item = *static_cast<const UnicodeString *>(preferenceSorting.elementAt(psIndex));
// TODO: Since preferenceSorting was originally populated from the contents of a UnicodeSet,
// is it even possible for duplicates to show up in this check?
if (labelSet.contains(item)) {
UnicodeSetIterator itemAlreadyInIter(labelSet);
while (itemAlreadyInIter.next()) {
const UnicodeString &itemAlreadyIn = itemAlreadyInIter.getString();
if (collatorPrimaryOnly_->compare(item, itemAlreadyIn) == 0) {
UnicodeSet *targets = static_cast<UnicodeSet *>(uhash_get(alreadyIn_, &itemAlreadyIn));
if (targets == NULL) {
// alreadyIn.put(itemAlreadyIn, targets = new LinkedHashSet<String>());
targets = new UnicodeSet();
uhash_put(alreadyIn_, itemAlreadyIn.clone(), targets, &status);
}
targets->add(item);
break;
}
}
} else if (item.moveIndex32(0, 1) < item.length() && // Label contains more than one code point.
collatorPrimaryOnly_->compare(item, separated(item)) == 0) {
noDistinctSorting_->add(item);
} else if (!ALPHABETIC->containsSome(item)) {
notAlphabetic_->add(item);
} else {
labelSet.add(item);
}
}
// If we have no labels, hard-code a fallback default set of [A-Z]
// This case can occur with locales that don't have exemplar character data, including root.
// A no-labels situation will cause other problems; it needs to be avoided.
if (labelSet.isEmpty()) {
labelSet.add((UChar32)0x41, (UChar32)0x5A);
}
// Move the set of Labels from the set into a vector, and sort
// according to the collator.
appendUnicodeSetToUVector(*labels_, labelSet, status);
labels_->sortWithUComparator(sortCollateComparator, collatorPrimaryOnly_, status);
// if the result is still too large, cut down to maxLabelCount_ elements, by removing every nth element
// Implemented by copying the elements to be retained to a new UVector.
const int32_t size = labelSet.size() - 1;
if (size > maxLabelCount_) {
UVector *newLabels = new UVector(status);
newLabels->setDeleter(uprv_deleteUObject);
int32_t count = 0;
int32_t old = -1;
for (int32_t srcIndex=0; srcIndex<labels_->size(); srcIndex++) {
const UnicodeString *str = static_cast<const UnicodeString *>(labels_->elementAt(srcIndex));
++count;
const int32_t bump = count * maxLabelCount_ / size;
if (bump == old) {
// it.remove();
} else {
newLabels->addElement(str->clone(), status);
old = bump;
}
}
delete labels_;
labels_ = newLabels;
}
//.........这里部分代码省略.........
示例7: codesAndRanges
int32_t
CollationDataWriter::write(UBool isBase, const UVersionInfo dataVersion,
const CollationData &data, const CollationSettings &settings,
const void *rootElements, int32_t rootElementsLength,
int32_t indexes[], uint8_t *dest, int32_t capacity,
UErrorCode &errorCode) {
if(U_FAILURE(errorCode)) { return 0; }
if(capacity < 0 || (capacity > 0 && dest == NULL)) {
errorCode = U_ILLEGAL_ARGUMENT_ERROR;
return 0;
}
// Figure out which data items to write before settling on
// the indexes length and writing offsets.
// For any data item, we need to write the start and limit offsets,
// so the indexes length must be at least index-of-start-offset + 2.
int32_t indexesLength;
UBool hasMappings;
UnicodeSet unsafeBackwardSet;
const CollationData *baseData = data.base;
int32_t fastLatinVersion;
if(data.fastLatinTable != NULL) {
fastLatinVersion = (int32_t)CollationFastLatin::VERSION << 16;
} else {
fastLatinVersion = 0;
}
int32_t fastLatinTableLength = 0;
if(isBase) {
// For the root collator, we write an even number of indexes
// so that we start with an 8-aligned offset.
indexesLength = CollationDataReader::IX_TOTAL_SIZE + 1;
U_ASSERT(settings.reorderCodesLength == 0);
hasMappings = TRUE;
unsafeBackwardSet = *data.unsafeBackwardSet;
fastLatinTableLength = data.fastLatinTableLength;
} else if(baseData == NULL) {
hasMappings = FALSE;
if(settings.reorderCodesLength == 0) {
// only options
indexesLength = CollationDataReader::IX_OPTIONS + 1; // no limit offset here
} else {
// only options, reorder codes, and the reorder table
indexesLength = CollationDataReader::IX_REORDER_TABLE_OFFSET + 2;
}
} else {
hasMappings = TRUE;
// Tailored mappings, and what else?
// Check in ascending order of optional tailoring data items.
indexesLength = CollationDataReader::IX_CE32S_OFFSET + 2;
if(data.contextsLength != 0) {
indexesLength = CollationDataReader::IX_CONTEXTS_OFFSET + 2;
}
unsafeBackwardSet.addAll(*data.unsafeBackwardSet).removeAll(*baseData->unsafeBackwardSet);
if(!unsafeBackwardSet.isEmpty()) {
indexesLength = CollationDataReader::IX_UNSAFE_BWD_OFFSET + 2;
}
if(data.fastLatinTable != baseData->fastLatinTable) {
fastLatinTableLength = data.fastLatinTableLength;
indexesLength = CollationDataReader::IX_FAST_LATIN_TABLE_OFFSET + 2;
}
}
UVector32 codesAndRanges(errorCode);
const int32_t *reorderCodes = settings.reorderCodes;
int32_t reorderCodesLength = settings.reorderCodesLength;
if(settings.hasReordering() &&
CollationSettings::reorderTableHasSplitBytes(settings.reorderTable)) {
// Rebuild the full list of reorder ranges.
// The list in the settings is truncated for efficiency.
data.makeReorderRanges(reorderCodes, reorderCodesLength, codesAndRanges, errorCode);
// Write the codes, then the ranges.
for(int32_t i = 0; i < reorderCodesLength; ++i) {
codesAndRanges.insertElementAt(reorderCodes[i], i, errorCode);
}
if(U_FAILURE(errorCode)) { return 0; }
reorderCodes = codesAndRanges.getBuffer();
reorderCodesLength = codesAndRanges.size();
}
int32_t headerSize;
if(isBase) {
headerSize = 0; // udata_create() writes the header
} else {
DataHeader header;
header.dataHeader.magic1 = 0xda;
header.dataHeader.magic2 = 0x27;
uprv_memcpy(&header.info, &dataInfo, sizeof(UDataInfo));
uprv_memcpy(header.info.dataVersion, dataVersion, sizeof(UVersionInfo));
headerSize = (int32_t)sizeof(header);
U_ASSERT((headerSize & 3) == 0); // multiple of 4 bytes
if(hasMappings && data.cesLength != 0) {
// Sum of the sizes of the data items which are
// not automatically multiples of 8 bytes and which are placed before the CEs.
int32_t sum = headerSize + (indexesLength + reorderCodesLength) * 4;
if((sum & 7) != 0) {
// We need to add padding somewhere so that the 64-bit CEs are 8-aligned.
// We add to the header size here.
// Alternatively, we could increment the indexesLength
//.........这里部分代码省略.........