本文整理汇总了C#中HashSet.add方法的典型用法代码示例。如果您正苦于以下问题:C# HashSet.add方法的具体用法?C# HashSet.add怎么用?C# HashSet.add使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类HashSet
的用法示例。
在下文中一共展示了HashSet.add方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: CreateRandomIndexes
/// <summary>
/// Builds a randomized index: generates a pool of unique random terms, then writes
/// <c>numDocs</c> random documents, committing mid-stream (and randomly afterwards) so
/// multiple segments exist, and randomly deleting by term. Results are stored in the
/// <c>dir</c>/<c>iw</c>/<c>reader</c>/<c>terms</c> fields.
/// </summary>
/// <param name="maxSegments">unused in this body — kept for interface compatibility; TODO confirm against callers</param>
private void CreateRandomIndexes(int maxSegments)
{
    dir = NewDirectory();
    numDocs = AtLeast(150);
    int numTerms = TestUtil.NextInt(Random(), 1, numDocs / 5);
    ISet<string> randomTerms = new HashSet<string>();
    // HashSet ignores duplicates, so loop until we have numTerms distinct strings.
    while (randomTerms.Count < numTerms)
    {
        randomTerms.Add(TestUtil.RandomSimpleString(Random()));
    }
    terms = new List<string>(randomTerms);
    // Use one seed for both the analyzer and the writer so they stay in sync.
    int seed = Random().Next();
    IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(new Random(seed)));
    iwc.SetMergePolicy(TestSortingMergePolicy.NewSortingMergePolicy(sort));
    iw = new RandomIndexWriter(new Random(seed), dir, iwc);
    for (int i = 0; i < numDocs; ++i)
    {
        Document doc = RandomDocument();
        iw.AddDocument(doc);
        // Guarantee at least one mid-stream commit; commit randomly otherwise
        // (never on the last doc, so the final docs land in an uncommitted segment).
        if (i == numDocs / 2 || (i != numDocs - 1 && Random().Next(8) == 0))
        {
            iw.Commit();
        }
        // Occasionally delete by a random known term to create deleted docs.
        if (Random().Next(15) == 0)
        {
            string term = RandomInts.RandomFrom(Random(), terms);
            iw.DeleteDocuments(new Term("s", term));
        }
    }
    reader = iw.Reader;
}
示例2: TestReplaceParent
/// <summary>
/// Serializes a RealParent constructed around a set of child names and asserts that
/// none of the child strings appear in the XML output (IndexOf == -1), then validates
/// the round-trip with the persister.
/// </summary>
public void TestReplaceParent() {
    Persister persister = new Persister();
    // ISet<string> replaces the Java Set<String> from the original transliteration.
    ISet<string> children = new HashSet<string>();
    RealParent parent = new RealParent(children);
    children.Add("Tom");
    children.Add("Dick");
    children.Add("Harry");
    StringWriter writer = new StringWriter();
    persister.write(parent, writer);
    string text = writer.ToString();
    Console.WriteLine(text);
    // Ordinal comparison: we are searching for exact machine strings, not user text.
    AssertEquals(text.IndexOf("Tom", StringComparison.Ordinal), -1);
    AssertEquals(text.IndexOf("Dick", StringComparison.Ordinal), -1);
    AssertEquals(text.IndexOf("Harry", StringComparison.Ordinal), -1);
    validate(persister, parent);
}
示例3: getUmbrellaWorldNetwork
/// <summary>
/// Return a Dynamic Bayesian Network of the Umbrella World Network
/// (prior, transition and sensor models over the RAIN/UMBRELLA variables).
/// </summary>
/// <returns>a Dynamic Bayesian Network of the Umbrella World Network.</returns>
public static DynamicBayesianNetwork getUmbrellaWorldNetwork()
{
    FiniteNode prior_rain_tm1 = new FullCPTNode(ExampleRV.RAIN_tm1_RV,
        new double[] {0.5, 0.5});
    BayesNet priorNetwork = new BayesNet(prior_rain_tm1);
    // Prior belief state
    FiniteNode rain_tm1 = new FullCPTNode(ExampleRV.RAIN_tm1_RV,
        new double[] {0.5, 0.5});
    // Transition Model
    FiniteNode rain_t = new FullCPTNode(ExampleRV.RAIN_t_RV, new double[]
    {
        // R_t-1 = true, R_t = true
        0.7,
        // R_t-1 = true, R_t = false
        0.3,
        // R_t-1 = false, R_t = true
        0.3,
        // R_t-1 = false, R_t = false
        0.7
    }, rain_tm1);
    // Sensor Model. NOTE(review): the local is unused afterwards; the FullCPTNode
    // constructor presumably registers the node with its parent rain_t — confirm.
    FiniteNode umbrealla_t = new FullCPTNode(ExampleRV.UMBREALLA_t_RV,
        new double[]
        {
            // R_t = true, U_t = true
            0.9,
            // R_t = true, U_t = false
            0.1,
            // R_t = false, U_t = true
            0.2,
            // R_t = false, U_t = false
            0.8
        }, rain_t);
    // Map each time-slice-0 variable to its time-slice-1 counterpart.
    IDictionary<RandomVariable, RandomVariable> X_0_to_X_1 = new Dictionary<RandomVariable, RandomVariable>();
    X_0_to_X_1[ExampleRV.RAIN_tm1_RV] = ExampleRV.RAIN_t_RV;
    // Evidence variables observed at each time step.
    ISet<RandomVariable> E_1 = new HashSet<RandomVariable>();
    E_1.Add(ExampleRV.UMBREALLA_t_RV);
    return new DynamicBayesNet(priorNetwork, X_0_to_X_1, E_1, rain_tm1);
}
示例4: AddValue
/// <summary>
/// Makes a bunch of single-char tokens (the max # unique terms will at most be 26).
/// Puts the # unique terms into <c>expected</c>, to be checked against the norm.
/// </summary>
/// <returns>a space-separated string of random single-character terms</returns>
private string AddValue()
{
    StringBuilder sb = new StringBuilder();
    HashSet<string> terms = new HashSet<string>();
    int num = TestUtil.NextInt(Random(), 0, 255);
    for (int i = 0; i < num; i++)
    {
        // Leading space separates tokens for whitespace analysis.
        sb.Append(' ');
        char term = (char)TestUtil.NextInt(Random(), 'a', 'z');
        sb.Append(term);
        // HashSet deduplicates, so Count ends up as the number of unique terms.
        terms.Add("" + term);
    }
    expected.Add(terms.Count);
    return sb.ToString();
}
示例5: _minimizeRuleSet
/// <summary>
/// Recursively walks the composite grammar tree, accumulating into
/// <paramref name="ruleDefs"/> every rule name first defined at this level
/// (rules already in the set are overridden and skipped), and computing which
/// locally-defined rules satisfy references from the delegator.
/// </summary>
/// <param name="ruleDefs">accumulated set of rule definitions seen so far (mutated)</param>
/// <param name="ruleRefs">set of rule names referenced from above</param>
/// <param name="p">current node of the composite grammar tree</param>
public virtual void _minimizeRuleSet( HashSet<string> ruleDefs, HashSet<string> ruleRefs, CompositeGrammarTree p )
{
    var localRuleDefs = new HashSet<string>();
    foreach ( Rule r in p.grammar.Rules )
    {
        // A name already in ruleDefs was defined higher up (override); skip it.
        if ( !ruleDefs.Contains( r.name ) )
        {
            localRuleDefs.Add( r.name );
            ruleDefs.Add( r.name );
        }
    }
    System.Console.Out.WriteLine( "rule defs for " + p.grammar.name + ": " + localRuleDefs );
    // remove locally-defined rules not in ref set
    // find intersection of local rules and references from delegator
    // that is set of rules needed by delegator
    HashSet<string> localRuleDefsSatisfyingRefsFromBelow = new HashSet<string>();
    foreach ( string r in ruleRefs )
    {
        if ( localRuleDefs.Contains( r ) )
        {
            localRuleDefsSatisfyingRefsFromBelow.Add( r );
        }
    }
    // now get list of refs from localRuleDefsSatisfyingRefsFromBelow.
    // Those rules are also allowed in this delegate
    foreach ( GrammarAST refAST in p.grammar.ruleRefs )
    {
        if ( localRuleDefsSatisfyingRefsFromBelow.Contains( refAST.enclosingRuleName ) )
        {
            // found rule ref within needed rule
            // NOTE(review): intentionally a no-op in the original source; kept as-is.
        }
    }
    // remove rule refs not in the new rule def set
    // walk all children, adding rules not already defined
    if ( p.children != null )
    {
        foreach ( CompositeGrammarTree @delegate in p.children )
        {
            _minimizeRuleSet( ruleDefs, ruleRefs, @delegate );
        }
    }
}
示例6: oldminimizeRuleSet
/// <summary>
/// Entry point for the old rule-set minimization: collects all rule references
/// from the root delegate grammar, then walks the composite tree via
/// <see cref="_minimizeRuleSet"/> to remove overridden rules, printing the
/// overall definition set when done.
/// </summary>
public virtual void oldminimizeRuleSet()
{
    // first walk to remove all overridden rules
    var ruleDefs = new HashSet<string>();
    var ruleRefs = new HashSet<string>();
    foreach ( GrammarAST refAST in delegateGrammarTreeRoot.grammar.ruleRefs )
    {
        string rname = refAST.Text;
        ruleRefs.Add( rname );
    }
    _minimizeRuleSet( ruleDefs,
                      ruleRefs,
                      delegateGrammarTreeRoot );
    System.Console.Out.WriteLine( "overall rule defs: " + ruleDefs );
}
示例7: calcVCFGenotypeKeys
/// <summary>
/// Determine which genotype fields are in use in the genotypes in VC </summary>
/// <param name="vc"> the variant context whose genotypes are scanned </param>
/// <param name="header"> VCF header, consulted only when no keys were found </param>
/// <returns> an ordered list of genotype fields in use in VC. If vc has genotypes this will always include GT first </returns>
//JAVA TO C# CONVERTER WARNING: 'final' parameters are not allowed in .NET:
//ORIGINAL LINE: public static List<String> calcVCFGenotypeKeys(final VariantContext vc, final VCFHeader header)
public static IList<string> calcVCFGenotypeKeys(VariantContext vc, VCFHeader header)
{
    ISet<string> keys = new HashSet<string>();
    bool sawGoodGT = false;
    bool sawGoodQual = false;
    bool sawGenotypeFilter = false;
    bool sawDP = false;
    bool sawAD = false;
    bool sawPL = false;
    // One pass over the genotypes, recording which standard fields are present.
    foreach (Genotype g in vc.Genotypes)
    {
        keys.UnionWith(g.ExtendedAttributes.Keys);
        if (g.Available)
        {
            sawGoodGT = true;
        }
        if (g.hasGQ())
        {
            sawGoodQual = true;
        }
        if (g.hasDP())
        {
            sawDP = true;
        }
        if (g.hasAD())
        {
            sawAD = true;
        }
        if (g.hasPL())
        {
            sawPL = true;
        }
        if (g.Filtered)
        {
            sawGenotypeFilter = true;
        }
    }
    // Add the well-known keys only for fields actually observed above.
    if (sawGoodQual)
    {
        keys.Add(VCFConstants.GENOTYPE_QUALITY_KEY);
    }
    if (sawDP)
    {
        keys.Add(VCFConstants.DEPTH_KEY);
    }
    if (sawAD)
    {
        keys.Add(VCFConstants.GENOTYPE_ALLELE_DEPTHS);
    }
    if (sawPL)
    {
        keys.Add(VCFConstants.GENOTYPE_PL_KEY);
    }
    if (sawGenotypeFilter)
    {
        keys.Add(VCFConstants.GENOTYPE_FILTER_KEY);
    }
    IList<string> sortedList = ParsingUtils.sortList(new List<string>(keys));
    // make sure the GT is first
    if (sawGoodGT)
    {
        IList<string> newList = new List<string>(sortedList.Count + 1);
        newList.Add(VCFConstants.GENOTYPE_KEY);
        newList.AddRange(sortedList);
        sortedList = newList;
    }
    if (sortedList.Count == 0 && header.hasGenotypingData())
    {
        // this needs to be done in case all samples are no-calls
        return new List<string> { VCFConstants.GENOTYPE_KEY };
    }
    else
    {
        return sortedList;
    }
}
示例8: TestWithContext
/// <summary>
/// Indexes generated documents, then iterates a DocumentValueSourceDictionary and
/// checks each entry's key, summed weight (w1+w2+w3), payload and context set
/// against the source document; finally verifies every document was consumed.
/// </summary>
public void TestWithContext()
{
    Directory dir = NewDirectory();
    IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    iwc.SetMergePolicy(NewLogMergePolicy());
    RandomIndexWriter writer = new RandomIndexWriter(Random(), dir, iwc);
    IDictionary<string, Document> docs = GenerateIndexDocuments(AtLeast(100));
    foreach (Document doc in docs.Values)
    {
        writer.AddDocument(doc);
    }
    writer.Commit();
    writer.Dispose();
    IndexReader ir = DirectoryReader.Open(dir);
    ValueSource[] toAdd = new ValueSource[] { new LongFieldSource(WEIGHT_FIELD_NAME_1), new LongFieldSource(WEIGHT_FIELD_NAME_2), new LongFieldSource(WEIGHT_FIELD_NAME_3) };
    IDictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, new SumFloatFunction(toAdd), PAYLOAD_FIELD_NAME, CONTEXTS_FIELD_NAME);
    IInputIterator inputIterator = dictionary.EntryIterator;
    BytesRef f;
    while ((f = inputIterator.Next()) != null)
    {
        string field = f.Utf8ToString();
        // Single lookup instead of ContainsKey + indexer; doc is null if absent.
        Document doc;
        docs.TryGetValue(field, out doc);
        docs.Remove(field);
        long w1 = Convert.ToInt64(doc.GetField(WEIGHT_FIELD_NAME_1).NumericValue);
        long w2 = Convert.ToInt64(doc.GetField(WEIGHT_FIELD_NAME_2).NumericValue);
        long w3 = Convert.ToInt64(doc.GetField(WEIGHT_FIELD_NAME_3).NumericValue);
        assertTrue(f.Equals(new BytesRef(doc.Get(FIELD_NAME))));
        // Dictionary weight must equal the sum of the three long field sources.
        assertEquals(inputIterator.Weight, (w1 + w2 + w3));
        assertTrue(inputIterator.Payload.Equals(doc.GetField(PAYLOAD_FIELD_NAME).BinaryValue));
        ISet<BytesRef> originalCtxs = new HashSet<BytesRef>();
        foreach (IndexableField ctxf in doc.GetFields(CONTEXTS_FIELD_NAME))
        {
            originalCtxs.Add(ctxf.BinaryValue);
        }
        assertEquals(originalCtxs, inputIterator.Contexts);
    }
    // Every indexed document must have been visited exactly once.
    assertTrue(!docs.Any());
    ir.Dispose();
    dir.Dispose();
}
示例9: TestTerms
public void TestTerms()
{
Random random = Random();
int num = AtLeast(10000);
#pragma warning disable 612, 618
IComparer<BytesRef> comparator = random.nextBoolean() ? BytesRef.UTF8SortedAsUnicodeComparer : BytesRef.UTF8SortedAsUTF16Comparer;
#pragma warning restore 612, 618
IDictionary<BytesRef, KeyValuePair<long, BytesRef>> sorted = new SortedDictionary<BytesRef, KeyValuePair<long, BytesRef>>(comparator); //new TreeMap<>(comparator);
IDictionary<BytesRef, long> sortedWithoutPayload = new SortedDictionary<BytesRef, long>(comparator); //new TreeMap<>(comparator);
IDictionary<BytesRef, KeyValuePair<long, ISet<BytesRef>>> sortedWithContext = new SortedDictionary<BytesRef, KeyValuePair<long, ISet<BytesRef>>>(comparator); //new TreeMap<>(comparator);
IDictionary<BytesRef, KeyValuePair<long, KeyValuePair<BytesRef, ISet<BytesRef>>>> sortedWithPayloadAndContext = new SortedDictionary<BytesRef, KeyValuePair<long, KeyValuePair<BytesRef, ISet<BytesRef>>>>(comparator); //new TreeMap<>(comparator);
Input[] unsorted = new Input[num];
Input[] unsortedWithoutPayload = new Input[num];
Input[] unsortedWithContexts = new Input[num];
Input[] unsortedWithPayloadAndContext = new Input[num];
ISet<BytesRef> ctxs;
for (int i = 0; i < num; i++)
{
BytesRef key2;
BytesRef payload;
ctxs = new HashSet<BytesRef>();
do
{
key2 = new BytesRef(TestUtil.RandomUnicodeString(random));
payload = new BytesRef(TestUtil.RandomUnicodeString(random));
for (int j = 0; j < AtLeast(2); j++)
{
ctxs.add(new BytesRef(TestUtil.RandomUnicodeString(random)));
}
} while (sorted.ContainsKey(key2));
long value = random.Next();
sortedWithoutPayload.Put(key2, value);
sorted.Put(key2, new KeyValuePair<long, BytesRef>(value, payload));
sortedWithContext.Put(key2, new KeyValuePair<long, ISet<BytesRef>>(value, ctxs));
sortedWithPayloadAndContext.Put(key2, new KeyValuePair<long, KeyValuePair<BytesRef, ISet<BytesRef>>>(value, new KeyValuePair<BytesRef, ISet<BytesRef>>(payload, ctxs)));
unsorted[i] = new Input(key2, value, payload);
unsortedWithoutPayload[i] = new Input(key2, value);
unsortedWithContexts[i] = new Input(key2, value, ctxs);
unsortedWithPayloadAndContext[i] = new Input(key2, value, payload, ctxs);
}
// test the sorted iterator wrapper with payloads
IInputIterator wrapper = new SortedInputIterator(new InputArrayIterator(unsorted), comparator);
IEnumerator<KeyValuePair<BytesRef, KeyValuePair<long, BytesRef>>> expected = sorted.GetEnumerator();
while (expected.MoveNext())
{
KeyValuePair<BytesRef, KeyValuePair<long, BytesRef>> entry = expected.Current;
assertEquals(entry.Key, wrapper.Next());
assertEquals(Convert.ToInt64(entry.Value.Key), wrapper.Weight);
assertEquals(entry.Value.Value, wrapper.Payload);
}
assertNull(wrapper.Next());
// test the sorted iterator wrapper with contexts
wrapper = new SortedInputIterator(new InputArrayIterator(unsortedWithContexts), comparator);
IEnumerator<KeyValuePair<BytesRef, KeyValuePair<long, ISet<BytesRef>>>> actualEntries = sortedWithContext.GetEnumerator();
while (actualEntries.MoveNext())
{
KeyValuePair<BytesRef, KeyValuePair<long, ISet<BytesRef>>> entry = actualEntries.Current;
assertEquals(entry.Key, wrapper.Next());
assertEquals(Convert.ToInt64(entry.Value.Key), wrapper.Weight);
ISet<BytesRef> actualCtxs = entry.Value.Value;
assertEquals(actualCtxs, wrapper.Contexts);
}
assertNull(wrapper.Next());
// test the sorted iterator wrapper with contexts and payload
wrapper = new SortedInputIterator(new InputArrayIterator(unsortedWithPayloadAndContext), comparator);
IEnumerator<KeyValuePair<BytesRef, KeyValuePair<long, KeyValuePair<BytesRef, ISet<BytesRef>>>>> expectedPayloadContextEntries = sortedWithPayloadAndContext.GetEnumerator();
while (expectedPayloadContextEntries.MoveNext())
{
KeyValuePair<BytesRef, KeyValuePair<long, KeyValuePair<BytesRef, ISet<BytesRef>>>> entry = expectedPayloadContextEntries.Current;
assertEquals(entry.Key, wrapper.Next());
assertEquals(Convert.ToInt64(entry.Value.Key), wrapper.Weight);
ISet<BytesRef> actualCtxs = entry.Value.Value.Value;
assertEquals(actualCtxs, wrapper.Contexts);
BytesRef actualPayload = entry.Value.Value.Key;
assertEquals(actualPayload, wrapper.Payload);
}
assertNull(wrapper.Next());
// test the unsorted iterator wrapper with payloads
wrapper = new UnsortedInputIterator(new InputArrayIterator(unsorted));
IDictionary<BytesRef, KeyValuePair<long, BytesRef>> actual = new SortedDictionary<BytesRef, KeyValuePair<long, BytesRef>>(); //new TreeMap<>();
BytesRef key;
while ((key = wrapper.Next()) != null)
{
long value = wrapper.Weight;
BytesRef payload = wrapper.Payload;
actual.Put(BytesRef.DeepCopyOf(key), new KeyValuePair<long, BytesRef>(value, BytesRef.DeepCopyOf(payload)));
}
assertEquals(sorted, actual);
// test the sorted iterator wrapper without payloads
IInputIterator wrapperWithoutPayload = new SortedInputIterator(new InputArrayIterator(unsortedWithoutPayload), comparator);
IEnumerator<KeyValuePair<BytesRef, long>> expectedWithoutPayload = sortedWithoutPayload.GetEnumerator();
while (expectedWithoutPayload.MoveNext())
{
//.........这里部分代码省略.........
示例10: TestWithContexts
/// <summary>
/// Indexes generated documents (including invalid ones), iterates a
/// DocumentDictionary with contexts enabled, and verifies key, weight, payload and
/// context count per entry; invalid-term documents must remain unvisited but present.
/// </summary>
public void TestWithContexts()
{
    Directory dir = NewDirectory();
    IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    iwc.SetMergePolicy(NewLogMergePolicy());
    RandomIndexWriter writer = new RandomIndexWriter(Random(), dir, iwc);
    KeyValuePair<List<string>, IDictionary<string, Document>> res = GenerateIndexDocuments(AtLeast(1000), true, true);
    IDictionary<string, Document> docs = res.Value;
    List<string> invalidDocTerms = res.Key;
    foreach (Document doc in docs.Values)
    {
        writer.AddDocument(doc);
    }
    writer.Commit();
    writer.Dispose();
    IndexReader ir = DirectoryReader.Open(dir);
    IDictionary dictionary = new DocumentDictionary(ir, FIELD_NAME, WEIGHT_FIELD_NAME, PAYLOAD_FIELD_NAME, CONTEXT_FIELD_NAME);
    IInputIterator inputIterator = dictionary.EntryIterator;
    BytesRef f;
    while ((f = inputIterator.Next()) != null)
    {
        string field = f.Utf8ToString();
        // Single lookup instead of ContainsKey + indexer; doc is null if absent.
        Document doc;
        docs.TryGetValue(field, out doc);
        docs.Remove(field);
        //Document doc = docs.remove(f.utf8ToString());
        assertTrue(f.Equals(new BytesRef(doc.Get(FIELD_NAME))));
        IndexableField weightField = doc.GetField(WEIGHT_FIELD_NAME);
        // Missing weight field defaults to 0.
        assertEquals(inputIterator.Weight, (weightField != null) ? Convert.ToInt64(weightField.NumericValue) : 0);
        assertTrue(inputIterator.Payload.Equals(doc.GetField(PAYLOAD_FIELD_NAME).BinaryValue));
        ISet<BytesRef> oriCtxs = new HashSet<BytesRef>();
        IEnumerable<BytesRef> contextSet = inputIterator.Contexts;
        foreach (IndexableField ctxf in doc.GetFields(CONTEXT_FIELD_NAME))
        {
            oriCtxs.Add(ctxf.BinaryValue);
        }
        // Only the counts are compared here (the iterator may reorder contexts).
        assertEquals(oriCtxs.Count, contextSet.Count());
    }
    // Documents with invalid terms are never emitted by the iterator,
    // so they must still be in the map.
    foreach (string invalidTerm in invalidDocTerms)
    {
        var invalid = docs[invalidTerm];
        docs.Remove(invalidTerm);
        assertNotNull(invalid);
    }
    assertTrue(!docs.Any());
    ir.Dispose();
    dir.Dispose();
}
示例11: GetOverriddenRulesWithDifferentFIRST
/// <summary>
/// Walks every rule in this grammar and compares its FIRST set with the FIRST set of
/// the same-named rule in each imported (delegate) grammar; returns the names of
/// rules whose FIRST sets differ.
/// </summary>
/// <returns>set of overridden rule names with a different FIRST set</returns>
public HashSet<string> GetOverriddenRulesWithDifferentFIRST()
{
    // walk every rule in this grammar and compare FIRST set with
    // those in imported grammars.
    HashSet<string> rules = new HashSet<string>();
    foreach (Rule r in getRules()) {
        //[email protected](r.name+" FIRST="+r.FIRST);
        for (int i = 0; i < delegates.Count; i++) {
            Grammar g = delegates[i];
            Rule importedRule = g.getRule(r.name);
            if ( importedRule != null ) { // exists in imported grammar
                // [email protected](r.name+" exists in imported grammar: FIRST="+importedRule.FIRST);
                if ( !r.FIRST.Equals(importedRule.FIRST) ) {
                    rules.Add(r.name);
                }
            }
        }
    }
    return rules;
}
示例12: checkHits
/// <summary>
/// Executes a spatial query built from <paramref name="args"/> and asserts the hit
/// count; when <paramref name="assertIds"/> is non-null, also asserts every expected
/// id is among the returned document ids.
/// </summary>
/// <param name="args">spatial query arguments</param>
/// <param name="assertNumFound">expected number of hits</param>
/// <param name="assertIds">ids that must be present, or null to skip the id check</param>
private void checkHits(SpatialArgs args, int assertNumFound, int[] assertIds)
{
    SearchResults got = executeQuery(strategy.MakeQuery(args), 100);
    assertEquals("" + args, assertNumFound, got.numFound);
    if (assertIds != null)
    {
        // Collect returned ids into a set for O(1) membership checks.
        ISet<int?> gotIds = new HashSet<int?>();
        foreach (SearchResult result in got.results)
        {
            gotIds.Add(int.Parse(result.document.Get("id"), CultureInfo.InvariantCulture));
        }
        foreach (int assertId in assertIds)
        {
            assertTrue("has " + assertId, gotIds.Contains(assertId));
        }
    }
}
示例13: AssertOperation
/// <summary>
/// Computes the ground-truth result set by brute-force evaluating
/// <paramref name="operation"/> against every indexed shape, then runs the
/// equivalent spatial query and checks it returns the same ids (filter semantics).
/// </summary>
/// <param name="indexedDocs">id → shape map of all indexed documents</param>
/// <param name="operation">spatial predicate under test</param>
/// <param name="queryShape">shape to evaluate the predicate against</param>
protected virtual void AssertOperation(IDictionary<String, IShape> indexedDocs,
    SpatialOperation operation, IShape queryShape)
{
    //Generate truth via brute force
    ISet<string> expectedIds = new HashSet<string>();
    foreach (var stringShapeEntry in indexedDocs)
    {
        if (operation.Evaluate(stringShapeEntry.Value, queryShape))
            expectedIds.Add(stringShapeEntry.Key);
    }
    SpatialTestQuery testQuery = new SpatialTestQuery();
    testQuery.args = new SpatialArgs(operation, queryShape);
    testQuery.ids = new List<string>(expectedIds);
    runTestQuery(SpatialMatchConcern.FILTER, testQuery);
}
示例14: runTestQuery
/// <summary>
/// Runs <paramref name="q"/> against the index and verifies hits against q.ids:
/// exact order, superset, or order-insensitive equality depending on
/// <paramref name="concern"/>.
/// </summary>
/// <param name="concern">which match semantics to enforce</param>
/// <param name="q">query plus its expected document ids</param>
public virtual void runTestQuery(SpatialMatchConcern concern, SpatialTestQuery q)
{
    string msg = q.ToString(); //"Query: " + q.args.toString(ctx);
    // Request at least one more hit than expected so excess results are observable.
    SearchResults got = executeQuery(makeQuery(q), Math.Max(100, q.ids.Count + 1));
    if (storeShape && got.numFound > 0)
    {
        //check stored value is there
        assertNotNull(got.results[0].document.Get(strategy.FieldName));
    }
    if (concern.orderIsImportant)
    {
        // Walk expected ids in lock-step with the results.
        IEnumerator<string> ids = q.ids.GetEnumerator();
        foreach (SearchResult r in got.results)
        {
            string id = r.document.Get("id");
            if (!ids.MoveNext())
            {
                fail(msg + " :: Did not get enough results. Expect " + string.Join(", ", q.ids) + ", got: " + got.toDebugString());
            }
            assertEquals("out of order: " + msg, ids.Current, id);
        }
        if (ids.MoveNext())
        {
            fail(msg + " :: expect more results than we got: " + ids.Current);
        }
    }
    else
    {
        // We are looking at how the results overlap
        if (concern.resultsAreSuperset)
        {
            ISet<string> found = new HashSet<string>();
            foreach (SearchResult r in got.results)
            {
                found.Add(r.document.Get("id"));
            }
            foreach (string s in q.ids)
            {
                if (!found.Contains(s))
                {
                    fail("Results are missing id: " + s + " :: " + found);
                }
            }
        }
        else
        {
            List<string> found = new List<string>();
            foreach (SearchResult r in got.results)
            {
                found.Add(r.document.Get("id"));
            }
            // sort both so that the order is not important
            CollectionUtil.TimSort(q.ids);
            CollectionUtil.TimSort(found);
            // Compare contents element-by-element; List<T>.ToString() in .NET
            // would only print the type name, not the elements.
            assertEquals(msg, string.Join(", ", q.ids), string.Join(", ", found));
        }
    }
}
示例15: Encoding
// Static initializer: probes every available charset, classifies it (ASCII-superset,
// obscure, discouraged, likely-EBCDIC), registers it plus all its aliases in
// encodingByCookedName, and wires up the well-known HTML5 encoding overrides
// (e.g. iso-8859-1 -> windows-1252).
// NOTE(review): this body is an untranslated Java transliteration (Charset, SortedMap,
// Map.Entry, entrySet, intern, put do not exist in the BCL) — it presumably relies on
// project-local shim types; confirm before assuming it compiles as C#.
static Encoding()
{
// Buffer of the printable-sensitive ASCII range used to probe whether a charset
// maps basic Latin bytes to basic Latin characters.
byte[] testBuf = new byte[0x7F];
for (int i = 0; i < 0x7F; i++) {
if (isAsciiSupersetnessSensitive(i)) {
testBuf[i] = (byte) i;
} else {
// Non-sensitive positions are filled with a plain space.
testBuf[i] = (byte) 0x20;
}
}
Set<Encoding> encodings = new HashSet<Encoding>();
SortedMap<String, Charset> charsets = Charset.availableCharsets();
foreach (Map.Entry<String, Charset> entry in charsets.entrySet()) {
Charset cs = entry.getValue();
String name = toNameKey(cs.name());
String canonName = toAsciiLowerCase(cs.name());
if (!isBanned(name)) {
name = name.intern();
bool asciiSuperset = asciiMapsToBasicLatin(testBuf, cs);
Encoding enc = new Encoding(canonName.intern(), cs,
asciiSuperset, isObscure(name), isShouldNot(name),
isLikelyEbcdic(name, asciiSuperset));
encodings.add(enc);
// Register every alias of the charset under its cooked (normalized) name.
Set<String> aliases = cs.aliases();
foreach (String alias in aliases) {
encodingByCookedName.put(toNameKey(alias).intern(), enc);
}
}
}
// Overwrite possible overlapping aliases with the real things--just in
// case
foreach (Encoding encoding in encodings) {
encodingByCookedName.put(toNameKey(encoding.getCanonName()),
encoding);
}
UTF8 = forName("utf-8");
UTF16 = forName("utf-16");
UTF16BE = forName("utf-16be");
UTF16LE = forName("utf-16le");
WINDOWS1252 = forName("windows-1252");
// HTML5 spec remappings: browsers treat these legacy labels as their
// windows-codepage supersets. Each remap is best-effort — a missing target
// charset on this platform is silently ignored.
try {
forName("iso-8859-1").actualHtmlEncoding = forName("windows-1252");
} catch (UnsupportedCharsetException e) {
}
try {
forName("iso-8859-9").actualHtmlEncoding = forName("windows-1254");
} catch (UnsupportedCharsetException e) {
}
try {
forName("iso-8859-11").actualHtmlEncoding = forName("windows-874");
} catch (UnsupportedCharsetException e) {
}
try {
forName("x-iso-8859-11").actualHtmlEncoding = forName("windows-874");
} catch (UnsupportedCharsetException e) {
}
try {
forName("tis-620").actualHtmlEncoding = forName("windows-874");
} catch (UnsupportedCharsetException e) {
}
try {
forName("gb_2312-80").actualHtmlEncoding = forName("gbk");
} catch (UnsupportedCharsetException e) {
}
try {
forName("gb2312").actualHtmlEncoding = forName("gbk");
} catch (UnsupportedCharsetException e) {
}
// Extra label aliases that map directly to a different registered encoding.
try {
encodingByCookedName.put("x-x-big5", forName("big5"));
} catch (UnsupportedCharsetException e) {
}
try {
encodingByCookedName.put("euc-kr", forName("windows-949"));
} catch (UnsupportedCharsetException e) {
}
try {
encodingByCookedName.put("ks_c_5601-1987", forName("windows-949"));
} catch (UnsupportedCharsetException e) {
}
}