

C# System.Collections.Hashtable.MoveNext Method Code Examples

This article collects typical usage examples of the System.Collections.Hashtable.MoveNext method in C# (strictly speaking, MoveNext is called on the IEnumerator returned by Hashtable.GetEnumerator()). If you are asking how System.Collections.Hashtable.MoveNext is used in practice, or looking for concrete examples of it, the selected code samples here may help. You can also explore further usage examples of the containing class, System.Collections.Hashtable.


The text below shows 15 code examples of the System.Collections.Hashtable.MoveNext method, ordered by popularity by default.
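Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the class name MoveNextDemo and the sample data are purely illustrative) of the pattern they all share: copy the IDictionary into a fresh Hashtable to get an independent snapshot, obtain an IEnumerator, and loop with MoveNext, casting each Current value to DictionaryEntry.

		using System;
		using System.Collections;
		
		class MoveNextDemo
		{
			static void Main()
			{
				IDictionary map = new Hashtable();
				map["one"] = 1;
				map["two"] = 2;
				
				// Copying into a new Hashtable yields an independent snapshot,
				// so the loop is unaffected by later changes to the original map.
				IEnumerator it = new Hashtable(map).GetEnumerator();
				while (it.MoveNext())
				{
					// A Hashtable enumerator yields DictionaryEntry values.
					DictionaryEntry entry = (DictionaryEntry) it.Current;
					Console.WriteLine("{0} => {1}", entry.Key, entry.Value);
				}
			}
		}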

Example 1: Flush

		public override void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
		{
			
			System.Collections.IDictionary oneThreadsAndFields = new System.Collections.Hashtable();
			System.Collections.IDictionary twoThreadsAndFields = new System.Collections.Hashtable();
			
			System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
			while (it.MoveNext())
			{
				
				System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
				
				DocFieldConsumersPerThread perThread = (DocFieldConsumersPerThread) entry.Key;
				
				System.Collections.ICollection fields = (System.Collections.ICollection) entry.Value;
				
				System.Collections.IEnumerator fieldsIt = fields.GetEnumerator();
				System.Collections.Hashtable oneFields = new System.Collections.Hashtable();
				System.Collections.Hashtable twoFields = new System.Collections.Hashtable();
				while (fieldsIt.MoveNext())
				{
					DocFieldConsumersPerField perField = (DocFieldConsumersPerField) fieldsIt.Current;
					SupportClass.CollectionsHelper.AddIfNotContains(oneFields, perField.one);
					SupportClass.CollectionsHelper.AddIfNotContains(twoFields, perField.two);
				}
				
				oneThreadsAndFields[perThread.one] = oneFields;
				twoThreadsAndFields[perThread.two] = twoFields;
			}
			
			
			one.Flush(oneThreadsAndFields, state);
			two.Flush(twoThreadsAndFields, state);
		}
Developer: Inzaghi2012, Project: teamlab.v7.5, Lines: 34, Source: DocFieldConsumers.cs

Example 2: Flush

		public override void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
		{
			
			System.Collections.IDictionary childThreadsAndFields = new System.Collections.Hashtable();
			System.Collections.IDictionary endChildThreadsAndFields = new System.Collections.Hashtable();
			
			System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
			while (it.MoveNext())
			{
				
				System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
				
				DocInverterPerThread perThread = (DocInverterPerThread) entry.Key;
				
				System.Collections.ICollection fields = (System.Collections.ICollection) entry.Value;
				
				System.Collections.IEnumerator fieldsIt = fields.GetEnumerator();
				System.Collections.Hashtable childFields = new System.Collections.Hashtable();
				System.Collections.Hashtable endChildFields = new System.Collections.Hashtable();
				while (fieldsIt.MoveNext())
				{
					DocInverterPerField perField = (DocInverterPerField) ((System.Collections.DictionaryEntry) fieldsIt.Current).Key;
					childFields[perField.consumer] = perField.consumer;
					endChildFields[perField.endConsumer] = perField.endConsumer;
				}
				
				childThreadsAndFields[perThread.consumer] = childFields;
				endChildThreadsAndFields[perThread.endConsumer] = endChildFields;
			}
			
			consumer.Flush(childThreadsAndFields, state);
			endConsumer.Flush(endChildThreadsAndFields, state);
		}
Developer: carrie901, Project: mono, Lines: 33, Source: DocInverter.cs

Example 3: Flush

		public override void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
		{
			lock (this)
			{
				
				if (tvx != null)
				{
					
					if (state.numDocsInStore > 0)
					// In case there are some final documents that we
					// didn't see (because they hit a non-aborting exception):
						Fill(state.numDocsInStore - docWriter.GetDocStoreOffset());
					
					tvx.Flush();
					tvd.Flush();
					tvf.Flush();
				}

                System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
				while (it.MoveNext())
				{
					System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
					System.Collections.IEnumerator it2 = ((System.Collections.ICollection) entry.Value).GetEnumerator();
					while (it2.MoveNext())
					{
						TermVectorsTermsWriterPerField perField = (TermVectorsTermsWriterPerField) ((System.Collections.DictionaryEntry) it2.Current).Key;
						perField.termsHashPerField.Reset();
						perField.ShrinkHash();
					}
					
					TermVectorsTermsWriterPerThread perThread = (TermVectorsTermsWriterPerThread) entry.Key;
					perThread.termsHashPerThread.Reset(true);
				}
			}
		}
Developer: Inzaghi2012, Project: teamlab.v7.5, Lines: 35, Source: TermVectorsTermsWriter.cs

Example 4: Flush

		public override void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
		{
			lock (this)
			{
                // NOTE: it's possible that all documents seen in this segment
                // hit non-aborting exceptions, in which case we will
                // not have yet init'd the TermVectorsWriter.  This is
                // actually OK (unlike in the stored fields case)
                // because, although FieldInfos.hasVectors() will return
                // true, the TermVectorsReader gracefully handles
                // non-existence of the term vectors files.
				if (tvx != null)
				{
					
					if (state.numDocsInStore > 0)
					// In case there are some final documents that we
					// didn't see (because they hit a non-aborting exception):
						Fill(state.numDocsInStore - docWriter.GetDocStoreOffset());
					
					tvx.Flush();
					tvd.Flush();
					tvf.Flush();
				}

                System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
				while (it.MoveNext())
				{
					System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
					System.Collections.IEnumerator it2 = ((System.Collections.ICollection) entry.Value).GetEnumerator();
					while (it2.MoveNext())
					{
						TermVectorsTermsWriterPerField perField = (TermVectorsTermsWriterPerField) ((System.Collections.DictionaryEntry) it2.Current).Key;
						perField.termsHashPerField.Reset();
						perField.ShrinkHash();
					}
					
					TermVectorsTermsWriterPerThread perThread = (TermVectorsTermsWriterPerThread) entry.Key;
					perThread.termsHashPerThread.Reset(true);
				}
			}
		}
Developer: Mpdreamz, Project: lucene.net, Lines: 41, Source: TermVectorsTermsWriter.cs

Example 5: WriteStringStringMap

 // map must be Map<String, String>
 public virtual void WriteStringStringMap(System.Collections.IDictionary map)
 {
     if (map == null)
     {
         WriteInt(0);
     }
     else
     {
         WriteInt(map.Count);
         System.Collections.IEnumerator it = new System.Collections.Hashtable(map).GetEnumerator();
         while (it.MoveNext())
         {
             System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
             WriteString((System.String) entry.Key);
             WriteString((System.String) entry.Value);
         }
     }
 }
Developer: BackupTheBerlios, Project: lyra2-svn, Lines: 19, Source: IndexOutput.cs

Example 6: GetTermFreqVectors

		// get all vectors
		public override TermFreqVector[] GetTermFreqVectors(int n)
		{
			EnsureOpen();
			System.Collections.ArrayList results = new System.Collections.ArrayList();
            System.Collections.IEnumerator i = new System.Collections.Hashtable(fieldToReader).GetEnumerator();
			while (i.MoveNext())
			{
				System.Collections.DictionaryEntry e = (System.Collections.DictionaryEntry) i.Current;
				System.String field = (System.String) e.Key;
				IndexReader reader = (IndexReader) e.Value;
				TermFreqVector vector = reader.GetTermFreqVector(n, field);
				if (vector != null)
					results.Add(vector);
			}
			return (TermFreqVector[]) results.ToArray(typeof(TermFreqVector));
		}
Developer: VirtueMe, Project: ravendb, Lines: 17, Source: ParallelReader.cs

Example 7: Flush

		// TODO: would be nice to factor out more of this, eg the
		// FreqProxFieldMergeState, and code to visit all Fields
		// under the same FieldInfo together, up into TermsHash*.
		// Other writers would presumably share a lot of this...
		
		public override void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
		{
			
			// Gather all FieldData's that have postings, across all
			// ThreadStates
			System.Collections.ArrayList allFields = new System.Collections.ArrayList();

            System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
			while (it.MoveNext())
			{
				
				System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
				
				System.Collections.ICollection fields = (System.Collections.ICollection) entry.Value;
				
				System.Collections.IEnumerator fieldsIt = fields.GetEnumerator();
				
				while (fieldsIt.MoveNext())
				{
					FreqProxTermsWriterPerField perField = (FreqProxTermsWriterPerField) ((System.Collections.DictionaryEntry) fieldsIt.Current).Key;
					if (perField.termsHashPerField.numPostings > 0)
						allFields.Add(perField);
				}
			}
			
			// Sort by field name
            allFields.Sort();
			int numAllFields = allFields.Count;
			
			// TODO: allow Lucene user to customize this consumer:
			FormatPostingsFieldsConsumer consumer = new FormatPostingsFieldsWriter(state, fieldInfos);
			/*
			Current writer chain:
			FormatPostingsFieldsConsumer
			-> IMPL: FormatPostingsFieldsWriter
			-> FormatPostingsTermsConsumer
			-> IMPL: FormatPostingsTermsWriter
			-> FormatPostingsDocConsumer
			-> IMPL: FormatPostingsDocWriter
			-> FormatPostingsPositionsConsumer
			-> IMPL: FormatPostingsPositionsWriter
			*/
			
			int start = 0;
			while (start < numAllFields)
			{
				FieldInfo fieldInfo = ((FreqProxTermsWriterPerField) allFields[start]).fieldInfo;
				System.String fieldName = fieldInfo.name;
				
				int end = start + 1;
				while (end < numAllFields && ((FreqProxTermsWriterPerField) allFields[end]).fieldInfo.name.Equals(fieldName))
					end++;
				
				FreqProxTermsWriterPerField[] fields = new FreqProxTermsWriterPerField[end - start];
				for (int i = start; i < end; i++)
				{
					fields[i - start] = (FreqProxTermsWriterPerField) allFields[i];
					
					// Aggregate the storePayload as seen by the same
					// field across multiple threads
					fieldInfo.storePayloads |= fields[i - start].hasPayloads;
				}
				
				// If this field has postings then add them to the
				// segment
				AppendPostings(fields, consumer);
				
				for (int i = 0; i < fields.Length; i++)
				{
					TermsHashPerField perField = fields[i].termsHashPerField;
					int numPostings = perField.numPostings;
					perField.Reset();
					perField.ShrinkHash(numPostings);
					fields[i].Reset();
				}
				
				start = end;
			}

            it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
			while (it.MoveNext())
			{
				System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
				FreqProxTermsWriterPerThread perThread = (FreqProxTermsWriterPerThread) entry.Key;
				perThread.termsHashPerThread.Reset(true);
			}
			
			consumer.Finish();
		}
Developer: Inzaghi2012, Project: teamlab.v7.5, Lines: 94, Source: FreqProxTermsWriter.cs

Example 8: Flush

		internal override void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
		{
			lock (this)
			{
				System.Collections.IDictionary childThreadsAndFields = new System.Collections.Hashtable();
				System.Collections.IDictionary nextThreadsAndFields;
				
				if (nextTermsHash != null)
				{
					nextThreadsAndFields = new System.Collections.Hashtable();
				}
				else
					nextThreadsAndFields = null;

                System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
				while (it.MoveNext())
				{
					
					System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
					
					TermsHashPerThread perThread = (TermsHashPerThread) entry.Key;
					
					System.Collections.ICollection fields = (System.Collections.ICollection) entry.Value;
					
					System.Collections.IEnumerator fieldsIt = fields.GetEnumerator();
                    System.Collections.Hashtable childFields = new System.Collections.Hashtable();
					System.Collections.Hashtable nextChildFields;
					
					if (nextTermsHash != null)
					{
                        nextChildFields = new System.Collections.Hashtable();
					}
					else
						nextChildFields = null;
					
					while (fieldsIt.MoveNext())
					{
						TermsHashPerField perField = (TermsHashPerField) ((System.Collections.DictionaryEntry) fieldsIt.Current).Key;
						childFields[perField.consumer] = perField.consumer;
						if (nextTermsHash != null)
							nextChildFields[perField.nextPerField] = perField.nextPerField;
					}
					
					childThreadsAndFields[perThread.consumer] = childFields;
					if (nextTermsHash != null)
						nextThreadsAndFields[perThread.nextPerThread] = nextChildFields;
				}
				
				consumer.Flush(childThreadsAndFields, state);
				
				ShrinkFreePostings(threadsAndFields, state);
				
				if (nextTermsHash != null)
					nextTermsHash.Flush(nextThreadsAndFields, state);
			}
		}
Developer: Rationalle, Project: ravendb, Lines: 56, Source: TermsHash.cs

Example 9: Commit

			/// <summary> Commit all segment readers in the pool.</summary>
			/// <throws>  IOException </throws>
			internal virtual void  Commit()
			{
				lock (this)
				{
                    System.Collections.IEnumerator iter = new System.Collections.Hashtable(readerMap).GetEnumerator();
					while (iter.MoveNext())
					{
						System.Collections.DictionaryEntry ent = (System.Collections.DictionaryEntry) iter.Current;
						
						SegmentReader sr = (SegmentReader) ent.Value;
						if (sr.hasChanges)
						{
							System.Diagnostics.Debug.Assert(InfoIsLive(sr.GetSegmentInfo()));
							sr.DoCommit(null);
                            // Must checkpoint w/ deleter, because this
                            // segment reader will have created new _X_N.del
                            // file.
                            enclosingInstance.deleter.Checkpoint(enclosingInstance.segmentInfos, false);
						}
					}
				}
			}
Developer: Mpdreamz, Project: lucene.net, Lines: 24, Source: IndexWriter.cs

Example 10: AssertSameValues

		// make sure all the values in the maps match
		private void  AssertSameValues(System.Collections.Hashtable m1, System.Collections.Hashtable m2)
		{
			int n = m1.Count;
			int m = m2.Count;
			Assert.AreEqual(n, m);
			// Iterate over the keys of m1 (the original line enumerated a new,
			// empty Hashtable, so the comparison loop never executed).
			System.Collections.IEnumerator iter = m1.Keys.GetEnumerator();
			while (iter.MoveNext())
			{
				System.Object key = iter.Current;
				System.Object o1 = m1[key];
				System.Object o2 = m2[key];
				if (o1 is System.Single)
				{
					Assert.AreEqual((float) ((System.Single) o1), (float) ((System.Single) o2), 1e-6);
				}
				else
				{
					Assert.AreEqual(m1[key], m2[key]);
				}
			}
		}
Developer: vikasraz, Project: indexsearchutils, Lines: 22, Source: TestSort.cs

Example 11: GetTermFreqVectors

 // get all vectors
 public override TermFreqVector[] GetTermFreqVectors(int n)
 {
     System.Collections.ArrayList results = new System.Collections.ArrayList();
     System.Collections.IEnumerator i = new System.Collections.Hashtable(fieldToReader).GetEnumerator();
     while (i.MoveNext())
     {
         System.Collections.DictionaryEntry e = (System.Collections.DictionaryEntry) i.Current;
         //IndexReader reader = (IndexReader) e.Key;         // {{Aroush}} which is right, those two lines?
         //System.String field = (System.String) e.Value;
         System.String field = (System.String) e.Key;        // {{Aroush-2.0}} or those two lines?
         IndexReader reader = (IndexReader) e.Value;
         TermFreqVector vector = reader.GetTermFreqVector(n, field);
         if (vector != null)
             results.Add(vector);
     }
     return (TermFreqVector[]) (results.ToArray(typeof(TermFreqVector)));
 }
Developer: kiichi7, Project: Search-Engine, Lines: 18, Source: ParallelReader.cs

Example 12: ApplyDeletes

		// Apply buffered delete terms to this reader.
		private void  ApplyDeletes(System.Collections.Hashtable deleteTerms, IndexReader reader)
		{
			System.Collections.IEnumerator iter = new System.Collections.Hashtable(deleteTerms).GetEnumerator();
			while (iter.MoveNext())
			{
				System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iter.Current;
				reader.DeleteDocuments((Term) entry.Key);
			}
		}
Developer: zweib730, Project: beagrep, Lines: 10, Source: IndexWriter.cs

Example 13: ApplyDeletesSelectively

		// Apply buffered delete terms to the segment just flushed from ram
		// apply appropriately so that a delete term is only applied to
		// the documents buffered before it, not those buffered after it.
		private void  ApplyDeletesSelectively(System.Collections.Hashtable deleteTerms, IndexReader reader)
		{
			System.Collections.IEnumerator iter = new System.Collections.Hashtable(deleteTerms).GetEnumerator();
			while (iter.MoveNext())
			{
				System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iter.Current;
				Term term = (Term) entry.Key;
				
				TermDocs docs = reader.TermDocs(term);
				if (docs != null)
				{
					int num = ((Num) entry.Value).GetNum();
					try
					{
						while (docs.Next())
						{
							int doc = docs.Doc();
							if (doc >= num)
							{
								break;
							}
							reader.DeleteDocument(doc);
						}
					}
					finally
					{
						docs.Close();
					}
				}
			}
		}
Developer: zweib730, Project: beagrep, Lines: 34, Source: IndexWriter.cs

Example 14: TestMapper

		public virtual void  TestMapper()
		{
			TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
			Assert.IsTrue(reader != null);
			SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
			reader.Get(0, mapper);
			System.Collections.Generic.SortedDictionary<Object,Object> set_Renamed = mapper.GetTermVectorEntrySet();
			Assert.IsTrue(set_Renamed != null, "set is null and it shouldn't be");
			//three fields, 4 terms, all terms are the same
			Assert.IsTrue(set_Renamed.Count == 4, "set Size: " + set_Renamed.Count + " is not: " + 4);
			//Check offsets and positions
			for (System.Collections.IEnumerator iterator = set_Renamed.Keys.GetEnumerator(); iterator.MoveNext(); )
			{
				TermVectorEntry tve = (TermVectorEntry) iterator.Current;
				Assert.IsTrue(tve != null, "tve is null and it shouldn't be");
				Assert.IsTrue(tve.GetOffsets() != null, "tve.getOffsets() is null and it shouldn't be");
				Assert.IsTrue(tve.GetPositions() != null, "tve.getPositions() is null and it shouldn't be");
			}
			
			mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
			reader.Get(1, mapper);
			set_Renamed = mapper.GetTermVectorEntrySet();
			Assert.IsTrue(set_Renamed != null, "set is null and it shouldn't be");
			//three fields, 4 terms, all terms are the same
			Assert.IsTrue(set_Renamed.Count == 4, "set Size: " + set_Renamed.Count + " is not: " + 4);
			//Should have offsets and positions b/c we are munging all the fields together
			for (System.Collections.IEnumerator iterator = set_Renamed.Keys.GetEnumerator(); iterator.MoveNext(); )
			{
				TermVectorEntry tve = (TermVectorEntry) iterator.Current;
				Assert.IsTrue(tve != null, "tve is null and it shouldn't be");
				Assert.IsTrue(tve.GetOffsets() != null, "tve.getOffsets() is null and it shouldn't be");
				Assert.IsTrue(tve.GetPositions() != null, "tve.getPositions() is null and it shouldn't be");
			}
			
			
			FieldSortedTermVectorMapper fsMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
			reader.Get(0, fsMapper);
			System.Collections.IDictionary map = fsMapper.GetFieldToTerms();
			Assert.IsTrue(map.Count == testFields.Length, "map Size: " + map.Count + " is not: " + testFields.Length);
			for (System.Collections.IEnumerator iterator = new System.Collections.Hashtable(map).GetEnumerator(); iterator.MoveNext(); )
			{
				System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iterator.Current;
				System.Collections.Generic.SortedDictionary<Object,Object> sortedSet = (System.Collections.Generic.SortedDictionary<Object,Object>)entry.Value;
				Assert.IsTrue(sortedSet.Count == 4, "sortedSet Size: " + sortedSet.Count + " is not: " + 4);
				for (System.Collections.IEnumerator inner = sortedSet.Keys.GetEnumerator(); inner.MoveNext(); )
				{
					TermVectorEntry tve = (TermVectorEntry) inner.Current;
					Assert.IsTrue(tve != null, "tve is null and it shouldn't be");
					//Check offsets and positions.
					Assert.IsTrue(tve != null, "tve is null and it shouldn't be");
					System.String field = tve.GetField();
					if (field.Equals(testFields[0]))
					{
						//should have offsets
						
						Assert.IsTrue(tve.GetOffsets() != null, "tve.getOffsets() is null and it shouldn't be");
						Assert.IsTrue(tve.GetPositions() != null, "tve.getPositions() is null and it shouldn't be");
					}
					else if (field.Equals(testFields[1]))
					{
						//should not have offsets
						
						Assert.IsTrue(tve.GetOffsets() == null, "tve.getOffsets() is not null and it shouldn't be");
						Assert.IsTrue(tve.GetPositions() == null, "tve.getPositions() is not null and it shouldn't be");
					}
				}
			}
			//Try mapper that ignores offs and positions
			fsMapper = new FieldSortedTermVectorMapper(true, true, new TermVectorEntryFreqSortedComparator());
			reader.Get(0, fsMapper);
			map = fsMapper.GetFieldToTerms();
			Assert.IsTrue(map.Count == testFields.Length, "map Size: " + map.Count + " is not: " + testFields.Length);
			for (System.Collections.IEnumerator iterator = new System.Collections.Hashtable(map).GetEnumerator(); iterator.MoveNext(); )
			{
				System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iterator.Current;
				System.Collections.Generic.SortedDictionary<Object,Object> sortedSet = (System.Collections.Generic.SortedDictionary<Object,Object>)entry.Value;
				Assert.IsTrue(sortedSet.Count == 4, "sortedSet Size: " + sortedSet.Count + " is not: " + 4);
				for (System.Collections.IEnumerator inner = sortedSet.Keys.GetEnumerator(); inner.MoveNext(); )
				{
					TermVectorEntry tve = (TermVectorEntry) inner.Current;
					Assert.IsTrue(tve != null, "tve is null and it shouldn't be");
					//Check offsets and positions.
					Assert.IsTrue(tve != null, "tve is null and it shouldn't be");
					System.String field = tve.GetField();
					if (field.Equals(testFields[0]))
					{
						//offsets and positions are ignored by this mapper, so both should be null
						
						Assert.IsTrue(tve.GetOffsets() == null, "tve.getOffsets() is not null and it shouldn't be");
						Assert.IsTrue(tve.GetPositions() == null, "tve.getPositions() is not null and it shouldn't be");
					}
					else if (field.Equals(testFields[1]))
					{
						//should not have offsets
						
						Assert.IsTrue(tve.GetOffsets() == null, "tve.getOffsets() is not null and it shouldn't be");
						Assert.IsTrue(tve.GetPositions() == null, "tve.getPositions() is not null and it shouldn't be");
					}
				}
			}
//......... remaining code omitted .........
Developer: VirtueMe, Project: ravendb, Lines: 101, Source: TestTermVectorsReader.cs

Example 15: Clear

			/// <summary>Forcefully clear changes for the specified segments,
			/// and remove from the pool.   This is called on successful merge. 
			/// </summary>
			internal virtual void  Clear(SegmentInfos infos)
			{
				lock (this)
				{
					if (infos == null)
					{
                        System.Collections.IEnumerator iter = new System.Collections.Hashtable(readerMap).GetEnumerator();
						while (iter.MoveNext())
						{
							System.Collections.DictionaryEntry ent = (System.Collections.DictionaryEntry) iter.Current;
							((SegmentReader) ent.Value).hasChanges = false;
						}
					}
					else
					{
						int numSegments = infos.Count;
						for (int i = 0; i < numSegments; i++)
						{
							SegmentInfo info = infos.Info(i);
							if (readerMap.Contains(info))
							{
								((SegmentReader) readerMap[info]).hasChanges = false;
							}
						}
					}
				}
			}
Developer: Mpdreamz, Project: lucene.net, Lines: 30, Source: IndexWriter.cs


Note: The System.Collections.Hashtable.MoveNext examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are drawn from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and redistribution or use should follow each project's license. Do not republish without permission.