

C# Lucene.Net.Documents.Document.GetFields Method Code Examples

This article collects typical usage examples of the C# method Lucene.Net.Documents.Document.GetFields. If you are wondering how to call Lucene.Net.Documents.Document.GetFields, what it is used for, or what it looks like in practice, the hand-picked code examples below should help. You can also explore further usage examples of the containing class, Lucene.Net.Documents.Document.


Nine code examples of the Lucene.Net.Documents.Document.GetFields method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
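Before the individual examples, here is a minimal sketch of the basic call pattern: GetFields() returns the document's fields in the order they were added, and each element can be cast to Fieldable. The sketch assumes the same Lucene.Net 2.x-era API (Fieldable, Name(), StringValue()) that the examples below use; the helper name PrintFields is illustrative only.

		// Minimal sketch (assumed API, matching the examples below): enumerate a
		// document's fields and print each field's name and string value.
		public static void PrintFields(Document doc)
		{
			System.Collections.IEnumerator it = doc.GetFields().GetEnumerator();
			while (it.MoveNext())
			{
				Fieldable field = (Fieldable) it.Current;
				// StringValue() may be null for binary or Reader-valued fields
				System.Console.WriteLine(field.Name() + " = " + field.StringValue());
			}
		}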

Example 1: Add

		/// <summary>Adds field info for a Document. </summary>
		public void  Add(Document doc)
		{
			System.Collections.IList fields = doc.GetFields();
			System.Collections.IEnumerator fieldIterator = fields.GetEnumerator();
			while (fieldIterator.MoveNext())
			{
				Fieldable field = (Fieldable) fieldIterator.Current;
				Add(field.Name(), field.IsIndexed(), field.IsTermVectorStored(), field.IsStorePositionWithTermVector(), field.IsStoreOffsetWithTermVector(), field.GetOmitNorms());
			}
		}
Developer: zweib730 | Project: beagrep | Lines of code: 11 | Source file: FieldInfos.cs

Example 2: InvertDocument

		// Tokenizes the fields of a document into Postings.
		private void  InvertDocument(Document doc)
		{
			System.Collections.IEnumerator fieldIterator = doc.GetFields().GetEnumerator();
			while (fieldIterator.MoveNext())
			{
				Fieldable field = (Fieldable) fieldIterator.Current;
				System.String fieldName = field.Name();
				int fieldNumber = fieldInfos.FieldNumber(fieldName);
				
				int length = fieldLengths[fieldNumber]; // length of field
				int position = fieldPositions[fieldNumber]; // position in field
				if (length > 0)
					position += analyzer.GetPositionIncrementGap(fieldName);
				int offset = fieldOffsets[fieldNumber]; // offset in field
				
				if (field.IsIndexed())
				{
					if (!field.IsTokenized())
					{
						// un-tokenized field
						System.String stringValue = field.StringValue();
						if (field.IsStoreOffsetWithTermVector())
							AddPosition(fieldName, stringValue, position++, new TermVectorOffsetInfo(offset, offset + stringValue.Length));
						else
							AddPosition(fieldName, stringValue, position++, null);
						offset += stringValue.Length;
						length++;
					}
					else
					{
						System.IO.TextReader reader; // find or make Reader
						if (field.ReaderValue() != null)
							reader = field.ReaderValue();
						else if (field.StringValue() != null)
							reader = new System.IO.StringReader(field.StringValue());
						else
							throw new System.ArgumentException("field must have either String or Reader value");
						
						// Tokenize field and add to postingTable
						TokenStream stream = analyzer.TokenStream(fieldName, reader);
						try
						{
							Token lastToken = null;
							for (Token t = stream.Next(); t != null; t = stream.Next())
							{
								position += (t.GetPositionIncrement() - 1);
								
								if (field.IsStoreOffsetWithTermVector())
									AddPosition(fieldName, t.TermText(), position++, new TermVectorOffsetInfo(offset + t.StartOffset(), offset + t.EndOffset()));
								else
									AddPosition(fieldName, t.TermText(), position++, null);
								
								lastToken = t;
								if (++length >= maxFieldLength)
								{
									if (infoStream != null)
										infoStream.WriteLine("maxFieldLength " + maxFieldLength + " reached, ignoring following tokens");
									break;
								}
							}
							
							if (lastToken != null)
								offset += lastToken.EndOffset() + 1;
						}
						finally
						{
							stream.Close();
						}
					}
					
					fieldLengths[fieldNumber] = length; // save field length
					fieldPositions[fieldNumber] = position; // save field position
					fieldBoosts[fieldNumber] *= field.GetBoost();
					fieldOffsets[fieldNumber] = offset;
				}
			}
		}
Developer: zweib730 | Project: beagrep | Lines of code: 78 | Source file: DocumentWriter.cs

Example 3: NumFields

		public static int NumFields(Document doc)
		{
			return doc.GetFields().Count;
		}
Developer: Rationalle | Project: ravendb | Lines of code: 4 | Source file: DocHelper.cs

Example 4: AddDocument

		internal void  AddDocument(Document doc)
		{
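			// record where this document's stored fields begin in the fields data stream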
			indexStream.WriteLong(fieldsStream.GetFilePointer());
			
			int storedCount = 0;
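			// first pass: count the stored fields so the count can be written before the field data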
			System.Collections.IEnumerator fieldIterator = doc.GetFields().GetEnumerator();
			while (fieldIterator.MoveNext())
			{
				Fieldable field = (Fieldable) fieldIterator.Current;
				if (field.IsStored())
					storedCount++;
			}
			fieldsStream.WriteVInt(storedCount);
			
			fieldIterator = doc.GetFields().GetEnumerator();
			while (fieldIterator.MoveNext())
			{
				Fieldable field = (Fieldable) fieldIterator.Current;
				if (field.IsStored())
					WriteField(fieldInfos.FieldInfo(field.Name()), field);
			}
		}
Developer: Inzaghi2012 | Project: teamlab.v7.5 | Lines of code: 22 | Source file: FieldsWriter.cs

Example 5: TestStoredFieldsOrder

		public virtual void  TestStoredFieldsOrder()
		{
			Directory d = new MockRAMDirectory();
			IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
			Document doc = new Document();
			doc.Add(new Field("zzz", "a b c", Field.Store.YES, Field.Index.NO));
			doc.Add(new Field("aaa", "a b c", Field.Store.YES, Field.Index.NO));
			doc.Add(new Field("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
			w.AddDocument(doc);
			IndexReader r = w.GetReader();
			doc = r.Document(0);
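			// stored fields should come back in exactly the order they were added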
			System.Collections.IEnumerator it = doc.GetFields().GetEnumerator();
			Assert.IsTrue(it.MoveNext());
			Field f = (Field) it.Current;
			Assert.AreEqual(f.Name(), "zzz");
			Assert.AreEqual(f.StringValue(), "a b c");
			
			Assert.IsTrue(it.MoveNext());
			f = (Field) it.Current;
			Assert.AreEqual(f.Name(), "aaa");
			Assert.AreEqual(f.StringValue(), "a b c");
			
			Assert.IsTrue(it.MoveNext());
			f = (Field) it.Current;
			Assert.AreEqual(f.Name(), "zzz");
			Assert.AreEqual(f.StringValue(), "1 2 3");
			Assert.IsFalse(it.MoveNext());
			r.Close();
			w.Close();
			d.Close();
		}
Developer: Rationalle | Project: ravendb | Lines of code: 31 | Source file: TestIndexWriter.cs

Example 6: TestBinaryFields

		public virtual void  TestBinaryFields()
		{
			Directory dir = new RAMDirectory();
			byte[] bin = new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
			
			IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
			
			for (int i = 0; i < 10; i++)
			{
				AddDoc(writer, "document number " + (i + 1));
				AddDocumentWithFields(writer);
				AddDocumentWithDifferentFields(writer);
				AddDocumentWithTermVectorFields(writer);
			}
			writer.Close();
			writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
			Document doc = new Document();
			doc.Add(new Field("bin1", bin, Field.Store.YES));
			doc.Add(new Field("bin2", bin, Field.Store.COMPRESS));
			doc.Add(new Field("junk", "junk text", Field.Store.NO, Field.Index.ANALYZED));
			writer.AddDocument(doc);
			writer.Close();
			IndexReader reader = IndexReader.Open(dir);
			doc = reader.Document(reader.MaxDoc() - 1);
			Field[] fields = doc.GetFields("bin1");
			Assert.IsNotNull(fields);
			Assert.AreEqual(1, fields.Length);
			Field b1 = fields[0];
			Assert.IsTrue(b1.IsBinary());
			byte[] data1 = b1.GetBinaryValue();
			Assert.AreEqual(bin.Length, b1.GetBinaryLength());
			for (int i = 0; i < bin.Length; i++)
			{
				Assert.AreEqual(bin[i], data1[i + b1.GetBinaryOffset()]);
			}
			fields = doc.GetFields("bin2");
			Assert.IsNotNull(fields);
			Assert.AreEqual(1, fields.Length);
			b1 = fields[0];
			Assert.IsTrue(b1.IsBinary());
			data1 = b1.GetBinaryValue();
			Assert.AreEqual(bin.Length, b1.GetBinaryLength());
			for (int i = 0; i < bin.Length; i++)
			{
				Assert.AreEqual(bin[i], data1[i + b1.GetBinaryOffset()]);
			}
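			// reload the same document with a field selector that loads bin1 lazily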
			System.Collections.Hashtable lazyFields = new System.Collections.Hashtable();
			SupportClass.CollectionsHelper.AddIfNotContains(lazyFields, "bin1");
			FieldSelector sel = new SetBasedFieldSelector(new System.Collections.Hashtable(), lazyFields);
			doc = reader.Document(reader.MaxDoc() - 1, sel);
			Fieldable[] fieldables = doc.GetFieldables("bin1");
			Assert.IsNotNull(fieldables);
			Assert.AreEqual(1, fieldables.Length);
			Fieldable fb1 = fieldables[0];
			Assert.IsTrue(fb1.IsBinary());
			Assert.AreEqual(bin.Length, fb1.GetBinaryLength());
			data1 = fb1.GetBinaryValue();
			Assert.AreEqual(bin.Length, fb1.GetBinaryLength());
			for (int i = 0; i < bin.Length; i++)
			{
				Assert.AreEqual(bin[i], data1[i + fb1.GetBinaryOffset()]);
			}
			reader.Close();
			// force optimize
			
			
			writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
			writer.Optimize();
			writer.Close();
			reader = IndexReader.Open(dir);
			doc = reader.Document(reader.MaxDoc() - 1);
			fields = doc.GetFields("bin1");
			Assert.IsNotNull(fields);
			Assert.AreEqual(1, fields.Length);
			b1 = fields[0];
			Assert.IsTrue(b1.IsBinary());
			data1 = b1.GetBinaryValue();
			Assert.AreEqual(bin.Length, b1.GetBinaryLength());
			for (int i = 0; i < bin.Length; i++)
			{
				Assert.AreEqual(bin[i], data1[i + b1.GetBinaryOffset()]);
			}
			fields = doc.GetFields("bin2");
			Assert.IsNotNull(fields);
			Assert.AreEqual(1, fields.Length);
			b1 = fields[0];
			Assert.IsTrue(b1.IsBinary());
			data1 = b1.GetBinaryValue();
			Assert.AreEqual(bin.Length, b1.GetBinaryLength());
			for (int i = 0; i < bin.Length; i++)
			{
				Assert.AreEqual(bin[i], data1[i + b1.GetBinaryOffset()]);
			}
			reader.Close();
		}
Developer: Rationalle | Project: ravendb | Lines of code: 95 | Source file: TestIndexReader.cs

Example 7: Init

			/// <summary>Initializes shared state for this new document </summary>
			internal void  Init(Document doc, int docID)
			{

                System.Diagnostics.Debug.Assert(!isIdle);
                System.Diagnostics.Debug.Assert(Enclosing_Instance.writer.TestPoint("DocumentsWriter.ThreadState.init start"));
				
				this.docID = docID;
				docBoost = doc.GetBoost();
				numStoredFields = 0;
				numFieldData = 0;
				numVectorFields = 0;
				maxTermPrefix = null;
				
				System.Diagnostics.Debug.Assert(0 == fdtLocal.Length());
				System.Diagnostics.Debug.Assert(0 == fdtLocal.GetFilePointer());
				System.Diagnostics.Debug.Assert(0 == tvfLocal.Length());
				System.Diagnostics.Debug.Assert(0 == tvfLocal.GetFilePointer());
				int thisFieldGen = fieldGen++;
				
				System.Collections.IList docFields = doc.GetFields();
				int numDocFields = docFields.Count;
				bool docHasVectors = false;
				
				// Absorb any new fields first seen in this document.
				// Also absorb any changes to fields we had already
				// seen before (eg suddenly turning on norms or
				// vectors, etc.):
				
				for (int i = 0; i < numDocFields; i++)
				{
					Fieldable field = (Fieldable) docFields[i];
					
					FieldInfo fi = Enclosing_Instance.fieldInfos.Add(field.Name(), field.IsIndexed(), field.IsTermVectorStored(), field.IsStorePositionWithTermVector(), field.IsStoreOffsetWithTermVector(), field.GetOmitNorms(), false);
					if (fi.isIndexed && !fi.omitNorms)
					{
						// Maybe grow our buffered norms
						if (Enclosing_Instance.norms.Length <= fi.number)
						{
							int newSize = (int) ((1 + fi.number) * 1.25);
							BufferedNorms[] newNorms = new BufferedNorms[newSize];
							Array.Copy(Enclosing_Instance.norms, 0, newNorms, 0, Enclosing_Instance.norms.Length);
							Enclosing_Instance.norms = newNorms;
						}
						
						if (Enclosing_Instance.norms[fi.number] == null)
							Enclosing_Instance.norms[fi.number] = new BufferedNorms();
						
						Enclosing_Instance.hasNorms = true;
					}
					
					// Make sure we have a FieldData allocated
					int hashPos = fi.name.GetHashCode() & fieldDataHashMask;
					FieldData fp = fieldDataHash[hashPos];
					while (fp != null && !fp.fieldInfo.name.Equals(fi.name))
						fp = fp.next;
					
					if (fp == null)
					{
						
						fp = new FieldData(this, fi);
						fp.next = fieldDataHash[hashPos];
						fieldDataHash[hashPos] = fp;
						
						if (numAllFieldData == allFieldDataArray.Length)
						{
							int newSize = (int) (allFieldDataArray.Length * 1.5);
							int newHashSize = fieldDataHash.Length * 2;
							
							FieldData[] newArray = new FieldData[newSize];
							FieldData[] newHashArray = new FieldData[newHashSize];
							Array.Copy(allFieldDataArray, 0, newArray, 0, numAllFieldData);
							
							// Rehash
							fieldDataHashMask = newSize - 1;
							for (int j = 0; j < fieldDataHash.Length; j++)
							{
								FieldData fp0 = fieldDataHash[j];
								while (fp0 != null)
								{
									hashPos = fp0.fieldInfo.name.GetHashCode() & fieldDataHashMask;
									FieldData nextFP0 = fp0.next;
									fp0.next = newHashArray[hashPos];
									newHashArray[hashPos] = fp0;
									fp0 = nextFP0;
								}
							}
							
							allFieldDataArray = newArray;
							fieldDataHash = newHashArray;
						}
						allFieldDataArray[numAllFieldData++] = fp;
					}
					else
					{
						System.Diagnostics.Debug.Assert(fp.fieldInfo == fi);
					}
					
					if (thisFieldGen != fp.lastGen)
					{
//......... remainder of this method omitted .........
Developer: vikasraz | Project: indexsearchutils | Lines of code: 101 | Source file: DocumentsWriter.cs

Example 8: AddDocument

		internal void  AddDocument(Document doc)
		{
			indexStream.WriteLong(fieldsStream.GetFilePointer());
			
			int storedCount = 0;
            foreach(Fieldable field in doc.GetFields())
            {			
				if (field.IsStored())
					storedCount++;
			}
			fieldsStream.WriteVInt(storedCount);
			
			foreach(Fieldable field in doc.GetFields())
            {
				if (field.IsStored())
					WriteField(fieldInfos.FieldInfo(field.Name()), field);
			}
		}
Developer: jhuntsman | Project: FlexNet | Lines of code: 18 | Source file: FieldsWriter.cs

Example 9: AddDocument

		internal void  AddDocument(Document doc)
		{
			indexStream.WriteLong(fieldsStream.GetFilePointer());
			
			int storedCount = 0;
			System.Collections.IEnumerator fieldIterator = doc.GetFields().GetEnumerator();
			while (fieldIterator.MoveNext())
			{
				Fieldable field = (Fieldable) fieldIterator.Current;
				if (field.IsStored())
					storedCount++;
			}
			fieldsStream.WriteVInt(storedCount);
			
			fieldIterator = doc.GetFields().GetEnumerator();
			while (fieldIterator.MoveNext())
			{
				Fieldable field = (Fieldable) fieldIterator.Current;
				// if the field is an instance of FieldsReader.FieldForMerge, we are in merge mode
				// and field.BinaryValue() already returns the compressed value for a field
				// with isCompressed()==true, so we disable compression in that case
				bool disableCompression = (field is FieldsReader.FieldForMerge);
				if (field.IsStored())
				{
					fieldsStream.WriteVInt(fieldInfos.FieldNumber(field.Name()));
					
					byte bits = 0;
					if (field.IsTokenized())
						bits |= FieldsWriter.FIELD_IS_TOKENIZED;
					if (field.IsBinary())
						bits |= FieldsWriter.FIELD_IS_BINARY;
					if (field.IsCompressed())
						bits |= FieldsWriter.FIELD_IS_COMPRESSED;
					
					fieldsStream.WriteByte(bits);
					
					if (field.IsCompressed())
					{
						// compression is enabled for the current field
						byte[] data = null;
						
						if (disableCompression)
						{
							// optimized case for merging, the data
							// is already compressed
							data = field.BinaryValue();
						}
						else
						{
							// check if it is a binary field
							if (field.IsBinary())
							{
								data = Compress(field.BinaryValue());
							}
							else
							{
								data = Compress(System.Text.Encoding.GetEncoding("UTF-8").GetBytes(field.StringValue()));
							}
						}
						int len = data.Length;
						fieldsStream.WriteVInt(len);
						fieldsStream.WriteBytes(data, len);
					}
					else
					{
						// compression is disabled for the current field
						if (field.IsBinary())
						{
							byte[] data = field.BinaryValue();
							int len = data.Length;
							fieldsStream.WriteVInt(len);
							fieldsStream.WriteBytes(data, len);
						}
						else
						{
							fieldsStream.WriteString(field.StringValue());
						}
					}
				}
			}
		}
Developer: zweib730 | Project: beagrep | Lines of code: 81 | Source file: FieldsWriter.cs


Note: The Lucene.Net.Documents.Document.GetFields method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and copyright remains with the original authors; for distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.