C# IndexWriter.Flush Method Code Examples

This article collects typical C# usage examples of the Lucene.Net.Index.IndexWriter.Flush method. If you are wondering what IndexWriter.Flush does, how to call it, or where to find sample code, the curated examples below should help. You can also explore further usage examples of the containing class, Lucene.Net.Index.IndexWriter.


A total of 15 code examples of the IndexWriter.Flush method are shown below, sorted by popularity by default.
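Before the individual examples, here is a minimal, self-contained sketch of the call pattern most of them follow, assuming the Lucene.Net 3.0.x API (the older 2.x-era examples below use the parameterless Flush() overload instead); the field name "content" and the sample text are purely illustrative.

using Lucene.Net.Analysis.Standard;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Version = Lucene.Net.Util.Version;

class FlushSketch
{
    static void Main()
    {
        var directory = new RAMDirectory();
        var analyzer = new StandardAnalyzer(Version.LUCENE_30);

        // create: true builds a fresh index in the in-memory directory.
        using (var writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED))
        {
            var doc = new Document();
            doc.Add(new Field("content", "hello lucene", Field.Store.YES, Field.Index.ANALYZED));
            writer.AddDocument(doc);

            // Flush(triggerMerge, flushDocStores, flushDeletes) writes buffered documents
            // and deletions to the directory without closing the writer.
            writer.Flush(true, true, true);
        }
    }
}

Note that flushing by itself does not commit: a reader opened later via IndexReader.Open will not see the flushed documents until Commit() is called or the writer is disposed, while near-real-time readers obtained from IndexWriter.GetReader (as in Example 11 below) do see them.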

Example 1: AddOrUpdateDocuments

        public void AddOrUpdateDocuments(params CmsDocument[] documents)
        {
            DeleteDocuments(documents);
            using (var writer = new IndexWriter(_Directory, _Analyzer, false, new IndexWriter.MaxFieldLength(1024 * 1024 * 4)))
            {
                foreach (var document in documents)
                {
                    if (document.Id == Guid.Empty)
                        throw new ArgumentOutOfRangeException("Attempt to index transient document: " + document.Title);

                    var doc = new Document();
                    doc.Add(new Field(CmsDocumentField.Id.ToString(), document.Id.ToString("b"), Field.Store.YES, Field.Index.NOT_ANALYZED));
                    if (!String.IsNullOrEmpty(document.Title))
                        doc.Add(new Field(CmsDocumentField.Title.ToString(), document.Title, Field.Store.YES, Field.Index.ANALYZED));
                    foreach (var tag in document.Tags)
                    {
                        doc.Add(new Field(CmsDocumentField.Tag.ToString(), tag, Field.Store.YES, Field.Index.ANALYZED));
                    }
                    foreach (var partValue in document.Parts.Select(p => p.Value))
                    {
                        if(!String.IsNullOrEmpty(partValue))
                            doc.Add(new Field(CmsDocumentField.Value.ToString(), partValue, Field.Store.NO, Field.Index.ANALYZED));
                    }
                    writer.AddDocument(doc);
                }
                writer.Flush(true, true, true);
            }
        }
Developer: Steinerd, Project: MicroCms, Lines: 28, Source: LuceneCmsSearchService.cs

Example 2: TestSimpleSkip

		public virtual void  TestSimpleSkip()
		{
			RAMDirectory dir = new RAMDirectory();
			IndexWriter writer = new IndexWriter(dir, new PayloadAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
			Term term = new Term("test", "a");
			for (int i = 0; i < 5000; i++)
			{
				Document d1 = new Document();
				d1.Add(new Field(term.Field(), term.Text(), Field.Store.NO, Field.Index.ANALYZED));
				writer.AddDocument(d1);
			}
			writer.Flush();
			writer.Optimize();
			writer.Close();
			
			IndexReader reader = SegmentReader.GetOnlySegmentReader(dir);
			SegmentTermPositions tp = (SegmentTermPositions) reader.TermPositions();
			tp.freqStream_ForNUnit = new CountingStream(this, tp.freqStream_ForNUnit);
			
			for (int i = 0; i < 2; i++)
			{
				counter = 0;
				tp.Seek(term);
				
				CheckSkipTo(tp, 14, 185); // no skips
				CheckSkipTo(tp, 17, 190); // one skip on level 0
				CheckSkipTo(tp, 287, 200); // one skip on level 1, two on level 0
				
				// this test would fail if we had only one skip level,
				// because then more bytes would be read from the freqStream
				CheckSkipTo(tp, 4800, 250); // one skip on level 2
			}
		}
Developer: Rationalle, Project: ravendb, Lines: 33, Source: TestMultiLevelSkipList.cs

Example 3: btnFolder_Click

        private void btnFolder_Click(object sender, EventArgs e)
        {
            FolderBrowserDialog dia = new FolderBrowserDialog();
            DialogResult res = dia.ShowDialog();
            if (res != System.Windows.Forms.DialogResult.OK)
            {
                return;
            }

            FSDirectory dir = FSDirectory.GetDirectory(Environment.CurrentDirectory + "\\LuceneIndex");
            //Lucene.Net.Store.RAMDirectory dir = new RAMDirectory();
            Lucene.Net.Analysis.Standard.StandardAnalyzer an = new Lucene.Net.Analysis.Standard.StandardAnalyzer();
            IndexWriter wr = new IndexWriter(dir, an,true);
            IStemmer stemmer = new EnglishStemmer();
            DirectoryInfo diMain = new DirectoryInfo(dia.SelectedPath);
            foreach(FileInfo fi in diMain.GetFiles()){
                Document doc = new Document();
                doc.Add(new Field("title", fi.Name,Field.Store.YES, Field.Index.NO));
                //doc.Add(new Field("text", File.ReadAllText(fi.FullName),Field.Store.YES, Field.Index.TOKENIZED,Field.TermVector.YES));
                doc.Add(new Field("text", PerformStemming(stemmer,NLPToolkit.Tokenizer.TokenizeNow(File.ReadAllText(fi.FullName)).ToArray()), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.YES));
                wr.AddDocument(doc);
            }
            wr.Optimize();
            wr.Flush();
            wr.Close();
            dir.Close();

            IndexReader reader = IndexReader.Open(dir);
            for (int i = 0; i < reader.MaxDoc(); i++)
            {
                if (reader.IsDeleted(i))
                    continue;

                Document doc = reader.Document(i);
                String docId = doc.Get("docId");
                foreach (TermFreqVector vector in reader.GetTermFreqVectors(i))
                {
                    foreach(string term in vector.GetTerms()){
                        Console.WriteLine(term);
                    }
                }
                // do something with docId here...
            }
            //IndexSearcher search = new IndexSearcher(wr.GetReader());

            //MoreLikeThis mlt = new MoreLikeThis(wr.GetReader());
            //FileInfo fitarget = new FileInfo(@"C:\Users\peacemaker\Desktop\TestNoBitcoin\test.txt");
            //Query query = mlt.Like(fitarget);

            //var hits = search.Search(query, int.MaxValue);
            //foreach (ScoreDoc doc in hits.ScoreDocs)
            //{
            //    textBox1.Text += doc.Score + Environment.NewLine;
            //}
        }
Developer: peac3maker, Project: TwitterSentimentAnalysis, Lines: 55, Source: Form1.cs

Example 4: doWithWriter

 private static void doWithWriter(string indexRoot, Action<IndexWriter> actionWithWriter, Analyzer analyzer, bool recreateIndex = false)
 {
     var indexDirectory = FSDirectory.Open(indexRoot);
     if(analyzer == null)
         analyzer = new StandardAnalyzer(Version.LUCENE_30);
     var writer = new IndexWriter(indexDirectory, analyzer, recreateIndex, IndexWriter.MaxFieldLength.UNLIMITED);
     actionWithWriter(writer);
     //writer.Commit();
     writer.Optimize();
     writer.Flush(true, true, true);
     writer.Dispose();
 }
Developer: kallex, Project: ServiceCatalogueIndexer, Lines: 12, Source: FieldIndexSupport.cs

Example 5: Initialize

        private void Initialize()
        {
            _directory = new RAMDirectory();
            _analyzer = new StandardAnalyzer(Version.LUCENE_30);

            using (var writer = new IndexWriter(_directory, _analyzer, IndexWriter.MaxFieldLength.UNLIMITED))
            {
                StoreDocument("The lazy fox jumps over the quick brown dog", writer);
                StoreDocument("The quick brown fox jumps over the lazy dog", writer);

                writer.Optimize();
                writer.Flush(true, true, true);
            }
        }
Developer: wmeints, Project: Presentations, Lines: 14, Source: SpanQueryTestFixture.cs

Example 6: CreateIndex

        public static void CreateIndex(MongoCollection<TweetItem> collection)
        {
            DateTime dtmFirst = new DateTime(2014, 05, 17, 0, 0, 0);
            DateTime dtmLast = new DateTime(2014, 05, 17, 23, 59, 59);
            FSDirectory dir = FSDirectory.GetDirectory(Environment.CurrentDirectory + "\\LuceneIndex");
            //Lucene.Net.Store.RAMDirectory dir = new RAMDirectory();
            Lucene.Net.Analysis.StopAnalyzer an = new Lucene.Net.Analysis.StopAnalyzer();
            IndexWriter wr = new IndexWriter(dir, an, true);
            IStemmer stemmer = new EnglishStemmer();
            while (dtmFirst.Date <= DateTime.Now.Date)
            {
                var query = Query<TweetItem>.Where(t => t.CreationDate >= dtmFirst && t.CreationDate <= dtmLast);
                List<TweetItem> value = collection.Find(query).ToList();
                //DirectoryInfo diMain = new DirectoryInfo(dia.SelectedPath);               
                using (var client = new HttpClient())
                {
                    client.BaseAddress = new Uri("http://www.datasciencetoolkit.org/text2sentiment");
                    client.DefaultRequestHeaders.Accept.Clear();
                    client.DefaultRequestHeaders.Accept.Add(new MediaTypeWithQualityHeaderValue("application/json"));

                    foreach (TweetItem tweet in value)
                    {
                        Document doc = new Document();
                        
                        //SentimentResult res = await GetSentiment(tweet.Text, client);                        
                        string stemmedtext = PerformStemming(stemmer, NLPToolkit.Tokenizer.TokenizeNow(tweet.Text).ToArray());
                        var scores = classifier.Classify(stemmedtext,DragonHelper.DragonHelper.ExcludeList);
                        string positiveSentiment = string.Empty;
                        string negativeSentiment = string.Empty;                        
                        positiveSentiment = scores["Positive"].ToString();
                        negativeSentiment = scores["Negative"].ToString();
                        doc.Add(new Field("id", tweet._id.ToString(), Field.Store.YES, Field.Index.NO));
                        doc.Add(new Field("created", tweet.CreationDate.ToString(), Field.Store.YES, Field.Index.NO));
                        doc.Add(new Field("user", tweet.User, Field.Store.YES, Field.Index.NO));                        
                        doc.Add(new Field("text", stemmedtext, Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.YES));                        
                        doc.Add(new Field("possentiment", positiveSentiment , Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.YES));
                        doc.Add(new Field("negsentiment", negativeSentiment, Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.YES));

                        wr.AddDocument(doc);
                    }
                }
                dtmFirst = dtmFirst.AddDays(1);
                dtmLast = dtmLast.AddDays(1);
            }
            wr.Optimize();
            wr.Flush();
            wr.Close();
            dir.Close();
        }
Developer: peac3maker, Project: TwitterSentimentAnalysis, Lines: 49, Source: Program.cs

Example 7: Main

        static void Main(string[] args)
        {
            //Setup indexer

            Directory directory = FSDirectory.GetDirectory("LuceneIndex");
            Analyzer analyzer = new StandardAnalyzer();
            IndexWriter writer = new IndexWriter(directory, analyzer);

            IndexReader red = IndexReader.Open(directory);
            int totDocs = red.MaxDoc();
            red.Close();

            //Add documents to the index
            string text = String.Empty;
            Console.WriteLine("Enter the text you want to add to the index:");
            Console.Write(">");
            int txts = totDocs;
            int j = 0;
            while ((text = Console.ReadLine()) != String.Empty)
            {
                AddTextToIndex(txts++, text, writer);
                j++;
                Console.Write(">");
            }

            writer.Optimize();
            //Close the writer
            writer.Flush();
            writer.Close();

            Console.WriteLine(j + " lines added, "+txts+" documents total");

            //Setup searcher
            IndexSearcher searcher = new IndexSearcher(directory);
            QueryParser parser = new QueryParser("postBody", analyzer);

            Console.WriteLine("Enter the search string:");
            Console.Write(">");

            while ((text = Console.ReadLine()) != String.Empty)
            {
                Search(text, searcher, parser);
                Console.Write(">");
            }

            //Clean up everything
            searcher.Close();
            directory.Close();
        }
Developer: aragorn55, Project: codeclimber, Lines: 49, Source: Program.cs

Example 8: SetupIndex

 protected static IndexReader SetupIndex()
 {
     var directory = new RAMDirectory();
     var writer = new IndexWriter(directory, new StandardAnalyzer(Version.LUCENE_30), true,
         IndexWriter.MaxFieldLength.LIMITED);
     for (var i = 0; i < 50000; i++)
         writer.AddDocument(new Document()
             .AddField("title", Guid.NewGuid().ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED)
             .AddField("color", GenerateColor(), Field.Store.YES, Field.Index.NOT_ANALYZED)
             .AddField("type", GenerateFood(), Field.Store.YES, Field.Index.NOT_ANALYZED)
             .AddField("type", GenerateFruit(), Field.Store.YES, Field.Index.NOT_ANALYZED)
             .AddField("price", "10", Field.Store.YES, Field.Index.NOT_ANALYZED));
     writer.Flush(true, true, true);
     writer.Optimize();
     writer.Commit();
     return IndexReader.Open(directory, true);
 }
Developer: modulexcite, Project: MultiFacetLuceneNet, Lines: 17, Source: Program.cs

Example 9: Initialize

        public void Initialize()
        {
            _analyzer = new StandardAnalyzer(Version.LUCENE_30);
            _searchIndex = new RAMDirectory();

            var db = new DbAccess("RecipeBrowser");
            var recipes = db.Query<dynamic>("SELECT rcp.RecipeId as RecipeId,rcp.Name as RecipeName, " +
                "rcp.Description as RecipeDescription, rcp.CookingInstructions as CookingInstructions, " +
                "cat.Name as CategoryName FROM Recipe rcp " +
                "JOIN RecipeCategory rcpcat ON rcpcat.RecipeId = rcp.RecipeId " +
                "JOIN Category cat ON cat.CategoryId = rcpcat.CategoryId").ToList();

            using (var writer = new IndexWriter(_searchIndex, _analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED))
            {
                foreach (dynamic record in recipes)
                {
                    Document document = new Document();

                    // Store the basic data for the recipe in the search index.
                    document.Add(new Field("id", record.RecipeId.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
                    document.Add(new Field("name", record.RecipeName.ToString(), Field.Store.YES, Field.Index.ANALYZED));
                    document.Add(new Field("description", record.RecipeDescription.ToString(), Field.Store.YES, Field.Index.ANALYZED));
                    document.Add(new Field("instructions", record.CookingInstructions.ToString(), Field.Store.NO, Field.Index.ANALYZED));
                    document.Add(new Field("category", record.CategoryName.ToString(), Field.Store.NO, Field.Index.ANALYZED));

                    dynamic ingredientRecords =
                        db.Query<dynamic>(
                            "SELECT IngredientId, Name FROM Ingredient WHERE RecipeId = @RecipeId",
                            new { RecipeId = record.RecipeId.ToString() });

                    // Store multiple values for the ingredients in the same document.
                    // All the values get analyzed separately so that you can search for them.
                    // They do not get stored however, so you won't be able to retrieve them.
                    foreach (dynamic ingredient in ingredientRecords)
                    {
                        document.Add(new Field("ingredient", ingredient.Name.ToString(), Field.Store.NO, Field.Index.ANALYZED));
                    }
                }

                // Store everything in the directory and merge!
                writer.Optimize(true);
                writer.Flush(true, true, true);
            }
        }
Developer: wmeints, Project: Presentations, Lines: 44, Source: AdvancedQueryParserUsage.cs

Example 10: LuceneCmsSearchService

 public LuceneCmsSearchService(Directory directory)
 {
     _Directory = directory;
     _Analyzer = new StandardAnalyzer(Version.LUCENE_30);
     try
     {
         //Try to open directory.
         using (var rd = IndexReader.Open(directory, true))
         {
         }
     }
     catch (Exception)
     {
         //If open fails, create it
         using (var writer = new IndexWriter(directory, _Analyzer, true, new IndexWriter.MaxFieldLength(1024 * 1024 * 4)))
         {
             writer.Flush(true, true, true);
         }
     }
 }
Developer: Steinerd, Project: MicroCms, Lines: 20, Source: LuceneCmsSearchService.cs

Example 11: Index

    /// <summary>
    /// This method indexes the content that is sent across to it. Each piece of content (or "document")
    /// that is indexed has to have a unique identifier (so that the caller can take action based on the
    /// document id). Therefore, this method accepts key-value pairs in the form of a dictionary. The key
    /// is a ulong which uniquely identifies the string to be indexed. The string itself is the value
    /// within the dictionary for that key. Be aware that stop words (like the, this, at, etc.) are _not_
    /// indexed.
    /// </summary>
    /// <param name="txtIdPairToBeIndexed">A dictionary of key-value pairs that are sent by the caller
    /// to uniquely identify each string that is to be indexed.</param>
    /// <returns>The number of documents indexed.</returns>
    public int Index (Dictionary<long, string> txtIdPairToBeIndexed) {

		using (Directory directory = FSDirectory.Open(_indexDir))
		using (Analyzer analyzer = new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30))
		using (IndexWriter writer = new IndexWriter(directory, analyzer, IndexWriter.MaxFieldLength.UNLIMITED))
		using (IndexReader reader = writer.GetReader())
		{

			//writer.DeleteAll();


			Dictionary<long, string>.KeyCollection keys = txtIdPairToBeIndexed.Keys;

			foreach (long id in keys)
			{
				char[] delimiter = { ';' };
				string[] text = txtIdPairToBeIndexed[id].Split(delimiter);
				Document document = new Document();

				Field title = new Field("title", text[0], Field.Store.YES, Field.Index.NO);
				Field type = new Field("type", text[1], Field.Store.YES, Field.Index.NO);
				Field idField = new Field("date", (id).ToString(), Field.Store.YES, Field.Index.ANALYZED);

				document.Add(title);
				document.Add(type);
				document.Add(idField);

				writer.AddDocument(document);
			}

			int numIndexed = writer.GetDocCount(0);//TODO check number
			writer.Optimize();
			writer.Flush(true,true,true);

			return numIndexed;
		}

		}
Developer: irfiit, Project: wikipedia, Lines: 49, Source: IndexMaker.cs

Example 12: LuceneEngine

        public LuceneEngine(List<Line> linesDone, bool modified)
        {
            analyzer = new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30);
            string path = linesDone.First().path;
            string filename = System.IO.Path.GetFileNameWithoutExtension(path);
            fileDirectoryPath = System.IO.Path.GetDirectoryName(path);
            indexPath = fileDirectoryPath + "\\" + filename;
            if (modified)
            {
                if (System.IO.Directory.Exists(indexPath))
                {
                    System.IO.Directory.Delete(indexPath, true);

                }
                luceneIndexDirectory = FSDirectory.Open(indexPath);
                w = new IndexWriter(luceneIndexDirectory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
                foreach (var line in linesDone)
                {
                    addDoc(w, line);
                }
                w.Optimize();
                w.Flush(true, true, true);
                w.Dispose();
            }
            else
            {
                luceneIndexDirectory = FSDirectory.Open(indexPath);
            }
            //if (modified)
            //{
            //    foreach (var line in linesDone)
            //    {
            //        addDoc(w, line);
            //    }
            //}
        }
Developer: Morkowski, Project: image_search, Lines: 36, Source: LuceneEngine.cs

Example 13: TestNoTermVectorAfterTermVectorMerge

		public virtual void  TestNoTermVectorAfterTermVectorMerge()
		{
			MockRAMDirectory dir = new MockRAMDirectory();
			IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true);
			Document document = new Document();
			document.Add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.YES));
			iw.AddDocument(document);
			iw.Flush();
			
			document = new Document();
			document.Add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.NO));
			iw.AddDocument(document);
			// Make first segment
			iw.Flush();
			
			iw.Optimize();
			
			document.Add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.YES));
			iw.AddDocument(document);
			// Make 2nd segment
			iw.Flush();
			iw.Optimize();
			
			iw.Close();
			dir.Close();
		}
Developer: vikasraz, Project: indexsearchutils, Lines: 26, Source: TestIndexWriter.cs

Example 14: TestRAMDeletes

		public virtual void  TestRAMDeletes()
		{
			for (int pass = 0; pass < 2; pass++)
			{
				for (int t = 0; t < 2; t++)
				{
					bool autoCommit = (0 == pass);
					Directory dir = new MockRAMDirectory();
					IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
					modifier.SetMaxBufferedDocs(4);
					modifier.SetMaxBufferedDeleteTerms(4);
					
					int id = 0;
					int value_Renamed = 100;
					
					AddDoc(modifier, ++id, value_Renamed);
					if (0 == t)
						modifier.DeleteDocuments(new Term("value", System.Convert.ToString(value_Renamed)));
					else
						modifier.DeleteDocuments(new TermQuery(new Term("value", System.Convert.ToString(value_Renamed))));
					AddDoc(modifier, ++id, value_Renamed);
					if (0 == t)
					{
						modifier.DeleteDocuments(new Term("value", System.Convert.ToString(value_Renamed)));
						Assert.AreEqual(2, modifier.GetNumBufferedDeleteTerms());
						Assert.AreEqual(1, modifier.GetBufferedDeleteTermsSize());
					}
					else
						modifier.DeleteDocuments(new TermQuery(new Term("value", System.Convert.ToString(value_Renamed))));
					
					AddDoc(modifier, ++id, value_Renamed);
					Assert.AreEqual(0, modifier.GetSegmentCount());
					modifier.Flush();
					
					modifier.Commit();
					
					IndexReader reader = IndexReader.Open(dir);
					Assert.AreEqual(1, reader.NumDocs());
					
					int hitCount = GetHitCount(dir, new Term("id", System.Convert.ToString(id)));
					Assert.AreEqual(1, hitCount);
					reader.Close();
					modifier.Close();
					dir.Close();
				}
			}
		}
Developer: Mpdreamz, Project: lucene.net, Lines: 47, Source: TestIndexWriterDelete.cs

Example 15: TestFlushExceptions

		public virtual void  TestFlushExceptions()
		{
			
			MockRAMDirectory directory = new MockRAMDirectory();
			FailOnlyOnFlush failure = new FailOnlyOnFlush();
			directory.FailOn(failure);
			
			IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true);
			ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
			writer.SetMergeScheduler(cms);
			writer.SetMaxBufferedDocs(2);
			Document doc = new Document();
			Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
			doc.Add(idField);
			for (int i = 0; i < 10; i++)
			{
				for (int j = 0; j < 20; j++)
				{
					idField.SetValue(System.Convert.ToString(i * 20 + j));
					writer.AddDocument(doc);
				}
				
				writer.AddDocument(doc);
				
				failure.SetDoFail();
				try
				{
					writer.Flush();
					Assert.Fail("failed to hit IOException");
				}
				catch (System.IO.IOException ioe)
				{
					failure.ClearDoFail();
				}
			}
			
			writer.Close();
			IndexReader reader = IndexReader.Open(directory);
			Assert.AreEqual(200, reader.NumDocs());
			reader.Close();
			directory.Close();
		}
Developer: kstenson, Project: NHibernate.Search, Lines: 42, Source: TestConcurrentMergeScheduler.cs


Note: The Lucene.Net.Index.IndexWriter.Flush method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; the copyright of the source code remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.