

Java DefaultTokenizerFactory Class Code Examples

This article collects typical usage examples of the Java class org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory. If you are wondering what DefaultTokenizerFactory is for, how to use it, or what it looks like in practice, the curated class examples below should help.


The DefaultTokenizerFactory class belongs to the org.deeplearning4j.text.tokenization.tokenizerfactory package. Fifteen code examples of the class are presented below, ordered by popularity.
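All of the examples share one pattern: construct a DefaultTokenizerFactory, optionally attach a token pre-processor, then either hand the factory to a model builder or create a Tokenizer directly. The following is a minimal, self-contained sketch of that pattern, distilled from the snippets below; the import paths assume the standard deeplearning4j-nlp package layout used in these examples.

import org.deeplearning4j.text.tokenization.tokenizer.Tokenizer;
import org.deeplearning4j.text.tokenization.tokenizer.preprocessor.CommonPreprocessor;
import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory;
import org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory;

public class DefaultTokenizerFactoryDemo {
    public static void main(String[] args) {
        // Whitespace-based tokenizer factory
        TokenizerFactory t = new DefaultTokenizerFactory();
        // Optional: normalize each token (lowercase, strip punctuation)
        t.setTokenPreProcessor(new CommonPreprocessor());

        Tokenizer tokenizer = t.create("Mary had a little lamb.");
        while (tokenizer.hasMoreTokens()) {
            System.out.println(tokenizer.nextToken()); // prints: mary, had, a, little, lamb
        }
    }
}

In the Word2Vec and ParagraphVectors examples, the factory is not used token by token; it is passed to the model builder via .tokenizerFactory(t), and the model creates tokenizers internally while iterating over sentences.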

Example 1: testWord2VecPlot

import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory; // import the required package/class
@Test
public void testWord2VecPlot() throws Exception {
    File inputFile = new ClassPathResource("/big/raw_sentences.txt").getFile();
    SentenceIterator iter = new BasicLineIterator(inputFile.getAbsolutePath());

    TokenizerFactory t = new DefaultTokenizerFactory();
    t.setTokenPreProcessor(new CommonPreprocessor());

    Word2Vec vec = new Word2Vec.Builder().minWordFrequency(5).iterations(2).batchSize(1000).learningRate(0.025)
                    .layerSize(100).seed(42).sampling(0).negativeSample(0).windowSize(5)
                    .modelUtils(new BasicModelUtils<VocabWord>()).useAdaGrad(false).iterate(iter).workers(10)
                    .tokenizerFactory(t).build();

    vec.fit();

    //        UiConnectionInfo connectionInfo = UiServer.getInstance().getConnectionInfo();
    //        vec.getLookupTable().plotVocab(100, connectionInfo);

    Thread.sleep(10000000000L); // keep the JVM alive so the (commented-out) UI plot can be inspected manually
    fail("Not implemented");
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 23, Source: ManualTests.java

Example 2: main

import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory; // import the required package/class
public static void main(String[] args) throws Exception {

        // Path to the text file
        String filePath = "c:/raw_sentences.txt";

        log.info("Load & Vectorize Sentences....");
        // Strip white space before and after for each line
        SentenceIterator iter = UimaSentenceIterator.createWithPath(filePath);
        // Split on white spaces in the line to get words
        TokenizerFactory t = new DefaultTokenizerFactory();
        t.setTokenPreProcessor(new CommonPreprocessor());

        InMemoryLookupCache cache = new InMemoryLookupCache();
        WeightLookupTable table = new InMemoryLookupTable.Builder()
                .vectorLength(100)
                .useAdaGrad(false)
                .cache(cache)
                .lr(0.025f).build();

        log.info("Building model....");
        Word2Vec vec = new Word2Vec.Builder()
                .minWordFrequency(5).iterations(1)
                .layerSize(100).lookupTable(table)
                .stopWords(new ArrayList<String>())
                .vocabCache(cache).seed(42)
                .windowSize(5).iterate(iter).tokenizerFactory(t).build();

        log.info("Fitting Word2Vec model....");
        vec.fit();

        log.info("Writing word vectors to text file....");
        // Write the word vectors to a text file
        WordVectorSerializer.writeWordVectors(vec, "word2vec.txt");

        log.info("Closest Words:");
        Collection<String> lst = vec.wordsNearest("man", 5); 
        System.out.println(lst);
        double cosSim = vec.similarity("cruise", "voyage");
        System.out.println(cosSim);
    }
 
Developer: PacktPublishing, Project: Java-Data-Science-Cookbook, Lines: 41, Source: Word2VecRawTextExample.java

Example 3: SentimentExampleIterator

import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory; // import the required package/class
/**
 * @param dataDirectory the directory of the IMDB review data set
 * @param wordVectors WordVectors object
 * @param batchSize Size of each minibatch for training
 * @param truncateLength If reviews exceed this length (in tokens), they will be truncated to this length
 * @param train If true: return the training data. If false: return the testing data.
 */
public SentimentExampleIterator(String dataDirectory, WordVectors wordVectors, int batchSize, int truncateLength, boolean train) throws IOException {
	this.batchSize = batchSize;
	this.vectorSize = wordVectors.getWordVector(wordVectors.vocab().wordAtIndex(0)).length;


	File p = new File(FilenameUtils.concat(dataDirectory, "aclImdb/" + (train ? "train" : "test") + "/pos/") + "/");
	File n = new File(FilenameUtils.concat(dataDirectory, "aclImdb/" + (train ? "train" : "test") + "/neg/") + "/");
	positiveFiles = p.listFiles();
	negativeFiles = n.listFiles();

	this.wordVectors = wordVectors;
	this.truncateLength = truncateLength;

	tokenizerFactory = new DefaultTokenizerFactory();
	tokenizerFactory.setTokenPreProcessor(new CommonPreprocessor());
}
 
Developer: IsaacChanghau, Project: NeuralNetworksLite, Lines: 24, Source: SentimentExampleIterator.java

Example 4: SentimentRecurrentIterator

import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory; // import the required package/class
/**
 * @param dataDirectory the directory of the IMDB review data set
 * @param wordVectors WordVectors object
 * @param batchSize Size of each minibatch for training
 * @param truncateLength If reviews exceed this length (in tokens), they will be truncated to this length
 * @param train If true: return the training data. If false: return the testing data.
 */
public SentimentRecurrentIterator(String dataDirectory, WordVectors wordVectors, int batchSize, int truncateLength, boolean train) throws IOException {
  this.batchSize = batchSize;
  this.vectorSize = wordVectors.lookupTable().layerSize();

  File p = new File(FilenameUtils.concat(dataDirectory, (train ? "train" : "test") + "/positive/") + "/");
  File n = new File(FilenameUtils.concat(dataDirectory, (train ? "train" : "test") + "/negative/") + "/");
  positiveFiles = p.listFiles();
  negativeFiles = n.listFiles();
  numPositives  = positiveFiles.length;
  numNegatives  = negativeFiles.length;
  numTotals     = numPositives+numNegatives;
  rnd           = new Random(1);

  this.wordVectors = wordVectors;
  this.truncateLength = truncateLength;

  tokenizerFactory = new DefaultTokenizerFactory();
  tokenizerFactory.setTokenPreProcessor(new LowCasePreProcessor());
}
 
Developer: keigohtr, Project: sentiment-rnn, Lines: 27, Source: SentimentRecurrentIterator.java

Example 5: makeParagraphVectors

import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory; // import the required package/class
void makeParagraphVectors() throws Exception {

        // build an iterator for our dataset
        File dir = TYPE_LEARNING_DIR;
        dir.mkdirs();
        iterator = new FileLabelAwareIterator.Builder()
                           .addSourceFolder(new File(dir, "corpus"))
                           .build();

        tokenizerFactory = new DefaultTokenizerFactory();
        tokenizerFactory.setTokenPreProcessor(new CommonPreprocessor());

        // ParagraphVectors training configuration
        paragraphVectors = new ParagraphVectors.Builder()
                                   .learningRate(0.025)
                                   .minLearningRate(0.001)
                                   .batchSize(1000)
                                   .epochs(5)
                                   .iterate(iterator)
                                   .trainWordVectors(true)
                                   .tokenizerFactory(tokenizerFactory)
                                   .build();

        // Start model training
        paragraphVectors.fit();
    }
 
Developer: sillelien, Project: dollar, Lines: 27, Source: ParagraphVectorsClassifierExample.java

Example 6: testWord2VecMultiEpoch

import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory; // import the required package/class
@Test
public void testWord2VecMultiEpoch() throws Exception {
    SentenceIterator iter = new BasicLineIterator(inputFile.getAbsolutePath());

    TokenizerFactory t = new DefaultTokenizerFactory();
    t.setTokenPreProcessor(new CommonPreprocessor());

    Word2Vec vec = new Word2Vec.Builder().minWordFrequency(1).iterations(5).learningRate(0.025).layerSize(150)
                    .seed(42).sampling(0).negativeSample(0).useHierarchicSoftmax(true).windowSize(5).epochs(3)
                    .modelUtils(new BasicModelUtils<VocabWord>()).useAdaGrad(false).iterate(iter).workers(8)
                    .tokenizerFactory(t).elementsLearningAlgorithm(new CBOW<VocabWord>()).build();

    vec.fit();

    Collection<String> lst = vec.wordsNearest("day", 10);
    log.info(Arrays.toString(lst.toArray()));

    //   assertEquals(10, lst.size());

    double sim = vec.similarity("day", "night");
    log.info("Day/night similarity: " + sim);

    assertTrue(lst.contains("week"));
    assertTrue(lst.contains("night"));
    assertTrue(lst.contains("year"));
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 27, Source: Word2VecTests.java

Example 7: testWord2VecGoogleModelUptraining

import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory; // import the required package/class
@Ignore
@Test
public void testWord2VecGoogleModelUptraining() throws Exception {
    long time1 = System.currentTimeMillis();
    Word2Vec vec = WordVectorSerializer.readWord2VecModel(
                    new File("C:\\Users\\raver\\Downloads\\GoogleNews-vectors-negative300.bin.gz"), false);
    long time2 = System.currentTimeMillis();
    log.info("Model loaded in {} msec", time2 - time1);
    SentenceIterator iter = new BasicLineIterator(inputFile.getAbsolutePath());
    // Split on white spaces in the line to get words
    TokenizerFactory t = new DefaultTokenizerFactory();
    t.setTokenPreProcessor(new CommonPreprocessor());

    vec.setTokenizerFactory(t);
    vec.setSentenceIterator(iter);
    vec.getConfiguration().setUseHierarchicSoftmax(false);
    vec.getConfiguration().setNegative(5.0);
    vec.setElementsLearningAlgorithm(new CBOW<VocabWord>());

    vec.fit();
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 22, Source: Word2VecTests.java

Example 8: testGoogleModelForInference

import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory; // import the required package/class
@Ignore
@Test
public void testGoogleModelForInference() throws Exception {
    WordVectors googleVectors = WordVectorSerializer.loadGoogleModelNonNormalized(
                    new File("/ext/GoogleNews-vectors-negative300.bin.gz"), true, false);

    TokenizerFactory t = new DefaultTokenizerFactory();
    t.setTokenPreProcessor(new CommonPreprocessor());

    ParagraphVectors pv =
                    new ParagraphVectors.Builder().tokenizerFactory(t).iterations(10).useHierarchicSoftmax(false)
                                    .trainWordVectors(false).iterations(10).useExistingWordVectors(googleVectors)
                                    .negativeSample(10).sequenceLearningAlgorithm(new DM<VocabWord>()).build();

    INDArray vec1 = pv.inferVector("This text is pretty awesome");
    INDArray vec2 = pv.inferVector("Fantastic process of crazy things happening inside just for history purposes");

    log.info("vec1/vec2: {}", Transforms.cosineSim(vec1, vec2));
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 20, Source: ParagraphVectorsTest.java

Example 9: testDefaultTokenizer1

import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory; // import the required package/class
@Test
public void testDefaultTokenizer1() throws Exception {
    String toTokenize = "Mary had a little lamb.";
    TokenizerFactory t = new DefaultTokenizerFactory();
    Tokenizer tokenizer = t.create(toTokenize);
    Tokenizer tokenizer2 = t.create(new ByteArrayInputStream(toTokenize.getBytes()));
    int position = 1;
    while (tokenizer2.hasMoreTokens()) {
        String tok1 = tokenizer.nextToken();
        String tok2 = tokenizer2.nextToken();
        log.info("Position: [" + position + "], token1: '" + tok1 + "', token 2: '" + tok2 + "'");
        position++;
        assertEquals(tok1, tok2);
    }


    ClassPathResource resource = new ClassPathResource("reuters/5250");
    String str = FileUtils.readFileToString(resource.getFile());
    int stringCount = t.create(str).countTokens();
    int stringCount2 = t.create(resource.getInputStream()).countTokens();
    assertTrue(Math.abs(stringCount - stringCount2) < 2);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 23, Source: DefaulTokenizerTests.java

Example 10: testDefaultTokenizer2

import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory; // import the required package/class
@Test
public void testDefaultTokenizer2() throws Exception {
    String toTokenize = "Mary had a little lamb.";
    TokenizerFactory t = new DefaultTokenizerFactory();
    Tokenizer tokenizer = t.create(toTokenize);
    Tokenizer tokenizer2 = t.create(new ByteArrayInputStream(toTokenize.getBytes()));
    tokenizer2.countTokens();
    while (tokenizer.hasMoreTokens()) {
        String tok1 = tokenizer.nextToken();
        String tok2 = tokenizer2.nextToken();
        assertEquals(tok1, tok2);
    }


    System.out.println("-----------------------------------------------");

    ClassPathResource resource = new ClassPathResource("reuters/5250");
    String str = FileUtils.readFileToString(resource.getFile());
    int stringCount = t.create(str).countTokens();
    int stringCount2 = t.create(resource.getInputStream()).countTokens();

    log.info("String tok: [" + stringCount + "], Stream tok: [" + stringCount2 + "], Difference: "
                    + Math.abs(stringCount - stringCount2));

    assertTrue(Math.abs(stringCount - stringCount2) < 2);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 27, Source: DefaulTokenizerTests.java

Example 11: testDefaultStreamTokenizer

import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory; // import the required package/class
@Test
public void testDefaultStreamTokenizer() throws Exception {
    String toTokenize = "Mary had a little lamb.";
    TokenizerFactory t = new DefaultTokenizerFactory();
    Tokenizer tokenizer2 = t.create(new ByteArrayInputStream(toTokenize.getBytes()));

    assertEquals(5, tokenizer2.countTokens());

    int cnt = 0;
    while (tokenizer2.hasMoreTokens()) {
        String tok1 = tokenizer2.nextToken();
        log.info(tok1);
        cnt++;
    }

    assertEquals(5, cnt);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 18, Source: DefaulTokenizerTests.java

Example 12: loadParagraphVectors

import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory; // import the required package/class
private static ParagraphVectors loadParagraphVectors() {
    ParagraphVectors paragraphVectors = null;
    try {
        paragraphVectors = WordVectorSerializer.readParagraphVectors((PARAGRAPHVECTORMODELPATH));
        TokenizerFactory t = new DefaultTokenizerFactory();
        t.setTokenPreProcessor(new CommonPreprocessor());
        paragraphVectors.setTokenizerFactory(t);
        paragraphVectors.getConfiguration().setIterations(10); // note: iterations set to 10 here to improve inference quality; lower it to speed up inference

    } catch (IOException e) {
        e.printStackTrace();
    }
    return paragraphVectors;
}
 
Developer: gizemsogancioglu, Project: biosses, Lines: 15, Source: SentenceVectorsBasedSimilarity.java

Example 13: trainParagraghVecModel

import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory; // import the required package/class
public void trainParagraghVecModel(String locationToSave) throws FileNotFoundException {
    ClassPathResource resource = new ClassPathResource("/paragraphVectors/paragraphVectorTraining.txt");
    File file = resource.getFile();
    SentenceIterator iter = new BasicLineIterator(file);
    AbstractCache<VocabWord> cache = new AbstractCache<VocabWord>();
    TokenizerFactory t = new DefaultTokenizerFactory();
    t.setTokenPreProcessor(new CommonPreprocessor());
    /*
         If you don't have a LabelAwareIterator handy, you can use a synchronized labels generator;
         it will be used to label each document/sequence/line with its own label.
         But if you have a LabelAwareIterator ready, you can provide it instead, for your in-house labels.
    */
    LabelsSource source = new LabelsSource("DOC_");

    ParagraphVectors vec = new ParagraphVectors.Builder()
            .minWordFrequency(1)
            .iterations(100)
            .epochs(1)
            .layerSize(50)
            .learningRate(0.02)
            .labelsSource(source)
            .windowSize(5)
            .iterate(iter)
            .trainWordVectors(true)
            .vocabCache(cache)
            .tokenizerFactory(t)
            .sampling(0)
            .build();

    vec.fit();

    WordVectorSerializer.writeParagraphVectors(vec, locationToSave);
}
 
Developer: gizemsogancioglu, Project: biosses, Lines: 34, Source: SentenceVectorsBasedSimilarity.java

Example 14: main

import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory; // import the required package/class
public static void main(String[] args) throws Exception {
    ClassPathResource r = new ClassPathResource("/train.tsv");
    if(r.exists()) {
        InputStream is = r.getInputStream();
        BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(new File("train.tsv")));
        IOUtils.copy(is, bos);
        bos.flush();
        bos.close();
        is.close();
    }
    SentenceIterator docIter = new CollectionSentenceIterator(new SentenceToPhraseMapper(new File("train.tsv")).sentences());
    TokenizerFactory factory = new DefaultTokenizerFactory();
    Word2Vec  vec = new Word2Vec.Builder().iterate(docIter)
            .tokenizerFactory(factory).batchSize(10000)
            .learningRate(2.5e-2).sampling(5).learningRateDecayWords(10000)
            .iterations(3).minWordFrequency(1)
            .layerSize(300).windowSize(5).build();
    vec.fit();
    FileUtils.writeLines(new File("vocab.csv"),vec.getCache().words());


    String word = "amusing";
    String otherWord = "turd";
    System.out.println("Words nearest  " + word +  " " + vec.wordsNearest(word,10));
    System.out.println("Words nearest  " + otherWord +  " " + vec.wordsNearest(otherWord,10));



    Tsne t = new Tsne.Builder()
            .setMaxIter(100).stopLyingIteration(20).build();


    vec.getCache().plotVocab(t);

}
 
Developer: ihuerga, Project: deeplearning4j-nlp-examples, Lines: 36, Source: Visualization.java

Example 15: testWord2VecAdaGrad

import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory; // import the required package/class
@Test
public void testWord2VecAdaGrad() throws Exception {
    SentenceIterator iter = new BasicLineIterator(inputFile.getAbsolutePath());

    TokenizerFactory t = new DefaultTokenizerFactory();
    t.setTokenPreProcessor(new CommonPreprocessor());

    Word2Vec vec = new Word2Vec.Builder().minWordFrequency(5).iterations(5).learningRate(0.025).layerSize(100)
                    .seed(42).batchSize(13500).sampling(0).negativeSample(0)
                    //.epochs(10)
                    .windowSize(5).modelUtils(new BasicModelUtils<VocabWord>()).useAdaGrad(false)
                    .useHierarchicSoftmax(true).iterate(iter).workers(4).tokenizerFactory(t).build();

    vec.fit();

    Collection<String> lst = vec.wordsNearest("day", 10);
    log.info(Arrays.toString(lst.toArray()));

    //   assertEquals(10, lst.size());

    double sim = vec.similarity("day", "night");
    log.info("Day/night similarity: " + sim);

    assertTrue(lst.contains("week"));
    assertTrue(lst.contains("night"));
    assertTrue(lst.contains("year"));
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 28, Source: Word2VecTests.java


Note: The org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any use or redistribution must follow the corresponding project's license. Do not reproduce without permission.