当前位置: 首页>>代码示例>>Java>>正文


Java BloomFilterFactory.isGeneralBloomEnabled方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.util.BloomFilterFactory.isGeneralBloomEnabled方法的典型用法代码示例。如果您正苦于以下问题:Java BloomFilterFactory.isGeneralBloomEnabled方法的具体用法?Java BloomFilterFactory.isGeneralBloomEnabled怎么用?Java BloomFilterFactory.isGeneralBloomEnabled使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.util.BloomFilterFactory的用法示例。


在下文中一共展示了BloomFilterFactory.isGeneralBloomEnabled方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: StoreFile

import org.apache.hadoop.hbase.util.BloomFilterFactory; //导入方法依赖的package包/类
/**
 * Constructor, loads a reader and it's indices, etc. May allocate a
 * substantial amount of ram depending on the underlying files (10-20MB?).
 *
 * @param fs  The current file system to use.
 * @param fileInfo  The store file information.
 * @param conf  The current configuration.
 * @param cacheConf  The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified
 *          by column family configuration. This may or may not be the same
 *          as the Bloom filter type actually present in the HFile, because
 *          column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs, final StoreFileInfo fileInfo, final Configuration conf,
                 final CacheConfig cacheConf, final BloomType cfBloomType) throws IOException {
    this.fs = fs;
    this.fileInfo = fileInfo;
    this.cacheConf = cacheConf;

    if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
        this.cfBloomType = cfBloomType;
    } else {
        LOG.info("Ignoring bloom filter check for file " + this.getPath() + ": " +
                "cfBloomType=" + cfBloomType + " (disabled in config)");
        this.cfBloomType = BloomType.NONE;
    }

    // cache the modification time stamp of this store file
    this.modificationTimeStamp = fileInfo.getModificationTime();
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:33,代码来源:StoreFile.java

示例2: StoreFile

import org.apache.hadoop.hbase.util.BloomFilterFactory; //导入方法依赖的package包/类
/**
 * Constructor, loads a reader and it's indices, etc. May allocate a
 * substantial amount of ram depending on the underlying files (10-20MB?).
 *
 * @param fs  The current file system to use.
 * @param fileInfo  The store file information.
 * @param conf  The current configuration.
 * @param cacheConf  The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified
 *          by column family configuration. This may or may not be the same
 *          as the Bloom filter type actually present in the HFile, because
 *          column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs, final StoreFileInfo fileInfo, final Configuration conf,
    final CacheConfig cacheConf,  final BloomType cfBloomType) throws IOException {
  this.fs = fs;
  this.fileInfo = fileInfo;
  this.cacheConf = cacheConf;

  // Downgrade to NONE when bloom filters are switched off in the configuration.
  if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    LOG.info("Ignoring bloom filter check for file " + this.getPath() + ": cfBloomType="
        + cfBloomType + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  } else {
    this.cfBloomType = cfBloomType;
  }

  // Remember the on-disk modification timestamp of this store file.
  this.modificationTimeStamp = fileInfo.getModificationTime();
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:33,代码来源:StoreFile.java

示例3: HStoreFile

import org.apache.hadoop.hbase.util.BloomFilterFactory; //导入方法依赖的package包/类
/**
 * Constructor, loads a reader and it's indices, etc. May allocate a substantial amount of ram
 * depending on the underlying files (10-20MB?).
 * @param fs fs The current file system to use.
 * @param fileInfo The store file information.
 * @param conf The current configuration.
 * @param cacheConf The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified by column
 *          family configuration. This may or may not be the same as the Bloom filter type
 *          actually present in the HFile, because column family configuration might change. If
 *          this is {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @param primaryReplica true if this is a store file for primary replica, otherwise false.
 */
public HStoreFile(FileSystem fs, StoreFileInfo fileInfo, Configuration conf, CacheConfig cacheConf,
    BloomType cfBloomType, boolean primaryReplica) {
  this.fs = fs;
  this.fileInfo = fileInfo;
  this.cacheConf = cacheConf;
  this.noReadahead =
      conf.getBoolean(STORE_FILE_READER_NO_READAHEAD, DEFAULT_STORE_FILE_READER_NO_READAHEAD);
  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + this.getPath() + ": " + "cfBloomType=" +
        cfBloomType + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }
  this.primaryReplica = primaryReplica;
}
 
开发者ID:apache,项目名称:hbase,代码行数:30,代码来源:HStoreFile.java

示例4: StoreFile

import org.apache.hadoop.hbase.util.BloomFilterFactory; //导入方法依赖的package包/类
/**
 * Constructor, loads a reader and it's indices, etc. May allocate a substantial amount of ram
 * depending on the underlying files (10-20MB?).
 *
 * @param fs          The current file system to use.
 * @param fileInfo    The store file information.
 * @param conf        The current configuration.
 * @param cacheConf   The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified by column family
 *                    configuration. This may or may not be the same as the Bloom filter type actually
 *                    present in the HFile, because column family configuration might change. If this is
 *                    {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs, final StoreFileInfo fileInfo, final Configuration conf,
    final CacheConfig cacheConf, final BloomType cfBloomType) throws IOException {
  this.fs = fs;
  this.fileInfo = fileInfo;
  this.cacheConf = cacheConf;

  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + this.getPath() + ": " + "cfBloomType="
        + cfBloomType + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:29,代码来源:StoreFile.java

示例5: build

import org.apache.hadoop.hbase.util.BloomFilterFactory; //导入方法依赖的package包/类
/**
 * Create a store file writer. Client is responsible for closing file when done. If metadata,
 * add BEFORE closing using {@link Writer#appendMetadata}.
 *
 * @return a new {@link Writer} over {@code filePath} (generated under {@code dir} when no
 *         explicit path was supplied)
 * @throws IllegalArgumentException if neither or both of the parent directory and the file
 *         path were set on this builder
 * @throws IOException if the parent directory cannot be created, or opening the writer fails
 */
public Writer build() throws IOException {
  // Exactly one of dir / filePath must be supplied.
  if ((dir == null ? 0 : 1) + (filePath == null ? 0 : 1) != 1) {
    throw new IllegalArgumentException("Either specify parent directory " + "or file path");
  }

  if (dir == null) {
    dir = filePath.getParent();
  }

  // FileSystem.mkdirs reports failure through its boolean return value; don't ignore it,
  // or a missing directory only surfaces later as a confusing write error.
  if (!fs.exists(dir) && !fs.mkdirs(dir)) {
    throw new IOException("Failed to create directory " + dir);
  }

  if (filePath == null) {
    filePath = getUniqueFile(fs, dir);
    // A generated file carries no bloom filter when blooms are globally disabled.
    if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
      bloomType = BloomType.NONE;
    }
  }

  if (comparator == null) {
    comparator = KeyValue.COMPARATOR;
  }
  return new Writer(fs, filePath, conf, cacheConf, comparator, bloomType, maxKeyCount,
      favoredNodes, fileContext);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:31,代码来源:StoreFile.java

示例6: StoreFile

import org.apache.hadoop.hbase.util.BloomFilterFactory; //导入方法依赖的package包/类
/**
 * Constructor, loads a reader and it's indices, etc. May allocate a substantial amount of ram
 * depending on the underlying files (10-20MB?).
 * @param fs The current file system to use.
 * @param p The path of the file.
 * @param blockcache <code>true</code> if the block cache is enabled.
 * @param conf The current configuration.
 * @param cacheConf The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified by column family
 *          configuration. This may or may not be the same as the Bloom filter type actually
 *          present in the HFile, because column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @param dataBlockEncoder data block encoding algorithm.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs, final Path p, final Configuration conf,
    final CacheConfig cacheConf, final BloomType cfBloomType,
    final HFileDataBlockEncoder dataBlockEncoder) throws IOException {
  this.fs = fs;
  this.path = p;
  this.cacheConf = cacheConf;
  this.dataBlockEncoder =
      dataBlockEncoder == null ? NoOpDataBlockEncoder.INSTANCE : dataBlockEncoder;
  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + path + ": " + "cfBloomType=" + cfBloomType
        + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }

  // cache the modification time stamp of this store file
  FileStatus[] stats = FSUtils.listStatus(fs, p, null);
  if (stats != null && stats.length == 1) {
    this.modificationTimeStamp = stats[0].getModificationTime();
  } else {
    this.modificationTimeStamp = 0;
  }
  SchemaMetrics.configureGlobally(conf);
  initPossibleIndexesAndReference(fs, p, conf);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:42,代码来源:StoreFile.java

示例7: build

import org.apache.hadoop.hbase.util.BloomFilterFactory; //导入方法依赖的package包/类
/**
 * Create a store file writer. Client is responsible for closing file when done. If metadata,
 * add BEFORE closing using {@link Writer#appendMetadata}.
 */
public Writer build() throws IOException {
  // Caller must supply exactly one of: a parent directory, or an explicit file path.
  if ((dir == null) == (filePath == null)) {
    throw new IllegalArgumentException("Either specify parent directory or file path");
  }

  if (dir == null) {
    dir = filePath.getParent();
  }
  if (!fs.exists(dir)) {
    fs.mkdirs(dir);
  }

  if (filePath == null) {
    filePath = getUniqueFile(fs, dir);
    // Generated files never carry a bloom filter when blooms are disabled globally.
    if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
      bloomType = BloomType.NONE;
    }
  }

  // Default comparator when the builder was given none.
  comparator = (comparator != null) ? comparator : KeyValue.COMPARATOR;

  return new Writer(fs, filePath, conf, cacheConf, comparator, bloomType, maxKeyCount,
      favoredNodes, fileContext);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:34,代码来源:StoreFile.java

示例8: build

import org.apache.hadoop.hbase.util.BloomFilterFactory; //导入方法依赖的package包/类
/**
 * Create a store file writer. Client is responsible for closing file when
 * done. If metadata, add BEFORE closing using
 * {@link Writer#appendMetadata}.
 */
public Writer build() throws IOException {
    // Exactly one of parent directory / file path may be set.
    final boolean haveDir = dir != null;
    final boolean havePath = filePath != null;
    if (haveDir == havePath) {
        throw new IllegalArgumentException("Either specify parent directory or file path");
    }

    if (!haveDir) {
        dir = filePath.getParent();
    }
    if (!fs.exists(dir)) {
        fs.mkdirs(dir);
    }

    if (!havePath) {
        filePath = getUniqueFile(fs, dir);
        // A generated file gets no bloom filter when blooms are disabled globally.
        if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
            bloomType = BloomType.NONE;
        }
    }

    if (comparator == null) {
        comparator = KeyValue.COMPARATOR;
    }
    return new Writer(fs, filePath,
            conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext);
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:33,代码来源:StoreFile.java

示例9: build

import org.apache.hadoop.hbase.util.BloomFilterFactory; //导入方法依赖的package包/类
/**
 * Create a store file writer. Client is responsible for closing file when
 * done. If metadata, add BEFORE closing using
 * {@link Writer#appendMetadata}.
 */
public Writer build() throws IOException {
  // Reject both-null and both-set: exactly one location hint is required.
  if ((dir == null) == (filePath == null)) {
    throw new IllegalArgumentException("Either specify parent directory or file path");
  }

  // Derive the directory from the explicit path when only the path was given.
  dir = (dir != null) ? dir : filePath.getParent();

  if (!fs.exists(dir)) {
    fs.mkdirs(dir);
  }

  if (filePath == null) {
    filePath = getUniqueFile(fs, dir);
    // Freshly generated files never get a bloom filter when blooms are off globally.
    if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
      bloomType = BloomType.NONE;
    }
  }

  comparator = (comparator != null) ? comparator : KeyValue.COMPARATOR;

  return new Writer(fs, filePath,
      conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext);
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:33,代码来源:StoreFile.java

示例10: build

import org.apache.hadoop.hbase.util.BloomFilterFactory; //导入方法依赖的package包/类
/**
 * Create a store file writer. Client is responsible for closing file when
 * done. If metadata, add BEFORE closing using
 * {@link Writer#appendMetadata}.
 */
public Writer build() throws IOException {
  // Exactly one of parent directory / file path may be set.
  final boolean haveDir = dir != null;
  final boolean havePath = filePath != null;
  if (haveDir == havePath) {
    throw new IllegalArgumentException("Either specify parent directory or file path");
  }

  if (!haveDir) {
    dir = filePath.getParent();
  }
  if (!fs.exists(dir)) {
    HBaseFileSystem.makeDirOnFileSystem(fs, dir);
  }

  if (!havePath) {
    filePath = getUniqueFile(fs, dir);
    // Generated files carry no bloom filter when blooms are disabled globally.
    if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
      bloomType = BloomType.NONE;
    }
  }

  // Fill in defaults the builder was not given.
  compressAlgo = (compressAlgo != null) ? compressAlgo : HFile.DEFAULT_COMPRESSION_ALGORITHM;
  comparator = (comparator != null) ? comparator : KeyValue.COMPARATOR;

  return new Writer(fs, filePath, blockSize, compressAlgo, dataBlockEncoder,
      conf, cacheConf, comparator, bloomType, maxKeyCount, checksumType,
      bytesPerChecksum, includeMVCCReadpoint);
}
 
开发者ID:wanhao,项目名称:IRIndex,代码行数:37,代码来源:StoreFile.java

示例11: build

import org.apache.hadoop.hbase.util.BloomFilterFactory; //导入方法依赖的package包/类
/**
 * Create a store file writer. Client is responsible for closing file when
 * done. If metadata, add BEFORE closing using
 * {@link StoreFileWriter#appendMetadata}.
 */
public StoreFileWriter build() throws IOException {
  // Exactly one of parent directory / file path may be set.
  final boolean haveDir = dir != null;
  final boolean havePath = filePath != null;
  if (haveDir == havePath) {
    throw new IllegalArgumentException("Either specify parent directory or file path");
  }

  if (!haveDir) {
    dir = filePath.getParent();
  }

  if (!fs.exists(dir)) {
    // Handle permission for non-HDFS filesystem properly
    // See HBASE-17710
    HRegionFileSystem.mkdirs(fs, conf, dir);
  }

  // Resolve the block storage policy for the temp path: the column-family
  // setting wins, otherwise fall back to the store-level key.
  String policyName = this.conf.get(ColumnFamilyDescriptorBuilder.STORAGE_POLICY);
  if (policyName == null) {
    policyName = this.conf.get(HStore.BLOCK_STORAGE_POLICY_KEY);
  }
  FSUtils.setStoragePolicy(this.fs, dir, policyName);

  if (!havePath) {
    filePath = getUniqueFile(fs, dir);
    // A generated file gets no bloom filter when blooms are disabled globally.
    if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
      bloomType = BloomType.NONE;
    }
  }

  if (comparator == null) {
    comparator = CellComparator.getInstance();
  }
  return new StoreFileWriter(fs, filePath,
      conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext,
      shouldDropCacheBehind);
}
 
开发者ID:apache,项目名称:hbase,代码行数:43,代码来源:StoreFileWriter.java

示例12: build

import org.apache.hadoop.hbase.util.BloomFilterFactory; //导入方法依赖的package包/类
/**
 * Create a store file writer. Client is responsible for closing file when
 * done. If metadata, add BEFORE closing using
 * {@link Writer#appendMetadata}.
 */
public Writer build() throws IOException {
  // Require exactly one of: parent directory, explicit file path.
  if ((dir == null) == (filePath == null)) {
    throw new IllegalArgumentException("Either specify parent directory or file path");
  }

  dir = (dir != null) ? dir : filePath.getParent();

  if (!fs.exists(dir)) {
    fs.mkdirs(dir);
  }

  if (filePath == null) {
    filePath = getUniqueFile(fs, dir);
    // Generated files never carry a bloom filter when blooms are off globally.
    if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
      bloomType = BloomType.NONE;
    }
  }

  comparator = (comparator != null) ? comparator : KeyValue.COMPARATOR;

  return new Writer(fs, filePath,
      conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext);
}
 
开发者ID:shenli-uiuc,项目名称:PyroDB,代码行数:33,代码来源:StoreFile.java

示例13: build

import org.apache.hadoop.hbase.util.BloomFilterFactory; //导入方法依赖的package包/类
/**
 * Create a store file writer. Client is responsible for closing file when
 * done. If metadata, add BEFORE closing using
 * {@link Writer#appendMetadata}.
 */
public Writer build() throws IOException {
  // Exactly one of parent directory / file path may be set.
  final boolean haveDir = dir != null;
  final boolean havePath = filePath != null;
  if (haveDir == havePath) {
    throw new IllegalArgumentException("Either specify parent directory or file path");
  }

  if (!haveDir) {
    dir = filePath.getParent();
  }
  if (!fs.exists(dir)) {
    fs.mkdirs(dir);
  }

  if (!havePath) {
    filePath = getUniqueFile(fs, dir);
    // A generated file gets no bloom filter when blooms are disabled globally.
    if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
      bloomType = BloomType.NONE;
    }
  }

  // Fill in defaults the builder was not given.
  compressAlgo = (compressAlgo != null) ? compressAlgo : HFile.DEFAULT_COMPRESSION_ALGORITHM;
  comparator = (comparator != null) ? comparator : KeyValue.COMPARATOR;

  return new Writer(fs, filePath, blockSize, compressAlgo, dataBlockEncoder,
      conf, cacheConf, comparator, bloomType, maxKeyCount, checksumType,
      bytesPerChecksum, includeMVCCReadpoint, favoredNodes);
}
 
开发者ID:cloud-software-foundation,项目名称:c5,代码行数:37,代码来源:StoreFile.java

示例14: build

import org.apache.hadoop.hbase.util.BloomFilterFactory; //导入方法依赖的package包/类
/**
 * Create a store file writer. Client is responsible for closing file when
 * done. If metadata, add BEFORE closing using
 * {@link Writer#appendMetadata}.
 */
public Writer build() throws IOException {
  // Require exactly one of: parent directory, explicit file path.
  if ((dir == null) == (filePath == null)) {
    throw new IllegalArgumentException("Either specify parent directory or file path");
  }

  // Derive the directory from the explicit path when only the path was given.
  dir = (dir != null) ? dir : filePath.getParent();

  if (!fs.exists(dir)) {
    fs.mkdirs(dir);
  }

  if (filePath == null) {
    filePath = getUniqueFile(fs, dir);
    // Generated files never carry a bloom filter when blooms are off globally.
    if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
      bloomType = BloomType.NONE;
    }
  }

  // Fill in defaults the builder was not given.
  compressAlgo = (compressAlgo != null) ? compressAlgo : HFile.DEFAULT_COMPRESSION_ALGORITHM;
  comparator = (comparator != null) ? comparator : KeyValue.COMPARATOR;

  return new Writer(fs, filePath, blockSize, compressAlgo, dataBlockEncoder,
      conf, cacheConf, comparator, bloomType, maxKeyCount, checksumType,
      bytesPerChecksum);
}
 
开发者ID:daidong,项目名称:DominoHBase,代码行数:37,代码来源:StoreFile.java

示例15: StoreFile

import org.apache.hadoop.hbase.util.BloomFilterFactory; //导入方法依赖的package包/类
/**
 * Constructor, loads a reader and it's indices, etc. May allocate a
 * substantial amount of ram depending on the underlying files (10-20MB?).
 * <p>
 * Besides the usual store-file setup, this variant also probes for a
 * companion index file for the file itself and, when the file is a split
 * reference, for the referred-to parent file.
 *
 * @param fs  The current file system to use.
 * @param p  The path of the file.
 * @param conf  The current configuration.
 * @param cacheConf  The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified
 *          by column family configuration. This may or may not be the same
 *          as the Bloom filter type actually present in the HFile, because
 *          column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @param dataBlockEncoder data block encoding algorithm; {@code null}
 *          selects the no-op encoder.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs,
          final Path p,
          final Configuration conf,
          final CacheConfig cacheConf,
          final BloomType cfBloomType,
          final HFileDataBlockEncoder dataBlockEncoder)
    throws IOException {
  this.fs = fs;
  this.path = p;
  
  // Probe for a companion index file derived from this file's path
  // (presumably produced by the IRIndex machinery — confirm with getIndexPathFromPath).
  Path tmpPath=getIndexPathFromPath(path);
  if(fs.exists(tmpPath)){
    this.indexPath=tmpPath;
    this.hasIndex=true;
  }
  
  this.cacheConf = cacheConf;
  // Fall back to the no-op encoder when none was supplied.
  this.dataBlockEncoder =
      dataBlockEncoder == null ? NoOpDataBlockEncoder.INSTANCE
          : dataBlockEncoder;

  //TODO add link index file support
  if (HFileLink.isHFileLink(p)) {
    // The path is an HFileLink pointing at a file stored elsewhere.
    this.link = new HFileLink(conf, p);
    LOG.debug("Store file " + p + " is a link");
  } else if (isReference(p)) {
    // Half-file reference (created by a region split): resolve the parent
    // file, which may itself be a link.
    this.reference = Reference.read(fs, p);
    this.referencePath = getReferredToFile(this.path);
    if (HFileLink.isHFileLink(this.referencePath)) {
      this.link = new HFileLink(conf, this.referencePath);
    }
    // Also probe for an index companion of the referred-to file.
    tmpPath=getIndexPathFromPath(referencePath);
    if(fs.exists(tmpPath)){
      this.indexReferencePath=tmpPath;
      this.hasIndex=true;
    }
    LOG.debug("Store file " + p + " is a " + reference.getFileRegion() +
      " reference to " + this.referencePath);
  } else if (!isHFile(p)) {
    throw new IOException("path=" + path + " doesn't look like a valid StoreFile");
  }

  // Honor the column-family bloom type only while blooms are enabled globally.
  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + path + ": " +
        "cfBloomType=" + cfBloomType + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }

  // cache the modification time stamp of this store file; a missing or
  // ambiguous directory listing deliberately yields 0 rather than an error
  FileStatus[] stats = FSUtils.listStatus(fs, p, null);
  if (stats != null && stats.length == 1) {
    this.modificationTimeStamp = stats[0].getModificationTime();
  } else {
    this.modificationTimeStamp = 0;
  }

  SchemaMetrics.configureGlobally(conf);
}
 
开发者ID:wanhao,项目名称:IRIndex,代码行数:79,代码来源:StoreFile.java


注:本文中的org.apache.hadoop.hbase.util.BloomFilterFactory.isGeneralBloomEnabled方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。