

Java Descriptor.fromFilename Method Code Examples

This article collects typical usage examples of the Java method org.apache.cassandra.io.sstable.Descriptor.fromFilename. If you are wondering what Descriptor.fromFilename does, how to call it, or what real uses of it look like, the curated method code examples below may help. You can also browse further usage examples of its declaring class, org.apache.cassandra.io.sstable.Descriptor.


The sections below present 15 code examples of the Descriptor.fromFilename method, sorted by popularity by default.
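Before the examples, a minimal self-contained sketch of how fromFilename is typically called may help orient the reader. It assumes the single-argument Cassandra 2.x-style overload used by several of the examples below; the sstable path is hypothetical:

import org.apache.cassandra.io.sstable.Component;
import org.apache.cassandra.io.sstable.Descriptor;

public class DescriptorDemo
{
    public static void main(String[] args)
    {
        // Hypothetical path; any *-Data.db file written by Cassandra works.
        String path = "/var/lib/cassandra/data/ks1/users/ks1-users-ka-5-Data.db";

        // fromFilename parses keyspace, table, generation and format version
        // out of the file name itself -- no disk I/O is performed.
        Descriptor desc = Descriptor.fromFilename(path);

        System.out.println("keyspace:   " + desc.ksname);
        System.out.println("table:      " + desc.cfname);
        System.out.println("generation: " + desc.generation);
        System.out.println("version:    " + desc.version);

        // A descriptor can also derive the paths of the sstable's sibling components.
        System.out.println("index file: " + desc.filenameFor(Component.PRIMARY_INDEX));
    }
}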

Example 1: cleanup

import org.apache.cassandra.io.sstable.Descriptor; // import the package/class the method depends on
public void cleanup()
{
    List<String> files = readLockfile(lockfile);
    for (String file : files)
    {
        try
        {
            Descriptor desc = Descriptor.fromFilename(file, true);
            SSTable.delete(desc, SSTable.componentsFor(desc));
        }
        catch (Exception e)
        {
            JVMStabilityInspector.inspectThrowable(e);
            logger.warn("failed to delete a potentially stale sstable {}", file);
        }
    }
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 18, Source: StreamLockfile.java

Example 2: main

import org.apache.cassandra.io.sstable.Descriptor; // import the package/class the method depends on
/**
 * @param args a list of sstables whose metadata we're interested in
 */
public static void main(String[] args) throws IOException
{
    PrintStream out = System.out;
    if (args.length == 0)
    {
        out.println("Usage: sstablemetadata <sstable filenames>");
        System.exit(1);
    }

    for (String fname : args)
    {
        Descriptor descriptor = Descriptor.fromFilename(fname);
        SSTableMetadata metadata = SSTableMetadata.serializer.deserialize(descriptor).left;

        out.printf("SSTable: %s%n", descriptor);
        out.printf("Partitioner: %s%n", metadata.partitioner);
        out.printf("Maximum timestamp: %s%n", metadata.maxTimestamp);
        out.printf("SSTable max local deletion time: %s%n", metadata.maxLocalDeletionTime);
        out.printf("Compression ratio: %s%n", metadata.compressionRatio);
        out.printf("Estimated droppable tombstones: %s%n", metadata.getEstimatedDroppableTombstoneRatio((int) (System.currentTimeMillis() / 1000)));
        out.printf("SSTable Level: %d%n", metadata.sstableLevel);
        out.println(metadata.replayPosition);
        printHistograms(metadata, out);
    }
}
 
Developer: pgaref, Project: ACaZoo, Lines: 29, Source: SSTableMetadataViewer.java

Example 3: create

import org.apache.cassandra.io.sstable.Descriptor; // import the package/class the method depends on
/**
 * Create metadata for a given compressed file, including the uncompressed data length, chunk size,
 * and the list of chunk offsets of the compressed data.
 *
 * This is an expensive operation! Don't create more than one for each
 * sstable.
 *
 * @param dataFilePath Path to the compressed file
 * @param dataFileSize Size of the data file in bytes
 * @param configuration Configuration used to resolve the file's Descriptor
 *
 * @return metadata about the given compressed file.
 */
//static Map<String, CompressionMetadata> fileCompressionMetadata = new ConcurrentHashMap<>();
//private static Object lock = new Object();
public static CompressionMetadata create(String dataFilePath, long dataFileSize, Configuration configuration)
{
    Descriptor desc = Descriptor.fromFilename(dataFilePath, configuration);
    logger.info("Creating CompressionMetadata for file [" + dataFilePath + "]");
    return new CompressionMetadata(desc.filenameFor(Component.COMPRESSION_INFO),
            dataFileSize,
            desc.version.compressedChecksumType(),
            configuration);
}
 
Developer: Netflix, Project: sstable-adaptor, Lines: 23, Source: CompressionMetadata.java
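The javadoc warns that create is expensive, and the commented-out fields in the example hint at a cache the author considered. A minimal memoization sketch along those lines (the wrapper class and method names are hypothetical; CompressionMetadata and Configuration are the same types used by create above):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical cache; CompressionMetadata and Configuration come from the
// surrounding sstable-adaptor code, as in the example above.
final class CompressionMetadataCache
{
    private static final Map<String, CompressionMetadata> CACHE = new ConcurrentHashMap<>();

    static CompressionMetadata get(String dataFilePath, long dataFileSize, Configuration conf)
    {
        // computeIfAbsent invokes the expensive create() at most once per path.
        return CACHE.computeIfAbsent(dataFilePath,
                                     path -> CompressionMetadata.create(path, dataFileSize, conf));
    }
}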

Example 4: initialization

import org.apache.cassandra.io.sstable.Descriptor; // import the package/class the method depends on
/**
 * Initialization with provided keyspace, table, and key names.
 * @param keyspaceName keyspace name
 * @param tableName table name
 * @param partitionKeyNames list of partition key names
 * @param clusteringKeyNames list of clustering key names
 * @throws IOException when the file location is not valid
 */
private void initialization(final String keyspaceName,
                            final String tableName,
                            final List<String> partitionKeyNames,
                            final List<String> clusteringKeyNames) throws IOException {
    descriptor = Descriptor.fromFilename(HadoopFileUtils.normalizeFileName(fileLocation), conf);
    cfMetaData = SSTableUtils.metadataFromSSTable(descriptor, keyspaceName, tableName, partitionKeyNames, clusteringKeyNames);
    initHelper();
}
 
Developer: Netflix, Project: sstable-adaptor, Lines: 17, Source: SSTableSingleReader.java

Example 5: main

import org.apache.cassandra.io.sstable.Descriptor; // import the package/class the method depends on
public static void main(String[] args) {
    String inputSSTableFullPathFileName = new File(getInputFile(args)).getAbsolutePath();
    LOGGER.info("Input file name: " + inputSSTableFullPathFileName);
    Configuration conf = new Configuration();
    final Descriptor inputSSTableDescriptor = Descriptor.fromFilename(inputSSTableFullPathFileName, conf);
    SSTableWriter writer = null;

    try {
        final CFMetaData inputCFMetaData =
                SSTableUtils.metaDataFromSSTable(inputSSTableFullPathFileName, conf);
        final CFMetaData outputCFMetaData = SSTableUtils.createNewCFMetaData(inputSSTableDescriptor, inputCFMetaData);

        final SSTableReader inputSStable = SSTableReader.openNoValidation(inputSSTableDescriptor, inputCFMetaData);
        writer = SSTableUtils.createSSTableWriter(inputSSTableDescriptor, outputCFMetaData, inputSStable);

        final ISSTableScanner currentScanner = inputSStable.getScanner();

        while (currentScanner.hasNext()) {
            final UnfilteredRowIterator row = currentScanner.next();
            writer.append(row);
        }
        writer.finish(false);
    } catch (IOException e) {
        e.printStackTrace(System.err);
    } finally {
        FileUtils.closeQuietly(writer);
    }

}
 
Developer: Netflix, Project: sstable-adaptor, Lines: 30, Source: StandaloneRunner.java

Example 6: metaDataFromSSTable

import org.apache.cassandra.io.sstable.Descriptor; // import the package/class the method depends on
/**
 * Construct table schema from a file.
 *
 * @param filePath           SSTable file location
 * @param partitionKeyNames  list of partition key names
 * @param clusteringKeyNames list of clustering key names
 * @return Restored CFMetaData
 * @throws IOException when Stats.db cannot be read
 */
public static CFMetaData metaDataFromSSTable(final String filePath,
                                             final List<String> partitionKeyNames,
                                             final List<String> clusteringKeyNames,
                                             final Configuration configuration) throws IOException {
    final Descriptor descriptor = Descriptor.fromFilename(filePath, configuration);

    return metadataFromSSTable(descriptor, null, null,
            partitionKeyNames, clusteringKeyNames);
}
 
Developer: Netflix, Project: sstable-adaptor, Lines: 19, Source: SSTableUtils.java

Example 7: testWritingToLocalSSTable

import org.apache.cassandra.io.sstable.Descriptor; // import the package/class the method depends on
/******************************************************
 * 1. Input data
 * This is the schema definition of the table that is used to generate the non-compressed input data:
 * <p>
 * CREATE TABLE bills_nc (
 * user text,
 * balance int static,
 * expense_id int,
 * amount int,
 * name text,
 * PRIMARY KEY (user, expense_id))
 * WITH compression = { 'sstable_compression' : '' };
 * <p>
 * <p>
 * 2. Compressing and producing output data
 * Running this main will convert the data file under src/test/resources/data/bills_compress/mc-6-big-Data.db
 * into the corresponding compressed file, using LZ4 compression, along with auxiliary
 * files (CompressionInfo.db, Index.db, etc).
 * <p>
 * The output is under the cassandra/compresseddata/cassandra/data directory
 * <p>
 * 3. Verification
 * Since this is C* 3.0 format, you should use sstabledump command to dump out the json contents
 * for both intput data and output data to verify.
 * %>sstabledump cassandra/data/mc-1-big-Data.db
 * and
 * %>sstabledump cassandra/compresseddata/cassandra/data/mc-1-big-Data.db
 *******************************************************/
@Test
public void testWritingToLocalSSTable() {
    final String inputSSTableFullPathFileName = CASS3_DATA_DIR + "keyspace1/bills_compress/mc-6-big-Data.db";
    LOGGER.info("Input file name: " + inputSSTableFullPathFileName);

    final Descriptor inputSSTableDescriptor = Descriptor.fromFilename(inputSSTableFullPathFileName,
                                                            TestBaseSSTableFunSuite.HADOOP_CONF);
    SSTableWriter writer = null;

    try {
        SSTableSingleReader reader = new SSTableSingleReader(inputSSTableFullPathFileName,
                                                             "casspactor",
                                                             "bills_nc",
                                                             TestBaseSSTableFunSuite.HADOOP_CONF);
        final CFMetaData inputCFMetaData = reader.getCfMetaData();
        final ISSTableScanner currentScanner = reader.getSSTableScanner();
        final SSTableReader inputSStable = reader.getSstableReader();

        //Create writer
        final CFMetaData outputCFMetaData = SSTableUtils.createNewCFMetaData(inputSSTableDescriptor, inputCFMetaData);
        writer = SSTableUtils.createSSTableWriter(inputSSTableDescriptor, outputCFMetaData, inputSStable);

        while (currentScanner.hasNext()) {
            final UnfilteredRowIterator row = currentScanner.next();
            writer.append(row);
        }
        writer.finish(false);
    } catch (IOException e) {
        e.printStackTrace(System.err);
    } finally {
        FileUtils.closeQuietly(writer);
    }
}
 
Developer: Netflix, Project: sstable-adaptor, Lines: 62, Source: TestSSTableDataWriter.java

Example 8: Compact

import org.apache.cassandra.io.sstable.Descriptor; // import the package/class the method depends on
public Compact(String... args) {
    for(String path : args) {
        try {
            for (File f : CassandraUtils.sstablesFromPath(path)) {
                if (metadata == null) {
                    metadata = CassandraUtils.tableFromSSTable(f);
                }
                Descriptor d = Descriptor.fromFilename(f.getAbsolutePath());
                sstables.add(SSTableReader.openNoValidation(d, metadata));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
 
Developer: tolbertam, Project: sstable-tools, Lines: 16, Source: Compact.java

Example 9: createWriter

import org.apache.cassandra.io.sstable.Descriptor; // import the package/class the method depends on
protected SSTableWriter createWriter(ColumnFamilyStore cfs, long totalSize, long repairedAt) throws IOException
{
    Directories.DataDirectory localDir = cfs.directories.getWriteableLocation(totalSize);
    if (localDir == null)
        throw new IOException("Insufficient disk space to store " + totalSize + " bytes");
    desc = Descriptor.fromFilename(cfs.getTempSSTablePath(cfs.directories.getLocationForDisk(localDir)));

    return new SSTableWriter(desc.filenameFor(Component.DATA), estimatedKeys, repairedAt);
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 10, Source: StreamReader.java

Example 10: createWriter

import org.apache.cassandra.io.sstable.Descriptor; // import the package/class the method depends on
protected SSTableWriter createWriter(ColumnFamilyStore cfs, long totalSize) throws IOException
{
    Directories.DataDirectory localDir = cfs.directories.getWriteableLocation();
    if (localDir == null)
        throw new IOException("Insufficient disk space to store " + totalSize + " bytes");
    desc = Descriptor.fromFilename(cfs.getTempSSTablePath(cfs.directories.getLocationForDisk(localDir)));

    return new SSTableWriter(desc.filenameFor(Component.DATA), estimatedKeys);
}
 
Developer: pgaref, Project: ACaZoo, Lines: 10, Source: StreamReader.java

Example 11: createWriter

import org.apache.cassandra.io.sstable.Descriptor; // import the package/class the method depends on
protected SSTableMultiWriter createWriter(ColumnFamilyStore cfs, long totalSize, long repairedAt, SSTableFormat.Type format) throws IOException
{
    Directories.DataDirectory localDir = cfs.getDirectories().getWriteableLocation(totalSize);
    if (localDir == null)
        throw new IOException("Insufficient disk space to store " + totalSize + " bytes");
    desc = Descriptor.fromFilename(cfs.getSSTablePath(cfs.getDirectories().getLocationForDisk(localDir), format));

    return cfs.createSSTableMultiWriter(desc, estimatedKeys, repairedAt, sstableLevel, getHeader(cfs.metadata), session.getTransaction(cfId));
}
 
Developer: scylladb, Project: scylla-tools-java, Lines: 10, Source: StreamReader.java

Example 12: testCreatingSSTableWithTnx

import org.apache.cassandra.io.sstable.Descriptor; // import the package/class the method depends on
/**
 * Test creating sstable files using SSTableTxnWriter.
 * @throws IOException
 */
@Test
public void testCreatingSSTableWithTnx() throws IOException {
    final String inputSSTableFullPathFileName = CASS3_DATA_DIR + "keyspace1/bills_compress/mc-6-big-Data.db";

    final Descriptor descriptor = Descriptor.fromFilename(inputSSTableFullPathFileName,
                                                          TestBaseSSTableFunSuite.HADOOP_CONF);
    final CFMetaData inputCFMetaData =
            SSTableUtils.metaDataFromSSTable(inputSSTableFullPathFileName,
                                                    "casspactor",
                                                    "bills_compress",
                                                    Collections.<String>emptyList(),
                                                    Collections.<String>emptyList(),
                                                    TestBaseSSTableFunSuite.HADOOP_CONF);

    final CFMetaData outputCFMetaData = SSTableUtils.createNewCFMetaData(descriptor, inputCFMetaData);
    final SerializationHeader header = new SerializationHeader(true, outputCFMetaData,
        inputCFMetaData.partitionColumns(),
        EncodingStats.NO_STATS);

    final Descriptor outDescriptor = new Descriptor(
        SSTableFormat.Type.BIG.info.getLatestVersion().getVersion(),
        "/tmp",
        "casspactor",
        "bills_compress",
        9,
        SSTableFormat.Type.BIG, TestBaseSSTableFunSuite.HADOOP_CONF);

    final SSTableTxnWriter writer = SSTableTxnWriter.create(outputCFMetaData,
                                                            outDescriptor,
                                                            4,
                                                            -1,
                                                            1,
                                                            header);

    final ColumnDefinition staticCollDef =
        ColumnDefinition.staticDef(inputCFMetaData, ByteBuffer.wrap("balance".getBytes()), Int32Type.instance);
    final ColumnDefinition regCollDef1 =
        ColumnDefinition.regularDef(inputCFMetaData, ByteBuffer.wrap("amount".getBytes()), Int32Type.instance);
    final ColumnDefinition regCollDef2 =
        ColumnDefinition.regularDef(inputCFMetaData, ByteBuffer.wrap("name".getBytes()), UTF8Type.instance);

    final DecoratedKey key = Murmur3Partitioner.instance.decorateKey(ByteBuffer.wrap("user1".getBytes()));
    final long now = System.currentTimeMillis();

    final Row.Builder builder = BTreeRow.sortedBuilder();
    builder.newRow(Clustering.STATIC_CLUSTERING);
    builder.addCell(BufferCell.live(staticCollDef, now, Int32Type.instance.decompose(123)));
    final PartitionUpdate partitionUpdate = PartitionUpdate.singleRowUpdate(inputCFMetaData,
        key, builder.build());
    final Row.Builder builder2 = BTreeRow.sortedBuilder();
    final Clustering clustering2 = new BufferClustering(Int32Type.instance.decompose(10000));
    builder2.newRow(clustering2);
    builder2.addCell(BufferCell.live(regCollDef1, now, Int32Type.instance.decompose(5)));
    builder2.addCell(BufferCell.live(regCollDef2, now, UTF8Type.instance.decompose("minh1")));

    final PartitionUpdate partitionUpdate2 = PartitionUpdate.singleRowUpdate(inputCFMetaData,
        key, builder2.build());

    final List<PartitionUpdate> partitionUpdates = new ArrayList<PartitionUpdate>() {
        private static final long serialVersionUID = 1L;
        {
            add(partitionUpdate);
            add(partitionUpdate2);
        }
    };

    final PartitionUpdate mergedUpdate = PartitionUpdate.merge(partitionUpdates);

    writer.append(mergedUpdate.unfilteredIterator());
    writer.finish(false);
}
 
Developer: Netflix, Project: sstable-adaptor, Lines: 76, Source: TestSSTableDataWriter.java

Example 13: tableFromSSTable

import org.apache.cassandra.io.sstable.Descriptor; // import the package/class the method depends on
@SuppressWarnings("unchecked")
public static CFMetaData tableFromSSTable(File path) throws IOException, NoSuchFieldException, IllegalAccessException {
    Preconditions.checkNotNull(path);
    Descriptor desc = Descriptor.fromFilename(path.getAbsolutePath());

    EnumSet<MetadataType> types = EnumSet.of(MetadataType.VALIDATION, MetadataType.STATS, MetadataType.HEADER);
    Map<MetadataType, MetadataComponent> sstableMetadata = desc.getMetadataSerializer().deserialize(desc, types);
    ValidationMetadata validationMetadata = (ValidationMetadata) sstableMetadata.get(MetadataType.VALIDATION);
    Preconditions.checkNotNull(validationMetadata, "Validation Metadata could not be resolved, accompanying Statistics.db file must be missing.");
    SerializationHeader.Component header = (SerializationHeader.Component) sstableMetadata.get(MetadataType.HEADER);
    Preconditions.checkNotNull(header, "Metadata could not be resolved, accompanying Statistics.db file must be missing.");

    IPartitioner partitioner = validationMetadata.partitioner.endsWith("LocalPartitioner") ?
            new LocalPartitioner(header.getKeyType()) :
            FBUtilities.newPartitioner(validationMetadata.partitioner);

    DatabaseDescriptor.setPartitionerUnsafe(partitioner);
    AbstractType<?> keyType = header.getKeyType();
    List<AbstractType<?>> clusteringTypes = header.getClusteringTypes();
    Map<ByteBuffer, AbstractType<?>> staticColumns = header.getStaticColumns();
    Map<ByteBuffer, AbstractType<?>> regularColumns = header.getRegularColumns();
    int id = cfCounter.incrementAndGet();
    CFMetaData.Builder builder = CFMetaData.Builder.create("turtle" + id, "turtles" + id);
    staticColumns.entrySet().stream()
            .forEach(entry ->
                    builder.addStaticColumn(UTF8Type.instance.getString(entry.getKey()), entry.getValue()));
    regularColumns.entrySet().stream()
            .forEach(entry ->
                    builder.addRegularColumn(UTF8Type.instance.getString(entry.getKey()), entry.getValue()));
    List<AbstractType<?>> partTypes = keyType.getComponents();
    for(int i = 0; i < partTypes.size(); i++) {
        builder.addPartitionKey("partition" + (i > 0 ? i : ""), partTypes.get(i));
    }
    for (int i = 0; i < clusteringTypes.size(); i++) {
        builder.addClusteringColumn("row" + (i > 0 ? i : ""), clusteringTypes.get(i));
    }
    CFMetaData metaData = builder.build();
    Schema.instance.setKeyspaceMetadata(KeyspaceMetadata.create(metaData.ksName, KeyspaceParams.local(),
            Tables.of(metaData), Views.none(), getTypes(), Functions.none()));
    return metaData;
}
 
Developer: tolbertam, Project: sstable-tools, Lines: 42, Source: CassandraUtils.java
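A hedged usage sketch of the helper above (the sstable path is hypothetical, and the CassandraUtils import is omitted since its package depends on the sstable-tools version). Note that tableFromSSTable mutates global partitioner and schema state, so it is best called once per JVM:

import java.io.File;
import org.apache.cassandra.config.CFMetaData;

public class SchemaRecoveryDemo
{
    public static void main(String[] args) throws Exception
    {
        // Hypothetical sstable; only the accompanying Statistics.db is actually read.
        File data = new File("/tmp/sstables/mc-1-big-Data.db");
        CFMetaData metaData = CassandraUtils.tableFromSSTable(data);

        // Partition key and clustering column names are synthesized
        // ("partition", "row", ...) because they are not stored in the sstable
        // metadata; static and regular column names are recovered from the header.
        System.out.println(metaData);
    }
}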

Example 14: main

import org.apache.cassandra.io.sstable.Descriptor; // import the package/class the method depends on
/**
 * @param args a list of sstables whose metadata we're interested in
 */
public static void main(String[] args) throws IOException
{
    PrintStream out = System.out;
    if (args.length == 0)
    {
        out.println("Usage: sstablemetadata <sstable filenames>");
        System.exit(1);
    }

    for (String fname : args)
    {
        if (new File(fname).exists())
        {
            Descriptor descriptor = Descriptor.fromFilename(fname);
            Map<MetadataType, MetadataComponent> metadata = descriptor.getMetadataSerializer().deserialize(descriptor, EnumSet.allOf(MetadataType.class));
            ValidationMetadata validation = (ValidationMetadata) metadata.get(MetadataType.VALIDATION);
            StatsMetadata stats = (StatsMetadata) metadata.get(MetadataType.STATS);
            CompactionMetadata compaction = (CompactionMetadata) metadata.get(MetadataType.COMPACTION);

            out.printf("SSTable: %s%n", descriptor);
            if (validation != null)
            {
                out.printf("Partitioner: %s%n", validation.partitioner);
                out.printf("Bloom Filter FP chance: %f%n", validation.bloomFilterFPChance);
            }
            if (stats != null)
            {
                out.printf("Minimum timestamp: %s%n", stats.minTimestamp);
                out.printf("Maximum timestamp: %s%n", stats.maxTimestamp);
                out.printf("SSTable max local deletion time: %s%n", stats.maxLocalDeletionTime);
                out.printf("Compression ratio: %s%n", stats.compressionRatio);
                out.printf("Estimated droppable tombstones: %s%n", stats.getEstimatedDroppableTombstoneRatio((int) (System.currentTimeMillis() / 1000)));
                out.printf("SSTable Level: %d%n", stats.sstableLevel);
                out.printf("Repaired at: %d%n", stats.repairedAt);
                out.println(stats.replayPosition);
                out.println("Estimated tombstone drop times:%n");
                for (Map.Entry<Double, Long> entry : stats.estimatedTombstoneDropTime.getAsMap().entrySet())
                {
                    out.printf("%-10s:%10s%n",entry.getKey().intValue(), entry.getValue());
                }
                printHistograms(stats, out);
            }
            if (compaction != null)
            {
                out.printf("Ancestors: %s%n", compaction.ancestors.toString());
                out.printf("Estimated cardinality: %s%n", compaction.cardinalityEstimator.cardinality());

            }
        }
        else
        {
            out.println("No such file: " + fname);
        }
    }
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 59, Source: SSTableMetadataViewer.java

Example 15: main

import org.apache.cassandra.io.sstable.Descriptor; // import the package/class the method depends on
/**
 * @param args a list of sstables whose metadata we are changing
 */
public static void main(final String[] args) throws IOException
{
    PrintStream out = System.out;
    if (args.length == 0)
    {
        out.println("This command should be run with Cassandra stopped!");
        out.println("Usage: sstablerepairedset [--is-repaired | --is-unrepaired] [-f <sstable-list> | <sstables>]");
        System.exit(1);
    }

    if (args.length < 3 || !args[0].equals("--really-set") || (!args[1].equals("--is-repaired") && !args[1].equals("--is-unrepaired")))
    {
        out.println("This command should be run with Cassandra stopped, otherwise you will get very strange behavior");
        out.println("Verify that Cassandra is not running and then execute the command like this:");
        out.println("Usage: sstablerepairedset --really-set [--is-repaired | --is-unrepaired] [-f <sstable-list> | <sstables>]");
        System.exit(1);
    }

    boolean setIsRepaired = args[1].equals("--is-repaired");

    List<String> fileNames;
    if (args[2].equals("-f"))
    {
        fileNames = Files.readAllLines(Paths.get(args[3]), Charset.defaultCharset());
    }
    else
    {
        fileNames = Arrays.asList(args).subList(2, args.length);
    }

    for (String fname: fileNames)
    {
        Descriptor descriptor = Descriptor.fromFilename(fname);
        if (descriptor.version.hasRepairedAt)
        {
            if (setIsRepaired)
            {
                FileTime f = Files.getLastModifiedTime(new File(descriptor.filenameFor(Component.DATA)).toPath());
                descriptor.getMetadataSerializer().mutateRepairedAt(descriptor, f.toMillis());
            }
            else
            {
                descriptor.getMetadataSerializer().mutateRepairedAt(descriptor, ActiveRepairService.UNREPAIRED_SSTABLE);
            }
        }
        else
        {
            System.err.println("SSTable " + fname + " does not have repaired property, run upgradesstables");
        }
    }
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 55, Source: SSTableRepairedAtSetter.java

