

Java GenotypeBuilder Class Code Examples

This article collects typical usage examples of the Java class htsjdk.variant.variantcontext.GenotypeBuilder. If you are wondering what the GenotypeBuilder class is for, how to use it, or what real-world usage looks like, the curated examples below should help.


The GenotypeBuilder class belongs to the htsjdk.variant.variantcontext package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
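
Before turning to the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the two GenotypeBuilder patterns that recur throughout them: building a Genotype from scratch, and copy-modifying an existing one via the GenotypeBuilder(Genotype) constructor. The sample name, alleles, and annotation values are purely illustrative.

import htsjdk.variant.variantcontext.Allele;
import htsjdk.variant.variantcontext.Genotype;
import htsjdk.variant.variantcontext.GenotypeBuilder;

import java.util.Arrays;
import java.util.List;

public class GenotypeBuilderDemo {
    public static void main(String[] args) {
        // hypothetical ref/alt alleles of a biallelic SNP (A>T)
        final Allele ref = Allele.create("A", true);
        final Allele alt = Allele.create("T", false);
        final List<Allele> hetAlleles = Arrays.asList(ref, alt);

        // Pattern 1: build a genotype from scratch for a made-up sample
        final Genotype het = new GenotypeBuilder("NA12878", hetAlleles)
                .DP(30)                      // total read depth
                .GQ(99)                      // genotype quality
                .AD(new int[]{14, 16})       // per-allele depths (ref, alt)
                .PL(new int[]{120, 0, 150})  // phred-scaled genotype likelihoods
                .make();

        // Pattern 2: copy an existing genotype and strip/replace fields,
        // the pattern most of the examples below rely on
        final Genotype stripped = new GenotypeBuilder(het)
                .noAD().noPL().noAttributes()  // keep only GT, DP and GQ
                .make();

        System.out.println(het);
        System.out.println(stripped);
    }
}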

Example 1: fixAD

import htsjdk.variant.variantcontext.GenotypeBuilder; // import the required package/class
/**
 * Fix the AD for the given Genotype
 *
 * @param genotype              the original Genotype
 * @param alleleIndexesToUse    a bitset describing whether or not to keep a given index
 * @param nAllelesToUse         how many alleles we are keeping
 * @return a non-null Genotype
 */
private static Genotype fixAD(final Genotype genotype, final boolean[] alleleIndexesToUse, final int nAllelesToUse) {
    // if it ain't broke don't fix it
    if ( !genotype.hasAD() )
        return genotype;

    final GenotypeBuilder builder = new GenotypeBuilder(genotype);

    final int[] oldAD = genotype.getAD();
    if ( oldAD.length != alleleIndexesToUse.length ) {
        builder.noAD();
    } else {
        final int[] newAD = new int[nAllelesToUse];
        int currentIndex = 0;
        for ( int i = 0; i < oldAD.length; i++ ) {
            if ( alleleIndexesToUse[i] )
                newAD[currentIndex++] = oldAD[i];
        }
        builder.AD(newAD);
    }
    return builder.make();
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 30, Source: GATKVariantContextUtils.java

Example 2: mergeGenotypes

import htsjdk.variant.variantcontext.GenotypeBuilder; // import the required package/class
private static void mergeGenotypes(GenotypesContext mergedGenotypes, VariantContext oneVC, AlleleMapper alleleMapping, boolean uniquifySamples) {
    //TODO: should we add a check for cases when the genotypeMergeOption is REQUIRE_UNIQUE
    for ( final Genotype g : oneVC.getGenotypes() ) {
        final String name = mergedSampleName(oneVC.getSource(), g.getSampleName(), uniquifySamples);
        if ( ! mergedGenotypes.containsSample(name) ) {
            // only add if the name is new
            Genotype newG = g;

            if ( uniquifySamples || alleleMapping.needsRemapping() ) {
                final List<Allele> alleles = alleleMapping.needsRemapping() ? alleleMapping.remap(g.getAlleles()) : g.getAlleles();
                newG = new GenotypeBuilder(g).name(name).alleles(alleles).make();
            }

            mergedGenotypes.add(newG);
        }
    }
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 18, Source: GATKVariantContextUtils.java

Example 3: pruneVariantContext

import htsjdk.variant.variantcontext.GenotypeBuilder; // import the required package/class
public static VariantContextBuilder pruneVariantContext(final VariantContextBuilder builder, Collection<String> keysToPreserve ) {
    final VariantContext vc = builder.make();
    if ( keysToPreserve == null ) keysToPreserve = Collections.emptyList();

    // VC info
    final Map<String, Object> attributes = subsetAttributes(vc.getCommonInfo(), keysToPreserve);

    // Genotypes
    final GenotypesContext genotypes = GenotypesContext.create(vc.getNSamples());
    for ( final Genotype g : vc.getGenotypes() ) {
        final GenotypeBuilder gb = new GenotypeBuilder(g);
        // remove AD, DP, PL, and all extended attributes, keeping just GT and GQ
        gb.noAD().noDP().noPL().noAttributes();
        genotypes.add(gb.make());
    }

    return builder.genotypes(genotypes).attributes(attributes);
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 19, Source: GATKVariantContextUtils.java

Example 4: purgeUnallowedGenotypeAttributes

import htsjdk.variant.variantcontext.GenotypeBuilder; // import the required package/class
public static VariantContext purgeUnallowedGenotypeAttributes(VariantContext vc, Set<String> allowedAttributes) {
    if ( allowedAttributes == null )
        return vc;

    final GenotypesContext newGenotypes = GenotypesContext.create(vc.getNSamples());
    for ( final Genotype genotype : vc.getGenotypes() ) {
        final Map<String, Object> attrs = new HashMap<>();
        for ( final Map.Entry<String, Object> attr : genotype.getExtendedAttributes().entrySet() ) {
            if ( allowedAttributes.contains(attr.getKey()) )
                attrs.put(attr.getKey(), attr.getValue());
        }
        newGenotypes.add(new GenotypeBuilder(genotype).attributes(attrs).make());
    }

    return new VariantContextBuilder(vc).genotypes(newGenotypes).make();
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 17, Source: GATKVariantContextUtils.java

Example 5: blockToVCF

import htsjdk.variant.variantcontext.GenotypeBuilder; // import the required package/class
/**
 * Convert a HomRefBlock into a VariantContext
 *
 * @param block the block to convert
 * @return a VariantContext representing the gVCF encoding for this block.
 * It will return {@code null} if input {@code block} is {@code null}, indicating that there
 * is no variant-context to be output into the VCF.
 */
private VariantContext blockToVCF(final HomRefBlock block) {
    if ( block == null ) return null;

    final VariantContextBuilder vcb = new VariantContextBuilder(block.getStartingVC());
    vcb.attributes(new HashMap<String, Object>(2)); // clear the attributes
    vcb.stop(block.getStop());
    vcb.attribute(VCFConstants.END_KEY, block.getStop());

    // create the single Genotype with GQ and DP annotations
    final GenotypeBuilder gb = new GenotypeBuilder(sampleName, GATKVariantContextUtils.homozygousAlleleList(block.getRef(),block.getPloidy()));
    gb.noAD().noPL().noAttributes(); // clear all attributes
    gb.GQ(block.getMedianGQ());
    gb.DP(block.getMedianDP());
    gb.attribute(MIN_DP_FORMAT_FIELD, block.getMinDP());
    gb.PL(block.getMinPLs());

    // This annotation is no longer standard
    //gb.attribute(MIN_GQ_FORMAT_FIELD, block.getMinGQ());

    return vcb.genotypes(gb.make()).make();
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 30, Source: GVCFWriter.java
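
The block-to-VCF conversion above leans on project-specific helpers such as GATKVariantContextUtils.homozygousAlleleList and MIN_DP_FORMAT_FIELD. As a rough standalone illustration (not part of the SparkSeq code), the sketch below builds a similar hom-ref gVCF block record with plain htsjdk calls only; the contig, coordinates, sample name, depth/quality values, and the literal "MIN_DP" key are assumptions made for the example.

import htsjdk.variant.variantcontext.Allele;
import htsjdk.variant.variantcontext.Genotype;
import htsjdk.variant.variantcontext.GenotypeBuilder;
import htsjdk.variant.variantcontext.VariantContext;
import htsjdk.variant.variantcontext.VariantContextBuilder;
import htsjdk.variant.vcf.VCFConstants;

import java.util.Arrays;
import java.util.Collections;

public class HomRefBlockSketch {
    /**
     * Build a gVCF-style hom-ref block record: a single genotype homozygous for
     * the reference allele, plus an END attribute marking where the block stops.
     */
    public static VariantContext homRefBlock() {
        final Allele ref = Allele.create("A", true);
        final Allele nonRef = Allele.create("<NON_REF>", false); // symbolic gVCF allele
        final int ploidy = 2;
        final int blockStart = 10100;
        final int blockStop = 10150;

        // equivalent of homozygousAlleleList(ref, ploidy) in Example 5
        final Genotype gt = new GenotypeBuilder("NA12878", Collections.nCopies(ploidy, ref))
                .GQ(60)
                .DP(25)
                .attribute("MIN_DP", 20)       // illustrative stand-in for MIN_DP_FORMAT_FIELD
                .PL(new int[]{0, 60, 900})
                .make();

        return new VariantContextBuilder("sketch", "20", blockStart, blockStop, Arrays.asList(ref, nonRef))
                .attribute(VCFConstants.END_KEY, blockStop)
                .genotypes(gt)
                .make();
    }
}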

Example 6: assignGenotype

import htsjdk.variant.variantcontext.GenotypeBuilder; // import the required package/class
/**
 * Assign genotypes (GTs) to the samples in the Variant Context greedily based on the PLs
 *
 * @param gb             the GenotypeBuilder on which to set the chosen alleles and GQ
 * @param newLikelihoods the PL array
 * @param allelesToUse   the list of alleles to choose from (corresponding to the PLs)
 * @param numChromosomes Number of chromosomes per pool
 */
private void assignGenotype(final GenotypeBuilder gb,
                            final double[] newLikelihoods,
                            final List<Allele> allelesToUse,
                            final int numChromosomes) {
    final int numNewAltAlleles = allelesToUse.size() - 1;

    // find the genotype with maximum likelihoods
    final int PLindex = numNewAltAlleles == 0 ? 0 : MathUtils.maxElementIndex(newLikelihoods);
    final GenotypeLikelihoodCalculator calculator = GenotypeLikelihoodCalculators.getInstance(numChromosomes, allelesToUse.size());
    final GenotypeAlleleCounts alleleCounts = calculator.genotypeAlleleCountsAt(PLindex);

    gb.alleles(alleleCounts.asAlleleList(allelesToUse));

    // remove PLs if necessary
    if (newLikelihoods.length > MAX_LENGTH_FOR_POOL_PL_LOGGING)
        gb.noPL();

    // TODO - deprecated so what is the appropriate method to call?
    if (numNewAltAlleles > 0)
        gb.log10PError(GenotypeLikelihoods.getGQLog10FromLikelihoods(PLindex, newLikelihoods));
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 29, Source: GeneralPloidyExactAFCalculator.java

Example 7: annotate

import htsjdk.variant.variantcontext.GenotypeBuilder; // import the required package/class
public void annotate(final RefMetaDataTracker tracker,
                     final AnnotatorCompatible walker,
                     final ReferenceContext ref,
                     final AlignmentContext stratifiedContext,
                     final VariantContext vc,
                     final Genotype g,
                     final GenotypeBuilder gb,
                     final PerReadAlleleLikelihoodMap alleleLikelihoodMap) {
    if ( g == null || !g.isCalled() || ( stratifiedContext == null && alleleLikelihoodMap == null) )
        return;

    if (alleleLikelihoodMap != null && !alleleLikelihoodMap.isEmpty())
        annotateWithLikelihoods(alleleLikelihoodMap, vc, gb);
    else if ( stratifiedContext != null && (vc.isSNP()))
        annotateWithPileup(stratifiedContext, vc, gb);
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 17, Source: DepthPerAlleleBySample.java

Example 8: annotateWithPileup

import htsjdk.variant.variantcontext.GenotypeBuilder; // import the required package/class
private void annotateWithPileup(final AlignmentContext stratifiedContext, final VariantContext vc, final GenotypeBuilder gb) {

    final HashMap<Byte, Integer> alleleCounts = new HashMap<>();
    for ( final Allele allele : vc.getAlleles() )
        alleleCounts.put(allele.getBases()[0], 0);

    final ReadBackedPileup pileup = stratifiedContext.getBasePileup();
    for ( final PileupElement p : pileup ) {
        if ( alleleCounts.containsKey(p.getBase()) )
            alleleCounts.put(p.getBase(), alleleCounts.get(p.getBase())+1);
    }

    // we need to add counts in the correct order
    final int[] counts = new int[alleleCounts.size()];
    counts[0] = alleleCounts.get(vc.getReference().getBases()[0]);
    for (int i = 0; i < vc.getAlternateAlleles().size(); i++)
        counts[i+1] = alleleCounts.get(vc.getAlternateAllele(i).getBases()[0]);

    gb.AD(counts);
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 21, Source: DepthPerAlleleBySample.java

Example 9: annotateWithLikelihoods

import htsjdk.variant.variantcontext.GenotypeBuilder; // import the required package/class
private void annotateWithLikelihoods(final PerReadAlleleLikelihoodMap perReadAlleleLikelihoodMap, final VariantContext vc, final GenotypeBuilder gb) {
    final Set<Allele> alleles = new HashSet<>(vc.getAlleles());

    // make sure that there's a meaningful relationship between the alleles in the perReadAlleleLikelihoodMap and our VariantContext
    if ( ! perReadAlleleLikelihoodMap.getAllelesSet().containsAll(alleles) )
        throw new IllegalStateException("VC alleles " + alleles + " not a strict subset of per read allele map alleles " + perReadAlleleLikelihoodMap.getAllelesSet());

    final HashMap<Allele, Integer> alleleCounts = new HashMap<>();
    for ( final Allele allele : vc.getAlleles() ) { alleleCounts.put(allele, 0); }

    for ( final Map.Entry<GATKSAMRecord,Map<Allele,Double>> el : perReadAlleleLikelihoodMap.getLikelihoodReadMap().entrySet()) {
        final MostLikelyAllele a = PerReadAlleleLikelihoodMap.getMostLikelyAllele(el.getValue(), alleles);
        if (! a.isInformative() ) continue; // read is non-informative
        final GATKSAMRecord read = el.getKey();
        final int prevCount = alleleCounts.get(a.getMostLikelyAllele());
        alleleCounts.put(a.getMostLikelyAllele(), prevCount + 1);
    }

    final int[] counts = new int[alleleCounts.size()];
    counts[0] = alleleCounts.get(vc.getReference());
    for (int i = 0; i < vc.getAlternateAlleles().size(); i++)
        counts[i+1] = alleleCounts.get( vc.getAlternateAllele(i) );

    gb.AD(counts);
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 26, Source: DepthPerAlleleBySample.java

Example 10: annotate

import htsjdk.variant.variantcontext.GenotypeBuilder; // import the required package/class
@Override
public void annotate(final RefMetaDataTracker tracker,
                     final AnnotatorCompatible walker,
                     final ReferenceContext ref,
                     final AlignmentContext stratifiedContext,
                     final VariantContext vc,
                     final Genotype g,
                     final GenotypeBuilder gb,
                     final PerReadAlleleLikelihoodMap alleleLikelihoodMap) {
    if ( ! isAppropriateInput(alleleLikelihoodMap, g) )
        return;

    final int[][] table = FisherStrand.getContingencyTable(Collections.singletonMap(g.getSampleName(), alleleLikelihoodMap), vc, 0);

    gb.attribute(STRAND_BIAS_BY_SAMPLE_KEY_NAME, FisherStrand.getContingencyArray(table));
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 17, Source: StrandBiasBySample.java

Example 11: annotateGenotypes

import htsjdk.variant.variantcontext.GenotypeBuilder; // import the required package/class
private GenotypesContext annotateGenotypes(final RefMetaDataTracker tracker,
                                           final ReferenceContext ref, final Map<String, AlignmentContext> stratifiedContexts,
                                           final VariantContext vc,
                                           final Map<String, PerReadAlleleLikelihoodMap> stratifiedPerReadAlleleLikelihoodMap) {
    if (requestedGenotypeAnnotations.isEmpty())
        return vc.getGenotypes();

    final GenotypesContext genotypes = GenotypesContext.create(vc.getNSamples());
    for (final Genotype genotype : vc.getGenotypes()) {
        AlignmentContext context = null;
        PerReadAlleleLikelihoodMap perReadAlleleLikelihoodMap = null;
        if (stratifiedContexts != null)
            context = stratifiedContexts.get(genotype.getSampleName());
        if (stratifiedPerReadAlleleLikelihoodMap != null)
            perReadAlleleLikelihoodMap = stratifiedPerReadAlleleLikelihoodMap.get(genotype.getSampleName());


        final GenotypeBuilder gb = new GenotypeBuilder(genotype);
        for (final GenotypeAnnotation annotation : requestedGenotypeAnnotations) {
            annotation.annotate(tracker, walker, ref, context, vc, genotype, gb, perReadAlleleLikelihoodMap);
        }
        genotypes.add(gb.make());
    }

    return genotypes;
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 27, Source: VariantAnnotatorEngine.java
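
Examples 7 through 12 all plug into this per-genotype annotation loop. As a rough standalone illustration of the same rebuild pattern using only htsjdk (no GATK annotation classes), the sketch below copies each genotype, attaches a hypothetical FORMAT attribute ("XX"), and rebuilds the VariantContext; actually writing such an attribute to a VCF would also require declaring it in the header.

import htsjdk.variant.variantcontext.Genotype;
import htsjdk.variant.variantcontext.GenotypeBuilder;
import htsjdk.variant.variantcontext.GenotypesContext;
import htsjdk.variant.variantcontext.VariantContext;
import htsjdk.variant.variantcontext.VariantContextBuilder;

public class GenotypeReannotationSketch {
    /**
     * Copy every genotype of the given VariantContext, attach an illustrative
     * per-sample FORMAT attribute ("XX"), and return a VariantContext carrying
     * the rebuilt genotypes -- the same loop as Example 11, minus the GATK
     * annotation machinery.
     */
    public static VariantContext addPerSampleAttribute(final VariantContext vc) {
        final GenotypesContext genotypes = GenotypesContext.create(vc.getNSamples());
        for (final Genotype g : vc.getGenotypes()) {
            genotypes.add(new GenotypeBuilder(g).attribute("XX", 42).make());
        }
        return new VariantContextBuilder(vc).genotypes(genotypes).make();
    }
}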

Example 12: annotate

import htsjdk.variant.variantcontext.GenotypeBuilder; // import the required package/class
public void annotate(final RefMetaDataTracker tracker,
                     final AnnotatorCompatible walker,
                     final ReferenceContext ref,
                     final AlignmentContext stratifiedContext,
                     final VariantContext vc,
                     final Genotype g,
                     final GenotypeBuilder gb,
                     final PerReadAlleleLikelihoodMap alleleLikelihoodMap){
    if ( g == null || !g.isCalled() || stratifiedContext == null )
        return;

    int mq0 = 0;
    final ReadBackedPileup pileup = stratifiedContext.getBasePileup();
    for (PileupElement p : pileup ) {
        if ( p.getMappingQual() == 0 )
            mq0++;
    }

    gb.attribute(getKeyNames().get(0), mq0);
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 21, Source: MappingQualityZeroBySample.java

Example 13: annotateWithPileup

import htsjdk.variant.variantcontext.GenotypeBuilder; // import the required package/class
private void annotateWithPileup(final Pileup pileup, final VariantContext vc, final GenotypeBuilder gb) {
    //System.out.println("annotated with pileup");
    HashMap<Byte, Integer> alleleCounts = new HashMap<Byte, Integer>();
    for ( Allele allele : vc.getAlleles() ) {
        alleleCounts.put(allele.getBases()[0], 0);
        //System.out.println("genotype allele:" + allele.getBases()[0]);
    }

    for ( PileupReadInfo p : pileup.getTotalPileup() ) {
        if ( alleleCounts.containsKey(p.getByteBase()) ) {
            //System.out.println("pileup allele:" + p.getBase());
            alleleCounts.put(p.getByteBase(), alleleCounts.get(p.getByteBase())+1);
        }
    }

    // we need to add counts in the correct order
    int[] counts = new int[alleleCounts.size()];
    counts[0] = alleleCounts.get(vc.getReference().getBases()[0]);
    for (int i = 0; i < vc.getAlternateAlleles().size(); i++)
        counts[i+1] = alleleCounts.get(vc.getAlternateAllele(i).getBases()[0]);

    gb.AD(counts);
}
 
Developer: BGI-flexlab, Project: SOAPgaea, Lines: 24, Source: DepthPerAlleleBySample.java

Example 14: annotate

import htsjdk.variant.variantcontext.GenotypeBuilder; // import the required package/class
public void annotate(final VariantDataTracker tracker,
                     final ChromosomeInformationShare ref,
                     final Pileup pileup,
                     final VariantContext vc,
                     final Genotype g,
                     final GenotypeBuilder gb,
                     final PerReadAlleleLikelihoodMap alleleLikelihoodMap){
    if ( g == null || !g.isCalled() || pileup == null )
        return;

    int mq0 = 0;
    for (PileupReadInfo p : pileup.getTotalPileup() ) {
        if ( p.getMappingQuality() == 0 )
            mq0++;
    }

    gb.attribute(getKeyNames().get(0), mq0);
}
 
Developer: BGI-flexlab, Project: SOAPgaea, Lines: 19, Source: MappingQualityZeroBySample.java

Example 15: annotate

import htsjdk.variant.variantcontext.GenotypeBuilder; // import the required package/class
public void annotate(final VariantDataTracker tracker,
                     final ChromosomeInformationShare ref,
                     final Pileup pileup,
                     final VariantContext vc,
                     final Genotype g,
                     final GenotypeBuilder gb,
                     final PerReadAlleleLikelihoodMap alleleLikelihoodMap){
    if ( pileup == null )
        return;

    Double ratio = annotateSNP(pileup, vc, g);
    if (ratio == null)
        return;

    gb.attribute(getKeyNames().get(0), Double.valueOf(String.format("%.2f", ratio.doubleValue())));
}
 
Developer: BGI-flexlab, Project: SOAPgaea, Lines: 17, Source: AlleleBalanceBySample.java


Note: The htsjdk.variant.variantcontext.GenotypeBuilder examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.