当前位置: 首页>>代码示例>>Java>>正文


Java Genotype类代码示例

本文整理汇总了Java中htsjdk.variant.variantcontext.Genotype的典型用法代码示例。如果您正苦于以下问题:Java Genotype类的具体用法?Java Genotype怎么用?Java Genotype使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


Genotype类属于htsjdk.variant.variantcontext包,在下文中一共展示了Genotype类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: fixADFromSubsettedAlleles

import htsjdk.variant.variantcontext.Genotype; //导入依赖的package包/类
/**
 * Fix the AD (allele depth) values for every genotype of a VariantContext that has been
 * subset to a smaller allele list.
 *
 * @param originalGs       the original GenotypesContext
 * @param originalVC       the original VariantContext
 * @param allelesToUse     the new (sub)set of alleles to use
 * @return a new non-null GenotypesContext with repaired AD fields
 */
static private GenotypesContext fixADFromSubsettedAlleles(final GenotypesContext originalGs, final VariantContext originalVC, final List<Allele> allelesToUse) {

    // bitset flagging which original allele indexes survive the subsetting
    final boolean[] keepAlleleIndex = getAlleleIndexBitset(originalVC, allelesToUse);

    // container receiving one fixed genotype per original sample
    final GenotypesContext fixedGenotypes = GenotypesContext.create(originalGs.size());

    // walk the samples in sorted-name order and repair each genotype's AD
    for ( final String sampleName : originalGs.getSampleNamesOrderedByName() ) {
        fixedGenotypes.add(fixAD(originalGs.get(sampleName), keepAlleleIndex, allelesToUse.size()));
    }

    return fixedGenotypes;
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:28,代码来源:GATKVariantContextUtils.java

示例2: fixAD

import htsjdk.variant.variantcontext.Genotype; //导入依赖的package包/类
/**
 * Fix the AD (allele depth) array of a single Genotype after allele subsetting.
 *
 * @param genotype              the original Genotype
 * @param alleleIndexesToUse    per-allele flags: true means keep that allele's depth
 * @param nAllelesToUse         how many alleles are being kept
 * @return a non-null Genotype (the original if it carries no AD)
 */
private static Genotype fixAD(final Genotype genotype, final boolean[] alleleIndexesToUse, final int nAllelesToUse) {
    // nothing to repair when the genotype has no AD field at all
    if ( !genotype.hasAD() )
        return genotype;

    final GenotypeBuilder builder = new GenotypeBuilder(genotype);
    final int[] originalAD = genotype.getAD();

    if ( originalAD.length == alleleIndexesToUse.length ) {
        // copy over only the depths of alleles we keep, preserving their order
        final int[] subsetAD = new int[nAllelesToUse];
        int nextSlot = 0;
        for ( int idx = 0; idx < originalAD.length; idx++ ) {
            if ( alleleIndexesToUse[idx] ) {
                subsetAD[nextSlot++] = originalAD[idx];
            }
        }
        builder.AD(subsetAD);
    } else {
        // length mismatch: the AD cannot be mapped to the new alleles, so drop it
        builder.noAD();
    }

    return builder.make();
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:30,代码来源:GATKVariantContextUtils.java

示例3: mergeGenotypes

import htsjdk.variant.variantcontext.Genotype; //导入依赖的package包/类
/**
 * Merge the genotypes of one VariantContext into an accumulating GenotypesContext,
 * optionally uniquifying sample names and remapping alleles.
 *
 * @param mergedGenotypes  accumulator receiving the merged genotypes (mutated in place)
 * @param oneVC            the VariantContext contributing genotypes
 * @param alleleMapping    mapper describing how this VC's alleles translate to the merged set
 * @param uniquifySamples  when true, sample names are made unique per source VC
 */
private static void mergeGenotypes(GenotypesContext mergedGenotypes, VariantContext oneVC, AlleleMapper alleleMapping, boolean uniquifySamples) {
    //TODO: should we add a check for cases when the genotypeMergeOption is REQUIRE_UNIQUE
    for ( final Genotype genotype : oneVC.getGenotypes() ) {
        final String mergedName = mergedSampleName(oneVC.getSource(), genotype.getSampleName(), uniquifySamples);

        // a sample already present in the merged set is left untouched
        if ( mergedGenotypes.containsSample(mergedName) )
            continue;

        Genotype toAdd = genotype;
        if ( uniquifySamples || alleleMapping.needsRemapping() ) {
            // rebuild the genotype with the merged name and (if needed) remapped alleles
            final List<Allele> mappedAlleles = alleleMapping.needsRemapping()
                    ? alleleMapping.remap(genotype.getAlleles())
                    : genotype.getAlleles();
            toAdd = new GenotypeBuilder(genotype).name(mergedName).alleles(mappedAlleles).make();
        }

        mergedGenotypes.add(toAdd);
    }
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:18,代码来源:GATKVariantContextUtils.java

示例4: pruneVariantContext

import htsjdk.variant.variantcontext.Genotype; //导入依赖的package包/类
/**
 * Strip a VariantContext down to its essentials: keep only the requested site-level
 * attributes and reduce every genotype to GT and GQ.
 *
 * @param builder         builder holding the VariantContext to prune
 * @param keysToPreserve  site attribute keys to retain; null is treated as "keep none"
 * @return the same builder, updated with the pruned attributes and genotypes
 */
public static VariantContextBuilder pruneVariantContext(final VariantContextBuilder builder, Collection<String> keysToPreserve ) {
    final VariantContext vc = builder.make();
    if ( keysToPreserve == null ) keysToPreserve = Collections.emptyList();

    // site-level attributes restricted to the requested keys
    final Map<String, Object> prunedAttributes = subsetAttributes(vc.getCommonInfo(), keysToPreserve);

    // rebuild every genotype dropping AD, DP, PL and all extended attributes (GT/GQ remain)
    final GenotypesContext prunedGenotypes = GenotypesContext.create(vc.getNSamples());
    for ( final Genotype genotype : vc.getGenotypes() ) {
        prunedGenotypes.add(new GenotypeBuilder(genotype).noAD().noDP().noPL().noAttributes().make());
    }

    return builder.genotypes(prunedGenotypes).attributes(prunedAttributes);
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:19,代码来源:GATKVariantContextUtils.java

示例5: purgeUnallowedGenotypeAttributes

import htsjdk.variant.variantcontext.Genotype; //导入依赖的package包/类
/**
 * Remove from every genotype any extended attribute whose key is not in the whitelist.
 *
 * @param vc                 the VariantContext to filter
 * @param allowedAttributes  keys to keep; null means "allow everything" and vc is returned as-is
 * @return a VariantContext whose genotypes carry only whitelisted extended attributes
 */
public static VariantContext purgeUnallowedGenotypeAttributes(VariantContext vc, Set<String> allowedAttributes) {
    // a null whitelist disables filtering entirely
    if ( allowedAttributes == null )
        return vc;

    final GenotypesContext filteredGenotypes = GenotypesContext.create(vc.getNSamples());
    for ( final Genotype genotype : vc.getGenotypes() ) {
        // collect only the whitelisted extended attributes into a fresh map
        final Map<String, Object> keptAttributes = new HashMap<>();
        for ( final Map.Entry<String, Object> entry : genotype.getExtendedAttributes().entrySet() ) {
            final String key = entry.getKey();
            if ( allowedAttributes.contains(key) ) {
                keptAttributes.put(key, entry.getValue());
            }
        }
        filteredGenotypes.add(new GenotypeBuilder(genotype).attributes(keptAttributes).make());
    }

    return new VariantContextBuilder(vc).genotypes(filteredGenotypes).make();
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:17,代码来源:GATKVariantContextUtils.java

示例6: add

import htsjdk.variant.variantcontext.Genotype; //导入依赖的package包/类
/**
 * Fold one Genotype into this hom-ref band, extending the band by a single position.
 *
 * @param pos the position being added; must be exactly one past the current stop
 * @param g   a non-null Genotype carrying GQ and PL fields
 */
public void add(final int pos, final Genotype g) {
    // validate the incoming genotype before mutating any band state
    if ( g == null ) throw new IllegalArgumentException("g cannot be null");
    if ( ! g.hasGQ() ) throw new IllegalArgumentException("g must have GQ field");
    if ( ! g.hasPL() ) throw new IllegalArgumentException("g must have PL field");
    if ( pos != stop + 1 ) throw new IllegalArgumentException("adding genotype at pos " + pos + " isn't contiguous with previous stop " + stop);
    if ( g.getPloidy() != ploidy)
        throw new IllegalArgumentException("cannot add a genotype with a different ploidy: " + g.getPloidy() + " != " + ploidy);

    final int[] genotypePLs = g.getPL();
    if ( minPLs == null ) {
        // first genotype in the band: its PLs seed the running minimum
        minPLs = genotypePLs;
    } else {
        if ( genotypePLs.length != minPLs.length )
            throw new IllegalStateException("trying to merge different PL array sizes: " + genotypePLs.length + " != " + minPLs.length);
        // keep the element-wise minimum PL across all genotypes in the band
        for ( int i = 0; i < genotypePLs.length; i++ )
            minPLs[i] = Math.min(minPLs[i], genotypePLs[i]);
    }

    stop = pos;
    GQs.add(Math.min(g.getGQ(), 99)); // GQ is capped at the maximum emission value of 99
    DPs.add(Math.max(g.getDP(), 0)); // clamp negative (i.e. missing) DP to 0
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:27,代码来源:HomRefBlock.java

示例7: addHomRefSite

import htsjdk.variant.variantcontext.Genotype; //导入依赖的package包/类
/**
 * Add a hom-ref site from vc to the gVCF hom-ref state tracking, emitting any pending
 * block as a VariantContext when the new site cannot be merged into it.
 *
 * @param vc a non-null VariantContext
 * @param g a non-null genotype from VariantContext
 * @return a VariantContext to be emitted, or null if none is appropriate
 */
protected VariantContext addHomRefSite(final VariantContext vc, final Genotype g) {

    if ( nextAvailableStart != -1 ) {
        // sites falling at/before nextAvailableStart on the same contig are masked by a deletion
        if ( vc.getStart() <= nextAvailableStart && vc.getChr().equals(contigOfNextAvailableStart) )
            return null;
        // we have moved past the deletion; clear the mask
        nextAvailableStart = -1;
        contigOfNextAvailableStart = null;
    }

    // extend the current block in place when the genotype fits its GQ band
    if ( genotypeCanBeMergedInCurrentBlock(g) ) {
        currentBlock.add(vc.getStart(), g);
        return null;
    }

    // otherwise flush the finished block and start a fresh one at this site
    final VariantContext emitted = blockToVCF(currentBlock);
    currentBlock = createNewBlock(vc, g);
    return emitted;
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:29,代码来源:GVCFWriter.java

示例8: createNewBlock

import htsjdk.variant.variantcontext.Genotype; //导入依赖的package包/类
/**
 * Helper function to create a new HomRefBlock from a variant context and current genotype.
 *
 * @param vc the VariantContext at the site where we want to start the band
 * @param g the genotype of the sample from vc that should be used to initialize the block
 * @return a newly allocated and initialized block already containing g
 * @throws IllegalStateException if no configured GQ partition contains g's GQ
 */
private HomRefBlock createNewBlock(final VariantContext vc, final Genotype g) {
    // find the GQ partition whose bounds contain this genotype's GQ
    HomRefBlock partition = null;
    for ( final HomRefBlock maybePartition : GQPartitions ) {
        if ( maybePartition.withinBounds(g.getGQ()) ) {
            partition = maybePartition;
            break;
        }
    }

    if ( partition == null )
        // report the actual GQ value; the original message printed the whole Genotype
        // object after "GQ ", which made the error confusing to read
        throw new IllegalStateException("GQ " + g.getGQ() + " of " + g + " from " + vc + " didn't fit into any partition");

    // create the block, add g to it, and return it for use
    final HomRefBlock block = new HomRefBlock(vc, partition.getGQLowerBound(), partition.getGQUpperBound(), defaultPloidy);
    block.add(vc.getStart(), g);
    return block;
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:26,代码来源:GVCFWriter.java

示例9: getGLs

import htsjdk.variant.variantcontext.Genotype; //导入依赖的package包/类
/**
 * Unpack a GenotypesContext into a list of per-sample genotype likelihood vectors.
 *
 * @param GLs           input genotypes context
 * @param includeDummy  when true, a dummy all-zero 3-element vector is prepended
 * @return              ArrayList of double[] GL vectors, in sample-name order
 */
protected static ArrayList<double[]> getGLs(final GenotypesContext GLs, final boolean includeDummy) {
    final ArrayList<double[]> genotypeLikelihoods = new ArrayList<>(GLs.size() + 1);

    if ( includeDummy )
        genotypeLikelihoods.add(new double[]{0.0,0.0,0.0}); // dummy

    for ( final Genotype sample : GLs.iterateInSampleNameOrder() ) {
        if ( !sample.hasLikelihoods() )
            continue;

        final double[] gls = sample.getLikelihoods().getAsVector();
        // only keep vectors whose sum falls below the no-call threshold
        if ( MathUtils.sum(gls) < GATKVariantContextUtils.SUM_GL_THRESH_NOCALL )
            genotypeLikelihoods.add(gls);
    }

    return genotypeLikelihoods;
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:21,代码来源:ExactAFCalculator.java

示例10: reduceScopeCalculateLikelihoodSums

import htsjdk.variant.variantcontext.Genotype; //导入依赖的package包/类
/**
 * Accumulate, for each alternate allele, likelihood-based evidence from every sample.
 *
 * For each genotype carrying usable PLs, finds the most likely genotype configuration,
 * converts that PL index to per-allele counts, and adds (count * GL advantage over
 * hom-ref) into the matching likelihoodSums slot.
 *
 * @param vc             the variant context whose genotypes are scanned
 * @param defaultPloidy  ploidy assumed when a genotype declares none (ploidy <= 0)
 * @param likelihoodSums one accumulator per original alternate allele, updated in place
 */
@Override
protected void reduceScopeCalculateLikelihoodSums(final VariantContext vc, final int defaultPloidy, final LikelihoodSum[] likelihoodSums) {
    final int numOriginalAltAlleles = likelihoodSums.length;
    final GenotypesContext genotypes = vc.getGenotypes();
    for (final Genotype genotype : genotypes.iterateInSampleNameOrder()) {
        // genotypes without PLs contribute no evidence
        if (!genotype.hasPL())
            continue;
        final double[] gls = genotype.getLikelihoods().getAsVector();
        // skip likelihood vectors at/above the no-call threshold
        if (MathUtils.sum(gls) >= GATKVariantContextUtils.SUM_GL_THRESH_NOCALL)
            continue;

        final int PLindexOfBestGL = MathUtils.maxElementIndex(gls);

        // GL advantage of the best genotype over hom-ref (0 when hom-ref itself is best)
        final double bestToHomRefDiffGL = PLindexOfBestGL == PL_INDEX_OF_HOM_REF ? 0.0 : gls[PLindexOfBestGL] - gls[PL_INDEX_OF_HOM_REF];
        final int declaredPloidy = genotype.getPloidy();
        final int ploidy = declaredPloidy <= 0 ? defaultPloidy : declaredPloidy;

        // translate the winning PL index into per-allele counts for this ploidy
        final int[] acCount = GeneralPloidyGenotypeLikelihoods.getAlleleCountFromPLIndex(1 + numOriginalAltAlleles, ploidy, PLindexOfBestGL);
        // by convention, first count coming from getAlleleCountFromPLIndex comes from reference allele
        for (int k = 1; k < acCount.length; k++)
            if (acCount[k] > 0)
                likelihoodSums[k - 1].sum += acCount[k] * bestToHomRefDiffGL;
    }
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:25,代码来源:GeneralPloidyExactAFCalculator.java

示例11: getInstance

import htsjdk.variant.variantcontext.Genotype; //导入依赖的package包/类
/**
 * Returns an AF calculator capable of handling a particular variant-context.
 *
 * @param variantContext the target context to build a calculator for
 * @param defaultPloidy the assumed ploidy in case there is no GT call present to determine it
 * @param maximumAltAlleles upper bound on the number of alternate alleles considered
 * @return never {@code null}
 * @throws IllegalArgumentException if variantContext is null
 */
public AFCalculator getInstance(final VariantContext variantContext, final int defaultPloidy, final int maximumAltAlleles) {
    if (variantContext == null)
        throw new IllegalArgumentException("variant context cannot be null");

    final int sampleCount = variantContext.getNSamples();
    // with no samples, ploidy cannot be inferred -- fall back to the default
    if (sampleCount == 0)
        return getInstance(defaultPloidy,maximumAltAlleles);

    final GenotypesContext genotypes = variantContext.getGenotypes();

    // resolve the first sample's ploidy, substituting the default when undeclared
    int commonPloidy = genotypes.get(0).getPloidy();
    if (commonPloidy <= 0)
        commonPloidy = defaultPloidy;

    // if any later sample disagrees, the context is mixed-ploidy
    for (int sampleIndex = 1; sampleIndex < sampleCount; sampleIndex++) {
        final int declaredPloidy = genotypes.get(sampleIndex).getPloidy();
        final int effectivePloidy = declaredPloidy <= 0 ? defaultPloidy : declaredPloidy;
        if (effectivePloidy != commonPloidy) {
            commonPloidy = AFCalculatorImplementation.UNBOUND_PLOIDY;
            break;
        }
    }

    return getInstance(commonPloidy, Math.min(variantContext.getNAlleles() - 1, maximumAltAlleles));
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:31,代码来源:AFCalculatorProvider.java

示例12: biallelicCombinedGLs

import htsjdk.variant.variantcontext.Genotype; //导入依赖的package包/类
/**
 * Create a single bi-allelic variant context from rootVC for one alternate allele.
 *
 * NOTE(review): the body calls rootVC.getAlternateAllele(altAlleleIndex - 1), and that
 * htsjdk accessor is 0-based, so altAlleleIndex is treated as 1-based here
 * (1 == first alt allele). The original comment claimed 0-based indexing — confirm
 * against this method's callers.
 *
 * @param rootVC the root (potentially multi-allelic) variant context
 * @param altAlleleIndex index selecting the alt allele (see note above regarding 1-based indexing)
 * @return a bi-allelic variant context based on rootVC
 */
protected final VariantContext biallelicCombinedGLs(final VariantContext rootVC, final int altAlleleIndex) {
    if ( rootVC.isBiallelic() ) {
        // already bi-allelic: nothing to combine
        return rootVC;
    } else {
        final int nAlts = rootVC.getNAlleles() - 1;
        // combine each sample's multi-allelic GLs down to the selected {ref, alt} pair
        final List<Genotype> biallelicGenotypes = new ArrayList<Genotype>(rootVC.getNSamples());
        for ( final Genotype g : rootVC.getGenotypes() )
            biallelicGenotypes.add(combineGLsPrecise(g, altAlleleIndex, nAlts));

        final VariantContextBuilder vcb = new VariantContextBuilder(rootVC);
        // getAlternateAllele is 0-based, hence the -1
        final Allele altAllele = rootVC.getAlternateAllele(altAlleleIndex - 1);
        vcb.alleles(Arrays.asList(rootVC.getReference(), altAllele));
        vcb.genotypes(biallelicGenotypes);
        return vcb.make();
    }
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:24,代码来源:IndependentAllelesDiploidExactAFCalculator.java

示例13: combineAltAlleleLikelihoods

import htsjdk.variant.variantcontext.Genotype; //导入依赖的package包/类
/**
 * Collapse a genotype's PL array into three combined log10 likelihoods written to dest:
 * dest[0] = hom-ref, dest[1] = any genotype involving the reference allele (het),
 * dest[2] = any genotype among alt alleles only.
 *
 * @param g                  the genotype supplying the PL array
 * @param plMaxIndex         exclusive upper bound on the PL indices to scan
 * @param dest               length-3 output array receiving the combined log10 likelihoods
 * @param hetLikelihoods     scratch array collecting ref-involving likelihoods; assumed sized
 *                           to hold all such entries for plMaxIndex — TODO confirm at call sites
 * @param homAltLikelihoods  scratch array collecting alt-only likelihoods; same sizing assumption
 * @return true if the genotype had PLs and dest was filled; false otherwise
 */
private boolean combineAltAlleleLikelihoods(final Genotype g, final int plMaxIndex, final double[] dest,
                                            final double[] hetLikelihoods, final double[] homAltLikelihoods) {

    final int[] pls = g.getPL();
    if (pls == null)
        return false;
    int hetNextIndex = 0;
    int homAltNextIndex = 0;
    // PL index 0 is hom-ref; partition indices 1..plMaxIndex-1 by whether either
    // allele of the pair is the reference (allele index 0)
    for (int plIndex = 1; plIndex < plMaxIndex; plIndex++) {
        final GenotypeLikelihoods.GenotypeLikelihoodsAllelePair alleles = GenotypeLikelihoods.getAllelePair(plIndex);
        if (alleles.alleleIndex1 == 0 || alleles.alleleIndex2 == 0)
            // PHRED_2_LOG10_COEFF scales phred PL values into log10 space (presumably -0.1) — TODO confirm
            hetLikelihoods[hetNextIndex++] = pls[plIndex] * PHRED_2_LOG10_COEFF;
        else
            homAltLikelihoods[homAltNextIndex++] = pls[plIndex] * PHRED_2_LOG10_COEFF;
    }
    dest[0] = pls[0] * PHRED_2_LOG10_COEFF;
    // fold each partition into a single approximate log10 sum
    dest[1] = MathUtils.approximateLog10SumLog10(hetLikelihoods);
    dest[2] = MathUtils.approximateLog10SumLog10(homAltLikelihoods);
    return true;
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:21,代码来源:IndependentAllelesDiploidExactAFCalculator.java

示例14: annotate

import htsjdk.variant.variantcontext.Genotype; //导入依赖的package包/类
/**
 * Annotate a genotype with per-allele depth, preferring per-read allele likelihoods
 * over pileup evidence when both are available.
 *
 * @param tracker            reference metadata tracker (unused here)
 * @param walker             the annotator-compatible walker (unused here)
 * @param ref                reference context (unused here)
 * @param stratifiedContext  pileup evidence for this sample; may be null
 * @param vc                 the variant context being annotated
 * @param g                  the genotype to annotate; must be called
 * @param gb                 builder receiving the annotation
 * @param alleleLikelihoodMap per-read allele likelihoods; may be null
 */
public void annotate(final RefMetaDataTracker tracker,
                     final AnnotatorCompatible walker,
                     final ReferenceContext ref,
                     final AlignmentContext stratifiedContext,
                     final VariantContext vc,
                     final Genotype g,
                     final GenotypeBuilder gb,
                     final PerReadAlleleLikelihoodMap alleleLikelihoodMap) {
    // nothing to do without a called genotype or without at least one evidence source
    final boolean noEvidence = stratifiedContext == null && alleleLikelihoodMap == null;
    if ( g == null || !g.isCalled() || noEvidence )
        return;

    if ( alleleLikelihoodMap != null && !alleleLikelihoodMap.isEmpty() ) {
        // likelihood-based evidence takes precedence over the pileup
        annotateWithLikelihoods(alleleLikelihoodMap, vc, gb);
    } else if ( stratifiedContext != null && vc.isSNP() ) {
        annotateWithPileup(stratifiedContext, vc, gb);
    }
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:17,代码来源:DepthPerAlleleBySample.java

示例15: annotate

import htsjdk.variant.variantcontext.Genotype; //导入依赖的package包/类
/**
 * Annotate a genotype with its per-sample strand bias contingency counts.
 *
 * @param tracker             reference metadata tracker (unused here)
 * @param walker              the annotator-compatible walker (unused here)
 * @param ref                 reference context (unused here)
 * @param stratifiedContext   pileup evidence (unused here)
 * @param vc                  the variant context being annotated
 * @param g                   the genotype to annotate
 * @param gb                  builder receiving the annotation
 * @param alleleLikelihoodMap per-read allele likelihoods for this sample
 */
@Override
public void annotate(final RefMetaDataTracker tracker,
                     final AnnotatorCompatible walker,
                     final ReferenceContext ref,
                     final AlignmentContext stratifiedContext,
                     final VariantContext vc,
                     final Genotype g,
                     final GenotypeBuilder gb,
                     final PerReadAlleleLikelihoodMap alleleLikelihoodMap) {
    if ( !isAppropriateInput(alleleLikelihoodMap, g) )
        return;

    // build this sample's strand contingency table and attach its flattened form
    final Map<String, PerReadAlleleLikelihoodMap> sampleMap = Collections.singletonMap(g.getSampleName(), alleleLikelihoodMap);
    final int[][] contingencyTable = FisherStrand.getContingencyTable(sampleMap, vc, 0);
    gb.attribute(STRAND_BIAS_BY_SAMPLE_KEY_NAME, FisherStrand.getContingencyArray(contingencyTable));
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:17,代码来源:StrandBiasBySample.java


注:本文中的htsjdk.variant.variantcontext.Genotype类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。