

Java GenotypesContext Class Code Examples

This article collects typical usage examples of the Java class htsjdk.variant.variantcontext.GenotypesContext. If you are wondering what the GenotypesContext class does, how to use it, or what real-world usage looks like, the curated class examples below may help.


The GenotypesContext class belongs to the htsjdk.variant.variantcontext package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Java code examples.

Example 1: subsetDiploidAlleles

import htsjdk.variant.variantcontext.GenotypesContext; // import the required package/class
/**
 * subset the Variant Context to the specific set of alleles passed in (pruning the PLs appropriately)
 *
 * @param vc                 variant context with genotype likelihoods
 * @param allelesToUse       which alleles from the vc are okay to use; *** must be in the same relative order as those in the original VC ***
 * @param assignGenotypes    assignment strategy for the (subsetted) PLs
 * @return a new non-null GenotypesContext
 */
public static GenotypesContext subsetDiploidAlleles(final VariantContext vc,
                                                    final List<Allele> allelesToUse,
                                                    final GenotypeAssignmentMethod assignGenotypes) {
    if ( allelesToUse.get(0).isNonReference() ) throw new IllegalArgumentException("First allele must be the reference allele");
    if ( allelesToUse.size() == 1 ) throw new IllegalArgumentException("Cannot subset to only 1 alt allele");

    // optimization: if no input genotypes, just exit
    if (vc.getGenotypes().isEmpty()) return GenotypesContext.create();

    // we need to determine which of the alternate alleles (and hence the likelihoods) to use and carry forward
    final List<Integer> likelihoodIndexesToUse = determineLikelihoodIndexesToUse(vc, allelesToUse);

    // create the new genotypes
    return createGenotypesWithSubsettedLikelihoods(vc.getGenotypes(), vc, allelesToUse, likelihoodIndexesToUse, assignGenotypes);
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 24, Source: GATKVariantContextUtils.java
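
A minimal caller sketch for this utility, assuming the SparkSeq GATKVariantContextUtils class above is on the classpath and that its GenotypeAssignmentMethod enum provides a SET_TO_NO_CALL constant (an assumption); all PL values are made up.

import htsjdk.variant.variantcontext.*; import java.util.Arrays;

final Allele ref  = Allele.create("A", true);
final Allele altC = Allele.create("C", false);
final Allele altT = Allele.create("T", false);

// one diploid sample with 6 PLs (3 alleles -> 6 diploid genotypes); values are made up
final Genotype g = new GenotypeBuilder("sample1", Arrays.asList(ref, altC))
        .PL(new int[]{40, 0, 60, 50, 70, 90}).make();

final VariantContext vc = new VariantContextBuilder("src", "1", 1000, 1000,
        Arrays.asList(ref, altC, altT)).genotypes(g).make();

// keep only REF + C; the PLs are pruned to the entries that involve those two alleles
final GenotypesContext subset = GATKVariantContextUtils.subsetDiploidAlleles(
        vc, Arrays.asList(ref, altC), GenotypeAssignmentMethod.SET_TO_NO_CALL);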

Example 2: updatePLsAndAD

import htsjdk.variant.variantcontext.GenotypesContext; // import the required package/class
/**
 * Updates the PLs and AD of the Genotypes in the newly selected VariantContext to reflect the fact that some alleles
 * from the original VariantContext are no longer present.
 *
 * @param selectedVC  the selected (new) VariantContext
 * @param originalVC  the original VariantContext
 * @return a new non-null GenotypesContext
 */
public static GenotypesContext updatePLsAndAD(final VariantContext selectedVC, final VariantContext originalVC) {
    final int numNewAlleles = selectedVC.getAlleles().size();
    final int numOriginalAlleles = originalVC.getAlleles().size();

    // if we have more alternate alleles in the selected VC than in the original VC, then something is wrong
    if ( numNewAlleles > numOriginalAlleles )
        throw new IllegalArgumentException("Attempting to fix PLs and AD from what appears to be a *combined* VCF and not a selected one");

    final GenotypesContext oldGs = selectedVC.getGenotypes();

    // if we have the same number of alternate alleles in the selected VC as in the original VC, then we don't need to fix anything
    if ( numNewAlleles == numOriginalAlleles )
        return oldGs;

    return fixGenotypesFromSubsettedAlleles(oldGs, originalVC, selectedVC.getAlleles());
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 25, Source: GATKVariantContextUtils.java
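
A hedged usage sketch (again assuming the GATKVariantContextUtils class above): derive a "selected" record that drops one alt allele from the original, then let updatePLsAndAD trim the genotype PL/AD arrays to match. All values are invented for illustration.

import htsjdk.variant.variantcontext.*; import java.util.Arrays;

final Allele ref  = Allele.create("G", true);
final Allele alt1 = Allele.create("A", false);
final Allele alt2 = Allele.create("T", false);

final Genotype g = new GenotypeBuilder("sample1", Arrays.asList(ref, alt1))
        .PL(new int[]{30, 0, 50, 60, 80, 99}).AD(new int[]{12, 9, 1}).make();

final VariantContext originalVC = new VariantContextBuilder("src", "1", 5000, 5000,
        Arrays.asList(ref, alt1, alt2)).genotypes(g).make();
// "selection" here simply drops alt2 from the allele list
final VariantContext selectedVC = new VariantContextBuilder(originalVC)
        .alleles(Arrays.asList(ref, alt1)).make();

final GenotypesContext fixed = GATKVariantContextUtils.updatePLsAndAD(selectedVC, originalVC);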

Example 3: fixADFromSubsettedAlleles

import htsjdk.variant.variantcontext.GenotypesContext; // import the required package/class
/**
 * Fix the AD for the GenotypesContext of a VariantContext that has been subset
 *
 * @param originalGs       the original GenotypesContext
 * @param originalVC       the original VariantContext
 * @param allelesToUse     the new (sub)set of alleles to use
 * @return a new non-null GenotypesContext
 */
private static GenotypesContext fixADFromSubsettedAlleles(final GenotypesContext originalGs, final VariantContext originalVC, final List<Allele> allelesToUse) {

    // the bitset representing the allele indexes we want to keep
    final boolean[] alleleIndexesToUse = getAlleleIndexBitset(originalVC, allelesToUse);

    // the new genotypes to create
    final GenotypesContext newGTs = GenotypesContext.create(originalGs.size());

    // the samples
    final List<String> sampleIndices = originalGs.getSampleNamesOrderedByName();

    // create the new genotypes
    for ( int k = 0; k < originalGs.size(); k++ ) {
        final Genotype g = originalGs.get(sampleIndices.get(k));
        newGTs.add(fixAD(g, alleleIndexesToUse, allelesToUse.size()));
    }

    return newGTs;
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 28, Source: GATKVariantContextUtils.java
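
The helper is private, but the AD trimming it performs can be illustrated with htsjdk alone: given a per-sample AD array covering REF, ALT1 and ALT2, dropping ALT2 keeps only the counts at the retained allele indexes. The loop below mirrors the bitset/fixAD idea in spirit; the counts are made up.

import htsjdk.variant.variantcontext.*; import java.util.Arrays;

final Genotype before = new GenotypeBuilder("sample1",
        Arrays.asList(Allele.NO_CALL, Allele.NO_CALL)).AD(new int[]{10, 5, 2}).make();

final boolean[] alleleIndexesToUse = {true, true, false};   // keep REF and ALT1, drop ALT2
final int[] oldAD = before.getAD();
final int[] newAD = new int[2];
for (int i = 0, j = 0; i < oldAD.length; i++)
    if (alleleIndexesToUse[i]) newAD[j++] = oldAD[i];

final Genotype after = new GenotypeBuilder(before).AD(newAD).make();   // AD is now {10, 5}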

Example 4: mergeGenotypes

import htsjdk.variant.variantcontext.GenotypesContext; // import the required package/class
private static void mergeGenotypes(GenotypesContext mergedGenotypes, VariantContext oneVC, AlleleMapper alleleMapping, boolean uniquifySamples) {
    //TODO: should we add a check for cases when the genotypeMergeOption is REQUIRE_UNIQUE
    for ( final Genotype g : oneVC.getGenotypes() ) {
        final String name = mergedSampleName(oneVC.getSource(), g.getSampleName(), uniquifySamples);
        if ( ! mergedGenotypes.containsSample(name) ) {
            // only add if the name is new
            Genotype newG = g;

            if ( uniquifySamples || alleleMapping.needsRemapping() ) {
                final List<Allele> alleles = alleleMapping.needsRemapping() ? alleleMapping.remap(g.getAlleles()) : g.getAlleles();
                newG = new GenotypeBuilder(g).name(name).alleles(alleles).make();
            }

            mergedGenotypes.add(newG);
        }
    }
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 18, Source: GATKVariantContextUtils.java
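
A plain-htsjdk sketch of the same merge pattern: copy each genotype into the merged context only when its (possibly renamed) sample is not already present. Here vcA and vcB are hypothetical input VariantContexts, and the renaming scheme is deliberately simplified.

import htsjdk.variant.variantcontext.*; import java.util.Arrays;

final GenotypesContext mergedGenotypes = GenotypesContext.create();
for (final VariantContext oneVC : Arrays.asList(vcA, vcB)) {              // vcA/vcB: hypothetical inputs
    for (final Genotype g : oneVC.getGenotypes()) {
        final String name = oneVC.getSource() + "." + g.getSampleName();  // simplified uniquification
        if (!mergedGenotypes.containsSample(name))
            mergedGenotypes.add(new GenotypeBuilder(g).name(name).make());
    }
}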

Example 5: pruneVariantContext

import htsjdk.variant.variantcontext.GenotypesContext; // import the required package/class
public static VariantContextBuilder pruneVariantContext(final VariantContextBuilder builder, Collection<String> keysToPreserve ) {
    final VariantContext vc = builder.make();
    if ( keysToPreserve == null ) keysToPreserve = Collections.emptyList();

    // VC info
    final Map<String, Object> attributes = subsetAttributes(vc.getCommonInfo(), keysToPreserve);

    // Genotypes
    final GenotypesContext genotypes = GenotypesContext.create(vc.getNSamples());
    for ( final Genotype g : vc.getGenotypes() ) {
        final GenotypeBuilder gb = new GenotypeBuilder(g);
        // remove AD, DP, PL, and all extended attributes, keeping just GT and GQ
        gb.noAD().noDP().noPL().noAttributes();
        genotypes.add(gb.make());
    }

    return builder.genotypes(genotypes).attributes(attributes);
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 19, Source: GATKVariantContextUtils.java
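
A hedged usage sketch (assuming the class above is on the classpath): build a record whose genotype carries GQ, DP and PL plus two INFO attributes, then prune it so that only GT, GQ and the whitelisted INFO key survive. All keys and values are made up.

import htsjdk.variant.variantcontext.*; import java.util.Arrays;

final Allele ref = Allele.create("A", true);
final Allele alt = Allele.create("C", false);
final Genotype g = new GenotypeBuilder("sample1", Arrays.asList(ref, alt))
        .GQ(60).DP(20).PL(new int[]{50, 0, 90}).make();
final VariantContextBuilder builder = new VariantContextBuilder("src", "1", 100, 100,
        Arrays.asList(ref, alt)).genotypes(g).attribute("DP", 40).attribute("MQ0", 0);

final VariantContext pruned =
        GATKVariantContextUtils.pruneVariantContext(builder, Arrays.asList("DP")).make();
// each genotype in 'pruned' now carries only GT and GQ; AD/DP/PL and extended attributes are gone,
// and of the INFO fields only "DP" is preserved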

Example 6: purgeUnallowedGenotypeAttributes

import htsjdk.variant.variantcontext.GenotypesContext; // import the required package/class
public static VariantContext purgeUnallowedGenotypeAttributes(VariantContext vc, Set<String> allowedAttributes) {
    if ( allowedAttributes == null )
        return vc;

    final GenotypesContext newGenotypes = GenotypesContext.create(vc.getNSamples());
    for ( final Genotype genotype : vc.getGenotypes() ) {
        final Map<String, Object> attrs = new HashMap<>();
        for ( final Map.Entry<String, Object> attr : genotype.getExtendedAttributes().entrySet() ) {
            if ( allowedAttributes.contains(attr.getKey()) )
                attrs.put(attr.getKey(), attr.getValue());
        }
        newGenotypes.add(new GenotypeBuilder(genotype).attributes(attrs).make());
    }

    return new VariantContextBuilder(vc).genotypes(newGenotypes).make();
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 17, Source: GATKVariantContextUtils.java
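
A hedged usage sketch: whitelist only the per-sample "SB" attribute so any other FORMAT-level extended attributes are dropped. The attribute keys and values here are invented for illustration.

import htsjdk.variant.variantcontext.*; import java.util.Arrays; import java.util.Collections;

final Allele ref = Allele.create("A", true);
final Allele alt = Allele.create("G", false);
final Genotype g = new GenotypeBuilder("sample1", Arrays.asList(ref, alt))
        .attribute("SB", Arrays.asList(10, 2, 8, 3))
        .attribute("FOO", "drop-me")
        .make();
final VariantContext vc = new VariantContextBuilder("src", "1", 100, 100,
        Arrays.asList(ref, alt)).genotypes(g).make();

final VariantContext cleaned = GATKVariantContextUtils.purgeUnallowedGenotypeAttributes(
        vc, Collections.singleton("SB"));
// cleaned.getGenotype("sample1").getExtendedAttributes() now contains only "SB"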

Example 7: getGLs

import htsjdk.variant.variantcontext.GenotypesContext; // import the required package/class
/**
 * Unpack a GenotypesContext into an ArrayList of genotype-likelihood vectors
 * @param GLs            input genotypes context
 * @param includeDummy   if true, prepend a dummy all-zero GL vector
 * @return               ArrayList of double[] corresponding to the GL vectors
 */
protected static ArrayList<double[]> getGLs(final GenotypesContext GLs, final boolean includeDummy) {
    final ArrayList<double[]> genotypeLikelihoods = new ArrayList<>(GLs.size() + 1);

    if ( includeDummy ) genotypeLikelihoods.add(new double[]{0.0,0.0,0.0}); // dummy
    for ( Genotype sample : GLs.iterateInSampleNameOrder() ) {
        if ( sample.hasLikelihoods() ) {
            final double[] gls = sample.getLikelihoods().getAsVector();

            if ( MathUtils.sum(gls) < GATKVariantContextUtils.SUM_GL_THRESH_NOCALL )
                genotypeLikelihoods.add(gls);
        }
    }

    return genotypeLikelihoods;
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 21, Source: ExactAFCalculator.java
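
The method itself is protected, but the htsjdk side of what it consumes is easy to show: a genotype's integer PLs are exposed as a log10-likelihood vector through getLikelihoods().getAsVector(). A minimal sketch with made-up PLs:

import htsjdk.variant.variantcontext.*; import java.util.Arrays;

final Allele ref = Allele.create("A", true);
final Allele alt = Allele.create("T", false);
final Genotype g = new GenotypeBuilder("sample1", Arrays.asList(ref, alt))
        .PL(new int[]{0, 30, 300}).make();

final GenotypesContext gc = GenotypesContext.create();
gc.add(g);

for (final Genotype sample : gc.iterateInSampleNameOrder()) {
    if (sample.hasLikelihoods()) {
        final double[] gls = sample.getLikelihoods().getAsVector();   // {0.0, -3.0, -30.0}
        System.out.println(Arrays.toString(gls));
    }
}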

Example 8: reduceScopeCalculateLikelihoodSums

import htsjdk.variant.variantcontext.GenotypesContext; // import the required package/class
@Override
protected void reduceScopeCalculateLikelihoodSums(final VariantContext vc, final int defaultPloidy, final LikelihoodSum[] likelihoodSums) {
    final int numOriginalAltAlleles = likelihoodSums.length;
    final GenotypesContext genotypes = vc.getGenotypes();
    for (final Genotype genotype : genotypes.iterateInSampleNameOrder()) {
        if (!genotype.hasPL())
            continue;
        final double[] gls = genotype.getLikelihoods().getAsVector();
        if (MathUtils.sum(gls) >= GATKVariantContextUtils.SUM_GL_THRESH_NOCALL)
            continue;

        final int PLindexOfBestGL = MathUtils.maxElementIndex(gls);

        final double bestToHomRefDiffGL = PLindexOfBestGL == PL_INDEX_OF_HOM_REF ? 0.0 : gls[PLindexOfBestGL] - gls[PL_INDEX_OF_HOM_REF];
        final int declaredPloidy = genotype.getPloidy();
        final int ploidy = declaredPloidy <= 0 ? defaultPloidy : declaredPloidy;

        final int[] acCount = GeneralPloidyGenotypeLikelihoods.getAlleleCountFromPLIndex(1 + numOriginalAltAlleles, ploidy, PLindexOfBestGL);
        // by convention, first count coming from getAlleleCountFromPLIndex comes from reference allele
        for (int k = 1; k < acCount.length; k++)
            if (acCount[k] > 0)
                likelihoodSums[k - 1].sum += acCount[k] * bestToHomRefDiffGL;
    }
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 25, Source: GeneralPloidyExactAFCalculator.java

Example 9: getInstance

import htsjdk.variant.variantcontext.GenotypesContext; // import the required package/class
/**
 * Returns an AF calculator capable of handling a particular variant context.
 * @param variantContext the target variant context.
 * @param defaultPloidy the assumed ploidy in case there is no GT call present to determine it.
 * @param maximumAltAlleles the maximum number of alternative alleles to consider.
 * @return never {@code null}
 */
public AFCalculator getInstance(final VariantContext variantContext, final int defaultPloidy, final int maximumAltAlleles) {
    if (variantContext == null)
        throw new IllegalArgumentException("variant context cannot be null");

    final int sampleCount = variantContext.getNSamples();
    if  (sampleCount == 0)
        return getInstance(defaultPloidy,maximumAltAlleles);

    final GenotypesContext genotypes = variantContext.getGenotypes();

    final Genotype firstGenotype = genotypes.get(0);
    int ploidy = firstGenotype.getPloidy();
    if (ploidy <= 0) ploidy = defaultPloidy;
    for (int i = 1 ; i < sampleCount; i++) {
        final Genotype genotype = genotypes.get(i);
        final int declaredPloidy = genotype.getPloidy();
        final int actualPloidy = declaredPloidy <= 0 ? defaultPloidy : declaredPloidy;
        if (actualPloidy != ploidy) {
            ploidy = AFCalculatorImplementation.UNBOUND_PLOIDY;
            break;
        }
    }
    return getInstance(ploidy,Math.min(variantContext.getNAlleles() - 1, maximumAltAlleles));
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 31, Source: AFCalculatorProvider.java
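
The ploidy scan above relies only on htsjdk's Genotype.getPloidy(), which is simply the number of alleles in the call, so a mixed-ploidy input is easy to illustrate:

import htsjdk.variant.variantcontext.*; import java.util.Arrays; import java.util.Collections;

final Allele ref = Allele.create("A", true);
final Allele alt = Allele.create("C", false);

final Genotype diploid = new GenotypeBuilder("s1", Arrays.asList(ref, alt)).make();
final Genotype haploid = new GenotypeBuilder("s2", Collections.singletonList(ref)).make();

System.out.println(diploid.getPloidy());   // 2
System.out.println(haploid.getPloidy());   // 1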

Example 10: composeCallAttributes

import htsjdk.variant.variantcontext.GenotypesContext; // import the required package/class
@Override
protected Map<String, Object> composeCallAttributes(final boolean inheritAttributesFromInputVC, final VariantContext vc,
                                                    final AlignmentContext rawContext, final Map<String, AlignmentContext> stratifiedContexts, final RefMetaDataTracker tracker, final ReferenceContext refContext, final List<Integer> alleleCountsofMLE, final boolean bestGuessIsRef,
                                                    final AFCalculationResult AFresult, final List<Allele> allAllelesToUse, final GenotypesContext genotypes,
                                                    final GenotypeLikelihoodsCalculationModel.Model model, final Map<String, PerReadAlleleLikelihoodMap> perReadAlleleLikelihoodMap) {
    final Map<String, Object> result = super.composeCallAttributes(inheritAttributesFromInputVC, vc, rawContext, stratifiedContexts, tracker, refContext, alleleCountsofMLE, bestGuessIsRef,
            AFresult, allAllelesToUse, genotypes, model, perReadAlleleLikelihoodMap);

    final boolean limitedContext = tracker == null || refContext == null || rawContext == null || stratifiedContexts == null;

    if (configuration.COMPUTE_SLOD && !limitedContext && !bestGuessIsRef) {
        final double strandScore = calculateSLOD(stratifiedContexts, tracker, refContext, AFresult, allAllelesToUse, model, perReadAlleleLikelihoodMap);
        if (!Double.isNaN(strandScore))
            result.put("SB", strandScore);
    }
    return result;
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 18, Source: UnifiedGenotypingEngine.java

Example 11: getTableFromSamples

import htsjdk.variant.variantcontext.GenotypesContext; // import the required package/class
/**
 * Create the contingency table by retrieving the per-sample strand bias annotation and adding them together
 *
 * @param genotypes the genotypes from which to pull out the per-sample strand bias annotation
 * @param minCount  minimum threshold for the sample strand bias counts for each ref and alt.
 *                  If both ref and alt counts are above minCount the whole sample strand bias is added to the resulting table
 * @return the table used for several strand bias tests, will be null if none of the genotypes contain the per-sample SB annotation
 */
protected int[][] getTableFromSamples(final GenotypesContext genotypes, final int minCount) {
    if (genotypes == null) {
        throw new IllegalArgumentException("Genotypes cannot be null.");
    }

    final int[] sbArray = {0, 0, 0, 0}; // reference-forward-reverse -by- alternate-forward-reverse
    boolean foundData = false;

    for (final Genotype g : genotypes) {
        if (g.isNoCall() || !g.hasAnyAttribute(StrandBiasBySample.STRAND_BIAS_BY_SAMPLE_KEY_NAME))
            continue;

        foundData = true;
        final ArrayList<Integer> sbbsString = (ArrayList<Integer>) g.getAnyAttribute(StrandBiasBySample.STRAND_BIAS_BY_SAMPLE_KEY_NAME);
        final int[] data = Utils.list2Array(sbbsString);
        if (passesMinimumThreshold(data, minCount)) {
            for (int index = 0; index < sbArray.length; index++) {
                sbArray[index] += data[index];
            }
        }
    }

    return (foundData ? decodeSBBS(sbArray) : null);
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 33, Source: StrandBiasTest.java
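
For reference, a sketch of the per-sample input this method expects: each genotype carries a 4-element strand-bias list (ref-forward, ref-reverse, alt-forward, alt-reverse). The literal key "SB" is used here as an assumption for StrandBiasBySample.STRAND_BIAS_BY_SAMPLE_KEY_NAME, and the counts are made up.

import htsjdk.variant.variantcontext.*; import java.util.ArrayList; import java.util.Arrays;

final Allele ref = Allele.create("A", true);
final Allele alt = Allele.create("G", false);

final Genotype g = new GenotypeBuilder("sample1", Arrays.asList(ref, alt))
        .attribute("SB", new ArrayList<Integer>(Arrays.asList(40, 35, 12, 15)))   // "SB" key name assumed
        .make();

@SuppressWarnings("unchecked")
final ArrayList<Integer> sb = (ArrayList<Integer>) g.getAnyAttribute("SB");   // [40, 35, 12, 15]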

Example 12: annotateGenotypes

import htsjdk.variant.variantcontext.GenotypesContext; // import the required package/class
private GenotypesContext annotateGenotypes(final RefMetaDataTracker tracker,
                                           final ReferenceContext ref, final Map<String, AlignmentContext> stratifiedContexts,
                                           final VariantContext vc,
                                           final Map<String, PerReadAlleleLikelihoodMap> stratifiedPerReadAlleleLikelihoodMap) {
    if (requestedGenotypeAnnotations.isEmpty())
        return vc.getGenotypes();

    final GenotypesContext genotypes = GenotypesContext.create(vc.getNSamples());
    for (final Genotype genotype : vc.getGenotypes()) {
        AlignmentContext context = null;
        PerReadAlleleLikelihoodMap perReadAlleleLikelihoodMap = null;
        if (stratifiedContexts != null)
            context = stratifiedContexts.get(genotype.getSampleName());
        if (stratifiedPerReadAlleleLikelihoodMap != null)
            perReadAlleleLikelihoodMap = stratifiedPerReadAlleleLikelihoodMap.get(genotype.getSampleName());


        final GenotypeBuilder gb = new GenotypeBuilder(genotype);
        for (final GenotypeAnnotation annotation : requestedGenotypeAnnotations) {
            annotation.annotate(tracker, walker, ref, context, vc, genotype, gb, perReadAlleleLikelihoodMap);
        }
        genotypes.add(gb.make());
    }

    return genotypes;
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 27, Source: VariantAnnotatorEngine.java

Example 13: nextKeyValue

import htsjdk.variant.variantcontext.GenotypesContext; // import the required package/class
@Override
public boolean nextKeyValue() throws IOException {
	if (!it.hasNext() || it.getPosition() >= length)
		return false;

	final String line = it.next();
	final VariantContext v = codec.decode(line);
	
	VariantContext vtemp = codec.decode(line);
	
	GenotypesContext gc = vtemp.getGenotypes();
	if (gc instanceof LazyParsingGenotypesContext){
		((LazyParsingGenotypesContext)gc).getParser().setHeaderDataCache(vcfHeaderDataCache);
	}
	
	CommonInfo info = v.getCommonInfo();
	info.putAttribute("SM",vtemp.getSampleNamesOrderedByName().get(0));

	Integer chromIdx = contigDict.get(v.getContig());
	if (chromIdx == null)
		chromIdx = (int) MurmurHash3.murmurhash3(v.getContig(), 0);

	key.set((long) chromIdx << 32 | (long) (v.getStart() - 1));
	vc.set(v);
	return true;
}
 
Developer: BGI-flexlab, Project: SOAPgaea, Lines: 27, Source: JointCallingVCFRecordReader.java
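
The 64-bit key written at the end packs the contig index into the high 32 bits and the 0-based start position into the low 32 bits; a standalone round-trip with made-up values looks like this:

final int chromIdx = 7;                             // contig index from the dictionary
final int start0   = 123456;                        // v.getStart() - 1
final long key     = (long) chromIdx << 32 | (long) start0;

final int contigBack = (int) (key >>> 32);          // 7
final int startBack  = (int) (key & 0xFFFFFFFFL);   // 123456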

Example 14: annotate

import htsjdk.variant.variantcontext.GenotypesContext; // import the required package/class
@Override
public Map<String, Object> annotate(RefMetaDataTracker tracker, ChromosomeInformationShare ref, VariantContext vc) {
	final GenotypesContext genotypes = vc.getGenotypes();
       if (genotypes == null || genotypes.isEmpty())
           return null;

       final List<Double> refQuals = new ArrayList<>();
       final List<Double> altQuals = new ArrayList<>();
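       // NOTE: the code that fills refQuals/altQuals from the per-read base qualities is not
       // shown in this excerpt, so as written the check below always returns null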

       if ( refQuals.isEmpty() && altQuals.isEmpty() )
           return null;

       final MannWhitneyU mannWhitneyU = new MannWhitneyU();
       
       // we are testing that set1 (the alt bases) have lower quality scores than set2 (the ref bases)
       final MannWhitneyU.Result result = mannWhitneyU.test(convertToArray(altQuals), convertToArray(refQuals), MannWhitneyU.TestType.FIRST_DOMINATES);
       final double zScore = result.getZ();


       final Map<String, Object> map = new HashMap<>();
       if (!Double.isNaN(zScore))
           map.put(getKeyNames().get(0), String.format("%.3f", zScore));
       return map;
}
 
Developer: BGI-flexlab, Project: SOAPgaea, Lines: 25, Source: RankSumTest.java

Example 15: calculateEH

import htsjdk.variant.variantcontext.GenotypesContext; // import the required package/class
protected double calculateEH(final VariantContext vc, final GenotypesContext genotypes) {
    HeterozygosityUtils heterozygosityUtils = new HeterozygosityUtils(RETURN_ROUNDED);
    final double[] genotypeCountsDoubles = heterozygosityUtils.getGenotypeCountsForRefVsAllAlts(vc, genotypes);
    sampleCount = heterozygosityUtils.getSampleCount();
    final int[] genotypeCounts = new int[genotypeCountsDoubles.length];
    for(int i = 0; i < genotypeCountsDoubles.length; i++) {
        genotypeCounts[i] = (int)genotypeCountsDoubles[i];
    }

    double pval = exactTest(genotypeCounts);

    // if the actual phred-scaled p-value would be infinite (pval == 0), return a very large finite value instead
    if (pval == 0) {
    	return -10.0 * Math.log10(minNeededValue);
    }
    double phredPval = -10.0 * Math.log10(pval);

    return phredPval;
}
 
Developer: BGI-flexlab, Project: SOAPgaea, Lines: 20, Source: ExcessHet.java
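
The final conversion is just the standard phred scaling of a p-value, -10 * log10(p); a tiny standalone illustration:

final double pval  = 0.001;
final double phred = -10.0 * Math.log10(pval);   // 30.0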


Note: The htsjdk.variant.variantcontext.GenotypesContext class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs; the code snippets were selected from open-source projects contributed by their authors, and the copyright of the source code belongs to the original authors. Please refer to each project's License before distributing or using the code; do not reproduce this article without permission.