当前位置: 首页>>代码示例>>Java>>正文


Java GenotypeBuilder.DP属性代码示例

本文整理汇总了Java中htsjdk.variant.variantcontext.GenotypeBuilder.DP属性的典型用法代码示例。如果您正苦于以下问题:Java GenotypeBuilder.DP属性的具体用法?Java GenotypeBuilder.DP怎么用?Java GenotypeBuilder.DP使用的例子?那么恭喜您, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在htsjdk.variant.variantcontext.GenotypeBuilder的用法示例。


在下文中一共展示了GenotypeBuilder.DP属性的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: blockToVCF

/**
 * Convert a HomRefBlock into a VariantContext
 *
 * @param block the block to convert
 * @return a VariantContext representing the gVCF encoding for this block,
 * or {@code null} when the input {@code block} is {@code null}, indicating
 * that there is no variant-context to be output into the VCF.
 */
private VariantContext blockToVCF(final HomRefBlock block) {
    if (block == null) {
        return null;
    }

    final int blockStop = block.getStop();
    final VariantContextBuilder builder = new VariantContextBuilder(block.getStartingVC());
    builder.attributes(new HashMap<String, Object>(2)); // wipe the inherited site attributes
    builder.stop(blockStop);
    builder.attribute(VCFConstants.END_KEY, blockStop);

    // Build the single genotype summarizing the whole block: median GQ/DP,
    // the minimum DP seen, and the minimum PLs observed across the block.
    final GenotypeBuilder genotypeBuilder = new GenotypeBuilder(sampleName,
            GATKVariantContextUtils.homozygousAlleleList(block.getRef(), block.getPloidy()));
    genotypeBuilder.noAD().noPL().noAttributes(); // start from a clean genotype
    genotypeBuilder.GQ(block.getMedianGQ());
    genotypeBuilder.DP(block.getMedianDP());
    genotypeBuilder.attribute(MIN_DP_FORMAT_FIELD, block.getMinDP());
    genotypeBuilder.PL(block.getMinPLs());

    // This annotation is no longer standard
    //gb.attribute(MIN_GQ_FORMAT_FIELD, block.getMinGQ());

    return builder.genotypes(genotypeBuilder.make()).make();
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:29,代码来源:GVCFWriter.java

示例2: subsetToRefOnly

/**
 * Subset the samples in VC to reference only information with ref call alleles
 *
 * Preserves DP if present
 *
 * @param vc the variant context to subset down to
 * @param ploidy ploidy to use if a genotype doesn't have any alleles
 * @return a GenotypesContext
 */
public static GenotypesContext subsetToRefOnly(final VariantContext vc, final int ploidy) {
    if (vc == null) {
        throw new IllegalArgumentException("vc cannot be null");
    }
    if (ploidy < 1) {
        throw new IllegalArgumentException("ploidy must be >= 1 but got " + ploidy);
    }

    // the genotypes carrying the PLs we are about to discard
    final GenotypesContext originals = vc.getGenotypes();

    // nothing to rewrite -> return the (empty) input unchanged
    if (originals.isEmpty()) {
        return originals;
    }

    final Allele refAllele = vc.getReference();
    // cache the diploid allele list: the overwhelmingly common case
    final List<Allele> diploid = Arrays.asList(refAllele, refAllele);

    final GenotypesContext result = GenotypesContext.create(originals.size());
    for (final Genotype genotype : vc.getGenotypes()) {
        // genotypes without alleles fall back to the caller-supplied ploidy
        final int effectivePloidy = genotype.getPloidy() == 0 ? ploidy : genotype.getPloidy();
        final List<Allele> refAlleles =
                effectivePloidy == 2 ? diploid : Collections.nCopies(effectivePloidy, refAllele);
        final GenotypeBuilder builder = new GenotypeBuilder(genotype.getSampleName(), refAlleles);
        if (genotype.hasDP()) {
            builder.DP(genotype.getDP());
        }
        if (genotype.hasGQ()) {
            builder.GQ(genotype.getGQ());
        }
        result.add(builder.make());
    }

    return result;
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:37,代码来源:GATKVariantContextUtils.java

示例3: annotate

/**
 * Annotate {@code gb} with the per-sample depth (DP) computed from the
 * per-read allele likelihoods. No-op when the genotype is missing/uncalled
 * or when no context is available at all.
 */
public void annotate(final RefMetaDataTracker tracker,
                     final AnnotatorCompatible walker,
                     final ReferenceContext ref,
                     final AlignmentContext stratifiedContext,
                     final VariantContext vc,
                     final Genotype g,
                     final GenotypeBuilder gb,
                     final PerReadAlleleLikelihoodMap alleleLikelihoodMap) {
    final boolean noDataAvailable = stratifiedContext == null && alleleLikelihoodMap == null;
    if (g == null || !g.isCalled() || noDataAvailable) {
        return;
    }

    // stratifiedContext alone is not enough for this annotation
    if (alleleLikelihoodMap == null) {
        throw new IllegalStateException("DepthPerSampleHC can only be used with likelihood based annotations in the HaplotypeCaller");
    }

    // no reads at all -> leave DP unset, exactly like the original behavior
    if (alleleLikelihoodMap.isEmpty()) {
        return;
    }

    final Set<Allele> alleles = new HashSet<>(vc.getAlleles());

    // make sure that there's a meaningful relationship between the alleles in the perReadAlleleLikelihoodMap and our VariantContext
    if (!alleleLikelihoodMap.getAllelesSet().containsAll(alleles)) {
        throw new IllegalStateException("VC alleles " + alleles + " not a strict subset of per read allele map alleles " + alleleLikelihoodMap.getAllelesSet());
    }

    // the depth for the HC is the sum of the informative alleles at this site.  It's not perfect (as we cannot
    // differentiate between reads that align over the event but aren't informative vs. those that aren't even
    // close) but it's a pretty good proxy and it matches with the AD field (i.e., sum(AD) = DP).
    int informativeReads = 0;
    for (final Map<Allele, Double> readLikelihoods : alleleLikelihoodMap.getLikelihoodReadMap().values()) {
        final MostLikelyAllele mostLikely =
                PerReadAlleleLikelihoodMap.getMostLikelyAllele(readLikelihoods, alleles);
        if (mostLikely.isInformative()) {
            informativeReads++;
        }
    }

    gb.DP(informativeReads);
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:38,代码来源:DepthPerSampleHC.java

示例4: subsetToRefOnly

/**
 * Subset the samples in VC to reference only information with ref call
 * alleles
 *
 * Preserves DP if present
 *
 * @param vc
 *            the variant context to subset down to
 * @param ploidy
 *            ploidy to use if a genotype doesn't have any alleles
 * @return a GenotypesContext
 */
public static GenotypesContext subsetToRefOnly(final VariantContext vc, final int ploidy) {
	if (vc == null)
		throw new IllegalArgumentException("vc cannot be null");
	if (ploidy < 1)
		throw new IllegalArgumentException("ploidy must be >= 1 but got " + ploidy);

	// the genotypes with PLs
	final GenotypesContext oldGTs = vc.getGenotypes();

	// optimization: if no input genotypes, just exit
	if (oldGTs.isEmpty())
		return oldGTs;

	// the new genotypes to create
	final GenotypesContext newGTs = GenotypesContext.create(oldGTs.size());

	// single reference allele, repeated per-genotype ploidy below
	// (fix: previously this was fetched but never used, and an unused
	// diploidRefAlleles list was also allocated as dead code)
	final Allele ref = vc.getReference();

	// create the new genotypes: each becomes a hom-ref call keeping only DP/GQ
	for (final Genotype g : vc.getGenotypes()) {
		// genotypes without alleles fall back to the caller-supplied ploidy
		final int gPloidy = g.getPloidy() == 0 ? ploidy : g.getPloidy();
		final List<Allele> refAlleles = Collections.nCopies(gPloidy, ref);
		final GenotypeBuilder gb = new GenotypeBuilder(g.getSampleName(), refAlleles);
		if (g.hasDP())
			gb.DP(g.getDP());
		if (g.hasGQ())
			gb.GQ(g.getGQ());
		newGTs.add(gb.make());
	}

	return newGTs;
}
 
开发者ID:BGI-flexlab,项目名称:SOAPgaea,代码行数:45,代码来源:GaeaGvcfVariantContextUtils.java

示例5: createHomRefGenotype

/**
 * Build the single hom-ref genotype summarizing this block: block-minimum
 * PLs, a GQ derived from those PLs, the block's median DP, and MIN_DP.
 */
private Genotype createHomRefGenotype(String sampleName) {
    // one copy of the reference allele per ploidy
    final GenotypeBuilder builder =
            new GenotypeBuilder(sampleName, Collections.nCopies(getPloidy(), getRef()))
                    .noAD().noPL().noAttributes(); // start with no inherited annotations

    final int[] minimumPLs = getMinPLs();
    builder.PL(minimumPLs)
           .GQ(GATKVariantContextUtils.calculateGQFromPLs(minimumPLs))
           .DP(getMedianDP())
           .attribute(GATKVCFConstants.MIN_DP_FORMAT_KEY, getMinDP());

    return builder.make();
}
 
开发者ID:broadinstitute,项目名称:gatk,代码行数:12,代码来源:HomRefBlock.java

示例6: annotate

/**
 * Annotate {@code gb} with per-sample depth (DP): the count of reads whose
 * best (marginalized) allele informatively supports one of the VC alleles.
 */
@Override
public void annotate( final ReferenceContext ref,
                      final VariantContext vc,
                      final Genotype g,
                      final GenotypeBuilder gb,
                      final ReadLikelihoods<Allele> likelihoods ) {
    Utils.nonNull(vc);
    Utils.nonNull(g);
    Utils.nonNull(gb);

    if ( likelihoods == null || !g.isCalled() ) {
        logger.warn("Annotation will not be calculated, genotype is not called or alleleLikelihoodMap is null");
        return;
    }

    // a sample with no reads gets an explicit depth of zero
    final String sampleName = g.getSampleName();
    final int sampleIndex = likelihoods.indexOfSample(sampleName);
    if (likelihoods.sampleReadCount(sampleIndex) == 0) {
        gb.DP(0);
        return;
    }

    final Set<Allele> vcAlleles = new LinkedHashSet<>(vc.getAlleles());

    // make sure that there's a meaningful relationship between the alleles in the likelihoods and our VariantContext
    if ( !likelihoods.alleles().containsAll(vcAlleles) ) {
        logger.warn("VC alleles " + vcAlleles + " not a strict subset of ReadLikelihoods alleles " + likelihoods.alleles());
        return;
    }

    // the depth for the HC is the sum of the informative alleles at this site.  It's not perfect (as we cannot
    // differentiate between reads that align over the event but aren't informative vs. those that aren't even
    // close) but it's a pretty good proxy and it matches with the AD field (i.e., sum(AD) = DP).
    final Map<Allele, List<Allele>> identitySubset = vcAlleles.stream()
            .collect(Collectors.toMap(allele -> allele, allele -> Arrays.asList(allele)));
    final ReadLikelihoods<Allele> marginalized = likelihoods.marginalize(identitySubset);
    final long informativeCount = marginalized.bestAlleles(sampleName).stream()
            .filter(bestAllele -> bestAllele.isInformative())
            .count();
    gb.DP((int) informativeCount);
}
 
开发者ID:broadinstitute,项目名称:gatk,代码行数:38,代码来源:DepthPerSampleHC.java

示例7: makeGenotypes

/**
 * Build the per-sample genotypes for {@code ctx} after replacing every ALT
 * allele other than {@code theAllele} with {@code replaceWith}. Unless
 * {@code disableHomVarAlt} is set, a called genotype carrying any other ALT
 * allele is emitted as a missing (no-call) genotype, because it cannot be
 * expressed with the reduced allele set. DP, GQ and FILTERs are preserved.
 *
 * @param ctx          the source variant context
 * @param sample_names the samples to emit, in output order
 * @param theAllele    the ALT allele being kept as-is
 * @param replaceWith  the allele substituted for every other ALT allele
 * @return the rewritten genotypes, one per sample
 */
private List<Genotype> makeGenotypes(
	 final VariantContext ctx,
	 final List<String> sample_names,
	 final Allele theAllele,
	 final Allele replaceWith
	 )
		{
	final List<Genotype> genotypes = new ArrayList<>(sample_names.size());

	for(final String sampleName: sample_names)
		{
		final Genotype g = ctx.getGenotype(sampleName);

		// Genotype contains an allele other than no-call/REF/theAllele/replaceWith:
		// emit a missing genotype instead.
		// (fix: anyMatch short-circuits; the original collected a full Set
		// via Collectors.toSet() just to test it for emptiness)
		if( !disableHomVarAlt &&
			g.isCalled() &&
			g.getAlleles().stream().
				anyMatch(A->!(A.isNoCall() || A.isReference() || A.equals(theAllele) || A.equals(replaceWith)))
			)
			{
			genotypes.add(GenotypeBuilder.createMissing(sampleName, g.getPloidy()));
			continue;
			}

		final GenotypeBuilder gb =new GenotypeBuilder(
				g.getSampleName(),
					g.getAlleles().stream().
					map(A->(A.isNoCall() || A.isReference() || A.equals(theAllele)?A:replaceWith)).
					collect(Collectors.toList())
				);
		if(g.hasDP()) gb.DP(g.getDP());
		if(g.hasGQ()) gb.GQ(g.getGQ());
		if(g.isFiltered()) gb.filter(g.getFilters());

		genotypes.add(gb.make());
		}
	return genotypes;
	}
 
开发者ID:lindenb,项目名称:jvarkit,代码行数:40,代码来源:VcfMultiToOneAllele.java

示例8: entryToObject

/**
 * Deserialize a Genotype from a Berkeley-DB tuple. Wire format: sample name,
 * then four flag-prefixed optional fields (DP, AD, GQ, PL), then a counted
 * list of alleles, then a counted list of key/value attributes.
 */
@Override
public Genotype entryToObject(TupleInput in)
	{
	// sample name is always serialized first
	final GenotypeBuilder builder = new GenotypeBuilder(in.readString());
	// each optional field is preceded by a boolean presence flag
	if(in.readBoolean()) builder.DP(in.readInt());
	if(in.readBoolean()) builder.AD(arrayOfIntToEntry(in));
	if(in.readBoolean()) builder.GQ(in.readInt());
	if(in.readBoolean()) builder.PL(arrayOfIntToEntry(in));

	/* ALLELES ======================================== */
	final int alleleCount = in.readInt();
	final List<Allele> alleleList = new ArrayList<Allele>(alleleCount);
	for(int idx = 0; idx < alleleCount; ++idx)
		{
		alleleList.add(this.alleleBinding.entryToObject(in));
		}
	builder.alleles(alleleList);

	/* ATTRIBUTES ===================================== */
	final int attributeCount = in.readInt();
	for(int idx = 0; idx < attributeCount; ++idx)
		{
		final String key = in.readString(); // key first, then the encoded value
		builder.attribute(key, super.readAttribute(in));
		}

	return builder.make();
	}
 
开发者ID:lindenb,项目名称:jvarkit,代码行数:27,代码来源:GenotypeBinding.java

示例9: cleanupGenotypeAnnotations

/**
 * Cleans up genotype-level annotations that need to be updated.
 * 1. move MIN_DP to DP if present
 * 2. propagate DP to AD if not present
 * 3. remove SB if present
 * 4. change the PGT value from "0|1" to "1|1" for homozygous variant genotypes
 *
 * @param VC            the VariantContext with the Genotypes to fix
 * @param createRefGTs  if true we will also create proper hom ref genotypes since we assume the site is monomorphic
 * @return a new set of Genotypes
 */
private List<Genotype> cleanupGenotypeAnnotations(final VariantContext VC, final boolean createRefGTs) {
    final GenotypesContext oldGTs = VC.getGenotypes();
    final List<Genotype> recoveredGs = new ArrayList<>(oldGTs.size());
    for ( final Genotype oldGT : oldGTs ) {
        // mutable copy of the extended attributes; edited below, then re-installed
        final Map<String, Object> attrs = new HashMap<>(oldGT.getExtendedAttributes());

        final GenotypeBuilder builder = new GenotypeBuilder(oldGT);
        // default depth when the genotype carries no DP
        int depth = oldGT.hasDP() ? oldGT.getDP() : 0;

        // move the MIN_DP to DP
        // NOTE(review): parseInt assumes MIN_DP was stored as a String;
        // an Integer-valued attribute would throw ClassCastException — confirm upstream encoding
        if ( oldGT.hasExtendedAttribute("MIN_DP") ) {
            depth = Integer.parseInt((String)oldGT.getAnyAttribute("MIN_DP"));
            builder.DP(depth);
            attrs.remove("MIN_DP");
        }

        // remove SB
        attrs.remove("SB");

        // update PGT for hom vars
        if ( oldGT.isHomVar() && oldGT.hasExtendedAttribute(HaplotypeCaller.HAPLOTYPE_CALLER_PHASING_GT_KEY) ) {
            attrs.put(HaplotypeCaller.HAPLOTYPE_CALLER_PHASING_GT_KEY, "1|1");
        }

        // create AD if it's not there
        // all depth is assigned to the reference allele (index 0); ALT counts stay 0
        if ( !oldGT.hasAD() && VC.isVariant() ) {
            final int[] AD = new int[VC.getNAlleles()];
            AD[0] = depth;
            builder.AD(AD);
        }

        if ( createRefGTs ) {
            final int ploidy = oldGT.getPloidy();
            final List<Allele> refAlleles = Collections.nCopies(ploidy,VC.getReference());

            //keep 0 depth samples as no-call
            if (depth > 0) {
                builder.alleles(refAlleles);
            }

            // also, the PLs are technically no longer usable
            builder.noPL();
        }

        // noAttributes() first wipes the attributes copied from oldGT,
        // then attributes(attrs) installs the edited map — order matters here
        recoveredGs.add(builder.noAttributes().attributes(attrs).make());
    }
    return recoveredGs;
}
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:59,代码来源:GenotypeGVCFs.java

示例10: getLikelihoods

/**
 * Build a VariantContext at the current locus whose genotypes carry indel
 * genotype likelihoods (PLs) computed from read/haplotype likelihoods.
 *
 * Stateful: on a COMPLETE context this resets the instance-level
 * {@code haplotypeMap}, {@code alleleList} and the caller-supplied
 * {@code perReadAlleleLikelihoodMap} before recomputing them, so call order
 * across pileup orientations matters.
 *
 * @return the built VariantContext with one no-call genotype (PL + DP) per
 *         sample with a pileup, or {@code null} when no alleles or
 *         haplotypes are available at this site.
 */
public VariantContext getLikelihoods(final RefMetaDataTracker tracker,
                                         final ReferenceContext ref,
                                         final Map<String, AlignmentContext> contexts,
                                         final AlignmentContextUtils.ReadOrientation contextType,
                                         final List<Allele> allAllelesToUse,
                                         final boolean useBAQedPileup,
                                         final GenomeLocParser locParser,
                                         final Map<String, PerReadAlleleLikelihoodMap> perReadAlleleLikelihoodMap) {

        GenomeLoc loc = ref.getLocus();
//        if (!ref.getLocus().equals(lastSiteVisited)) {
        if (contextType == AlignmentContextUtils.ReadOrientation.COMPLETE) {
            // starting a new site: clear allele list
            haplotypeMap.clear();
            perReadAlleleLikelihoodMap.clear(); // clean mapping sample-> per read, per allele likelihoods
            alleleList = getInitialAlleleList(tracker, ref, contexts, contextType, UAC, ignoreSNPAllelesWhenGenotypingIndels);
            if (alleleList.isEmpty())
                return null;
        }

        // populate haplotypeMap for the current alleles (adds elements in place)
        getHaplotypeMapFromAlleles(alleleList, ref, loc, haplotypeMap); // will update haplotypeMap adding elements
        if (haplotypeMap == null || haplotypeMap.isEmpty())
            return null;

        // start making the VariantContext
        // For all non-snp VC types, VC end location is just startLocation + length of ref allele including padding base.
        final int endLoc = loc.getStart() + alleleList.get(0).length() - 1;
        final int eventLength = getEventLength(alleleList);

        final VariantContextBuilder builder = new VariantContextBuilder("UG_call", loc.getContig(), loc.getStart(), endLoc, alleleList);

        // create the genotypes; no-call everyone for now
        GenotypesContext genotypes = GenotypesContext.create();
        final int ploidy = UAC.genotypeArgs.samplePloidy;
        final List<Allele> noCall = GATKVariantContextUtils.noCallAlleles(ploidy);

        // For each sample, get genotype likelihoods based on pileup
        // compute prior likelihoods on haplotypes, and initialize haplotype likelihood matrix with them.

        for (Map.Entry<String, AlignmentContext> sample : contexts.entrySet()) {
            AlignmentContext context = AlignmentContextUtils.stratify(sample.getValue(), contextType);

            if (!perReadAlleleLikelihoodMap.containsKey(sample.getKey())){
                // no likelihoods have been computed for this sample at this site
                perReadAlleleLikelihoodMap.put(sample.getKey(), new PerReadAlleleLikelihoodMap());
            }
            final ReadBackedPileup pileup = context.getBasePileup();
            // samples with no pileup are skipped entirely (no genotype emitted)
            if (pileup != null) {
                final GenotypeBuilder b = new GenotypeBuilder(sample.getKey());
                // diploid read/haplotype likelihoods, adjusted for per-sample contamination
                final double[] genotypeLikelihoods = pairModel.computeDiploidReadHaplotypeLikelihoods(pileup, haplotypeMap, ref, eventLength, perReadAlleleLikelihoodMap.get(sample.getKey()), UAC.getSampleContamination().get(sample.getKey()));
                b.PL(genotypeLikelihoods);
                b.alleles(noCall); // genotype itself stays a no-call; only PL/DP are set
                b.DP(getFilteredDepth(pileup));
                genotypes.add(b.make());

                if (DEBUG) {
                    System.out.format("Sample:%s Alleles:%s GL:", sample.getKey(), alleleList.toString());
                    for (int k = 0; k < genotypeLikelihoods.length; k++)
                        System.out.format("%1.4f ", genotypeLikelihoods[k]);
                    System.out.println();
                }
            }
        }

        return builder.genotypes(genotypes).make();
    }
 
开发者ID:PAA-NCIC,项目名称:SparkSeq,代码行数:66,代码来源:IndelGenotypeLikelihoodsCalculationModel.java

示例11: cleanupGenotypeAnnotations

/**
 * Cleans up genotype-level annotations that need to be updated:
 * 1. move MIN_DP to DP if present
 * 2. move GQ to RGQ when creating hom-ref genotypes
 * 3. remove SB if present
 * 4. change the PGT value from "0|1" to "1|1" for homozygous variant genotypes
 * 5. propagate DP into AD[0] when AD is absent
 *
 * @param VC           the VariantContext with the Genotypes to fix
 * @param createRefGTs if true, also create proper hom-ref genotypes
 *                     (the site is assumed monomorphic)
 * @return a new list of cleaned-up Genotypes
 */
private List<Genotype> cleanupGenotypeAnnotations(final VariantContext VC, final boolean createRefGTs) {
	final GenotypesContext oldGTs = VC.getGenotypes();
	final List<Genotype> recoveredGs = new ArrayList<>(oldGTs.size());
	for (final Genotype oldGT : oldGTs) {
		// mutable copy of the extended attributes; edited below, then re-installed
		final Map<String, Object> attrs = new HashMap<>(oldGT.getExtendedAttributes());

		final GenotypeBuilder builder = new GenotypeBuilder(oldGT);
		// default depth when the genotype carries no DP
		int depth = oldGT.hasDP() ? oldGT.getDP() : 0;

		// move the MIN_DP to DP
		// NOTE(review): parseInt assumes MIN_DP was stored as a String;
		// an Integer-valued attribute would throw ClassCastException — confirm upstream encoding
		if (oldGT.hasExtendedAttribute(GaeaVCFConstants.MIN_DP_FORMAT_KEY)) {
			depth = Integer.parseInt((String) oldGT.getAnyAttribute(GaeaVCFConstants.MIN_DP_FORMAT_KEY));
			builder.DP(depth);
			attrs.remove(GaeaVCFConstants.MIN_DP_FORMAT_KEY);
		}

		// move the GQ to RGQ
		if (createRefGTs && oldGT.hasGQ()) {
			builder.noGQ();
			attrs.put(GaeaVCFConstants.REFERENCE_GENOTYPE_QUALITY, oldGT.getGQ());
		}

		// remove SB
		attrs.remove(GaeaVCFConstants.STRAND_BIAS_BY_SAMPLE_KEY);

		// update PGT for hom vars
		if (oldGT.isHomVar() && oldGT.hasExtendedAttribute(GaeaVCFConstants.HAPLOTYPE_CALLER_PHASING_GT_KEY)) {
			attrs.put(GaeaVCFConstants.HAPLOTYPE_CALLER_PHASING_GT_KEY, "1|1");
		}

		// create AD if it's not there
		// all depth is assigned to the reference allele (index 0); ALT counts stay 0
		if (!oldGT.hasAD() && VC.isVariant()) {
			final int[] AD = new int[VC.getNAlleles()];
			AD[0] = depth;
			builder.AD(AD);
		}

		if (createRefGTs) {
			final int ploidy = oldGT.getPloidy();
			final List<Allele> refAlleles = Collections.nCopies(ploidy, VC.getReference());

			// keep 0 depth samples and 0 GQ samples as no-call
			if (depth > 0 && oldGT.hasGQ() && oldGT.getGQ() > 0) {
				builder.alleles(refAlleles);
			}

			// also, the PLs are technically no longer usable
			builder.noPL();
		}

		// noAttributes() first wipes the attributes copied from oldGT,
		// then attributes(attrs) installs the edited map — order matters here
		recoveredGs.add(builder.noAttributes().attributes(attrs).make());
	}
	return recoveredGs;
}
 
开发者ID:BGI-flexlab,项目名称:SOAPgaea,代码行数:54,代码来源:JointCallingEngine.java


注:本文中的htsjdk.variant.variantcontext.GenotypeBuilder.DP属性示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。