

Java Allele Class Code Examples

This article collects typical usage examples of the Java class htsjdk.variant.variantcontext.Allele. If you have been wondering what exactly the Allele class does, how to use it, or what real-world code built on it looks like, the curated examples below should help.


The Allele class belongs to the htsjdk.variant.variantcontext package. Fifteen code examples for the class are shown below, sorted by popularity.
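
Before working through the examples, a minimal sketch of the Allele API itself may help. The calls below (Allele.create, isReference, getBaseString, isSymbolic) are standard htsjdk methods that recur throughout the examples; the class is just scaffolding for a runnable demo.

import htsjdk.variant.variantcontext.Allele;

public class AlleleBasicsDemo {
    public static void main(String[] args) {
        // a reference allele and an alternate allele at the same site
        final Allele ref = Allele.create("A", true);   // true marks the reference allele
        final Allele alt = Allele.create("T", false);

        System.out.println(ref.isReference());    // true
        System.out.println(alt.getBaseString());  // T
        System.out.println(alt.length());         // 1

        // symbolic alleles (e.g. structural variants) carry no literal bases
        final Allele sv = Allele.create("<DEL>", false);
        System.out.println(sv.isSymbolic());      // true
    }
}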

Example 1: fillQualsFromLikelihoodMap

import htsjdk.variant.variantcontext.Allele; // import the required package/class
private void fillQualsFromLikelihoodMap(final List<Allele> alleles,
                                        final int refLoc,
                                        final PerReadAlleleLikelihoodMap likelihoodMap,
                                        final List<Double> refQuals,
                                        final List<Double> altQuals) {
    for ( final Map.Entry<GATKSAMRecord, Map<Allele,Double>> el : likelihoodMap.getLikelihoodReadMap().entrySet() ) {
        final MostLikelyAllele a = PerReadAlleleLikelihoodMap.getMostLikelyAllele(el.getValue());
        if ( ! a.isInformative() )
            continue; // read is non-informative

        final GATKSAMRecord read = el.getKey();
        if ( isUsableRead(read, refLoc) ) {
            final Double value = getElementForRead(read, refLoc, a);
            if ( value == null )
                continue;

            if ( a.getMostLikelyAllele().isReference() )
                refQuals.add(value);
            else if ( alleles.contains(a.getMostLikelyAllele()) )
                altQuals.add(value);
        }
    }
}
 
Developer ID: PAA-NCIC, Project: SparkSeq, Lines: 24, Source: RankSumTest.java
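
Example 1 leans on PerReadAlleleLikelihoodMap.getMostLikelyAllele, which picks, for each read, the allele with the highest log10 likelihood and flags the read as informative only when that best value clearly beats the runner-up. A self-contained sketch of the idea follows; the String keys and the 0.2 margin are illustrative assumptions, not GATK's actual types or threshold.

import java.util.Map;

final class MostLikelyAlleleSketch {
    // illustrative margin; GATK's informativeness threshold may differ
    private static final double INFORMATIVE_MARGIN = 0.2;

    /** Returns the best-supported allele key, or null if the read is non-informative. */
    static String mostLikelyAllele(final Map<String, Double> log10Likelihoods) {
        String best = null;
        double bestLk = Double.NEGATIVE_INFINITY;
        double secondLk = Double.NEGATIVE_INFINITY;
        for (final Map.Entry<String, Double> e : log10Likelihoods.entrySet()) {
            if (e.getValue() > bestLk) {
                secondLk = bestLk;
                bestLk = e.getValue();
                best = e.getKey();
            } else if (e.getValue() > secondLk) {
                secondLk = e.getValue();
            }
        }
        // a read whose best allele barely beats the second-best carries no signal
        return bestLk - secondLk >= INFORMATIVE_MARGIN ? best : null;
    }
}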

Example 2: performPerAlleleDownsampling

import htsjdk.variant.variantcontext.Allele; // import the required package/class
/**
 * For each allele "a", identify those reads whose most likely allele is "a", and remove a "downsamplingFraction" proportion
 * of those reads from the "likelihoodReadMap". This is used, e.g., to mitigate sample contamination.
 * @param downsamplingFraction - the fraction of supporting reads to remove from each allele; if <= 0 all reads are kept, if >= 1 all reads are removed.
 */
public void performPerAlleleDownsampling(final double downsamplingFraction) {
    // special case removal of all or no reads
    if ( downsamplingFraction <= 0.0 )
        return;
    if ( downsamplingFraction >= 1.0 ) {
        likelihoodReadMap.clear();
        return;
    }

    // start by stratifying the reads by the alleles they represent at this position
    final Map<Allele, List<GATKSAMRecord>> alleleReadMap = getAlleleStratifiedReadMap();

    // compute the reads to remove and actually remove them
    final List<GATKSAMRecord> readsToRemove = AlleleBiasedDownsamplingUtils.selectAlleleBiasedReads(alleleReadMap, downsamplingFraction);
    for ( final GATKSAMRecord read : readsToRemove )
        likelihoodReadMap.remove(read);
}
 
Developer ID: PAA-NCIC, Project: SparkSeq, Lines: 23, Source: PerReadAlleleLikelihoodMap.java
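
AlleleBiasedDownsamplingUtils.selectAlleleBiasedReads is referenced but not shown. A hypothetical stand-in that removes a fixed share of each allele's supporting reads could look like the sketch below; the uniform random choice is an assumption made for illustration, whereas GATK's actual routine selects reads so as to reduce allele bias.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;

final class DownsamplingSketch {
    /** Picks roughly downsamplingFraction of each allele's reads for removal, uniformly at random. */
    static <R> List<R> selectReadsToRemove(final Map<String, List<R>> alleleReadMap,
                                           final double downsamplingFraction) {
        final List<R> toRemove = new ArrayList<>();
        for (final List<R> reads : alleleReadMap.values()) {
            final int n = (int) Math.round(reads.size() * downsamplingFraction);
            final List<R> shuffled = new ArrayList<>(reads);
            Collections.shuffle(shuffled);
            toRemove.addAll(shuffled.subList(0, n));
        }
        return toRemove;
    }
}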

Example 3: annotateWithPileup

import htsjdk.variant.variantcontext.Allele; // import the required package/class
private void annotateWithPileup(final AlignmentContext stratifiedContext, final VariantContext vc, final GenotypeBuilder gb) {

    final HashMap<Byte, Integer> alleleCounts = new HashMap<>();
    for ( final Allele allele : vc.getAlleles() )
        alleleCounts.put(allele.getBases()[0], 0);

    final ReadBackedPileup pileup = stratifiedContext.getBasePileup();
    for ( final PileupElement p : pileup ) {
        if ( alleleCounts.containsKey(p.getBase()) )
            alleleCounts.put(p.getBase(), alleleCounts.get(p.getBase())+1);
    }

    // we need to add counts in the correct order
    final int[] counts = new int[alleleCounts.size()];
    counts[0] = alleleCounts.get(vc.getReference().getBases()[0]);
    for (int i = 0; i < vc.getAlternateAlleles().size(); i++)
        counts[i+1] = alleleCounts.get(vc.getAlternateAllele(i).getBases()[0]);

    gb.AD(counts);
}
 
Developer ID: PAA-NCIC, Project: SparkSeq, Lines: 21, Source: DepthPerAlleleBySample.java

Example 4: getBDNVariationType

import htsjdk.variant.variantcontext.Allele; // import the required package/class
private static VariationType getBDNVariationType(Variation variation, VariantContext context,
        Allele alt) {
    VariationType type = null;
    for (Pattern pattern : BIND_PATTERNS) {
        Matcher matcher = pattern.matcher(alt.getDisplayString());
        if (matcher.matches()) {
            type = VariationType.BND;
            if (context.getAttribute(BIND_CIPOS_ATTRIBUTE) != null) {
                variation.setBindInfo(new HashMap<>());
                variation.getBindInfo().put(BIND_CIPOS_ATTRIBUTE, context.getAttribute(BIND_CIPOS_ATTRIBUTE));
            }
            return type;
        }
    }
    return type;
}
 
Developer ID: react-dev26, Project: NGB-master, Lines: 17, Source: VcfFileReader.java
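
BIND_PATTERNS is not shown in the example. Regexes along the following lines would match the breakend (BND) ALT forms defined by the VCF 4.x spec, such as "A[chr2:321682[" or "]13:123456]T"; the exact patterns here are an assumption based on that notation, not NGB's actual constants.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class BreakendPatternDemo {
    // assumed patterns covering the t[p[, [p[t, t]p], and ]p]t breakend forms
    private static final Pattern[] BIND_PATTERNS = {
        Pattern.compile(".*\\[(.+):(\\d+)\\[.*"),
        Pattern.compile(".*\\](.+):(\\d+)\\].*")
    };

    public static void main(String[] args) {
        final String altDisplayString = "A[chr2:321682[";
        for (final Pattern pattern : BIND_PATTERNS) {
            final Matcher matcher = pattern.matcher(altDisplayString);
            if (matcher.matches()) {
                System.out.println("mate chromosome: " + matcher.group(1)); // chr2
                System.out.println("mate position:   " + matcher.group(2)); // 321682
            }
        }
    }
}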

Example 5: fixADFromSubsettedAlleles

import htsjdk.variant.variantcontext.Allele; // import the required package/class
/**
 * Fix the AD for the GenotypesContext of a VariantContext that has been subset
 *
 * @param originalGs       the original GenotypesContext
 * @param originalVC       the original VariantContext
 * @param allelesToUse     the new (sub)set of alleles to use
 * @return a new non-null GenotypesContext
 */
private static GenotypesContext fixADFromSubsettedAlleles(final GenotypesContext originalGs, final VariantContext originalVC, final List<Allele> allelesToUse) {

    // the bitset representing the allele indexes we want to keep
    final boolean[] alleleIndexesToUse = getAlleleIndexBitset(originalVC, allelesToUse);

    // the new genotypes to create
    final GenotypesContext newGTs = GenotypesContext.create(originalGs.size());

    // the samples
    final List<String> sampleIndices = originalGs.getSampleNamesOrderedByName();

    // create the new genotypes
    for ( int k = 0; k < originalGs.size(); k++ ) {
        final Genotype g = originalGs.get(sampleIndices.get(k));
        newGTs.add(fixAD(g, alleleIndexesToUse, allelesToUse.size()));
    }

    return newGTs;
}
 
Developer ID: PAA-NCIC, Project: SparkSeq, Lines: 28, Source: GATKVariantContextUtils.java
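
getAlleleIndexBitset is referenced but not shown. A plausible re-implementation is sketched below, under two assumptions: the reference allele is always kept, and a plain allele list stands in for the original VariantContext the real helper receives.

import htsjdk.variant.variantcontext.Allele;
import java.util.List;

final class AlleleBitsetSketch {
    /** Flags which of the original alleles survive subsetting; the reference is always kept. */
    static boolean[] getAlleleIndexBitset(final List<Allele> originalAlleles,
                                          final List<Allele> allelesToUse) {
        final boolean[] keep = new boolean[originalAlleles.size()];
        for (int i = 0; i < originalAlleles.size(); i++) {
            keep[i] = originalAlleles.get(i).isReference()
                    || allelesToUse.contains(originalAlleles.get(i));
        }
        return keep;
    }
}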

Example 6: calculateOutputAlleleSubset

import htsjdk.variant.variantcontext.Allele; // import the required package/class
/**
 * Given the exact model computations, returns the appropriate subset of alleles that progress to genotyping.
 * @param afcr the exact model calculation result.
 * @return never {@code null}.
 */
private OutputAlleleSubset calculateOutputAlleleSubset(final AFCalculationResult afcr) {
    final List<Allele> alleles = afcr.getAllelesUsedInGenotyping();

    final int alternativeAlleleCount = alleles.size() - 1;
    Allele[] outputAlleles = new Allele[alternativeAlleleCount];
    int[] mleCounts = new int[alternativeAlleleCount];
    int outputAlleleCount = 0;
    boolean siteIsMonomorphic = true;
    for (final Allele alternativeAllele : alleles) {
        if (alternativeAllele.isReference()) continue;
        final boolean isPlausible = afcr.isPolymorphicPhredScaledQual(alternativeAllele, configuration.genotypeArgs.STANDARD_CONFIDENCE_FOR_EMITTING);
        final boolean toOutput = isPlausible || forceKeepAllele(alternativeAllele);

        siteIsMonomorphic &= ! isPlausible;
        if (!toOutput) continue;
        outputAlleles[outputAlleleCount] = alternativeAllele;
        mleCounts[outputAlleleCount++] = afcr.getAlleleCountAtMLE(alternativeAllele);
    }

    return new OutputAlleleSubset(outputAlleleCount,outputAlleles,mleCounts,siteIsMonomorphic);
}
 
Developer ID: PAA-NCIC, Project: SparkSeq, Lines: 27, Source: GenotypingEngine.java

Example 7: parseBNFDInfo

import htsjdk.variant.variantcontext.Allele; // import the required package/class
private void parseBNFDInfo(Variation variation, VariantContext context, Integer sampleIndex,
        VcfFile vcfFile) {
    if (variation.getBindInfo() == null) {
        variation.setBindInfo(new HashMap<>());
    }
    Allele alt = context.getAlternateAllele(sampleIndex != null ? sampleIndex : 0);

    for (Pattern pattern : BIND_PATTERNS) {
        Matcher matcher = pattern.matcher(alt.getDisplayString());
        if (matcher.matches()) {
            String chrName = matcher.group(1);
            Optional<Chromosome> chromosome = referenceGenomeManager.loadChromosomes(vcfFile.getReferenceId())
                    .stream()
                    .filter(c -> c.getName().equals(chrName) ||
                            c.getName().equals(Utils.changeChromosomeName(chrName)))
                    .findAny();

            variation.getBindInfo().put(BIND_CHR_ATTRIBUTE, chromosome.isPresent() ?
                    chromosome.get().getId() : chrName);
            variation.getBindInfo().put(BIND_POS_ATTRIBUTE, matcher.group(2));
            break;
        }
    }
}
 
Developer ID: react-dev26, Project: NGB-master, Lines: 25, Source: VcfFileReader.java

Example 8: genotypeLikelihoods

import htsjdk.variant.variantcontext.Allele; // import the required package/class
/**
 * Calculate the likelihoods given the list of alleles and the likelihood map.
 *
 * <p>This operation is <b>thread-unsafe</b>.</p>
 *
 * @param likelihoods the likelihood matrix of all alleles vs all reads.
 *
 * @throws IllegalArgumentException if {@code likelihoods} is {@code null}, if its allele count
 *     does not match the allele count of this calculator, or if there are missing allele vs
 *     read combinations in {@code likelihoods}.
 *
 * @return never {@code null}.
 */
public <A extends Allele> GenotypeLikelihoods genotypeLikelihoods(final ReadLikelihoods.Matrix<A> likelihoods) {
    if (likelihoods == null)
        throw new IllegalArgumentException("the likelihood map cannot be null");

    if (likelihoods.alleleCount() != alleleCount)
        throw new IllegalArgumentException("mismatch between allele list and alleleCount");

    final int readCount = likelihoods.readCount();

    ensureReadCapacity(readCount);

    // [x][y][z] = z * LnLk(Read_x | Allele_y)
    final double[] readLikelihoodComponentsByAlleleCount
            = readLikelihoodComponentsByAlleleCount(likelihoods);
    final double[][] genotypeLikelihoodByRead = genotypeLikelihoodByRead(readLikelihoodComponentsByAlleleCount,readCount);
    final double[] readLikelihoodsByGenotypeIndex = genotypeLikelihoods(genotypeLikelihoodByRead, readCount);
    return GenotypeLikelihoods.fromLog10Likelihoods(readLikelihoodsByGenotypeIndex);
}
 
Developer ID: PAA-NCIC, Project: SparkSeq, Lines: 34, Source: GenotypeLikelihoodCalculator.java
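
For a diploid sample, the genotype likelihood assembled above reduces, per genotype {a, b}, to the sum over reads of log10((Lk(read|a) + Lk(read|b)) / 2). The sketch below states that formula directly, without the component caching the real calculator uses; log10Lk[read][allele] is an assumed layout of per-read, per-allele log10 likelihoods.

final class DiploidGenotypeLikelihoodSketch {
    /** Unoptimized diploid GL: sum over reads of log10 of the average allele likelihood. */
    static double log10GenotypeLikelihood(final double[][] log10Lk, final int alleleA, final int alleleB) {
        double sum = 0.0;
        for (final double[] readLk : log10Lk) {
            // average the two allele likelihoods in linear space, then return to log10
            sum += Math.log10((Math.pow(10, readLk[alleleA]) + Math.pow(10, readLk[alleleB])) / 2.0);
        }
        return sum;
    }
}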

Example 9: determineVariationType

import htsjdk.variant.variantcontext.Allele; // import the required package/class
private void determineVariationType(final VariantGA4GH context, final Variation variation,
                                    final List<Allele> alleles) {
    final VariantContext.Type type = setTypeVCFforGA4GH(alleles); // Determine VariationType
    switch (type) {
        case SNP:
            variation.setType(VariationType.SNV);
            break;
        case INDEL:
        case MIXED:
            variation.setType(determineInDel(context, alleles));
            break;
        case SYMBOLIC:
            parseSymbolicVariation(variation, alleles);
            break;
        case MNP:
            variation.setType(VariationType.MNP);
            break;
        default:
            variation.setType(null);
            if (variation.getGenotypeData() == null) {
                variation.setGenotypeData(new GenotypeData());
            }
            variation.getGenotypeData().setOrganismType(OrganismType.NO_VARIATION);
    }
}
 
Developer ID: react-dev26, Project: NGB-master, Lines: 26, Source: VcfGa4ghReader.java

Example 10: getHaplotypeMapFromAlleles

import htsjdk.variant.variantcontext.Allele; // import the required package/class
public static void getHaplotypeMapFromAlleles(final List<Allele> alleleList,
                                              final ReferenceContext ref,
                                              final GenomeLoc loc,
                                              final LinkedHashMap<Allele, Haplotype> haplotypeMap) {
    // protect against having an indel too close to the edge of a contig
    if (loc.getStart() <= HAPLOTYPE_SIZE)
        haplotypeMap.clear();
    // check if there is enough reference window to create haplotypes (can be an issue at end of contigs)
    else if (ref.getWindow().getStop() < loc.getStop() + HAPLOTYPE_SIZE)
        haplotypeMap.clear();
    else if (alleleList.isEmpty())
        haplotypeMap.clear();
    else {
        final int eventLength = getEventLength(alleleList);
        final int hsize = ref.getWindow().size() - Math.abs(eventLength) - 1;
        final int numPrefBases = ref.getLocus().getStart() - ref.getWindow().getStart() + 1;

        if (hsize <= 0)  // protect against event lengths larger than ref window sizes
            haplotypeMap.clear();
        else
            haplotypeMap.putAll(Haplotype.makeHaplotypeListFromAlleles(alleleList, loc.getStart(),
                    ref, hsize, numPrefBases));
    }
}
 
Developer ID: PAA-NCIC, Project: SparkSeq, Lines: 25, Source: IndelGenotypeLikelihoodsCalculationModel.java
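
To make the window arithmetic concrete, the demo below reproduces hsize and numPrefBases for made-up coordinates (a reference window spanning [90, 210], a locus starting at 100, and an event of length 10); the numbers are purely illustrative.

public class HaplotypeWindowDemo {
    public static void main(String[] args) {
        // assumed example coordinates, not taken from any real contig
        final int windowStart = 90, windowStop = 210;
        final int windowSize = windowStop - windowStart + 1;  // 121
        final int locusStart = 100;
        final int eventLength = 10;

        // haplotype size: reference window minus the event, minus one anchor base
        final int hsize = windowSize - Math.abs(eventLength) - 1;   // 121 - 10 - 1 = 110
        // number of reference bases preceding the event within the window
        final int numPrefBases = locusStart - windowStart + 1;      // 100 - 90 + 1 = 11
        System.out.println("hsize=" + hsize + ", numPrefBases=" + numPrefBases);
    }
}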

Example 11: composeCallAttributes

import htsjdk.variant.variantcontext.Allele; // import the required package/class
@Override
protected Map<String, Object> composeCallAttributes(final boolean inheritAttributesFromInputVC, final VariantContext vc,
                                                    final AlignmentContext rawContext, final Map<String, AlignmentContext> stratifiedContexts, final RefMetaDataTracker tracker, final ReferenceContext refContext, final List<Integer> alleleCountsofMLE, final boolean bestGuessIsRef,
                                                    final AFCalculationResult AFresult, final List<Allele> allAllelesToUse, final GenotypesContext genotypes,
                                                    final GenotypeLikelihoodsCalculationModel.Model model, final Map<String, PerReadAlleleLikelihoodMap> perReadAlleleLikelihoodMap) {
    final Map<String, Object> result = super.composeCallAttributes(inheritAttributesFromInputVC, vc, rawContext, stratifiedContexts, tracker, refContext, alleleCountsofMLE, bestGuessIsRef,
            AFresult, allAllelesToUse, genotypes, model, perReadAlleleLikelihoodMap);

    final boolean limitedContext = tracker == null || refContext == null || rawContext == null || stratifiedContexts == null;

    if (configuration.COMPUTE_SLOD && !limitedContext && !bestGuessIsRef) {
        final double strandScore = calculateSLOD(stratifiedContexts, tracker, refContext, AFresult, allAllelesToUse, model, perReadAlleleLikelihoodMap);
        if (!Double.isNaN(strandScore))
            result.put("SB", strandScore);
    }
    return result;
}
 
Developer ID: PAA-NCIC, Project: SparkSeq, Lines: 18, Source: UnifiedGenotypingEngine.java

Example 12: realignReadsToMostLikelyHaplotype

import htsjdk.variant.variantcontext.Allele; // import the required package/class
/**
 * Loop over all of the reads in this likelihood map and realign each to its most likely haplotype
 * @param haplotypes            the collection of haplotypes
 * @param paddedReferenceLoc    the padded location of the active region
 */
public void realignReadsToMostLikelyHaplotype(final Collection<Haplotype> haplotypes, final GenomeLoc paddedReferenceLoc) {

    // we need to remap the Alleles back to the Haplotypes; inefficient but unfortunately this is a requirement currently
    final Map<Allele, Haplotype> alleleToHaplotypeMap = new HashMap<>(haplotypes.size());
    for ( final Haplotype haplotype : haplotypes )
        alleleToHaplotypeMap.put(Allele.create(haplotype.getBases()), haplotype);

    final Map<GATKSAMRecord, Map<Allele, Double>> newLikelihoodReadMap = new LinkedHashMap<>(likelihoodReadMap.size());
    for( final Map.Entry<GATKSAMRecord, Map<Allele, Double>> entry : likelihoodReadMap.entrySet() ) {
        final MostLikelyAllele bestAllele = PerReadAlleleLikelihoodMap.getMostLikelyAllele(entry.getValue());
        final GATKSAMRecord alignedToRef = AlignmentUtils.createReadAlignedToRef(entry.getKey(), alleleToHaplotypeMap.get(bestAllele.getMostLikelyAllele()), paddedReferenceLoc.getStart(), bestAllele.isInformative());
        newLikelihoodReadMap.put(alignedToRef, entry.getValue());
    }

    likelihoodReadMap.clear();
    likelihoodReadMap.putAll(newLikelihoodReadMap);
}
 
Developer ID: PAA-NCIC, Project: SparkSeq, Lines: 23, Source: PerReadAlleleLikelihoodMap.java

Example 13: typeOfBiallelicVariant

import htsjdk.variant.variantcontext.Allele; // import the required package/class
private static VariantContext.Type typeOfBiallelicVariant(Allele ref, Allele allele) {
    if (ref.isSymbolic()) {
        throw new IllegalStateException("Unexpected error: encountered a record with a symbolic reference allele");
    }
    if (allele.isSymbolic()) {
        return VariantContext.Type.SYMBOLIC;
    }
    if (ref.length() == allele.length()) {
        if (allele.length() == 1) {
            return VariantContext.Type.SNP;
        } else {
            return VariantContext.Type.MNP;
        }
    }

    return VariantContext.Type.INDEL;
}
 
Developer ID: react-dev26, Project: NGB-master, Lines: 18, Source: VcfGa4ghReader.java
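
A quick usage demo of the classification above; since the original method is private, a local copy is embedded to keep the demo runnable.

import htsjdk.variant.variantcontext.Allele;
import htsjdk.variant.variantcontext.VariantContext;

public class BiallelicTypeDemo {
    public static void main(String[] args) {
        final Allele ref = Allele.create("A", true);
        System.out.println(typeOfBiallelicVariant(ref, Allele.create("T")));                        // SNP
        System.out.println(typeOfBiallelicVariant(Allele.create("AT", true), Allele.create("GC"))); // MNP
        System.out.println(typeOfBiallelicVariant(ref, Allele.create("AT")));                       // INDEL
    }

    // local copy of the classification logic from Example 13
    static VariantContext.Type typeOfBiallelicVariant(Allele ref, Allele allele) {
        if (ref.isSymbolic()) {
            throw new IllegalStateException("Unexpected error: encountered a record with a symbolic reference allele");
        }
        if (allele.isSymbolic()) {
            return VariantContext.Type.SYMBOLIC;
        }
        if (ref.length() == allele.length()) {
            return allele.length() == 1 ? VariantContext.Type.SNP : VariantContext.Type.MNP;
        }
        return VariantContext.Type.INDEL;
    }
}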

Example 14: determineHeterozygousGenotypeGA4GH

import htsjdk.variant.variantcontext.Allele; // import the required package/class
private static OrganismType determineHeterozygousGenotypeGA4GH(VariantGA4GH ga4GHEntity, List<Allele> alleles,
                                                               int[] genotypeArray) {
    OrganismType organismType = null;
    for (int i = 0; i < genotypeArray.length; i++) {
        if (alleles.get(i).isReference()) {
            genotypeArray[i] = 0;
            organismType = OrganismType.HETEROZYGOUS;
        } else {
            if (organismType == null) {
                organismType = OrganismType.HETERO_VAR;
            }
            genotypeArray[i] = ga4GHEntity.getAlternateBases().indexOf(alleles.get(i)) + 1;
        }
    }

    return organismType;
}
 
Developer ID: react-dev26, Project: NGB-master, Lines: 18, Source: VcfGa4ghReader.java

Example 15: annotateWithLikelihoods

import htsjdk.variant.variantcontext.Allele; // import the required package/class
private void annotateWithLikelihoods(final PerReadAlleleLikelihoodMap perReadAlleleLikelihoodMap, final VariantContext vc, final GenotypeBuilder gb) {
    final Set<Allele> alleles = new HashSet<>(vc.getAlleles());

    // make sure that there's a meaningful relationship between the alleles in the perReadAlleleLikelihoodMap and our VariantContext
    if ( ! perReadAlleleLikelihoodMap.getAllelesSet().containsAll(alleles) )
        throw new IllegalStateException("VC alleles " + alleles + " not a subset of per read allele map alleles " + perReadAlleleLikelihoodMap.getAllelesSet());

    final HashMap<Allele, Integer> alleleCounts = new HashMap<>();
    for ( final Allele allele : vc.getAlleles() ) { alleleCounts.put(allele, 0); }

    for ( final Map.Entry<GATKSAMRecord,Map<Allele,Double>> el : perReadAlleleLikelihoodMap.getLikelihoodReadMap().entrySet()) {
        final MostLikelyAllele a = PerReadAlleleLikelihoodMap.getMostLikelyAllele(el.getValue(), alleles);
        if (! a.isInformative() ) continue; // read is non-informative
        final GATKSAMRecord read = el.getKey();
        final int prevCount = alleleCounts.get(a.getMostLikelyAllele());
        alleleCounts.put(a.getMostLikelyAllele(), prevCount + 1);
    }

    final int[] counts = new int[alleleCounts.size()];
    counts[0] = alleleCounts.get(vc.getReference());
    for (int i = 0; i < vc.getAlternateAlleles().size(); i++)
        counts[i+1] = alleleCounts.get( vc.getAlternateAllele(i) );

    gb.AD(counts);
}
 
Developer ID: PAA-NCIC, Project: SparkSeq, Lines: 26, Source: DepthPerAlleleBySample.java


Note: The htsjdk.variant.variantcontext.Allele examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their original authors, who retain copyright of the source code; consult each project's License before redistributing or reusing it. Do not reproduce this article without permission.