This article collects typical usage examples of the Java method org.apache.commons.collections4.ListUtils.union. If you are wondering what ListUtils.union does, how to use it, or where to find real examples of it, the curated method examples below may help. You can also explore further usage examples of its containing class, org.apache.commons.collections4.ListUtils.
The following presents 12 code examples of the ListUtils.union method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
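Before the examples, a minimal self-contained sketch of what ListUtils.union actually does may be useful: it returns a new list holding the elements of the first list followed by the elements of the second, keeping duplicates and leaving both inputs untouched (class and variable names below are illustrative):

import java.util.Arrays;
import java.util.List;
import org.apache.commons.collections4.ListUtils;

public class ListUtilsUnionDemo {
    public static void main(String[] args) {
        List<Integer> first = Arrays.asList(1, 2, 3);
        List<Integer> second = Arrays.asList(3, 4);
        // union is plain concatenation: a new list, duplicates preserved
        List<Integer> union = ListUtils.union(first, second);
        System.out.println(union);  // [1, 2, 3, 3, 4]
        System.out.println(first);  // [1, 2, 3] -- the inputs are not modified
    }
}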
Example 1: CoveragePerContigCollection
import org.apache.commons.collections4.ListUtils; // import the package/class the method depends on
public CoveragePerContigCollection(final LocatableMetadata metadata,
                                   final List<CoveragePerContig> coveragePerContigs,
                                   final List<String> contigs) {
    super(
            metadata,
            coveragePerContigs,
            new TableColumnCollection(ListUtils.union(Collections.singletonList(SAMPLE_NAME_TABLE_COLUMN), contigs)),
            dataLine -> new CoveragePerContig(
                    dataLine.get(SAMPLE_NAME_TABLE_COLUMN),
                    contigs.stream().collect(Collectors.toMap(
                            Function.identity(),
                            dataLine::getInt,
                            (u, v) -> {
                                throw new GATKException.ShouldNeverReachHereException("Cannot have duplicate contigs.");
                            }, // contigs should already be distinct
                            LinkedHashMap::new))),
            (coveragePerContig, dataLine) -> {
                dataLine.append(coveragePerContig.getSampleName());
                contigs.stream().map(coveragePerContig::getCoverage).forEach(dataLine::append);
            });
}
Example 2: joinListCommands
import org.apache.commons.collections4.ListUtils; // import the package/class the method depends on
/**
 * Joins several command lists into a single list.
 *
 * @param params the lists to join.
 * @return a list containing all commands, in the order given.
 */
public List<String> joinListCommands(List<String>... params) {
    List<String> result = new ArrayList<>();
    for (List<String> param : params) {
        result = ListUtils.union(result, param);
    }
    return result;
}
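Because ListUtils.union allocates a fresh list on every call, the loop above re-copies the accumulated result once per argument, which is quadratic in the total number of commands. A minimal single-allocation alternative with the same element order (this rewrite is an illustration, not part of the original project):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class JoinListsDemo {
    // append each input list once instead of re-copying the accumulated result
    @SafeVarargs
    public static List<String> joinListCommands(List<String>... params) {
        List<String> result = new ArrayList<>();
        for (List<String> param : params) {
            result.addAll(param);
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(joinListCommands(Arrays.asList("cd", "ls"), Arrays.asList("pwd")));  // [cd, ls, pwd]
    }
}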
Example 3: initialNonConstantLog2CopyRatios
import org.apache.commons.collections4.ListUtils; // import the package/class the method depends on
/**
 * Returns evenly spaced initial log-2 copy ratios for the non-constant hidden states.
 *
 * @param K the initial number of hidden states
 */
private static List<Double> initialNonConstantLog2CopyRatios(final int K) {
    ParamUtils.isPositive(K, "must have at least one non-constant state");
    final double spacing = (MAX_INITIAL_LOG_2_COPY_RATIO - MIN_INITIAL_LOG_2_COPY_RATIO) / (K + 1);
    final int numNegativeStates = K / 2;
    final int numPositiveStates = K - numNegativeStates;
    final List<Double> negativeStates = Doubles.asList(GATKProtectedMathUtils.createEvenlySpacedPoints(MIN_INITIAL_LOG_2_COPY_RATIO, spacing, numNegativeStates));
    final List<Double> positiveStates = Doubles.asList(GATKProtectedMathUtils.createEvenlySpacedPoints(spacing, MAX_INITIAL_LOG_2_COPY_RATIO, numPositiveStates));
    return ListUtils.union(negativeStates, positiveStates);
}
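As a quick check of the arithmetic: for K = 5, spacing = (MAX_INITIAL_LOG_2_COPY_RATIO - MIN_INITIAL_LOG_2_COPY_RATIO) / 6, numNegativeStates = 2, and numPositiveStates = 3. Since ListUtils.union preserves order, the two negative (deletion-like) log-2 copy ratios come first in the returned list, followed by the three positive (amplification-like) ones.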
Example 4: ScalarHMMSegmenter
import org.apache.commons.collections4.ListUtils; // import the package/class the method depends on
public ScalarHMMSegmenter(final List<SimpleInterval> positions, final List<DATA> data,
                          final List<Double> constantHiddenStates, final List<Double> initialNonConstantHiddenStates) {
    super(positions, data, ListUtils.union(constantHiddenStates, initialNonConstantHiddenStates),
            uniformWeights(constantHiddenStates.size() + initialNonConstantHiddenStates.size()),
            DEFAULT_INITIAL_CONCENTRATION, DEFAULT_MEMORY_LENGTH);
    numConstantStates = constantHiddenStates.size();
}
Example 5: makeLikelihoods
import org.apache.commons.collections4.ListUtils; // import the package/class the method depends on
public static ReadLikelihoods<Allele> makeLikelihoods(final String sample,
                                                      final List<GATKRead> refReads,
                                                      final List<GATKRead> altReads,
                                                      final List<GATKRead> uninformativeReads,
                                                      final double refReadAltLikelihood,
                                                      final double altReadRefLikelihood,
                                                      final double badReadAltLikelihood,
                                                      final Allele refAllele,
                                                      final Allele altAllele) {
    final List<GATKRead> reads = ListUtils.union(ListUtils.union(refReads, altReads), uninformativeReads);
    final ReadLikelihoods<Allele> likelihoods = initializeReadLikelihoods(sample, new IndexedAlleleList<>(Arrays.asList(refAllele, altAllele)), reads);
    final LikelihoodMatrix<Allele> matrix = likelihoods.sampleMatrix(0);
    int readIndex = 0;
    for (int i = 0; i < refReads.size(); i++) {
        matrix.set(0, readIndex, MATCH_LIKELIHOOD);
        matrix.set(1, readIndex, refReadAltLikelihood);
        readIndex++;
    }
    for (int i = 0; i < altReads.size(); i++) {
        matrix.set(0, readIndex, altReadRefLikelihood);
        matrix.set(1, readIndex, MATCH_LIKELIHOOD);
        readIndex++;
    }
    for (int i = 0; i < uninformativeReads.size(); i++) {
        matrix.set(0, readIndex, MATCH_LIKELIHOOD);
        matrix.set(1, readIndex, badReadAltLikelihood);
        readIndex++;
    }
    return likelihoods;
}
Example 6: execute
import org.apache.commons.collections4.ListUtils; // import the package/class the method depends on
@Override
@Transactional
public void execute( JobConfiguration jobConfiguration )
{
    notifier.clear( jobConfiguration ).notify( jobConfiguration, "Monitoring data" );

    MonitoringJobParameters monitoringJobParameters = (MonitoringJobParameters) jobConfiguration.getJobParameters();

    //TODO improve collection usage

    try
    {
        List<Period> periods;
        Collection<ValidationRule> validationRules;
        List<String> groupUIDs = monitoringJobParameters.getValidationRuleGroups();

        if ( groupUIDs.isEmpty() )
        {
            validationRules = validationRuleService
                .getValidationRulesWithNotificationTemplates();
        }
        else
        {
            validationRules = groupUIDs.stream()
                .map( ( uid ) -> validationRuleService.getValidationRuleGroup( uid ) )
                .filter( Objects::nonNull )
                .map( ValidationRuleGroup::getMembers )
                .filter( Objects::nonNull )
                .reduce( Sets.newHashSet(), SetUtils::union );
        }

        if ( monitoringJobParameters.getRelativeStart() != 0 && monitoringJobParameters.getRelativeEnd() != 0 )
        {
            Date startDate = DateUtils.getDateAfterAddition( new Date(), monitoringJobParameters.getRelativeStart() );
            Date endDate = DateUtils.getDateAfterAddition( new Date(), monitoringJobParameters.getRelativeEnd() );

            periods = periodService.getPeriodsBetweenDates( startDate, endDate );
            periods = ListUtils.union( periods, periodService.getIntersectionPeriods( periods ) );
        }
        else
        {
            periods = validationRules.stream()
                .map( ValidationRule::getPeriodType )
                .distinct()
                .map( ( vr ) -> Arrays.asList( vr.createPeriod(), vr.getPreviousPeriod( vr.createPeriod() ) ) )
                .reduce( Lists.newArrayList(), ListUtils::union );
        }

        ValidationAnalysisParams parameters = validationService
            .newParamsBuilder( validationRules, null, periods )
            .withIncludeOrgUnitDescendants( true )
            .withMaxResults( ValidationService.MAX_SCHEDULED_ALERTS )
            .withSendNotifications( monitoringJobParameters.isSendNotifications() )
            .withPersistResults( monitoringJobParameters.isPersistResults() )
            .build();

        validationService.validationAnalysis( parameters );

        notifier.notify( jobConfiguration, INFO, "Monitoring process done", true );
    }
    catch ( RuntimeException ex )
    {
        notifier.notify( jobConfiguration, ERROR, "Process failed: " + ex.getMessage(), true );
        messageService.sendSystemErrorNotification( "Monitoring process failed", ex );
        throw ex;
    }
}
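One detail worth noting in this example is the reducer reduce( Lists.newArrayList(), ListUtils::union ), which folds a stream of lists into a single list. A minimal standalone sketch of the same idiom (values illustrative); for large inputs, flatMap with a collector avoids the repeated copying that union implies:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Stream;
import org.apache.commons.collections4.ListUtils;

public class ReduceUnionDemo {
    public static void main(String[] args) {
        // union returns a new list on every call, so an immutable empty identity is safe
        List<Integer> flattened = Stream.of(Arrays.asList(1, 2), Arrays.asList(3), Arrays.asList(4, 5))
                .reduce(Collections.emptyList(), ListUtils::union);
        System.out.println(flattened);  // [1, 2, 3, 4, 5]
    }
}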
Example 7: addNonRefSymbolicAllele
import org.apache.commons.collections4.ListUtils; // import the package/class the method depends on
private VariantContext addNonRefSymbolicAllele(final VariantContext mergedVC) {
    final List<Allele> alleleList = ListUtils.union(mergedVC.getAlleles(), Arrays.asList(GATKVCFConstants.NON_REF_SYMBOLIC_ALLELE));
    return new VariantContextBuilder(mergedVC).alleles(alleleList).make();
}
Example 8: apply
import org.apache.commons.collections4.ListUtils; // import the package/class the method depends on
@Override
public Object apply(List<Object> args, Context context) throws ParseException {
    SensorEnrichmentConfig config = getSensorEnrichmentConfig(args, 0);
    ThreatIntelConfig tiConfig = (ThreatIntelConfig) getConfig(config, EnrichmentConfigFunctions.Type.THREAT_INTEL);
    if (tiConfig == null) {
        tiConfig = new ThreatIntelConfig();
        config.setThreatIntel(tiConfig);
    }
    org.apache.metron.common.configuration.enrichment.threatintel.ThreatTriageConfig triageConfig = tiConfig.getTriageConfig();
    if (triageConfig == null) {
        triageConfig = new org.apache.metron.common.configuration.enrichment.threatintel.ThreatTriageConfig();
        tiConfig.setTriageConfig(triageConfig);
    }

    // build the new rules
    List<RiskLevelRule> newRules = new ArrayList<>();
    for (Map<String, Object> newRule : getNewRuleDefinitions(args)) {
        if (newRule != null && newRule.containsKey("rule") && newRule.containsKey("score")) {
            // create the rule
            RiskLevelRule ruleToAdd = new RiskLevelRule();
            ruleToAdd.setRule((String) newRule.get(RULE_EXPR_KEY));
            ruleToAdd.setScore(ConversionUtils.convert(newRule.get(RULE_SCORE_KEY), Double.class));

            // add optional rule fields
            if (newRule.containsKey(RULE_NAME_KEY)) {
                ruleToAdd.setName((String) newRule.get(RULE_NAME_KEY));
            }
            if (newRule.containsKey(RULE_COMMENT_KEY)) {
                ruleToAdd.setComment((String) newRule.get(RULE_COMMENT_KEY));
            }
            if (newRule.containsKey(RULE_REASON_KEY)) {
                ruleToAdd.setReason((String) newRule.get(RULE_REASON_KEY));
            }
            newRules.add(ruleToAdd);
        }
    }

    // combine the new and existing rules
    List<RiskLevelRule> allRules = ListUtils.union(triageConfig.getRiskLevelRules(), newRules);
    triageConfig.setRiskLevelRules(allRules);
    return toJSON(config);
}
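Note that ListUtils.union throws a NullPointerException if either argument is null, so the combining line above relies on getRiskLevelRules() never returning null (an assumption about ThreatTriageConfig's initialization). Where null is a possibility, commons-collections4 offers ListUtils.emptyIfNull as a guard; a minimal sketch:

import java.util.Arrays;
import java.util.List;
import org.apache.commons.collections4.ListUtils;

public class EmptyIfNullDemo {
    public static void main(String[] args) {
        List<String> existing = null;  // e.g. a config field that was never set
        List<String> additions = Arrays.asList("rule1");
        // emptyIfNull substitutes an empty list for null, avoiding the NPE
        List<String> combined = ListUtils.union(ListUtils.emptyIfNull(existing), additions);
        System.out.println(combined);  // [rule1]
    }
}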
Example 9: testBooleanOperators
import org.apache.commons.collections4.ListUtils; // import the package/class the method depends on
@Test
public void testBooleanOperators() {
    findPage.goToListView();
    final List<String> potentialTerms = new ArrayList<>(Arrays.asList(
            "mabuya",
            "margita",
            "overtrain",
            "brevity",
            "tessellate",
            "hydrangea",
            "\"dearly departed\"",
            "abstruse",
            "lobotomy"
    ));
    final String termOne = findService.termWithBetween1And30Results(potentialTerms);
    final String termTwo = findService.termWithBetween1And30Results(potentialTerms);
    assertThat("Test only works if both query terms have <= 30 results", "", not(anyOf(is(termOne), is(termTwo))));

    final List<String> resultsTermOne = getResultsList(termOne);
    final int resultsNumberTermOne = resultsTermOne.size();
    final List<String> resultsTermTwo = getResultsList(termTwo);
    final int resultsNumberTermTwo = resultsTermTwo.size();

    final List<String> andResults = getResultsList(termOne + " AND " + termTwo);
    final int numberOfAndResults = andResults.size();
    assertThat(numberOfAndResults, allOf(lessThanOrEqualTo(resultsNumberTermOne), lessThanOrEqualTo(resultsNumberTermTwo)));
    assertThat(termOne + " results contain every result in the 'AND' results", resultsTermOne.containsAll(andResults));
    assertThat(termTwo + " results contain every result in the 'AND' results", resultsTermTwo.containsAll(andResults));

    final List<String> orResults = getResultsList(termOne + " OR " + termTwo);
    final Set<String> concatenatedResults = new HashSet<>(ListUtils.union(resultsTermOne, resultsTermTwo));
    assertThat(orResults, hasSize(concatenatedResults.size()));
    assertThat("'OR' results contain all the results present for each term alone", orResults.containsAll(concatenatedResults));

    final List<String> xorResults = getResultsList(termOne + " XOR " + termTwo);
    concatenatedResults.removeAll(andResults);
    assertThat(xorResults.size(), is(concatenatedResults.size()));
    assertThat(xorResults, containsInAnyOrder(concatenatedResults.toArray()));

    checkANotB(termOne + " NOT " + termTwo, new HashSet<>(concatenatedResults), resultsTermTwo);
    checkANotB(termTwo + " NOT " + termOne, new HashSet<>(concatenatedResults), resultsTermOne);
}
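Because ListUtils.union keeps duplicates, the test wraps its result in a HashSet to get genuine set-union semantics before comparing against the OR results. A minimal sketch of the difference (values illustrative):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.collections4.ListUtils;

public class UnionVersusSetDemo {
    public static void main(String[] args) {
        List<String> a = Arrays.asList("x", "y");
        List<String> b = Arrays.asList("y", "z");
        List<String> concatenated = ListUtils.union(a, b);       // [x, y, y, z]
        Set<String> deduplicated = new HashSet<>(concatenated);  // [x, y, z]
        System.out.println(concatenated);
        System.out.println(deduplicated);
    }
}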
Example 10: execute
import org.apache.commons.collections4.ListUtils; // import the package/class the method depends on
@Override
public void execute()
{
    // Update the job statuses
    _persistenceModule.updateJobStatuses();

    // Get the current worker models
    List<WorkerModel> workers = _persistenceModule.fetchAllWorkers();
    List<WorkerModel> activeWorkers = workers.stream().filter(w -> w.isActive()).collect(Collectors.toList());

    // Get the top-priority unblocked jobs and the currently assigned jobs
    List<JobModel> waitingJobs = _persistenceModule.fetchJobsWithStatus(JobStatus.WAITING);
    List<JobModel> assignedJobs = _persistenceModule.fetchJobsWithStatus(JobStatus.PROCESSING);
    List<JobModel> prioritySortedJobs = ListUtils.union(waitingJobs, assignedJobs);
    prioritySortedJobs.sort(new JobPriorityComparator());

    // Determine the optimal greedy allocation of jobs and the current real allocation
    JobAssignments idealAssingments = buildDesiredJobAssingments(activeWorkers, prioritySortedJobs);
    JobAssignments actualAssingments = buildActualJobAssingments(activeWorkers, assignedJobs);

    // Determine how many jobs of each priority to give to / take from each worker
    JobAssignments jobsAssignmentsToGive = JobAssignments.subtract(idealAssingments, actualAssingments);
    JobAssignments jobsAssignmentsToTake = JobAssignments.subtract(actualAssingments, idealAssingments);

    // Determine which jobs will be taken back
    Map<Integer, List<JobModel>> assignedJobsByPriority = assignedJobs
            .stream()
            .collect(Collectors.groupingBy(JobModel::getPriority));
    List<JobAssignmentOperation> takeJobOperations = buildListOfJobsToTake(jobsAssignmentsToTake,
            assignedJobsByPriority);

    // Determine which jobs are available (or will be available) for assignment
    Set<UUID> jobIDsToTake = takeJobOperations
            .stream()
            .map(jobOperation -> jobOperation.getWorkerID())
            .collect(Collectors.toSet());
    List<JobModel> jobsToTake = assignedJobs
            .stream()
            .filter(job -> jobIDsToTake.contains(job.getJobID()))
            .collect(Collectors.toList());
    List<JobModel> availableJobs = ListUtils.union(waitingJobs, jobsToTake);
    Map<Integer, List<JobModel>> availableJobsByPriority = availableJobs
            .stream()
            .collect(Collectors.groupingBy(JobModel::getPriority));

    // Determine which jobs will be given out
    List<JobAssignmentOperation> giveJobOperations = buildListOfJobsToGive(jobsAssignmentsToGive,
            availableJobsByPriority);

    // Because we have to wait for worker acknowledgments before we can finish taking back a job that we want to
    // re-assign, we create a thread for every individual assign and take operation. Each assign operation has a
    // job future; as soon as that future is available, we can talk to the worker and give it the job. Jobs which
    // are already unassigned get their future fulfilled immediately, but any job the network must wait for is only
    // fulfilled once the worker that currently holds it has returned it.
    List<JobAssignmentOperation> allJobOperations = ListUtils.union(takeJobOperations, giveJobOperations);
    _jobAssignmentThreads = allJobOperations
            .stream()
            .map(jobOperation -> _jobAssignmentThreadFactory.make(jobOperation))
            .collect(Collectors.toList());
    _jobAssignmentThreads.stream().forEach(jobThread -> jobThread.start());
    _jobAssignmentThreads.stream().forEach(jobThread -> jobThread.join());
}
Example 11: addNonRefSymbolicAllele
import org.apache.commons.collections4.ListUtils; // import the package/class the method depends on
private VariantContext addNonRefSymbolicAllele(final VariantContext mergedVC) {
    final List<Allele> alleleList = ListUtils.union(mergedVC.getAlleles(), Arrays.asList(Allele.NON_REF_ALLELE));
    return new VariantContextBuilder(mergedVC).alleles(alleleList).make();
}
Example 12: makeTriAllelicLikelihoods
import org.apache.commons.collections4.ListUtils; // import the package/class the method depends on
public static ReadLikelihoods<Allele> makeTriAllelicLikelihoods(final String sample,
                                                                final List<GATKRead> refReads,
                                                                final List<GATKRead> alt1Reads,
                                                                final List<GATKRead> alt2Reads,
                                                                final List<GATKRead> uninformativeReads,
                                                                final double refReadAltLikelihood,
                                                                final double alt1ReadRefLikelihood,
                                                                final double alt2ReadRefLikelihood,
                                                                final double badReadAltLikelihood,
                                                                final Allele refAllele,
                                                                final Allele alt1Allele,
                                                                final Allele alt2Allele) {
    final List<GATKRead> reads = ListUtils.union(ListUtils.union(refReads, ListUtils.union(alt1Reads, alt2Reads)), uninformativeReads);
    final ReadLikelihoods<Allele> likelihoods = initializeReadLikelihoods(sample, new IndexedAlleleList<>(Arrays.asList(refAllele, alt1Allele, alt2Allele)), reads);
    final LikelihoodMatrix<Allele> matrix = likelihoods.sampleMatrix(0);
    int readIndex = 0;
    for (int i = 0; i < refReads.size(); i++) {
        matrix.set(0, readIndex, MATCH_LIKELIHOOD);
        matrix.set(1, readIndex, refReadAltLikelihood);
        matrix.set(2, readIndex, refReadAltLikelihood);
        readIndex++;
    }
    for (int i = 0; i < alt1Reads.size(); i++) {
        matrix.set(0, readIndex, alt1ReadRefLikelihood);
        matrix.set(1, readIndex, MATCH_LIKELIHOOD);
        matrix.set(2, readIndex, alt1ReadRefLikelihood);
        readIndex++;
    }
    for (int i = 0; i < alt2Reads.size(); i++) {
        matrix.set(0, readIndex, alt2ReadRefLikelihood);
        matrix.set(1, readIndex, alt2ReadRefLikelihood);
        matrix.set(2, readIndex, MATCH_LIKELIHOOD);
        readIndex++;
    }
    for (int i = 0; i < uninformativeReads.size(); i++) {
        matrix.set(0, readIndex, MATCH_LIKELIHOOD);
        matrix.set(1, readIndex, badReadAltLikelihood);
        matrix.set(2, readIndex, badReadAltLikelihood);
        readIndex++;
    }
    return likelihoods;
}