This article collects typical usage examples of the Java method org.apache.commons.collections4.ListUtils.partition. If you are wondering what ListUtils.partition does, how to call it, or are simply looking for examples of its use, the curated code samples below may help. You can also explore further usage examples of its declaring class, org.apache.commons.collections4.ListUtils.
The sections below show 8 code examples of the ListUtils.partition method, ordered by popularity by default.
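For reference, ListUtils.partition(list, size) splits a list into consecutive sublists of at most size elements each, preserving the original order; the last sublist may be shorter, and the returned sublists are views backed by the source list. A minimal sketch (the list contents are purely illustrative):

import java.util.Arrays;
import java.util.List;

import org.apache.commons.collections4.ListUtils;

public class PartitionDemo {

    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);

        // Split into consecutive sublists of at most 3 elements each.
        List<List<Integer>> chunks = ListUtils.partition(numbers, 3);

        // Prints [[1, 2, 3], [4, 5, 6], [7]]; the sublists are views of the original list.
        System.out.println(chunks);
    }
}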
Example 1: extractTaskResultsAndMergeIntoMap
import org.apache.commons.collections4.ListUtils; // import for the package/class this method depends on
private Map<String, byte[]> extractTaskResultsAndMergeIntoMap(SchedulerDBManager dbManager,
        EligibleTaskDescriptor eligibleTaskDescriptor, InternalJob job) {
    Map<String, byte[]> mergedVariables = new HashMap<>();

    int numberOfParentTasks = eligibleTaskDescriptor.getParents().size();
    List<TaskId> parentIds = new ArrayList<>(numberOfParentTasks);
    for (int i = 0; i < numberOfParentTasks; i++) {
        parentIds.add(eligibleTaskDescriptor.getParents().get(i).getTaskId());
    }

    // Batch fetching of parent tasks results
    Map<TaskId, TaskResult> taskResults = new HashMap<>();
    for (List<TaskId> parentsSubList : ListUtils.partition(new ArrayList<>(parentIds),
            PASchedulerProperties.SCHEDULER_DB_FETCH_TASK_RESULTS_BATCH_SIZE.getValueAsInt())) {
        taskResults.putAll(dbManager.loadTasksResults(job.getId(), parentsSubList));
    }

    for (TaskResult taskResult : taskResults.values()) {
        if (taskResult.getPropagatedVariables() != null) {
            mergedVariables.putAll(taskResult.getPropagatedVariables());
        }
    }
    return mergedVariables;
}
Example 2: onPostExecute
import org.apache.commons.collections4.ListUtils; // import for the package/class this method depends on
@Override
protected void onPostExecute(List<SmbFile> files) {
    try {
        final int cpuCount = Runtime.getRuntime().availableProcessors();
        final int maxPoolSize = cpuCount * 2 + 1;
        final int partitionSize = files.size() < maxPoolSize ? files.size() : (files.size() / maxPoolSize);

        List<List<SmbFile>> subSets = ListUtils.partition(files, partitionSize);
        mNumOfSets = subSets.size();
        for (List<SmbFile> subSet : subSets) {
            if (mIsMovie) {
                new DownloadMovieTask(mContext, mConfig, subSet, this)
                        .executeOnExecutor(THREAD_POOL_EXECUTOR);
            } else {
                new DownloadTvShowTask(mContext, mConfig, subSet, this)
                        .executeOnExecutor(THREAD_POOL_EXECUTOR);
            }
        }
    } catch (Exception e) {
        if (mCallback != null) {
            mCallback.failure();
        }
    }
}
Example 3: updateVariables
import org.apache.commons.collections4.ListUtils; // import for the package/class this method depends on
/**
 * Updates the runtime variables for this task. Variables are updated in the following order:
 * 1) job variables
 * 2) task variables
 * 3) propagated variables
 * 4) system variables
 *
 * @param schedulingService the scheduling service used to load the parent tasks' results from the database
 */
public synchronized void updateVariables(SchedulingService schedulingService) {
    if (updatedVariables == null) {
        updatedVariables = new LinkedHashMap<>();
        updatedVariables.putAll(internalJob.getVariablesAsReplacementMap());
        updatedVariables.putAll(getScopeVariables());

        if (internalTasksDependencies != null) {
            Set<TaskId> parentIds = new HashSet<>(internalTasksDependencies.size());
            for (InternalTask parentTask : internalTasksDependencies) {
                parentIds.addAll(InternalTaskParentFinder.getInstance()
                                                         .getFirstNotSkippedParentTaskIds(parentTask));
            }

            // Batch fetching of parent tasks results
            Map<TaskId, TaskResult> taskResults = new HashMap<>();
            for (List<TaskId> parentsSubList : ListUtils.partition(new ArrayList<>(parentIds),
                    PASchedulerProperties.SCHEDULER_DB_FETCH_TASK_RESULTS_BATCH_SIZE.getValueAsInt())) {
                taskResults.putAll(schedulingService.getInfrastructure()
                                                    .getDBManager()
                                                    .loadTasksResults(internalJob.getId(), parentsSubList));
            }

            if (!parentIds.isEmpty()) {
                updateVariablesWithTaskResults(taskResults);
            }
        }
        updatedVariables.putAll(getSystemVariables());
    }
}
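The precedence described in the Javadoc above follows from the put order into the LinkedHashMap: each later putAll overwrites earlier entries with the same key, so the variables added last (system variables) win. A minimal, self-contained sketch of that override behavior (keys and values are illustrative only):

import java.util.LinkedHashMap;
import java.util.Map;

public class VariableOverrideDemo {

    public static void main(String[] args) {
        Map<String, String> jobVariables = new LinkedHashMap<>();
        jobVariables.put("region", "job");
        jobVariables.put("retries", "3");

        Map<String, String> taskVariables = new LinkedHashMap<>();
        taskVariables.put("region", "task");

        Map<String, String> merged = new LinkedHashMap<>();
        merged.putAll(jobVariables);   // 1) job variables first
        merged.putAll(taskVariables);  // 2) task variables override duplicate keys
        // ... 3) propagated and 4) system variables would follow in the same way

        System.out.println(merged);    // prints {region=task, retries=3}
    }
}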
Example 4: buildThreadArray
import org.apache.commons.collections4.ListUtils; // import for the package/class this method depends on
/**
 * Builds the thread array.
 *
 * <p>
 * Calls {@link ListUtils#partition(List, int)} to split the list into N batches and creates one thread per batch;
 * each thread's name is built by {@link #buildThreadName(int, PartitionRunnableBuilder)}.
 * </p>
 *
 * <p>
 * A ThreadGroup is created automatically; its name is built by {@link #buildThreadGroupName(List, PartitionRunnableBuilder)}. <br>
 * All newly created threads belong to that thread group, so you can monitor or manage the ThreadGroup from your own partitionRunnableBuilder.
 * </p>
 *
 * @param <T>
 *            the generic type
 * @param list
 *            the list to partition
 * @param eachSize
 *            the number of elements in each batch (the last batch may be smaller)
 * @param paramsMap
 *            the params map passed to every runnable
 * @param partitionRunnableBuilder
 *            the builder that creates a Runnable for each batch
 * @return the thread[]
 */
private static <T> Thread[] buildThreadArray(
                List<T> list,
                int eachSize,
                Map<String, ?> paramsMap,
                PartitionRunnableBuilder<T> partitionRunnableBuilder){
    // use a ThreadGroup to manage the created threads
    ThreadGroup threadGroup = new ThreadGroup(buildThreadGroupName(list, partitionRunnableBuilder));

    // split the list into N batches
    List<List<T>> groupList = ListUtils.partition(list, eachSize);

    //-------------------------------------------------------------------
    int i = 0;
    Thread[] threads = new Thread[groupList.size()];
    for (List<T> perBatchList : groupList){
        String threadName = buildThreadName(i, partitionRunnableBuilder);

        PartitionThreadEntity partitionThreadEntity = new PartitionThreadEntity(
                        threadName,
                        list.size(),
                        eachSize,
                        i,
                        perBatchList.size());

        Runnable runnable = partitionRunnableBuilder.build(perBatchList, partitionThreadEntity, paramsMap);
        threads[i] = new Thread(threadGroup, runnable, threadName);
        i++;
    }

    //---------------------------------------------------------------
    LOGGER.info("total list size:[{}],build [{}] threads,perSize:[{}]", list.size(), threads.length, eachSize);
    return threads;
}
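Since buildThreadArray above is private to its utility class, the same partition-then-thread pattern can be reproduced directly with ListUtils.partition. The sketch below assumes plain Runnable lambdas instead of the PartitionRunnableBuilder API; the group and thread names are illustrative only:

import java.util.Arrays;
import java.util.List;

import org.apache.commons.collections4.ListUtils;

public class PartitionThreadsDemo {

    public static void main(String[] args) throws InterruptedException {
        List<String> work = Arrays.asList("a", "b", "c", "d", "e");

        // At most 2 items per batch, one worker thread per batch.
        List<List<String>> batches = ListUtils.partition(work, 2);

        ThreadGroup group = new ThreadGroup("partition-workers");
        Thread[] threads = new Thread[batches.size()];
        for (int i = 0; i < batches.size(); i++) {
            final List<String> batch = batches.get(i);
            threads[i] = new Thread(group,
                    () -> System.out.println(Thread.currentThread().getName() + " handles " + batch),
                    "worker-" + i);
            threads[i].start();
        }

        // Wait for all workers to finish.
        for (Thread t : threads) {
            t.join();
        }
    }
}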
Example 5: getAminoAcids
import org.apache.commons.collections4.ListUtils; // import for the package/class this method depends on
/**
 * Constructs amino acid sequences for the specified gene track
 *
 * @param track a track to create sequences for
 * @param cdsList a list of CDS blocks
 * @param cdsNucleotides a list of CDS sequences
 * @param frames a list of CDS frames
 * @return a map of protein sequences to CDS
 */
public Map<Gene, List<ProteinSequenceEntry>> getAminoAcids(final Track<Gene> track, final List<Gene> cdsList,
        final List<List<Sequence>> cdsNucleotides,
        final List<Integer> frames) {
    if (CollectionUtils.isEmpty(cdsList) || CollectionUtils.isEmpty(cdsNucleotides)) {
        return Collections.emptyMap();
    }
    double time1 = Utils.getSystemTimeMilliseconds();
    Map<Gene, List<ProteinSequenceEntry>> proteinSequences = new HashMap<>();

    // if gene is on the negative strand, we should process it from the end
    checkAndRevert(cdsList, cdsNucleotides, frames);

    MutableInt aminoAcidCounter = new MutableInt(0);
    for (int i = 0; i < cdsNucleotides.size(); i++) {
        List<Sequence> nucleotides = cdsNucleotides.get(i);
        Gene cds = cdsList.get(i);
        int extendedStart = 0;
        int frame = frames.get(i);
        if (frame > 0 && i != 0) {
            // restore the part of the triplet from the previous nucleotides
            List<Sequence> prev = cdsNucleotides.get(i - 1);
            int prevNucleotides = TRIPLE_LENGTH - frame;
            if (prev.size() >= prevNucleotides) {
                List<Sequence> nucleotidesExtended = new ArrayList<>();
                nucleotidesExtended.addAll(prev.subList(prev.size() - prevNucleotides, prev.size()));
                nucleotidesExtended.addAll(nucleotides);
                nucleotides = nucleotidesExtended;
                extendedStart = -prevNucleotides;
            }
        } else {
            nucleotides = nucleotides.subList(frame, nucleotides.size());
        }

        // Construct amino acids from nucleotide triples.
        List<List<Sequence>> tripleList = ListUtils.partition(nucleotides, TRIPLE_LENGTH);
        List<ProteinSequenceEntry> value =
                reconstructAminoAcidByTriples(track, cds, cdsNucleotides, i, tripleList,
                        extendedStart, aminoAcidCounter);
        proteinSequences.putIfAbsent(cds, value);
    }
    double time2 = Utils.getSystemTimeMilliseconds();
    LOGGER.debug("Get amino acids {}:{} ms", Thread.currentThread().getName(), time2 - time1);
    return proteinSequences;
}
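In the example above, ListUtils.partition(nucleotides, TRIPLE_LENGTH) is what groups the coding sequence into codons (triples); an incomplete trailing codon simply ends up as a shorter final sublist. A minimal sketch with plain characters standing in for the Sequence type:

import java.util.Arrays;
import java.util.List;

import org.apache.commons.collections4.ListUtils;

public class CodonPartitionDemo {

    private static final int TRIPLE_LENGTH = 3;

    public static void main(String[] args) {
        List<Character> nucleotides = Arrays.asList('A', 'T', 'G', 'G', 'C', 'A', 'T');

        // Group the coding sequence into codons of 3 nucleotides each.
        List<List<Character>> codons = ListUtils.partition(nucleotides, TRIPLE_LENGTH);

        // Prints [[A, T, G], [G, C, A], [T]] -- the incomplete trailing codon stays in a shorter sublist.
        System.out.println(codons);
    }
}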
Example 6: write
import org.apache.commons.collections4.ListUtils; // import for the package/class this method depends on
@Override
public void write(ArrayList<Stat> stats, long invokeTimeMs, Set<Tag> tags) {
    Date dt = new Date();
    dt.setTime(invokeTimeMs);

    Collection<Dimension> parentDims = tagsToDimensions(tags);
    List<MetricDatum> metrics = new ArrayList<MetricDatum>();

    /*
     * Create CW metric objects from bender internal Stat objects
     */
    for (Stat stat : stats) {
        /*
         * Dimensions are CW's version of metric tags. A conversion must be done.
         */
        Collection<Dimension> metricDims = tagsToDimensions(stat.getTags());
        metricDims.addAll(parentDims);

        MetricDatum metric = new MetricDatum();
        metric.setMetricName(stat.getName());
        // TODO: add units to Stat object SYSTEMS-870
        metric.setUnit(StandardUnit.None);
        metric.setTimestamp(dt);
        metric.setDimensions(metricDims);
        metric.setValue((double) stat.getValue());

        metrics.add(metric);
    }

    /*
     * Not very well documented in the Java docs, but CW only allows 20 metrics at a time.
     */
    List<List<MetricDatum>> chunks = ListUtils.partition(metrics, 20);
    for (List<MetricDatum> chunk : chunks) {
        PutMetricDataRequest req = new PutMetricDataRequest();
        req.withMetricData(chunk);
        req.setNamespace(namespace);

        this.client.putMetricData(req);
    }
}
Example 7: getStringSerializableMap
import org.apache.commons.collections4.ListUtils; // import for the package/class this method depends on
public VariablesMap getStringSerializableMap(SchedulingService service, TaskTerminationData taskToTerminate)
        throws Exception {
    VariablesMap variablesMap = new VariablesMap();

    RunningTaskData taskData = taskToTerminate.taskData;
    TaskResultImpl taskResult = taskToTerminate.taskResult;
    InternalJob internalJob = taskToTerminate.internalJob;

    if (taskToTerminate.terminationStatus == ABORTED || taskResult == null) {
        List<InternalTask> iDependences = taskData.getTask().getIDependences();
        if (iDependences != null) {
            Set<TaskId> parentIds = new HashSet<>(iDependences.size());
            for (InternalTask parentTask : iDependences) {
                parentIds.addAll(InternalTaskParentFinder.getInstance()
                                                         .getFirstNotSkippedParentTaskIds(parentTask));
            }

            // Batch fetching of parent tasks results
            Map<TaskId, TaskResult> taskResults = new HashMap<>();
            for (List<TaskId> parentsSubList : ListUtils.partition(new ArrayList<>(parentIds),
                    PASchedulerProperties.SCHEDULER_DB_FETCH_TASK_RESULTS_BATCH_SIZE.getValueAsInt())) {
                taskResults.putAll(service.getInfrastructure()
                                          .getDBManager()
                                          .loadTasksResults(taskData.getTask().getJobId(), parentsSubList));
            }

            getResultsFromListOfTaskResults(variablesMap.getInheritedMap(), taskResults);
        } else {
            if (internalJob != null) {
                for (Map.Entry<String, JobVariable> jobVariableEntry : internalJob.getVariables().entrySet()) {
                    variablesMap.getInheritedMap().put(jobVariableEntry.getKey(),
                            jobVariableEntry.getValue().getValue());
                }
            }
        }
        variablesMap.getInheritedMap().put(SchedulerVars.PA_TASK_SUCCESS.toString(), Boolean.toString(false));
    } else if (taskResult.hadException()) {
        variablesMap.setInheritedMap(fillMapWithTaskResult(taskResult, false));
    } else {
        variablesMap.setInheritedMap(fillMapWithTaskResult(taskResult, true));
    }
    variablesMap.setScopeMap(getNonInheritedScopeVariables(variablesMap.getInheritedMap(),
            taskData.getTask().getScopeVariables(),
            taskData.getTask().getVariables()));
    return variablesMap;
}
Example 8: toSubList
import org.apache.commons.collections4.ListUtils; // import for the package/class this method depends on
/**
 * Splits a single List into several subLists while keeping the original element order.
 * The size of each subList determines how many partitions are produced.
 *
 * @param list the list to return consecutive sublists of
 * @param size the desired size of each sublist (the last may be smaller);
 *             a different size results in a different number of partitions
 * @param <T> the element type
 * @return consecutive sublists of the given list
 */
@Deprecated // use the partition method below instead; its grouping is more sensible
private static <T> List<List<T>> toSubList(List<T> list, int size) {
    //Guava
    //return Lists.partition(list, size);

    //Apache Commons Collections
    return ListUtils.partition(list, size);
}