

Java JavaConversions.asJavaList Method Code Examples

This article collects typical usage examples of the Java method scala.collection.JavaConversions.asJavaList. If you are wondering what JavaConversions.asJavaList does or how it is used in practice, the curated code examples below may help. You can also browse further usage examples of the enclosing class, scala.collection.JavaConversions.


The following shows 14 code examples of the JavaConversions.asJavaList method, drawn from open-source projects and sorted by popularity by default.
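Before the project examples, here is a minimal, self-contained sketch of what JavaConversions.asJavaList does: it wraps a Scala Seq in a java.util.List view without copying. This is an illustration only, assuming a Scala 2.10/2.11-era scala-library on the classpath (where the method still exists, though deprecated); the class name AsJavaListDemo is ours and does not come from any of the projects cited below.

import java.util.Arrays;
import java.util.List;

import scala.collection.JavaConversions;
import scala.collection.Seq;

public class AsJavaListDemo {
    public static void main(String[] args) {
        // Round-trip a Java list through a Scala Seq and back.
        List<String> source = Arrays.asList("a", "b", "c");
        Seq<String> scalaSeq = JavaConversions.asScalaBuffer(source).toSeq();

        // asJavaList wraps the Scala Seq in a java.util.List view (no copy).
        List<String> javaView = JavaConversions.asJavaList(scalaSeq);
        System.out.println(javaView); // prints [a, b, c]
    }
}

On newer Scala versions, JavaConversions is deprecated in favour of scala.collection.JavaConverters (e.g. JavaConverters.seqAsJavaListConverter(scalaSeq).asJava), which Examples 5 and 10 below already use for converting sets.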

Example 1: getAllPartitionIds

import scala.collection.JavaConversions; // import the package/class that this method depends on
/**
 * Get the partition id list for the given topic
 * @param topic the topic name
 * @return the partition ids of the topic
 */
public static List<Integer> getAllPartitionIds(String topic) {
	List<String> list = new ArrayList<String>();
	list.add(topic);
	Buffer<String> buffer = JavaConversions.asScalaBuffer(list);

	Map<String, Seq<Object>> topicPartMap = JavaConversions.asJavaMap(ZkUtils.getPartitionsForTopics(getZkClient(), buffer));
	List<Object> javaList = JavaConversions.asJavaList(topicPartMap.get(topic));

	List<Integer> retList = new ArrayList<Integer>();
	for (Object obj : javaList) {
		retList.add((Integer) obj);
	}

	return retList;
}
 
Author: linzhaoming, Project: easyframe-msg, Lines: 21, Source: AdminUtil.java

Example 2: getPartitionInfos

import scala.collection.JavaConversions; // import the package/class that this method depends on
public List<PartitionInfo> getPartitionInfos(String group, String topic) {
  Seq<String> singleTopic = JavaConversions.asScalaBuffer(Collections.singletonList(topic)).toSeq();
  scala.collection.Map<String, Seq<Object>> pidMap = ZkUtils.getPartitionsForTopics(zkClient, singleTopic);
  Option<Seq<Object>> partitions = pidMap.get(topic);
  // guard against Option.get() throwing when the topic does not exist
  if (partitions.isEmpty()) {
    return Collections.emptyList();
  }
  List<PartitionInfo> infos = Lists.newArrayList();
  for (Object o : JavaConversions.asJavaList(partitions.get())) {
    PartitionInfo info = getPartitionInfo(group, topic, Int.unbox(o));
    if (info != null) {
      infos.add(info);
    }
  }
  return infos;
}
 
Author: shunfei, Project: DCMonitor, Lines: 17, Source: KafkaInfos.java

Example 3: convertTupleResultToRow

import scala.collection.JavaConversions; // import the package/class that this method depends on
private static Row convertTupleResultToRow(List<String> bindingNames, BindingSet tuple, StructType schema) {
	List<StructField> schemaAsList = JavaConversions.asJavaList(schema.toList());

	String[] resultArray = new String[schemaAsList.size()];
	for (int i = 0; i < schemaAsList.size(); i++) {
		resultArray[i] = tuple.getBinding(schemaAsList.get(i).name()).getValue().stringValue();
	}

	org.apache.spark.sql.catalyst.expressions.GenericRow testGeneric = new GenericRow(resultArray);
	return testGeneric;
}
 
Author: ansell, Project: spark-rdf4j, Lines: 12, Source: SparkRDF4JSparqlRelation.java

Example 4: getProgressFromStage_1_0x

import scala.collection.JavaConversions; // import the package/class that this method depends on
private int[] getProgressFromStage_1_0x(JobProgressListener sparkListener, Object stage)
    throws IllegalAccessException, IllegalArgumentException,
    InvocationTargetException, NoSuchMethodException, SecurityException {
  int numTasks = (int) stage.getClass().getMethod("numTasks").invoke(stage);
  int completedTasks = 0;

  int id = (int) stage.getClass().getMethod("id").invoke(stage);

  Object completedTaskInfo = null;

  completedTaskInfo = JavaConversions.asJavaMap(
      (HashMap<Object, Object>) sparkListener.getClass()
          .getMethod("stageIdToTasksComplete").invoke(sparkListener)).get(id);

  if (completedTaskInfo != null) {
    completedTasks += (int) completedTaskInfo;
  }
  List<Object> parents = JavaConversions.asJavaList((Seq<Object>) stage.getClass()
      .getMethod("parents").invoke(stage));
  if (parents != null) {
    for (Object s : parents) {
      int[] p = getProgressFromStage_1_0x(sparkListener, s);
      numTasks += p[0];
      completedTasks += p[1];
    }
  }

  return new int[] {numTasks, completedTasks};
}
 
Author: lorthos, Project: incubator-zeppelin-druid, Lines: 30, Source: SparkInterpreter.java

Example 5: getProgressFromStage_1_1x

import scala.collection.JavaConversions; // import the package/class that this method depends on
private int[] getProgressFromStage_1_1x(JobProgressListener sparkListener, Object stage)
    throws IllegalAccessException, IllegalArgumentException,
    InvocationTargetException, NoSuchMethodException, SecurityException {
  int numTasks = (int) stage.getClass().getMethod("numTasks").invoke(stage);
  int completedTasks = 0;
  int id = (int) stage.getClass().getMethod("id").invoke(stage);

  try {
    Method stageIdToData = sparkListener.getClass().getMethod("stageIdToData");
    HashMap<Tuple2<Object, Object>, Object> stageIdData =
        (HashMap<Tuple2<Object, Object>, Object>) stageIdToData.invoke(sparkListener);
    Class<?> stageUIDataClass =
        this.getClass().forName("org.apache.spark.ui.jobs.UIData$StageUIData");

    Method numCompletedTasks = stageUIDataClass.getMethod("numCompleteTasks");
    Set<Tuple2<Object, Object>> keys =
        JavaConverters.asJavaSetConverter(stageIdData.keySet()).asJava();
    for (Tuple2<Object, Object> k : keys) {
      if (id == (int) k._1()) {
        Object uiData = stageIdData.get(k).get();
        completedTasks += (int) numCompletedTasks.invoke(uiData);
      }
    }
  } catch (Exception e) {
    logger.error("Error on getting progress information", e);
  }

  List<Object> parents = JavaConversions.asJavaList((Seq<Object>) stage.getClass()
      .getMethod("parents").invoke(stage));
  if (parents != null) {
    for (Object s : parents) {
      int[] p = getProgressFromStage_1_1x(sparkListener, s);
      numTasks += p[0];
      completedTasks += p[1];
    }
  }
  return new int[] {numTasks, completedTasks};
}
 
Author: lorthos, Project: incubator-zeppelin-druid, Lines: 39, Source: SparkInterpreter.java

Example 6: getAllBrokers

import scala.collection.JavaConversions; // import the package/class that this method depends on
/** Get all brokers */
public static List<BrokerModel> getAllBrokers(){
	ZkClient client = getZkClient();
	List<Broker> asJavaList = JavaConversions.asJavaList(ZkUtils.getAllBrokersInCluster(client));
	List<BrokerModel> retList = new ArrayList<BrokerModel>();
	for (Broker broker : asJavaList) {
		BrokerModel model = new BrokerModel();
		model.setHost(broker.host());
		model.setId(broker.id());
		model.setPort(broker.port());
		retList.add(model);
	}
	return retList;
}
 
Author: linzhaoming, Project: easyframe-msg, Lines: 15, Source: AdminUtil.java

Example 7: getAllTopicNames

import scala.collection.JavaConversions; // import the package/class that this method depends on
/**
 * Get the list of all Kafka topic names
 * @return the topic names
 */
public static List<String> getAllTopicNames(){
	ZkClient client = getZkClient();
	Seq<String> allTopics = ZkUtils.getAllTopics(client);
	List<String> retList = JavaConversions.asJavaList(allTopics);
	return retList;
}
 
Author: linzhaoming, Project: easyframe-msg, Lines: 11, Source: AdminUtil.java

Example 8: getTopics

import scala.collection.JavaConversions; // import the package/class that this method depends on
public List<String> getTopics() {
  try {
    return JavaConversions.asJavaList(ZkUtils.getAllTopics(zkClient));
  } catch (Exception e) {
    log.error(e, "could not get topics");
    return Collections.emptyList();
  }
}
 
Author: shunfei, Project: DCMonitor, Lines: 9, Source: KafkaInfos.java

Example 9: initMaps

import scala.collection.JavaConversions; // import the package/class that this method depends on
/**
 * Initializes the hashmaps used to retrieve the job id from a stage id and,
 * vice versa, the list of stage ids from a job id
 * 
 * @param stageDetails
 * @param stageColumns
 * @param jobDetails
 * @param jobColumns
 */
@SuppressWarnings("unchecked")
private static void initMaps(List<Row> stageDetails,
		List<String> stageColumns, List<Row> jobDetails,
		List<String> jobColumns) {

	// set up the hashmap from job id to list of stage ids and vice versa
	for (Row row : jobDetails) {
		int jobID = (int) row.getLong(jobColumns.indexOf("Job ID"));
		List<Long> tmpStageList = null;
		List<Integer> stageList = null;
		if (row.get(jobColumns.indexOf("Stage IDs")) instanceof scala.collection.immutable.List<?>)
			tmpStageList = JavaConversions.asJavaList((Seq<Long>) row
					.get(jobColumns.indexOf("Stage IDs")));
		else if (row.get(jobColumns.indexOf("Stage IDs")) instanceof ArrayBuffer<?>)
			tmpStageList = JavaConversions
					.asJavaList((ArrayBuffer<Long>) row.get(jobColumns
							.indexOf("Stage IDs")));
		else {
			logger.warn("Could not parse Stage IDs Serialization:"
					+ row.get(jobColumns.indexOf("Stage IDs")).toString()
					+ " class: "
					+ row.get(jobColumns.indexOf("Stage IDs")).getClass()
					+ " Object: "
					+ row.get(jobColumns.indexOf("Stage IDs")));
		}

		// skip rows whose "Stage IDs" column could not be parsed
		if (tmpStageList == null) {
			continue;
		}

		// convert the stage ids to integers
		stageList = new ArrayList<Integer>();
		for (Long stage : tmpStageList) {
			stageList.add(stage.intValue());
			// Initialize the hashmap StageID -> JobID
			stage2jobMap.put(stage.intValue(), jobID);
		}
		// and the one JobID -> List of stage IDs
		job2StagesMap.put(jobID, stageList);
	}
}
 
Author: GiovanniPaoloGibilisco, Project: spark-log-processor, Lines: 47, Source: LoggerParser.java

Example 10: getProgressFromStage_1_1x

import scala.collection.JavaConversions; // import the package/class that this method depends on
private int[] getProgressFromStage_1_1x(JobProgressListener sparkListener, Stage stage) {
    int numTasks = stage.numTasks();
    int completedTasks = 0;

    try {
        Method stageIdToData = sparkListener.getClass().getMethod("stageIdToData");
        HashMap<Tuple2<Object, Object>, Object> stageIdData = (HashMap<Tuple2<Object, Object>, Object>) stageIdToData
                .invoke(sparkListener);
        Class<?> stageUIDataClass = this.getClass().forName("org.apache.spark.ui.jobs.UIData$StageUIData");

        Method numCompletedTasks = stageUIDataClass.getMethod("numCompleteTasks");

        Set<Tuple2<Object, Object>> keys = JavaConverters.asJavaSetConverter(stageIdData.keySet()).asJava();
        for (Tuple2<Object, Object> k : keys) {
            if (stage.id() == (int) k._1()) {
                Object uiData = stageIdData.get(k).get();
                completedTasks += (int) numCompletedTasks.invoke(uiData);
            }
        }
    } catch (Exception e) {
        logger.error("Error on getting progress information", e);
    }

    List<Stage> parents = JavaConversions.asJavaList(stage.parents());
    if (parents != null) {
        for (Stage s : parents) {
            int[] p = getProgressFromStage_1_1x(sparkListener, s);
            numTasks += p[0];
            completedTasks += p[1];
        }
    }
    return new int[] { numTasks, completedTasks };
}
 
Author: Stratio, Project: Explorer, Lines: 34, Source: SparkInterpreter.java

Example 11: getGvcfValue

import scala.collection.JavaConversions; // import the package/class that this method depends on
protected static java.util.List<VcfRecord> getGvcfValue(String filePath, RefContigInfo refContigInfo) {
    String realignedSamPath = TestRealignerTargetCreator.class.getResource(filePath).getFile();
    List<VcfRecord> vcfRecordList = NormalFileLoader.loadVcf(realignedSamPath, refContigInfo);
    return JavaConversions.asJavaList(vcfRecordList);
}
 
Author: PAA-NCIC, Project: SparkSeq, Lines: 6, Source: AbstractTestCase.java

Example 12: extractStageNodes

import scala.collection.JavaConversions; // import the package/class that this method depends on
/**
 * Gets the list of stages from the dataframe
 * 
 * @param stageDetails
 *            - The collected rows containing stage details
 * @param stageColumns
 *            - The columns of stageDetails
 * @return the list of stage nodes
 */
@SuppressWarnings("unchecked")
private static List<Stagenode> extractStageNodes(List<Row> stageDetails,
		List<String> stageColumns) {
	List<Stagenode> stages = new ArrayList<Stagenode>();

	for (Row row : stageDetails) {
		List<Long> tmpParentList = null;
		List<Integer> parentList = null;
		if (row.get(stageColumns.indexOf("Parent IDs")) instanceof scala.collection.immutable.List<?>)
			tmpParentList = JavaConversions.asJavaList((Seq<Long>) row
					.get(stageColumns.indexOf("Parent IDs")));
		else if (row.get(stageColumns.indexOf("Parent IDs")) instanceof ArrayBuffer<?>)
			tmpParentList = JavaConversions
					.asJavaList((ArrayBuffer<Long>) row.get(stageColumns
							.indexOf("Parent IDs")));
		else {
			logger.warn("Could not parse Stage Parent IDs Serialization:"
					+ row.get(stageColumns.indexOf("Parent IDs"))
							.toString()
					+ " class: "
					+ row.get(stageColumns.indexOf("Parent IDs"))
							.getClass() + " Object: "
					+ row.get(stageColumns.indexOf("Parent IDs")));
		}

		// skip rows whose "Parent IDs" column could not be parsed
		if (tmpParentList == null) {
			continue;
		}

		parentList = new ArrayList<Integer>();
		for (Long parent : tmpParentList)
			parentList.add(parent.intValue());
		Stagenode stage = null;

		int stageId = (int) row.getLong(stageColumns.indexOf("Stage ID"));

		stage = new Stagenode(stage2jobMap.get(stageId), stageId,
				parentList, row.getString(stageColumns
						.indexOf("Stage Name")), Boolean.parseBoolean(row
						.getString(stageColumns.indexOf("Executed"))));
		stages.add(stage);

	}

	logger.info(stages.size() + " stages found");
	return stages;
}
 
Author: GiovanniPaoloGibilisco, Project: spark-log-processor, Lines: 57, Source: LoggerParser.java

Example 13: extractRDDs

import scala.collection.JavaConversions; // import the package/class that this method depends on
/**
 * Extracts the list of RDDs from the table
 * 
 * @param rddDetails
 *            - The collected dataframe containing RDD details
 * @return list of RDDs
 */
@SuppressWarnings("unchecked")
private static List<RDDnode> extractRDDs(DataFrame rddDetails) {
	List<RDDnode> rdds = new ArrayList<RDDnode>();

	DataFrame table = rddDetails.select("RDD ID", "Parent IDs", "Name",
			"Scope", "Number of Partitions", "Stage ID", "Disk", "Memory",
			"Use ExternalBlockStore", "Deserialized", "Replication")
			.distinct();
	for (Row row : table.collectAsList()) {
		List<Long> tmpParentList = null;
		List<Integer> parentList = null;
		if (row.get(1) instanceof scala.collection.immutable.List<?>)
			tmpParentList = JavaConversions.asJavaList((Seq<Long>) row
					.get(1));
		else if (row.get(1) instanceof ArrayBuffer<?>)
			tmpParentList = JavaConversions
					.asJavaList((ArrayBuffer<Long>) row.get(1));
		else {
			logger.warn("Could not parse RDD PArent IDs Serialization:"
					+ row.get(1).toString() + " class: "
					+ row.get(1).getClass() + " Object: " + row.get(1));
		}
		// skip rows whose "Parent IDs" column could not be parsed
		if (tmpParentList == null) {
			continue;
		}
		parentList = new ArrayList<Integer>();
		for (Long parent : tmpParentList)
			parentList.add(parent.intValue());
		int scopeID = 0;
		String scopeName = null;
		if (row.get(3) != null && !row.getString(3).isEmpty()
				&& row.getString(3).startsWith("{")) {
			JsonObject scopeObject = new JsonParser().parse(
					row.getString(3)).getAsJsonObject();
			scopeID = scopeObject.get("id").getAsInt();
			scopeName = scopeObject.get("name").getAsString();
		}

		boolean useDisk = false;
		if (!row.isNullAt(6))
			useDisk = row.getBoolean(6);

		boolean useMemory = false;
		if (!row.isNullAt(7))
			useMemory = row.getBoolean(7);

		rdds.add(new RDDnode((int) row.getLong(0), row.getString(2),
				parentList, scopeID, (int) row.getLong(4), scopeName,
				(int) row.getLong(5), useDisk, useMemory,
				row.getBoolean(8), row.getBoolean(9), (int) row.getLong(10)));

	}
	return rdds;
}
 
Author: GiovanniPaoloGibilisco, Project: spark-log-processor, Lines: 58, Source: LoggerParser.java

Example 14: getAllGroupConsumers

import scala.collection.JavaConversions; // import the package/class that this method depends on
/**
 * Get all consumers for the given group name
 * @param groupName the consumer group name
 * @return the consumer ids in the group
 */
public static List<String> getAllGroupConsumers(String groupName){
	List<String> asJavaList = JavaConversions.asJavaList(ZkUtils.getConsumersInGroup(getZkClient(), groupName));
	return asJavaList;
}
 
Author: linzhaoming, Project: easyframe-msg, Lines: 10, Source: AdminUtil.java


Note: The scala.collection.JavaConversions.asJavaList method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's License; do not repost without permission.