

Java JavaConversions.asJavaList Method Code Examples

This article collects typical usage examples of the scala.collection.JavaConversions.asJavaList method in Java. If you are wondering what JavaConversions.asJavaList does, how to call it, or what real-world usages look like, the selected code examples below may help. You can also browse further usage examples of the enclosing class, scala.collection.JavaConversions.


The following shows 14 code examples of the JavaConversions.asJavaList method, sorted by popularity by default.
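Before the project-specific examples, here is a minimal standalone sketch of the conversion itself. It is not taken from any of the projects below and assumes an older Scala runtime (roughly 2.10/2.11) where JavaConversions.asJavaList is still available; the method is deprecated and no longer present in current Scala releases. A Java list is wrapped as a Scala Buffer, and asJavaList then exposes the Scala sequence back as a java.util.List view:

import java.util.Arrays;
import java.util.List;

import scala.collection.JavaConversions;
import scala.collection.Seq;

public class AsJavaListSketch {
    public static void main(String[] args) {
        // Wrap a Java list as a Scala Buffer (which is a Seq), as several examples below do.
        List<String> source = Arrays.asList("a", "b", "c");
        Seq<String> scalaSeq = JavaConversions.asScalaBuffer(source);

        // asJavaList returns a java.util.List view backed by the Scala sequence
        // (a wrapper, not a copy).
        List<String> javaList = JavaConversions.asJavaList(scalaSeq);
        System.out.println(javaList); // prints [a, b, c]
    }
}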

Example 1: getAllPartitionIds

import scala.collection.JavaConversions; // import the package/class that the method depends on
/**
 * Get the list of partition IDs for the given topic
 * @param topic
 * @return
 */
public static List<Integer> getAllPartitionIds(String topic) {
	List<String> list = new ArrayList<String>();
	list.add(topic);
	Buffer buffer = JavaConversions.asScalaBuffer(list);

	Map<String, Seq<Object>> topicPartMap = JavaConversions.asJavaMap(ZkUtils.getPartitionsForTopics(getZkClient(), buffer));
	List<Object> javaList = JavaConversions.asJavaList(topicPartMap.get(topic));
	
	List<Integer> retList = new ArrayList<Integer>();
	for (Object obj : javaList) {
		retList.add((Integer)obj);
	}
	
	return retList;
}
 
Developer: linzhaoming, Project: easyframe-msg, Lines: 21, Source: AdminUtil.java

Example 2: getPartitionInfos

import scala.collection.JavaConversions; // import the package/class that the method depends on
public List<PartitionInfo> getPartitionInfos(String group, String topic) {
  Seq<String> singleTopic = JavaConversions.asScalaBuffer(Collections.singletonList(topic)).toSeq();
  scala.collection.Map<String, Seq<Object>> pidMap = ZkUtils.getPartitionsForTopics(zkClient, singleTopic);
  Option<Seq<Object>> partitions = pidMap.get(topic);
  if (partitions.isEmpty()) { // no partitions registered for this topic
    return Collections.emptyList();
  }
  List<PartitionInfo> infos = Lists.newArrayList();
  for (Object o : JavaConversions.asJavaList(partitions.get())) {
    PartitionInfo info = getPartitionInfo(group, topic, Int.unbox(o));
    if (info != null) {
      infos.add(info);
    }
  }
  return infos;
}
 
Developer: shunfei, Project: DCMonitor, Lines: 17, Source: KafkaInfos.java

Example 3: convertTupleResultToRow

import scala.collection.JavaConversions; // import the package/class that the method depends on
private static Row convertTupleResultToRow(List<String> bindingNames, BindingSet tuple, StructType schema) {
	List<StructField> schemaAsList = JavaConversions.asJavaList(schema.toList());

	String[] resultArray = new String[schemaAsList.size()];
	for (int i = 0; i < schemaAsList.size(); i++) {
		resultArray[i] = tuple.getBinding(schemaAsList.get(i).name()).getValue().stringValue();
	}

	return new GenericRow(resultArray);
}
 
Developer: ansell, Project: spark-rdf4j, Lines: 12, Source: SparkRDF4JSparqlRelation.java

Example 4: getProgressFromStage_1_0x

import scala.collection.JavaConversions; // import the package/class that the method depends on
private int[] getProgressFromStage_1_0x(JobProgressListener sparkListener, Object stage)
    throws IllegalAccessException, IllegalArgumentException,
    InvocationTargetException, NoSuchMethodException, SecurityException {
  int numTasks = (int) stage.getClass().getMethod("numTasks").invoke(stage);
  int completedTasks = 0;

  int id = (int) stage.getClass().getMethod("id").invoke(stage);

  Object completedTaskInfo = null;

  completedTaskInfo = JavaConversions.asJavaMap(
      (HashMap<Object, Object>) sparkListener.getClass()
          .getMethod("stageIdToTasksComplete").invoke(sparkListener)).get(id);

  if (completedTaskInfo != null) {
    completedTasks += (int) completedTaskInfo;
  }
  List<Object> parents = JavaConversions.asJavaList((Seq<Object>) stage.getClass()
      .getMethod("parents").invoke(stage));
  if (parents != null) {
    for (Object s : parents) {
      int[] p = getProgressFromStage_1_0x(sparkListener, s);
      numTasks += p[0];
      completedTasks += p[1];
    }
  }

  return new int[] {numTasks, completedTasks};
}
 
Developer: lorthos, Project: incubator-zeppelin-druid, Lines: 30, Source: SparkInterpreter.java

Example 5: getProgressFromStage_1_1x

import scala.collection.JavaConversions; // import the package/class that the method depends on
private int[] getProgressFromStage_1_1x(JobProgressListener sparkListener, Object stage)
    throws IllegalAccessException, IllegalArgumentException,
    InvocationTargetException, NoSuchMethodException, SecurityException {
  int numTasks = (int) stage.getClass().getMethod("numTasks").invoke(stage);
  int completedTasks = 0;
  int id = (int) stage.getClass().getMethod("id").invoke(stage);

  try {
    Method stageIdToData = sparkListener.getClass().getMethod("stageIdToData");
    HashMap<Tuple2<Object, Object>, Object> stageIdData =
        (HashMap<Tuple2<Object, Object>, Object>) stageIdToData.invoke(sparkListener);
    Class<?> stageUIDataClass =
        this.getClass().forName("org.apache.spark.ui.jobs.UIData$StageUIData");

    Method numCompletedTasks = stageUIDataClass.getMethod("numCompleteTasks");
    Set<Tuple2<Object, Object>> keys =
        JavaConverters.asJavaSetConverter(stageIdData.keySet()).asJava();
    for (Tuple2<Object, Object> k : keys) {
      if (id == (int) k._1()) {
        Object uiData = stageIdData.get(k).get();
        completedTasks += (int) numCompletedTasks.invoke(uiData);
      }
    }
  } catch (Exception e) {
    logger.error("Error on getting progress information", e);
  }

  List<Object> parents = JavaConversions.asJavaList((Seq<Object>) stage.getClass()
      .getMethod("parents").invoke(stage));
  if (parents != null) {
    for (Object s : parents) {
      int[] p = getProgressFromStage_1_1x(sparkListener, s);
      numTasks += p[0];
      completedTasks += p[1];
    }
  }
  return new int[] {numTasks, completedTasks};
}
 
Developer: lorthos, Project: incubator-zeppelin-druid, Lines: 39, Source: SparkInterpreter.java

Example 6: getAllBrokers

import scala.collection.JavaConversions; // import the package/class that the method depends on
/** Get all brokers */
public static List<BrokerModel> getAllBrokers(){
	ZkClient client = getZkClient();
	List<Broker> asJavaList = JavaConversions.asJavaList(ZkUtils.getAllBrokersInCluster(client));
	List<BrokerModel> retList = new ArrayList<BrokerModel>();
	for (Broker broker : asJavaList) {
		BrokerModel model = new BrokerModel();
		model.setHost(broker.host());
		model.setId(broker.id());
		model.setPort(broker.port());
		retList.add(model);
	}
	return retList;
}
 
Developer: linzhaoming, Project: easyframe-msg, Lines: 15, Source: AdminUtil.java

Example 7: getAllTopicNames

import scala.collection.JavaConversions; // import the package/class that the method depends on
/**
 * Get the list of Kafka topic names
 * @return
 */
public static List<String> getAllTopicNames(){
	ZkClient client = getZkClient();
	Seq<String> allTopics = ZkUtils.getAllTopics(client);
	List<String> retList = JavaConversions.asJavaList(allTopics);
	return retList;
}
 
Developer: linzhaoming, Project: easyframe-msg, Lines: 11, Source: AdminUtil.java

Example 8: getTopics

import scala.collection.JavaConversions; // import the package/class that the method depends on
public List<String> getTopics() {
  try {
    return JavaConversions.asJavaList(ZkUtils.getAllTopics(zkClient));
  } catch (Exception e) {
    log.error(e, "could not get topics");
    return Collections.emptyList();
  }
}
 
Developer: shunfei, Project: DCMonitor, Lines: 9, Source: KafkaInfos.java

Example 9: initMaps

import scala.collection.JavaConversions; // import the package/class that the method depends on
/**
 * Initializes the hashmaps used to retrieve the job ID from a stage ID and,
 * vice versa, the list of stage IDs from a job ID
 * 
 * @param stageDetails
 * @param stageColumns
 * @param jobDetails
 * @param jobColumns
 */
@SuppressWarnings("unchecked")
private static void initMaps(List<Row> stageDetails,
		List<String> stageColumns, List<Row> jobDetails,
		List<String> jobColumns) {

	// set up the map from job ID to list of stage IDs and vice versa
	for (Row row : jobDetails) {
		int jobID = (int) row.getLong(jobColumns.indexOf("Job ID"));
		List<Long> tmpStageList = null;
		List<Integer> stageList = null;
		if (row.get(jobColumns.indexOf("Stage IDs")) instanceof scala.collection.immutable.List<?>)
			tmpStageList = JavaConversions.asJavaList((Seq<Long>) row
					.get(jobColumns.indexOf("Stage IDs")));
		else if (row.get(jobColumns.indexOf("Stage IDs")) instanceof ArrayBuffer<?>)
			tmpStageList = JavaConversions
					.asJavaList((ArrayBuffer<Long>) row.get(jobColumns
							.indexOf("Stage IDs")));
		else {
			logger.warn("Could not parse Stage IDs Serialization:"
					+ row.get(jobColumns.indexOf("Stage IDs")).toString()
					+ " class: "
					+ row.get(jobColumns.indexOf("Stage IDs")).getClass()
					+ " Object: "
					+ row.get(jobColumns.indexOf("Stage IDs")));
		}

		if (tmpStageList == null)
			continue; // skip rows whose Stage IDs could not be parsed

		// convert it to integers
		stageList = new ArrayList<Integer>();
		for (Long stage : tmpStageList) {
			stageList.add(stage.intValue());
			// Initialize the hashmap StageID -> JobID
			stage2jobMap.put(stage.intValue(), jobID);
		}
		// and the one JobID -> List of stage IDs
		job2StagesMap.put(jobID, stageList);
	}
}
 
Developer: GiovanniPaoloGibilisco, Project: spark-log-processor, Lines: 47, Source: LoggerParser.java

Example 10: getProgressFromStage_1_1x

import scala.collection.JavaConversions; // import the package/class that the method depends on
private int[] getProgressFromStage_1_1x(JobProgressListener sparkListener, Stage stage) {
    int numTasks = stage.numTasks();
    int completedTasks = 0;

    try {
        Method stageIdToData = sparkListener.getClass().getMethod("stageIdToData");
        HashMap<Tuple2<Object, Object>, Object> stageIdData = (HashMap<Tuple2<Object, Object>, Object>) stageIdToData
                .invoke(sparkListener);
        Class<?> stageUIDataClass = this.getClass().forName("org.apache.spark.ui.jobs.UIData$StageUIData");

        Method numCompletedTasks = stageUIDataClass.getMethod("numCompleteTasks");

        Set<Tuple2<Object, Object>> keys = JavaConverters.asJavaSetConverter(stageIdData.keySet()).asJava();
        for (Tuple2<Object, Object> k : keys) {
            if (stage.id() == (int) k._1()) {
                Object uiData = stageIdData.get(k).get();
                completedTasks += (int) numCompletedTasks.invoke(uiData);
            }
        }
    } catch (Exception e) {
        logger.error("Error on getting progress information", e);
    }

    List<Stage> parents = JavaConversions.asJavaList(stage.parents());
    if (parents != null) {
        for (Stage s : parents) {
            int[] p = getProgressFromStage_1_1x(sparkListener, s);
            numTasks += p[0];
            completedTasks += p[1];
        }
    }
    return new int[] { numTasks, completedTasks };
}
 
Developer: Stratio, Project: Explorer, Lines: 34, Source: SparkInterpreter.java

Example 11: getGvcfValue

import scala.collection.JavaConversions; // import the package/class that the method depends on
protected static java.util.List<VcfRecord> getGvcfValue(String filePath, RefContigInfo refContigInfo) {
    String realignedSamPath = TestRealignerTargetCreator.class.getResource(filePath).getFile();
    List<VcfRecord> vcfRecordList = NormalFileLoader.loadVcf(realignedSamPath, refContigInfo);
    return JavaConversions.asJavaList(vcfRecordList);
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 6, Source: AbstractTestCase.java

Example 12: extractStageNodes

import scala.collection.JavaConversions; // import the package/class that the method depends on
/**
 * Gets a list of stage nodes from the collected stage details
 * 
 * @param stageDetails
 *            - The collected dataframe rows containing stage details
 * @param stageColumns
 *            - The columns of stageDetails
 * @return the list of stage nodes
 */
@SuppressWarnings("unchecked")
private static List<Stagenode> extractStageNodes(List<Row> stageDetails,
		List<String> stageColumns) {
	List<Stagenode> stages = new ArrayList<Stagenode>();

	for (Row row : stageDetails) {
		List<Long> tmpParentList = null;
		List<Integer> parentList = null;
		if (row.get(stageColumns.indexOf("Parent IDs")) instanceof scala.collection.immutable.List<?>)
			tmpParentList = JavaConversions.asJavaList((Seq<Long>) row
					.get(stageColumns.indexOf("Parent IDs")));
		else if (row.get(stageColumns.indexOf("Parent IDs")) instanceof ArrayBuffer<?>)
			tmpParentList = JavaConversions
					.asJavaList((ArrayBuffer<Long>) row.get(stageColumns
							.indexOf("Parent IDs")));
		else {
			logger.warn("Could not parse Stage Parent IDs Serialization:"
					+ row.get(stageColumns.indexOf("Parent IDs"))
							.toString()
					+ " class: "
					+ row.get(stageColumns.indexOf("Parent IDs"))
							.getClass() + " Object: "
					+ row.get(stageColumns.indexOf("Parent IDs")));
		}

		if (tmpParentList == null)
			continue; // skip rows whose Parent IDs could not be parsed

		parentList = new ArrayList<Integer>();
		for (Long parent : tmpParentList)
			parentList.add(parent.intValue());
		Stagenode stage = null;

		int stageId = (int) row.getLong(stageColumns.indexOf("Stage ID"));

		stage = new Stagenode(stage2jobMap.get(stageId), stageId,
				parentList, row.getString(stageColumns
						.indexOf("Stage Name")), Boolean.parseBoolean(row
						.getString(stageColumns.indexOf("Executed"))));
		stages.add(stage);

	}

	logger.info(stages.size() + " Stages found");
	return stages;
}
 
Developer: GiovanniPaoloGibilisco, Project: spark-log-processor, Lines: 57, Source: LoggerParser.java

Example 13: extractRDDs

import scala.collection.JavaConversions; // import the package/class that the method depends on
/**
 * Extracts a list of RDDs from the table
 * 
 * @param rddDetails
 * @return list of RDDs
 */
@SuppressWarnings("unchecked")
private static List<RDDnode> extractRDDs(DataFrame rddDetails) {
	List<RDDnode> rdds = new ArrayList<RDDnode>();

	DataFrame table = rddDetails.select("RDD ID", "Parent IDs", "Name",
			"Scope", "Number of Partitions", "Stage ID", "Disk", "Memory",
			"Use ExternalBlockStore", "Deserialized", "Replication")
			.distinct();
	for (Row row : table.collectAsList()) {
		List<Long> tmpParentList = null;
		List<Integer> parentList = null;
		if (row.get(1) instanceof scala.collection.immutable.List<?>)
			tmpParentList = JavaConversions.asJavaList((Seq<Long>) row
					.get(1));
		else if (row.get(1) instanceof ArrayBuffer<?>)
			tmpParentList = JavaConversions
					.asJavaList((ArrayBuffer<Long>) row.get(1));
		else {
			logger.warn("Could not parse RDD PArent IDs Serialization:"
					+ row.get(1).toString() + " class: "
					+ row.get(1).getClass() + " Object: " + row.get(1));
		}
		parentList = new ArrayList<Integer>();
		for (Long parent : tmpParentList)
			parentList.add(parent.intValue());
		int scopeID = 0;
		String scopeName = null;
		if (row.get(3) != null && !row.getString(3).isEmpty()
				&& row.getString(3).startsWith("{")) {
			JsonObject scopeObject = new JsonParser().parse(
					row.getString(3)).getAsJsonObject();
			scopeID = scopeObject.get("id").getAsInt();
			scopeName = scopeObject.get("name").getAsString();
		}

		boolean useDisk = false;
		if (!row.isNullAt(6))
			useDisk = row.getBoolean(6);

		boolean useMemory = false;
		if (!row.isNullAt(7))
			useMemory = row.getBoolean(7);

		rdds.add(new RDDnode((int) row.getLong(0), row.getString(2),
				parentList, scopeID, (int) row.getLong(4), scopeName,
				(int) row.getLong(5), useDisk, useMemory,
				row.getBoolean(8), row.getBoolean(9), (int) row.getLong(10)));

	}
	return rdds;
}
 
Developer: GiovanniPaoloGibilisco, Project: spark-log-processor, Lines: 58, Source: LoggerParser.java

Example 14: getAllGroupConsumers

import scala.collection.JavaConversions; // import the package/class that the method depends on
/**
 * Get all consumers in the given group
 * @param groupName
 * @return
 */
public static List<String> getAllGroupConsumers(String groupName){
	List<String> asJavaList = JavaConversions.asJavaList(ZkUtils.getConsumersInGroup(getZkClient(), groupName));
	return asJavaList;
}
 
Developer: linzhaoming, Project: easyframe-msg, Lines: 10, Source: AdminUtil.java
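A closing note on the API itself: JavaConversions is deprecated in newer Scala versions (and removed in Scala 2.13) in favor of the explicit converter API, which examples 5 and 10 above already use for sets via asJavaSetConverter. Below is a hedged sketch of the equivalent list conversion with scala.collection.JavaConverters, assuming a Scala 2.11/2.12 classpath; the helper name toJavaList is illustrative and not part of any project above.

import java.util.List;

import scala.collection.JavaConverters;
import scala.collection.Seq;

public final class SeqConversionSketch {
    // Rough equivalent of JavaConversions.asJavaList(seq): wrap the Seq in a
    // converter and call asJava() to obtain the java.util.List view.
    public static <A> List<A> toJavaList(Seq<A> seq) {
        return JavaConverters.seqAsJavaListConverter(seq).asJava();
    }
}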


Note: The scala.collection.JavaConversions.asJavaList examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please refer to each project's License before distributing or using the code. Do not reproduce without permission.