本文整理汇总了Java中org.apache.falcon.entity.v0.cluster.Cluster类的典型用法代码示例。如果您正苦于以下问题:Java Cluster类的具体用法?Java Cluster怎么用?Java Cluster使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
Cluster类属于org.apache.falcon.entity.v0.cluster包,在下文中一共展示了Cluster类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: waitForWorkflow
import org.apache.falcon.entity.v0.cluster.Cluster; //导入依赖的package包/类
/**
 * Polls Oozie until the workflow job for the given nominal instance reaches the
 * expected status, then asserts the status.
 *
 * The job id is resolved from the Falcon external id (process name + DEFAULT tag +
 * nominal time). Polls up to 15 times with a linearly increasing back-off
 * ((i + 1) seconds), so the total wait is bounded at roughly two minutes.
 *
 * Fix over the original: the original slept even after the final poll, adding up to
 * 15 seconds of dead time before the assertion; the sleep is now skipped once no
 * further poll will happen.
 *
 * @param instance nominal instance time in Falcon's UTC date format
 * @param status   the workflow status to wait for
 * @throws Exception on Oozie client errors, date parse errors, or interruption
 */
private void waitForWorkflow(String instance, WorkflowJob.Status status) throws Exception {
    ExternalId extId = new ExternalId(processName, Tag.DEFAULT, EntityUtil.parseDateUTC(instance));
    OozieClient ozClient = OozieClientFactory.get(
            (Cluster) ConfigurationStore.get().get(EntityType.CLUSTER, clusterName));
    String jobId = ozClient.getJobId(extId.getId());

    WorkflowJob jobInfo = null;
    for (int i = 0; i < 15; i++) {
        jobInfo = ozClient.getJobInfo(jobId);
        if (jobInfo.getStatus() == status) {
            break;
        }
        // Only back off when another poll is coming; the final result is asserted below.
        if (i < 14) {
            System.out.println("Waiting for workflow job " + jobId + " status " + status);
            Thread.sleep((i + 1) * 1000);
        }
    }
    Assert.assertEquals(status, jobInfo.getStatus());
}
示例2: loadEntity
import org.apache.falcon.entity.v0.cluster.Cluster; //导入依赖的package包/类
/**
 * Unmarshals an entity definition from a classpath resource and overrides its name.
 *
 * Only CLUSTER, FEED and PROCESS entities get the name override; any other entity
 * type is returned as unmarshalled.
 *
 * @param type     entity type whose JAXB unmarshaller is used
 * @param resource classpath resource holding the entity XML
 * @param name     name to set on the loaded entity
 * @return the loaded entity, cast to the caller's expected type
 * @throws JAXBException if the XML cannot be unmarshalled
 */
private <T extends Entity> T loadEntity(EntityType type, String resource, String name) throws JAXBException {
    final Entity parsed =
            (Entity) type.getUnmarshaller().unmarshal(this.getClass().getResourceAsStream(resource));
    switch (parsed.getEntityType()) {
        case CLUSTER:
            ((Cluster) parsed).setName(name);
            break;
        case FEED:
            ((Feed) parsed).setName(name);
            break;
        case PROCESS:
            ((org.apache.falcon.entity.v0.process.Process) parsed).setName(name);
            break;
    }
    return (T) parsed;
}
示例3: getHDFSFeed
import org.apache.falcon.entity.v0.cluster.Cluster; //导入依赖的package包/类
/**
 * Publishes an HDFS-backed feed and verifies its Atlas registration, including the
 * feed-creation lineage process and the HDFS path entity it reads from.
 *
 * @param feedResource classpath resource with the feed definition
 * @param clusterName  cluster the feed is bound to
 * @return pair of (Atlas feed entity id, published Feed)
 * @throws Exception on store or Atlas failures
 */
private TypeUtils.Pair<String, Feed> getHDFSFeed(String feedResource, String clusterName) throws Exception {
    // Load the feed under a randomized name and point it at the requested cluster.
    Feed feed = loadEntity(EntityType.FEED, feedResource, "feed" + random());
    org.apache.falcon.entity.v0.feed.Cluster boundCluster = feed.getClusters().getClusters().get(0);
    boundCluster.setName(clusterName);
    STORE.publish(EntityType.FEED, feed);

    // The feed itself must be registered with the expected attributes.
    String feedId = assertFeedIsRegistered(feed, clusterName);
    assertFeedAttributes(feedId);

    // The feed-creation process must link the HDFS path (input) to the feed (output).
    String creationId = assertEntityIsRegistered(FalconDataTypes.FALCON_FEED_CREATION.getName(),
            AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME,
            FalconBridge.getFeedQualifiedName(feed.getName(), clusterName));
    Referenceable creation = atlasClient.getEntity(creationId);
    assertEquals(((List<Id>) creation.get("outputs")).get(0).getId()._getId(), feedId);

    String hdfsEntityId = ((List<Id>) creation.get("inputs")).get(0).getId()._getId();
    Referenceable hdfsEntity = atlasClient.getEntity(hdfsEntityId);
    assertEquals(hdfsEntity.getTypeName(), HiveMetaStoreBridge.HDFS_PATH);

    // The registered path must match the feed's DATA location, normalized.
    List<Location> feedLocations = FeedHelper.getLocations(boundCluster, feed);
    Location dataLoc = FileSystemStorage.getLocation(feedLocations, LocationType.DATA);
    assertEquals(hdfsEntity.get(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME),
            FalconBridge.normalize(dataLoc.getPath()));

    return TypeUtils.Pair.of(feedId, feed);
}
示例4: testReplicationFeed
import org.apache.falcon.entity.v0.cluster.Cluster; //导入依赖的package包/类
/**
 * Verifies that a replication feed spanning two clusters is registered in Atlas as a
 * replication process whose input is the feed on the source cluster and whose output
 * is the feed on the target cluster.
 */
@Test
public void testReplicationFeed() throws Exception {
    // Publish and verify both endpoint clusters under randomized names.
    Cluster source = loadEntity(EntityType.CLUSTER, CLUSTER_RESOURCE, "cluster" + random());
    STORE.publish(EntityType.CLUSTER, source);
    assertClusterIsRegistered(source);

    Cluster target = loadEntity(EntityType.CLUSTER, CLUSTER_RESOURCE, "cluster" + random());
    STORE.publish(EntityType.CLUSTER, target);
    assertClusterIsRegistered(target);

    Feed feed = getTableFeed(FEED_REPLICATION_RESOURCE, source.getName(), target.getName());

    // Resolve the Atlas ids of the feed as registered on each side of the replication.
    String sourceFeedId = atlasClient.getEntity(FalconDataTypes.FALCON_FEED.getName(),
            AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME,
            FalconBridge.getFeedQualifiedName(feed.getName(), source.getName())).getId()._getId();
    String targetFeedId = atlasClient.getEntity(FalconDataTypes.FALCON_FEED.getName(),
            AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME,
            FalconBridge.getFeedQualifiedName(feed.getName(), target.getName())).getId()._getId();

    // The replication process must wire source feed -> target feed.
    String replicationId = assertEntityIsRegistered(FalconDataTypes.FALCON_FEED_REPLICATION.getName(),
            AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, feed.getName());
    Referenceable replication = atlasClient.getEntity(replicationId);
    assertEquals(((List<Id>) replication.get("inputs")).get(0)._getId(), sourceFeedId);
    assertEquals(((List<Id>) replication.get("outputs")).get(0)._getId(), targetFeedId);
}
示例5: storeEntity
import org.apache.falcon.entity.v0.cluster.Cluster; //导入依赖的package包/类
/**
 * Loads a canned entity definition from the classpath, renames it, and publishes it
 * to the configuration store, replacing any previous entity of the same name.
 *
 * For CLUSTER entities, the WRITE interface endpoint is rewired to the test
 * filesystem ("fs.default.name") before publishing.
 *
 * Fix over the original: the switch lacked a {@code default} label, unlike the
 * sibling storeEntity elsewhere in this code base; added for consistency and to
 * satisfy MissingSwitchDefault-style checks (no behavior change — unhandled types
 * are still silently skipped).
 *
 * @param type entity type to load (CLUSTER, FEED or PROCESS)
 * @param name name to assign to the loaded entity
 * @throws Exception on unmarshalling or store failures
 */
protected void storeEntity(EntityType type, String name) throws Exception {
    Unmarshaller unmarshaller = type.getUnmarshaller();
    ConfigurationStore store = ConfigurationStore.get();
    store.remove(type, name);
    switch (type) {
        case CLUSTER:
            Cluster cluster = (Cluster) unmarshaller.unmarshal(this.getClass().getResource(CLUSTER_XML));
            cluster.setName(name);
            // Point the cluster's write interface at the test filesystem.
            ClusterHelper.getInterface(cluster, Interfacetype.WRITE).setEndpoint(conf.get("fs.default.name"));
            store.publish(type, cluster);
            break;
        case FEED:
            Feed feed = (Feed) unmarshaller.unmarshal(this.getClass().getResource(FEED_XML));
            feed.setName(name);
            store.publish(type, feed);
            break;
        case PROCESS:
            Process process = (Process) unmarshaller.unmarshal(this.getClass().getResource(PROCESS_XML));
            process.setName(name);
            store.publish(type, process);
            break;
        default:
            // Other entity types are not exercised by these tests.
            break;
    }
}
示例6: getApplicableColos
import org.apache.falcon.entity.v0.cluster.Cluster; //导入依赖的package包/类
/**
 * Resolves the set of colos an entity operation applies to.
 *
 * In embedded mode the default colos are returned; CLUSTER entities apply to all
 * colos; otherwise the colos are collected from every cluster the entity is
 * defined on.
 *
 * @param type   entity type name (case-insensitive)
 * @param entity the entity whose clusters determine the colos
 * @return applicable colo names
 * @throws FalconWebException wrapping any FalconException as a 400 response
 */
protected Set<String> getApplicableColos(String type, Entity entity) throws FalconWebException {
    try {
        if (DeploymentUtil.isEmbeddedMode()) {
            return DeploymentUtil.getDefaultColos();
        }
        if (EntityType.valueOf(type.toUpperCase()) == EntityType.CLUSTER) {
            return getAllColos();
        }
        // One colo per distinct cluster the entity is defined on.
        Set<String> result = new HashSet<String>();
        for (String clusterName : EntityUtil.getClustersDefined(entity)) {
            Cluster definedCluster = EntityUtil.getEntity(EntityType.CLUSTER, clusterName);
            result.add(definedCluster.getColo());
        }
        return result;
    } catch (FalconException e) {
        throw FalconWebException.newException(e, Response.Status.BAD_REQUEST);
    }
}
示例7: getReplicationCoordinators
import org.apache.falcon.entity.v0.cluster.Cluster; //导入依赖的package包/类
/**
 * Builds the replication coordinators for a feed on the given target cluster.
 *
 * Only applies when the feed marks this cluster as a TARGET; in that case a
 * replication workflow is created once, and one coordinator is produced per
 * SOURCE cluster of the feed (skipping any that yield no coordinator).
 *
 * @param targetCluster cluster the feed replicates into
 * @param bundlePath    base path of the Oozie bundle being assembled
 * @return replication coordinators, possibly empty
 * @throws FalconException on workflow/coordinator creation failures
 */
private List<COORDINATORAPP> getReplicationCoordinators(Cluster targetCluster, Path bundlePath)
    throws FalconException {
    Feed feed = getEntity();
    List<COORDINATORAPP> coords = new ArrayList<COORDINATORAPP>();

    if (FeedHelper.getCluster(feed, targetCluster.getName()).getType() == ClusterType.TARGET) {
        String coordName = EntityUtil.getWorkflowName(Tag.REPLICATION, feed).toString();
        Path coordBase = getCoordPath(bundlePath, coordName);
        // The replication workflow is shared by all source clusters.
        createReplicatonWorkflow(targetCluster, coordBase, coordName);

        for (org.apache.falcon.entity.v0.feed.Cluster member : feed.getClusters().getClusters()) {
            if (member.getType() != ClusterType.SOURCE) {
                continue;
            }
            Cluster sourceCluster =
                    (Cluster) ConfigurationStore.get().get(EntityType.CLUSTER, member.getName());
            COORDINATORAPP coord = createAndGetCoord(feed, sourceCluster, targetCluster, bundlePath);
            if (coord != null) {
                coords.add(coord);
            }
        }
    }
    return coords;
}
示例8: newWorkflowSchedule
import org.apache.falcon.entity.v0.cluster.Cluster; //导入依赖的package包/类
@Override
public Properties newWorkflowSchedule(Feed feed, Date startDate, String clusterName, String user)
throws FalconException {
org.apache.falcon.entity.v0.feed.Cluster feedCluster = FeedHelper.getCluster(feed, clusterName);
if (!startDate.before(feedCluster.getValidity().getEnd())) {
return null;
}
Cluster cluster = CONFIG_STORE.get(EntityType.CLUSTER, feedCluster.getName());
Path bundlePath = new Path(ClusterHelper.getLocation(cluster, "staging"), EntityUtil.getStagingPath(feed));
Feed feedClone = (Feed) feed.copy();
EntityUtil.setStartDate(feedClone, clusterName, startDate);
AbstractOozieEntityMapper<Feed> mapper = new OozieFeedMapper(feedClone);
if (!mapper.map(cluster, bundlePath)) {
return null;
}
return createAppProperties(clusterName, bundlePath, user);
}
示例9: createOutputEvent
import org.apache.falcon.entity.v0.cluster.Cluster; //导入依赖的package包/类
/**
 * Registers a feed output on the coordinator: creates the backing dataset, adds a
 * data-out event bound to it, and exposes the resolved path to the workflow as the
 * property "&lt;name&gt;.&lt;type&gt;".
 *
 * @param instance EL instance expression for the output occurrence
 * @throws FalconException if the dataset cannot be created
 */
private void createOutputEvent(String feed, String name, Cluster cluster,
                               String type, LocationType locType, COORDINATORAPP coord,
                               Map<String, String> props, String instance)
    throws FalconException {
    // Dataset and data-out share the same "<name><type>" identifier.
    String datasetId = name + type;

    SYNCDATASET dataset = createDataSet(feed, cluster, datasetId, locType);
    coord.getDatasets().getDatasetOrAsyncDataset().add(dataset);

    if (coord.getOutputEvents() == null) {
        coord.setOutputEvents(new OUTPUTEVENTS());
    }
    DATAOUT dataout = new DATAOUT();
    dataout.setName(datasetId);
    dataout.setDataset(datasetId);
    dataout.setInstance(getELExpression(instance));
    coord.getOutputEvents().getDataOut().add(dataout);

    // Let the workflow resolve the materialized output path via coord EL.
    props.put(name + "." + type, "${coord:dataOut('" + datasetId + "')}");
}
示例10: createDataSet
import org.apache.falcon.entity.v0.cluster.Cluster; //导入依赖的package包/类
/**
 * Builds an Oozie SYNCDATASET describing one feed location on a cluster:
 * URI template (prefixed with ${nameNode} when the path has no scheme), frequency,
 * initial instance from the cluster validity start, timezone, and done-flag
 * (empty string when the feed declares no availability flag).
 *
 * @throws FalconException if the feed cannot be resolved from the store
 */
private SYNCDATASET createDataSet(String feedName, Cluster cluster, String datasetName, LocationType locationType)
    throws FalconException {
    Feed feed = (Feed) EntityUtil.getEntity(EntityType.FEED, feedName);

    SYNCDATASET ds = new SYNCDATASET();
    ds.setName(datasetName);

    // Absolute URIs are used as-is; scheme-less paths are anchored at ${nameNode}.
    String location = FeedHelper.getLocation(feed, locationType, cluster.getName()).getPath();
    if (new Path(location).toUri().getScheme() != null) {
        ds.setUriTemplate(location);
    } else {
        ds.setUriTemplate("${nameNode}" + location);
    }

    ds.setFrequency("${coord:" + feed.getFrequency().toString() + "}");

    org.apache.falcon.entity.v0.feed.Cluster feedCluster =
            FeedHelper.getCluster(feed, cluster.getName());
    ds.setInitialInstance(SchemaHelper.formatDateUTC(feedCluster.getValidity().getStart()));
    ds.setTimezone(feed.getTimezone().getID());

    // An empty done-flag tells Oozie the directory itself signals availability.
    String availabilityFlag = feed.getAvailabilityFlag();
    ds.setDoneFlag(availabilityFlag == null ? "" : availabilityFlag);

    return ds;
}
示例11: addOptionalInputProperties
import org.apache.falcon.entity.v0.cluster.Cluster; //导入依赖的package包/类
/**
 * Publishes the coordinator properties for an optional input feed, keyed by the
 * input name: frequency, time unit, timezone, duration terminator, initial
 * instance, done-flag sentinel, URI template, and start/end instance expressions.
 *
 * The location path has '$' escaped to '%' so EL in the template survives
 * property substitution.
 *
 * @throws FalconException if the feed cannot be resolved from the store
 */
private void addOptionalInputProperties(Properties properties, Input in, String clusterName)
    throws FalconException {
    Feed feed = EntityUtil.getEntity(EntityType.FEED, in.getFeed());
    org.apache.falcon.entity.v0.feed.Cluster feedCluster = FeedHelper.getCluster(feed, clusterName);
    String inName = in.getName();

    properties.put(inName + ".frequency", String.valueOf(feed.getFrequency().getFrequency()));
    properties.put(inName + ".freq_timeunit", mapToCoordTimeUnit(feed.getFrequency().getTimeUnit()).name());
    properties.put(inName + ".timezone", feed.getTimezone().getID());
    properties.put(inName + ".end_of_duration", Timeunit.NONE.name());
    properties.put(inName + ".initial-instance",
            SchemaHelper.formatDateUTC(feedCluster.getValidity().getStart()));
    // Optional inputs never gate on a done flag; use a sentinel value.
    properties.put(inName + ".done-flag", "notused");

    // Escape '$' so coord EL in the template is not expanded prematurely.
    String dataPath = FeedHelper.getLocation(feed, LocationType.DATA, clusterName).getPath().replace('$', '%');
    if (new Path(dataPath).toUri().getScheme() != null) {
        properties.put(inName + ".uri-template", dataPath);
    } else {
        properties.put(inName + ".uri-template", "${nameNode}" + dataPath);
    }

    properties.put(inName + ".start-instance", in.getStart());
    properties.put(inName + ".end-instance", in.getEnd());
}
示例12: storeEntity
import org.apache.falcon.entity.v0.cluster.Cluster; //导入依赖的package包/类
/**
 * Loads a canned entity definition from the classpath, renames it, and publishes
 * it to the configuration store, replacing any prior entity of the same name.
 * Entity types other than CLUSTER/FEED/PROCESS are silently skipped.
 *
 * @param type entity type to load
 * @param name name to assign before publishing
 * @throws Exception on unmarshalling or store failures
 */
private void storeEntity(EntityType type, String name) throws Exception {
    Unmarshaller unmarshaller = type.getUnmarshaller();
    ConfigurationStore store = ConfigurationStore.get();

    // Replace any stale entity left over from a previous run.
    store.remove(type, name);

    switch (type) {
        case CLUSTER: {
            Cluster cluster = (Cluster) unmarshaller.unmarshal(this.getClass().getResource(CLUSTER_XML));
            cluster.setName(name);
            store.publish(type, cluster);
            break;
        }
        case FEED: {
            Feed feed = (Feed) unmarshaller.unmarshal(this.getClass().getResource(FEED_XML));
            feed.setName(name);
            store.publish(type, feed);
            break;
        }
        case PROCESS: {
            Process process = (Process) unmarshaller.unmarshal(this.getClass().getResource(PROCESS_XML));
            process.setName(name);
            store.publish(type, process);
            break;
        }
        default:
            break;
    }
}
示例13: map
import org.apache.falcon.entity.v0.cluster.Cluster; //导入依赖的package包/类
/**
 * Materializes the Oozie bundle for this entity on the given cluster: builds each
 * coordinator, marshals it under the bundle path, wires it into the bundle app,
 * and finally marshals the bundle itself.
 *
 * Fix over the original: {@code coordinators.size() == 0} replaced with the
 * idiomatic {@code coordinators.isEmpty()} (same behavior).
 *
 * @param cluster    cluster to deploy onto
 * @param bundlePath base HDFS path for the bundle
 * @return true when a bundle was written; false when there were no coordinators
 * @throws FalconException on coordinator creation or marshalling failures
 */
public boolean map(Cluster cluster, Path bundlePath) throws FalconException {
    BUNDLEAPP bundleApp = new BUNDLEAPP();
    bundleApp.setName(EntityUtil.getWorkflowName(entity).toString());
    // all the properties are set prior to bundle and coordinators creation
    List<COORDINATORAPP> coordinators = getCoordinators(cluster, bundlePath);
    if (coordinators.isEmpty()) {
        return false;
    }
    for (COORDINATORAPP coordinatorapp : coordinators) {
        Path coordPath = getCoordPath(bundlePath, coordinatorapp.getName());
        String coordXmlName = marshal(cluster, coordinatorapp, coordPath,
                EntityUtil.getWorkflowNameSuffix(coordinatorapp.getName(), entity));
        createTempDir(cluster, coordPath);
        // Register the written coordinator in the bundle by its storage path.
        COORDINATOR bundleCoord = new COORDINATOR();
        bundleCoord.setName(coordinatorapp.getName());
        bundleCoord.setAppPath(getStoragePath(coordPath) + "/" + coordXmlName);
        bundleApp.getCoordinator().add(bundleCoord);
        copySharedLibs(cluster, coordPath);
    }
    marshal(cluster, bundleApp, bundlePath);
    return true;
}
示例14: marshal
import org.apache.falcon.entity.v0.cluster.Cluster; //导入依赖的package包/类
/**
 * Marshals a JAXB element as formatted XML to the given path on the cluster's
 * filesystem. When debug logging is enabled, the same XML is also marshalled to a
 * string and logged.
 *
 * @param cluster     cluster whose Hadoop configuration resolves the filesystem
 * @param jaxbElement element to serialize
 * @param jaxbContext context used to create the marshaller
 * @param outPath     destination path for the XML
 * @throws FalconException wrapping any marshalling or filesystem error
 */
protected void marshal(Cluster cluster, JAXBElement<?> jaxbElement, JAXBContext jaxbContext, Path outPath)
    throws FalconException {
    try {
        Marshaller marshaller = jaxbContext.createMarshaller();
        marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);

        FileSystem fs = outPath.getFileSystem(ClusterHelper.getConfiguration(cluster));
        OutputStream stream = fs.create(outPath);
        try {
            marshaller.marshal(jaxbElement, stream);
        } finally {
            stream.close();
        }

        if (LOG.isDebugEnabled()) {
            // Re-marshal to a string only when the debug output will actually be used.
            StringWriter debugXml = new StringWriter();
            marshaller.marshal(jaxbElement, debugXml);
            LOG.debug("Writing definition to " + outPath + " on cluster " + cluster.getName());
            LOG.debug(debugXml.getBuffer());
        }

        LOG.info("Marshalled " + jaxbElement.getDeclaredType() + " to " + outPath);
    } catch (Exception e) {
        throw new FalconException("Unable to marshall app object", e);
    }
}
示例15: afterDelete
import org.apache.falcon.entity.v0.cluster.Cluster; //导入依赖的package包/类
/**
 * Post-delete hook: removes the entity's staging directory (the parent of its
 * staging path) from the cluster's filesystem.
 *
 * A failed recursive delete of an existing path is treated as an error; any
 * exception is wrapped in a FalconException with the entity and cluster context.
 */
@Override
public void afterDelete(Entity entity, String clusterName) throws FalconException {
    try {
        Cluster cluster = EntityUtil.getEntity(EntityType.CLUSTER, clusterName);
        Path stagingDir = new Path(ClusterHelper.getLocation(cluster, "staging"),
                EntityUtil.getStagingPath(entity)).getParent();
        LOG.info("Deleting entity path " + stagingDir + " on cluster " + clusterName);

        Configuration conf = ClusterHelper.getConfiguration(cluster);
        FileSystem fs = FileSystem.get(conf);
        boolean presentButNotDeleted = fs.exists(stagingDir) && !fs.delete(stagingDir, true);
        if (presentButNotDeleted) {
            throw new FalconException("Unable to cleanup entity path: " + stagingDir);
        }
    } catch (Exception e) {
        throw new FalconException(
                "Failed to cleanup entity path for " + entity.toShortString() + " on cluster " + clusterName, e);
    }
}