This article collects typical usage examples of the Java class org.apache.falcon.entity.FeedHelper. If you are wondering what FeedHelper is for, how to use it, or what real-world code that uses it looks like, the curated examples below should help.
The FeedHelper class belongs to the org.apache.falcon.entity package. Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
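Before working through the examples, note that nearly all of them revolve around two static calls: FeedHelper.getCluster(feed, clusterName), which returns the feed's per-cluster configuration (validity window, retention, partition), and FeedHelper.getLocation(feed, locationType, clusterName), which returns its storage location. The quick-start below is a minimal sketch of that pattern, not code from any of the examples; the entity name "rawLogs" and cluster name "primary" are hypothetical placeholders, assumed to be already registered in the ConfigurationStore.

import org.apache.falcon.FalconException;
import org.apache.falcon.entity.EntityUtil;
import org.apache.falcon.entity.FeedHelper;
import org.apache.falcon.entity.v0.EntityType;
import org.apache.falcon.entity.v0.feed.Feed;
import org.apache.falcon.entity.v0.feed.Location;
import org.apache.falcon.entity.v0.feed.LocationType;

public class FeedHelperQuickstart {
    public static void main(String[] args) throws FalconException {
        // Hypothetical entity names; the feed and its cluster must already
        // be registered in the ConfigurationStore for these lookups to succeed.
        Feed feed = (Feed) EntityUtil.getEntity(EntityType.FEED, "rawLogs");

        // Per-cluster view of the feed: validity window, retention, partition, ...
        org.apache.falcon.entity.v0.feed.Cluster feedCluster =
                FeedHelper.getCluster(feed, "primary");
        System.out.println("Feed valid until: " + feedCluster.getValidity().getEnd());

        // Storage location template for the feed's data on that cluster.
        Location dataLocation = FeedHelper.getLocation(feed, LocationType.DATA, "primary");
        System.out.println("Data path template: " + dataLocation.getPath());
    }
}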
Example 1: getInputEntities
import org.apache.falcon.entity.FeedHelper; // import the required package/class
private static List<Referenceable> getInputEntities(org.apache.falcon.entity.v0.cluster.Cluster cluster,
                                                    Feed feed) throws Exception {
    org.apache.falcon.entity.v0.feed.Cluster feedCluster = FeedHelper.getCluster(feed, cluster.getName());
    if (feedCluster != null) {
        final CatalogTable table = getTable(feedCluster, feed);
        if (table != null) {
            CatalogStorage storage = new CatalogStorage(cluster, table);
            return createHiveTableInstance(cluster.getName(), storage.getDatabase().toLowerCase(),
                    storage.getTable().toLowerCase());
        } else {
            List<Location> locations = FeedHelper.getLocations(feedCluster, feed);
            if (CollectionUtils.isNotEmpty(locations)) {
                Location dataLocation = FileSystemStorage.getLocation(locations, LocationType.DATA);
                if (dataLocation != null) {
                    final String pathUri = normalize(dataLocation.getPath());
                    LOG.info("Registering DFS Path {} ", pathUri);
                    return fillHDFSDataSet(pathUri, cluster.getName());
                }
            }
        }
    }
    return null;
}
Example 2: getHDFSFeed
import org.apache.falcon.entity.FeedHelper; // import the required package/class
private TypeUtils.Pair<String, Feed> getHDFSFeed(String feedResource, String clusterName) throws Exception {
    Feed feed = loadEntity(EntityType.FEED, feedResource, "feed" + random());
    org.apache.falcon.entity.v0.feed.Cluster feedCluster = feed.getClusters().getClusters().get(0);
    feedCluster.setName(clusterName);
    STORE.publish(EntityType.FEED, feed);
    String feedId = assertFeedIsRegistered(feed, clusterName);
    assertFeedAttributes(feedId);

    String processId = assertEntityIsRegistered(FalconDataTypes.FALCON_FEED_CREATION.getName(),
            AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME,
            FalconBridge.getFeedQualifiedName(feed.getName(), clusterName));
    Referenceable processEntity = atlasClient.getEntity(processId);
    assertEquals(((List<Id>) processEntity.get("outputs")).get(0).getId()._getId(), feedId);

    String inputId = ((List<Id>) processEntity.get("inputs")).get(0).getId()._getId();
    Referenceable pathEntity = atlasClient.getEntity(inputId);
    assertEquals(pathEntity.getTypeName(), HiveMetaStoreBridge.HDFS_PATH);

    List<Location> locations = FeedHelper.getLocations(feedCluster, feed);
    Location dataLocation = FileSystemStorage.getLocation(locations, LocationType.DATA);
    assertEquals(pathEntity.get(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME),
            FalconBridge.normalize(dataLocation.getPath()));

    return TypeUtils.Pair.of(feedId, feed);
}
Example 3: getReplicationCoordinators
import org.apache.falcon.entity.FeedHelper; // import the required package/class
private List<COORDINATORAPP> getReplicationCoordinators(Cluster targetCluster, Path bundlePath)
        throws FalconException {
    Feed feed = getEntity();
    List<COORDINATORAPP> replicationCoords = new ArrayList<COORDINATORAPP>();

    if (FeedHelper.getCluster(feed, targetCluster.getName()).getType() == ClusterType.TARGET) {
        String coordName = EntityUtil.getWorkflowName(Tag.REPLICATION, feed).toString();
        Path basePath = getCoordPath(bundlePath, coordName);
        createReplicatonWorkflow(targetCluster, basePath, coordName);

        for (org.apache.falcon.entity.v0.feed.Cluster feedCluster : feed.getClusters().getClusters()) {
            if (feedCluster.getType() == ClusterType.SOURCE) {
                COORDINATORAPP coord = createAndGetCoord(feed,
                        (Cluster) ConfigurationStore.get().get(EntityType.CLUSTER, feedCluster.getName()),
                        targetCluster,
                        bundlePath);
                if (coord != null) {
                    replicationCoords.add(coord);
                }
            }
        }
    }
    return replicationCoords;
}
Example 4: newWorkflowSchedule
import org.apache.falcon.entity.FeedHelper; // import the required package/class
@Override
public Properties newWorkflowSchedule(Feed feed, Date startDate, String clusterName, String user)
        throws FalconException {
    org.apache.falcon.entity.v0.feed.Cluster feedCluster = FeedHelper.getCluster(feed, clusterName);
    if (!startDate.before(feedCluster.getValidity().getEnd())) {
        return null;
    }

    Cluster cluster = CONFIG_STORE.get(EntityType.CLUSTER, feedCluster.getName());
    Path bundlePath = new Path(ClusterHelper.getLocation(cluster, "staging"), EntityUtil.getStagingPath(feed));
    Feed feedClone = (Feed) feed.copy();
    EntityUtil.setStartDate(feedClone, clusterName, startDate);

    AbstractOozieEntityMapper<Feed> mapper = new OozieFeedMapper(feedClone);
    if (!mapper.map(cluster, bundlePath)) {
        return null;
    }
    return createAppProperties(clusterName, bundlePath, user);
}
Example 5: createDataSet
import org.apache.falcon.entity.FeedHelper; // import the required package/class
private SYNCDATASET createDataSet(String feedName, Cluster cluster, String datasetName, LocationType locationType)
        throws FalconException {
    Feed feed = (Feed) EntityUtil.getEntity(EntityType.FEED, feedName);

    SYNCDATASET syncdataset = new SYNCDATASET();
    syncdataset.setName(datasetName);
    String locPath = FeedHelper.getLocation(feed, locationType,
            cluster.getName()).getPath();
    syncdataset.setUriTemplate(new Path(locPath).toUri().getScheme() != null ? locPath : "${nameNode}"
            + locPath);
    syncdataset.setFrequency("${coord:" + feed.getFrequency().toString() + "}");

    org.apache.falcon.entity.v0.feed.Cluster feedCluster = FeedHelper.getCluster(feed, cluster.getName());
    syncdataset.setInitialInstance(SchemaHelper.formatDateUTC(feedCluster.getValidity().getStart()));
    syncdataset.setTimezone(feed.getTimezone().getID());
    if (feed.getAvailabilityFlag() == null) {
        syncdataset.setDoneFlag("");
    } else {
        syncdataset.setDoneFlag(feed.getAvailabilityFlag());
    }
    return syncdataset;
}
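One detail in createDataSet worth calling out (the same check reappears in Examples 6 and 10) is the uri-template fallback: if the configured feed path already carries a URI scheme such as hdfs://, it is used verbatim; otherwise the Oozie ${nameNode} variable is prepended so the coordinator can resolve the path at runtime. Below is a minimal standalone sketch of just that check, with hypothetical paths; the class and method names are illustrative, not part of Falcon.

import org.apache.hadoop.fs.Path;

public class UriTemplateFallback {
    // Mirrors the scheme check in createDataSet: absolute URIs pass through,
    // scheme-less paths are resolved against the Oozie ${nameNode} variable.
    static String toUriTemplate(String locPath) {
        return new Path(locPath).toUri().getScheme() != null
                ? locPath
                : "${nameNode}" + locPath;
    }

    public static void main(String[] args) {
        // Hypothetical feed locations.
        System.out.println(toUriTemplate("/data/feed/${YEAR}/${MONTH}"));
        // -> ${nameNode}/data/feed/${YEAR}/${MONTH}
        System.out.println(toUriTemplate("hdfs://nn:8020/data/feed"));
        // -> hdfs://nn:8020/data/feed
    }
}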
Example 6: addOptionalInputProperties
import org.apache.falcon.entity.FeedHelper; // import the required package/class
private void addOptionalInputProperties(Properties properties, Input in, String clusterName)
        throws FalconException {
    Feed feed = EntityUtil.getEntity(EntityType.FEED, in.getFeed());
    org.apache.falcon.entity.v0.feed.Cluster cluster = FeedHelper.getCluster(feed, clusterName);
    String inName = in.getName();
    properties.put(inName + ".frequency", String.valueOf(feed.getFrequency().getFrequency()));
    properties.put(inName + ".freq_timeunit", mapToCoordTimeUnit(feed.getFrequency().getTimeUnit()).name());
    properties.put(inName + ".timezone", feed.getTimezone().getID());
    properties.put(inName + ".end_of_duration", Timeunit.NONE.name());
    properties.put(inName + ".initial-instance", SchemaHelper.formatDateUTC(cluster.getValidity().getStart()));
    properties.put(inName + ".done-flag", "notused");

    String locPath = FeedHelper.getLocation(feed, LocationType.DATA, clusterName).getPath().replace('$', '%');
    properties.put(inName + ".uri-template",
            new Path(locPath).toUri().getScheme() != null ? locPath : "${nameNode}" + locPath);

    properties.put(inName + ".start-instance", in.getStart());
    properties.put(inName + ".end-instance", in.getEnd());
}
Example 7: validateFeedRetentionPeriod
import org.apache.falcon.entity.FeedHelper; // import the required package/class
public static void validateFeedRetentionPeriod(String startInstance, Feed feed, String clusterName)
        throws FalconException {
    String feedRetention = FeedHelper.getCluster(feed, clusterName).getRetention().getLimit().toString();
    ExpressionHelper evaluator = ExpressionHelper.get();

    Date now = new Date();
    ExpressionHelper.setReferenceDate(now);
    Date instStart = evaluator.evaluate(startInstance, Date.class);
    long feedDuration = evaluator.evaluate(feedRetention, Long.class);
    Date feedStart = new Date(now.getTime() - feedDuration);

    if (instStart.before(feedStart)) {
        throw new ValidationException("StartInstance :" + startInstance + " of process is out of range for Feed: "
                + feed.getName() + " in cluster: " + clusterName + "'s retention limit :" + feedRetention);
    }
}
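The date arithmetic in Example 7 is easy to trip over: the retention limit is an EL expression that evaluates to a duration in milliseconds, and the oldest retained instance is that duration before "now". The standalone sketch below replays the check with concrete values; the retention limit "days(10)" and the start-instance expression "now(-48,0)" (48 hours ago) are hypothetical inputs, and the sketch assumes the same org.apache.falcon.expression.ExpressionHelper API used in the example above.

import java.util.Date;
import org.apache.falcon.expression.ExpressionHelper;

public class RetentionWindowCheck {
    public static void main(String[] args) throws Exception {
        ExpressionHelper evaluator = ExpressionHelper.get();
        Date now = new Date();
        ExpressionHelper.setReferenceDate(now);

        // Hypothetical retention limit of 10 days, evaluated to milliseconds.
        long retentionMillis = evaluator.evaluate("days(10)", Long.class);
        Date oldestRetained = new Date(now.getTime() - retentionMillis);

        // Hypothetical process input starting 48 hours ago.
        Date instanceStart = evaluator.evaluate("now(-48,0)", Date.class);

        // The input is valid only if it starts inside the retained window;
        // Example 7 throws a ValidationException in the opposite case.
        System.out.println("within retention window: " + (!instanceStart.before(oldestRetained)));
    }
}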
Example 8: getRetentionCoordinator
import org.apache.falcon.entity.FeedHelper; // import the required package/class
private COORDINATORAPP getRetentionCoordinator(Cluster cluster, Path bundlePath) throws FalconException {
    Feed feed = getEntity();
    org.apache.falcon.entity.v0.feed.Cluster feedCluster = FeedHelper.getCluster(feed, cluster.getName());

    if (feedCluster.getValidity().getEnd().before(new Date())) {
        LOG.warn("Feed Retention is not applicable as Feed's end time for cluster " + cluster.getName()
                + " is not in the future");
        return null;
    }

    COORDINATORAPP retentionApp = new COORDINATORAPP();
    String coordName = EntityUtil.getWorkflowName(Tag.RETENTION, feed).toString();
    retentionApp.setName(coordName);
    retentionApp.setEnd(SchemaHelper.formatDateUTC(feedCluster.getValidity().getEnd()));
    retentionApp.setStart(SchemaHelper.formatDateUTC(new Date()));
    retentionApp.setTimezone(feed.getTimezone().getID());

    TimeUnit timeUnit = feed.getFrequency().getTimeUnit();
    if (timeUnit == TimeUnit.hours || timeUnit == TimeUnit.minutes) {
        retentionApp.setFrequency("${coord:hours(6)}");
    } else {
        retentionApp.setFrequency("${coord:days(1)}");
    }

    Path wfPath = getCoordPath(bundlePath, coordName);
    retentionApp.setAction(getRetentionWorkflowAction(cluster, wfPath, coordName));
    return retentionApp;
}
Example 9: getReplicationWorkflowAction
import org.apache.falcon.entity.FeedHelper; // import the required package/class
private ACTION getReplicationWorkflowAction(Cluster srcCluster, Cluster trgCluster, Path wfPath, String wfName)
        throws FalconException {
    ACTION replicationAction = new ACTION();
    WORKFLOW replicationWF = new WORKFLOW();
    try {
        replicationWF.setAppPath(getStoragePath(wfPath.toString()));
        Feed feed = getEntity();

        // Resolve the feed's partition expressions for the source and target clusters.
        String srcPart = FeedHelper.normalizePartitionExpression(
                FeedHelper.getCluster(feed, srcCluster.getName()).getPartition());
        srcPart = FeedHelper.evaluateClusterExp(srcCluster, srcPart);
        String targetPart = FeedHelper.normalizePartitionExpression(
                FeedHelper.getCluster(feed, trgCluster.getName()).getPartition());
        targetPart = FeedHelper.evaluateClusterExp(trgCluster, targetPart);

        StringBuilder pathsWithPartitions = new StringBuilder();
        pathsWithPartitions.append("${coord:dataIn('input')}/").append(
                FeedHelper.normalizePartitionExpression(srcPart, targetPart));

        Map<String, String> props = createCoordDefaultConfiguration(trgCluster, wfPath, wfName);
        props.put("srcClusterName", srcCluster.getName());
        props.put("srcClusterColo", srcCluster.getColo());
        props.put(ARG.feedNames.getPropName(), feed.getName());
        props.put(ARG.feedInstancePaths.getPropName(), pathsWithPartitions.toString());

        String parts = pathsWithPartitions.toString().replaceAll("//+", "/");
        parts = StringUtils.stripEnd(parts, "/");
        props.put("sourceRelativePaths", parts);

        props.put("distcpSourcePaths", "${coord:dataIn('input')}");
        props.put("distcpTargetPaths", "${coord:dataOut('output')}");
        props.put("falconInPaths", pathsWithPartitions.toString());
        props.put("falconInputFeeds", feed.getName());

        replicationWF.setConfiguration(getCoordConfig(props));
        replicationAction.setWorkflow(replicationWF);
    } catch (Exception e) {
        throw new FalconException("Unable to create replication workflow", e);
    }
    return replicationAction;
}
Example 10: getLocationURI
import org.apache.falcon.entity.FeedHelper; // import the required package/class
private String getLocationURI(Cluster cluster, Feed feed, LocationType type) {
    String path = FeedHelper.getLocation(feed, type, cluster.getName())
            .getPath();
    if (!path.equals("/tmp")) {
        if (new Path(path).toUri().getScheme() == null) {
            return new Path(ClusterHelper.getStorageUrl(cluster), path)
                    .toString();
        } else {
            return path;
        }
    }
    return null;
}
Example 11: validateFeedGroups
import org.apache.falcon.entity.FeedHelper; // import the required package/class
private void validateFeedGroups(Feed feed) throws ValidationException {
    String[] groupNames = feed.getGroups() != null ? feed.getGroups().split(",") : new String[]{};
    String defaultPath = FeedHelper.getLocation(feed, LocationType.DATA)
            .getPath();

    for (Cluster cluster : feed.getClusters().getClusters()) {
        if (!FeedGroup.getDatePattern(
                FeedHelper.getLocation(feed, LocationType.DATA,
                        cluster.getName()).getPath()).equals(
                FeedGroup.getDatePattern(defaultPath))) {
            throw new ValidationException("Feed's default path pattern: "
                    + FeedHelper.getLocation(feed, LocationType.DATA).getPath()
                    + ", does not match with cluster: "
                    + cluster.getName()
                    + " path pattern: "
                    + FeedHelper.getLocation(feed, LocationType.DATA, cluster.getName()).getPath());
        }
    }

    for (String groupName : groupNames) {
        FeedGroup group = FeedGroupMap.get().getGroupsMapping().get(groupName);
        if (group != null && !group.canContainFeed(feed)) {
            throw new ValidationException(
                    "Feed " + feed.getName() + "'s frequency: " + feed.getFrequency().toString()
                            + ", path pattern: " + FeedHelper.getLocation(feed, LocationType.DATA).getPath()
                            + " does not match with group: " + group.getName() + "'s frequency: "
                            + group.getFrequency()
                            + ", date pattern: " + group.getDatePattern());
        }
    }
}
Example 12: validateClusterExpDefined
import org.apache.falcon.entity.FeedHelper; // import the required package/class
private void validateClusterExpDefined(Cluster cl) throws FalconException {
    if (cl.getPartition() == null) {
        return;
    }

    org.apache.falcon.entity.v0.cluster.Cluster cluster = EntityUtil.getEntity(EntityType.CLUSTER, cl.getName());
    String part = FeedHelper.normalizePartitionExpression(cl.getPartition());
    if (FeedHelper.evaluateClusterExp(cluster, part).equals(part)) {
        throw new ValidationException(
                "At least one of the partition tags has to be a cluster expression for cluster " + cl.getName());
    }
}
Example 13: canContainFeed
import org.apache.falcon.entity.FeedHelper; // import the required package/class
public boolean canContainFeed(org.apache.falcon.entity.v0.feed.Feed feed) {
    return this.frequency.equals(feed.getFrequency())
            && this.datePattern.equals(getDatePattern(FeedHelper.getLocation(feed, LocationType.DATA).getPath()));
}
Example 14: testClusterPartitionExp
import org.apache.falcon.entity.FeedHelper; // import the required package/class
@Test
public void testClusterPartitionExp() throws FalconException {
    Cluster cluster = ConfigurationStore.get().get(EntityType.CLUSTER, "testCluster");
    Assert.assertEquals(FeedHelper.evaluateClusterExp(cluster, "/*/${cluster.colo}"),
            "/*/" + cluster.getColo());
    Assert.assertEquals(FeedHelper.evaluateClusterExp(cluster, "/*/${cluster.name}/Local"),
            "/*/" + cluster.getName() + "/Local");
    Assert.assertEquals(FeedHelper.evaluateClusterExp(cluster, "/*/${cluster.field1}/Local"),
            "/*/value1/Local");
}
Example 15: getRetentionWorkflowAction
import org.apache.falcon.entity.FeedHelper; // import the required package/class
private ACTION getRetentionWorkflowAction(Cluster cluster, Path wfPath, String wfName) throws FalconException {
    Feed feed = getEntity();
    ACTION retentionAction = new ACTION();
    WORKFLOW retentionWorkflow = new WORKFLOW();
    try {
        // Build the retention workflow app and marshal it to the workflow path.
        WORKFLOWAPP retWfApp = createRetentionWorkflow(cluster);
        retWfApp.setName(wfName);
        marshal(cluster, retWfApp, wfPath);
        retentionWorkflow.setAppPath(getStoragePath(wfPath.toString()));

        Map<String, String> props = createCoordDefaultConfiguration(cluster, wfPath, wfName);

        org.apache.falcon.entity.v0.feed.Cluster feedCluster = FeedHelper.getCluster(feed, cluster.getName());
        String feedPathMask = getLocationURI(cluster, feed, LocationType.DATA);
        String metaPathMask = getLocationURI(cluster, feed, LocationType.META);
        String statsPathMask = getLocationURI(cluster, feed, LocationType.STATS);
        String tmpPathMask = getLocationURI(cluster, feed, LocationType.TMP);

        StringBuilder feedBasePaths = new StringBuilder(feedPathMask);
        if (metaPathMask != null) {
            feedBasePaths.append(FEED_PATH_SEP).append(metaPathMask);
        }
        if (statsPathMask != null) {
            feedBasePaths.append(FEED_PATH_SEP).append(statsPathMask);
        }
        if (tmpPathMask != null) {
            feedBasePaths.append(FEED_PATH_SEP).append(tmpPathMask);
        }

        props.put("feedDataPath", feedBasePaths.toString().replaceAll("\\$\\{", "\\?\\{"));
        props.put("timeZone", feed.getTimezone().getID());
        props.put("frequency", feed.getFrequency().getTimeUnit().name());
        props.put("limit", feedCluster.getRetention().getLimit().toString());
        props.put(ARG.operation.getPropName(), EntityOps.DELETE.name());
        props.put(ARG.feedNames.getPropName(), feed.getName());
        props.put(ARG.feedInstancePaths.getPropName(), "IGNORE");

        retentionWorkflow.setConfiguration(getCoordConfig(props));
        retentionAction.setWorkflow(retentionWorkflow);
        return retentionAction;
    } catch (Exception e) {
        throw new FalconException("Unable to create parent/retention workflow", e);
    }
}