

Java Cluster Class Code Examples

This article collects typical usage examples of the Java class org.apache.falcon.entity.v0.process.Cluster. If you are wondering what the Cluster class does, how to use it, or where to find working examples, the curated code samples below should help.


The Cluster class belongs to the org.apache.falcon.entity.v0.process package. Eight code examples of the class are shown below, sorted by popularity by default.
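
Before the examples, here is a minimal, hypothetical sketch of the class in isolation: a process declares the clusters it runs on, and each cluster reference carries a name and a validity window. The sketch assumes the JAXB-generated Falcon API (Cluster, Clusters, Validity and their setters) that the examples below use; the cluster name and validity dates are purely illustrative.

import java.util.Date;

import org.apache.falcon.entity.v0.process.Cluster;
import org.apache.falcon.entity.v0.process.Clusters;
import org.apache.falcon.entity.v0.process.Process;
import org.apache.falcon.entity.v0.process.Validity;

public class ProcessClusterSketch {
    public static void main(String[] args) {
        // A process-side cluster reference: a name plus a validity window.
        Cluster cluster = new Cluster();
        cluster.setName("primary-cluster"); // illustrative name

        Validity validity = new Validity();
        validity.setStart(new Date());                                        // window start: now
        validity.setEnd(new Date(System.currentTimeMillis() + 86_400_000L));  // window end: one day later
        cluster.setValidity(validity);

        // Attach the cluster reference to a process definition.
        Process process = new Process();
        process.setClusters(new Clusters());
        process.getClusters().getClusters().add(cluster);

        System.out.println("Process runs on: " + process.getClusters().getClusters().get(0).getName());
    }
}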

Example 1: createClusterEntity

import org.apache.falcon.entity.v0.process.Cluster; // import the required package/class
/**
 * Creates an Atlas Referenceable for a Falcon cluster entity.
 *
 * @param cluster the Falcon cluster entity
 * @return the cluster Referenceable
 */
public static Referenceable createClusterEntity(final org.apache.falcon.entity.v0.cluster.Cluster cluster) {
    LOG.info("Creating cluster Entity : {}", cluster.getName());

    Referenceable clusterRef = new Referenceable(FalconDataTypes.FALCON_CLUSTER.getName());

    clusterRef.set(AtlasClient.NAME, cluster.getName());
    clusterRef.set(AtlasClient.DESCRIPTION, cluster.getDescription());
    clusterRef.set(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, cluster.getName());

    clusterRef.set(FalconBridge.COLO, cluster.getColo());

    if (cluster.getACL() != null) {
        clusterRef.set(AtlasClient.OWNER, cluster.getACL().getGroup());
    }

    if (StringUtils.isNotEmpty(cluster.getTags())) {
        clusterRef.set(FalconBridge.TAGS,
                EventUtil.convertKeyValueStringToMap(cluster.getTags()));
    }

    return clusterRef;
}
 
Author: apache, Project: incubator-atlas, Lines: 29, Source file: FalconBridge.java
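
A hypothetical call site for this method, sketched here for context: the ConfigurationStore lookup mirrors example 6 below, and both configStore and the cluster name are assumptions, not part of the original snippet.

// Hypothetical usage (inside a method that may throw Exception, as in example 6 below):
// load a Falcon cluster definition and convert it into an Atlas Referenceable.
org.apache.falcon.entity.v0.cluster.Cluster falconCluster =
        configStore.get(EntityType.CLUSTER, "primary-cluster"); // "configStore": a ConfigurationStore; name is illustrative
Referenceable clusterRef = FalconBridge.createClusterEntity(falconCluster);
// clusterRef now carries the name, description, colo, owner and tags set by the method above
// and can be handed to the bridge's Atlas notification path.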

Example 2: getInputEntities

import org.apache.falcon.entity.v0.process.Cluster; // import the required package/class
private static List<Referenceable> getInputEntities(org.apache.falcon.entity.v0.cluster.Cluster cluster,
                                                    Feed feed) throws Exception {
    org.apache.falcon.entity.v0.feed.Cluster feedCluster = FeedHelper.getCluster(feed, cluster.getName());

    if (feedCluster != null) {
        // Table-backed feeds are registered as Hive tables; location-backed feeds fall back to the HDFS data path.
        final CatalogTable table = getTable(feedCluster, feed);
        if (table != null) {
            CatalogStorage storage = new CatalogStorage(cluster, table);
            return createHiveTableInstance(cluster.getName(), storage.getDatabase().toLowerCase(),
                    storage.getTable().toLowerCase());
        } else {
            List<Location> locations = FeedHelper.getLocations(feedCluster, feed);
            if (CollectionUtils.isNotEmpty(locations)) {
                Location dataLocation = FileSystemStorage.getLocation(locations, LocationType.DATA);
                if (dataLocation != null) {
                    final String pathUri = normalize(dataLocation.getPath());
                    LOG.info("Registering DFS Path {} ", pathUri);
                    return fillHDFSDataSet(pathUri, cluster.getName());
                }
            }
        }
    }

    return null;
}
 
Author: apache, Project: incubator-atlas, Lines: 26, Source file: FalconBridge.java

Example 3: getEdgesFor

import org.apache.falcon.entity.v0.process.Cluster; // import the required package/class
private Map<Node, Set<Node>> getEdgesFor(Feed feed) {
    Map<Node, Set<Node>> nodeEdges = new HashMap<Node, Set<Node>>();
    Node feedNode = new Node(EntityType.FEED, feed.getName());
    Set<Node> feedEdges = new HashSet<Node>();
    nodeEdges.put(feedNode, feedEdges);

    for (org.apache.falcon.entity.v0.feed.Cluster cluster : feed.getClusters().getClusters()) {
        Node clusterNode = new Node(EntityType.CLUSTER, cluster.getName());
        if (!nodeEdges.containsKey(clusterNode)) {
            nodeEdges.put(clusterNode, new HashSet<Node>());
        }
        Set<Node> clusterEdges = nodeEdges.get(clusterNode);
        feedEdges.add(clusterNode);
        clusterEdges.add(feedNode);
    }
    return nodeEdges;
}
 
Author: shaikidris, Project: incubator-falcon, Lines: 18, Source file: EntityGraph.java

Example 4: getCluster

import org.apache.falcon.entity.v0.process.Cluster; // import the required package/class
public static Cluster getCluster(Process process, String clusterName) {
    for (Cluster cluster : process.getClusters().getClusters()) {
        if (cluster.getName().equals(clusterName)) {
            return cluster;
        }
    }
    return null;
}
 
Author: shaikidris, Project: incubator-falcon, Lines: 9, Source file: ProcessHelper.java
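
For context, a hypothetical call site for this helper; the process instance would normally be unmarshalled from XML as in the test examples below, and the cluster name is illustrative.

// Hypothetical usage: look up one of a process's cluster references by name.
Cluster processCluster = ProcessHelper.getCluster(process, "primary-cluster"); // name is illustrative
if (processCluster != null) {
    System.out.println("Cluster " + processCluster.getName()
            + " is valid from " + processCluster.getValidity().getStart()
            + " to " + processCluster.getValidity().getEnd());
}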

Example 5: testProcessView

import org.apache.falcon.entity.v0.process.Cluster; // import the required package/class
@Test
public void testProcessView() throws Exception {
    Process process = (Process) EntityType.PROCESS.getUnmarshaller().unmarshal(
            getClass().getResourceAsStream(PROCESS_XML));
    Cluster cluster = new Cluster();
    cluster.setName("newCluster");
    cluster.setValidity(process.getClusters().getClusters().get(0).getValidity());
    process.getClusters().getClusters().add(cluster);
    Assert.assertEquals(process.getClusters().getClusters().size(), 2);
    String currentCluster = process.getClusters().getClusters().get(0).getName();
    Process newProcess = EntityUtil.getClusterView(process, currentCluster);
    Assert.assertFalse(EntityUtil.equals(process, newProcess));
    Assert.assertEquals(newProcess.getClusters().getClusters().size(), 1);
    Assert.assertEquals(newProcess.getClusters().getClusters().get(0).getName(), currentCluster);
}
 
Author: shaikidris, Project: incubator-falcon, Lines: 16, Source file: EntityUtilTest.java

Example 6: createFeedCreationEntity

import org.apache.falcon.entity.v0.process.Cluster; // import the required package/class
public static List<Referenceable> createFeedCreationEntity(Feed feed, ConfigurationStore falconStore) throws Exception {
    LOG.info("Creating feed : {}", feed.getName());

    List<Referenceable> entities = new ArrayList<>();

    if (feed.getClusters() != null) {
        List<Referenceable> replicationInputs = new ArrayList<>();
        List<Referenceable> replicationOutputs = new ArrayList<>();

        for (org.apache.falcon.entity.v0.feed.Cluster feedCluster : feed.getClusters().getClusters()) {
            org.apache.falcon.entity.v0.cluster.Cluster cluster = falconStore.get(EntityType.CLUSTER,
                    feedCluster.getName());

            // set cluster
            Referenceable clusterReferenceable = getClusterEntityReference(cluster.getName(), cluster.getColo());
            entities.add(clusterReferenceable);

            // input as hive_table or hdfs_path, output as falcon_feed dataset
            List<Referenceable> inputs = new ArrayList<>();
            List<Referenceable> inputReferenceables = getInputEntities(cluster, feed);
            if (inputReferenceables != null) {
                entities.addAll(inputReferenceables);
                inputs.add(inputReferenceables.get(inputReferenceables.size() - 1));
            }

            List<Referenceable> outputs = new ArrayList<>();
            Referenceable feedEntity = createFeedEntity(feed, clusterReferenceable);
            if (feedEntity != null) {
                entities.add(feedEntity);
                outputs.add(feedEntity);
            }

            if (!inputs.isEmpty() || !outputs.isEmpty()) {
                Referenceable feedCreateEntity = new Referenceable(FalconDataTypes.FALCON_FEED_CREATION.getName());
                String feedQualifiedName = getFeedQualifiedName(feed.getName(), cluster.getName());

                feedCreateEntity.set(AtlasClient.NAME, feed.getName());
                feedCreateEntity.set(AtlasClient.DESCRIPTION, "Feed creation - " + feed.getName());
                feedCreateEntity.set(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, feedQualifiedName);

                if (!inputs.isEmpty()) {
                    feedCreateEntity.set(AtlasClient.PROCESS_ATTRIBUTE_INPUTS, inputs);
                }
                if (!outputs.isEmpty()) {
                    feedCreateEntity.set(AtlasClient.PROCESS_ATTRIBUTE_OUTPUTS, outputs);
                }

                feedCreateEntity.set(FalconBridge.STOREDIN, clusterReferenceable);
                entities.add(feedCreateEntity);
            }

            if (ClusterType.SOURCE == feedCluster.getType()) {
                replicationInputs.add(feedEntity);
            } else if (ClusterType.TARGET == feedCluster.getType()) {
                replicationOutputs.add(feedEntity);
            }
        }

        if (!replicationInputs.isEmpty() && !replicationOutputs.isEmpty()) {
            Referenceable feedReplicationEntity = new Referenceable(FalconDataTypes
                    .FALCON_FEED_REPLICATION.getName());

            feedReplicationEntity.set(AtlasClient.NAME, feed.getName());
            feedReplicationEntity.set(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, feed.getName());

            feedReplicationEntity.set(AtlasClient.PROCESS_ATTRIBUTE_INPUTS, replicationInputs);
            feedReplicationEntity.set(AtlasClient.PROCESS_ATTRIBUTE_OUTPUTS, replicationOutputs);
            entities.add(feedReplicationEntity);
        }

    }
    return entities;
}
 
Author: apache, Project: incubator-atlas, Lines: 74, Source file: FalconBridge.java

Example 7: shouldUpdate

import org.apache.falcon.entity.v0.process.Cluster; // import the required package/class
public static boolean shouldUpdate(Feed oldFeed, Feed newFeed, Process affectedProcess) {
    if (!FeedHelper.getLocation(oldFeed.getLocations(), LocationType.DATA)
        .getPath().equals(FeedHelper.getLocation(newFeed.getLocations(), LocationType.DATA).getPath())
            || !FeedHelper.getLocation(oldFeed.getLocations(), LocationType.META)
                .getPath().equals(FeedHelper.getLocation(newFeed.getLocations(), LocationType.META).getPath())
            || !FeedHelper.getLocation(oldFeed.getLocations(), LocationType.STATS)
                .getPath().equals(FeedHelper.getLocation(newFeed.getLocations(), LocationType.STATS).getPath())
            || !FeedHelper.getLocation(oldFeed.getLocations(), LocationType.TMP)
                .getPath().equals(FeedHelper.getLocation(newFeed.getLocations(), LocationType.TMP).getPath())) {
        return true;
    }
    LOG.debug(oldFeed.toShortString() + ": Location identical. Ignoring...");

    if (!oldFeed.getFrequency().equals(newFeed.getFrequency())) {
        return true;
    }
    LOG.debug(oldFeed.toShortString() + ": Frequency identical. Ignoring...");

    // It is not possible for the old feed's partitions to be non-empty while the
    // new feed's are empty; the validator should have rejected that. And if new
    // partitions are added while the old feed had none, there is nothing to
    // update in the process.
    boolean partitionApplicable = false;
    Inputs affectedInputs = affectedProcess.getInputs();
    if (affectedInputs != null && affectedInputs.getInputs() != null) {
        for (Input input : affectedInputs.getInputs()) {
            if (input.getFeed().equals(oldFeed.getName())) {
                if (input.getPartition() != null && !input.getPartition().isEmpty()) {
                    partitionApplicable = true;
                }
            }
        }
        if (partitionApplicable) {
            LOG.debug("Partitions are applicable. Checking ...");
            if (newFeed.getPartitions() != null && oldFeed.getPartitions() != null) {
                List<String> newParts = getPartitions(newFeed.getPartitions());
                List<String> oldParts = getPartitions(oldFeed.getPartitions());
                if (newParts.size() != oldParts.size()) {
                    return true;
                }
                if (!newParts.containsAll(oldParts)) {
                    return true;
                }
            }
            LOG.debug(oldFeed.toShortString() + ": Partitions identical. Ignoring...");
        }
    }

    for (Cluster cluster : affectedProcess.getClusters().getClusters()) {
        if (!FeedHelper
                .getCluster(oldFeed, cluster.getName()).getValidity().getStart()
                .equals(FeedHelper.getCluster(newFeed, cluster.getName()).getValidity().getStart())
                || !FeedHelper.getLocation(oldFeed, LocationType.DATA, cluster.getName()).getPath()
                .equals(FeedHelper.getLocation(newFeed, LocationType.DATA, cluster.getName()).getPath())
                || !FeedHelper.getLocation(oldFeed, LocationType.META, cluster.getName()).getPath()
                .equals(FeedHelper.getLocation(newFeed, LocationType.META, cluster.getName()).getPath())
                || !FeedHelper.getLocation(oldFeed, LocationType.STATS, cluster.getName()).getPath()
                .equals(FeedHelper.getLocation(newFeed, LocationType.STATS, cluster.getName()).getPath())
                || !FeedHelper.getLocation(oldFeed, LocationType.TMP, cluster.getName()).getPath()
                .equals(FeedHelper.getLocation(newFeed, LocationType.TMP, cluster.getName()).getPath())) {
            return true;
        }
        LOG.debug(oldFeed.toShortString() + ": Feed on cluster " + cluster.getName() + " identical. Ignoring...");
    }

    return false;
}
 
Author: shaikidris, Project: incubator-falcon, Lines: 69, Source file: UpdateHelper.java

Example 8: testParse

import org.apache.falcon.entity.v0.process.Cluster; // import the required package/class
@Test
public void testParse() throws FalconException, JAXBException {

    Process process = parser.parseAndValidate(getClass().getResourceAsStream(PROCESS_XML));

    Assert.assertNotNull(process);
    Assert.assertEquals(process.getName(), "sample");

    Assert.assertEquals(process.getParallel(), 1);
    Assert.assertEquals(process.getOrder().name(), "LIFO");
    Assert.assertEquals(process.getFrequency().toString(), "hours(1)");
    Assert.assertEquals(process.getEntityType(), EntityType.PROCESS);

    Assert.assertEquals(process.getInputs().getInputs().get(0).getName(), "impression");
    Assert.assertEquals(process.getInputs().getInputs().get(0).getFeed(), "impressionFeed");
    Assert.assertEquals(process.getInputs().getInputs().get(0).getStart(), "today(0,0)");
    Assert.assertEquals(process.getInputs().getInputs().get(0).getEnd(), "today(2,0)");
    Assert.assertEquals(process.getInputs().getInputs().get(0).getPartition(), "*/US");
    Assert.assertEquals(process.getInputs().getInputs().get(0).isOptional(), false);

    Assert.assertEquals(process.getOutputs().getOutputs().get(0).getName(), "impOutput");
    Assert.assertEquals(process.getOutputs().getOutputs().get(0).getFeed(), "imp-click-join1");
    Assert.assertEquals(process.getOutputs().getOutputs().get(0).getInstance(), "today(0,0)");

    Assert.assertEquals(process.getProperties().getProperties().get(0).getName(), "name1");
    Assert.assertEquals(process.getProperties().getProperties().get(0).getValue(), "value1");

    Cluster processCluster = process.getClusters().getClusters().get(0);
    Assert.assertEquals(SchemaHelper.formatDateUTC(processCluster.getValidity().getStart()), "2011-11-02T00:00Z");
    Assert.assertEquals(SchemaHelper.formatDateUTC(processCluster.getValidity().getEnd()), "2011-12-30T00:00Z");
    Assert.assertEquals(process.getTimezone().getID(), "UTC");

    Assert.assertEquals(process.getWorkflow().getEngine().name().toLowerCase(), "oozie");
    Assert.assertEquals(process.getWorkflow().getPath(), "/path/to/workflow");

    StringWriter stringWriter = new StringWriter();
    Marshaller marshaller = EntityType.PROCESS.getMarshaller();
    marshaller.marshal(process, stringWriter);
    System.out.println(stringWriter.toString());

    // TODO for retry and late policy
}
 
Author: shaikidris, Project: incubator-falcon, Lines: 43, Source file: ProcessEntityParserTest.java


Note: The org.apache.falcon.entity.v0.process.Cluster examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and the copyright of the source code belongs to the original authors; please consult the corresponding project's license before redistributing or using it. Do not reproduce without permission.