

Java QueryFunction Class Code Examples

This article collects and summarizes typical usage examples of the Java class storm.trident.state.QueryFunction. If you are unsure what the QueryFunction class does, how it is used, or what real-world usage looks like, the curated examples below should help.


The QueryFunction class belongs to the storm.trident.state package. A total of 13 code examples of the class are shown below, sorted by popularity by default.
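
Before diving into the examples, here is a minimal sketch of what implementing a QueryFunction looks like. It is illustrative only: MyModelState and its size() method are hypothetical placeholders standing in for any storm.trident.state.State implementation, while BaseQueryFunction, TridentTuple, TridentCollector and Values are the standard Storm Trident types.

import java.util.ArrayList;
import java.util.List;

import backtype.storm.tuple.Values;
import storm.trident.operation.TridentCollector;
import storm.trident.state.BaseQueryFunction;
import storm.trident.state.State;
import storm.trident.tuple.TridentTuple;

// Hypothetical state type, used only for this sketch; any storm.trident.state.State would do.
interface MyModelState extends State {
    int size();
}

public class SizeQuery extends BaseQueryFunction<MyModelState, Integer> {
    // Called once per batch: return one result per input tuple, in the same order.
    @Override
    public List<Integer> batchRetrieve(MyModelState state, List<TridentTuple> args) {
        List<Integer> results = new ArrayList<Integer>();
        for (TridentTuple ignored : args) {
            results.add(state.size()); // size() is an assumed lookup on the hypothetical state
        }
        return results;
    }

    // Called once per input tuple: append the retrieved value to the output tuple.
    @Override
    public void execute(TridentTuple tuple, Integer result, TridentCollector collector) {
        collector.emit(new Values(result));
    }
}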

Example 1: main

import storm.trident.state.QueryFunction; // import the required package/class
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    if (args.length < 3) {
        Logger.getAnonymousLogger().log(Level.SEVERE, "where are the commandline args? -- use args -- folder numWorkers windowSize");
        System.exit(-1);
    }

    final FieldTemplate template = new MlStormFieldTemplate();
    final int numWorkers = Integer.valueOf(args[1]);
    final int windowSize = Integer.valueOf(args[2]);
    final StateUpdater stateUpdater = new CobwebClusterUpdater(template);
    final StateFactory stateFactory = new MlStormClustererFactory.CobwebClustererFactory(numWorkers, windowSize);
    final QueryFunction<CobwebClustererState, String> queryFunction = new MlStormClustererQuery.CobwebClustererQuery();
    final MlStormSpout features = new MddbFeatureExtractorSpout(args[0], template);
    final StormTopology stormTopology = WekaBaseLearningTopology.buildTopology(features, template, numWorkers, stateUpdater, stateFactory, queryFunction, null, TOPOLOGY_DRPC_NAME, null);

    if (numWorkers == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(TOPOLOGY_DRPC_NAME, MlStormConfig.getDefaultMlStormConfig(numWorkers), stormTopology);
    } else {
        StormSubmitter.submitTopology(TOPOLOGY_DRPC_NAME, MlStormConfig.getDefaultMlStormConfig(numWorkers), stormTopology);
    }
}
 
Developer: LakkiB, Project: mlstorm, Lines: 23, Source: CobwebClusteringTopology.java

Example 2: main

import storm.trident.state.QueryFunction; // import the required package/class
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    if (args.length < 5) {
        System.err.println(" Where are all the arguments? -- use args -- folder numWorkers windowSize k parallelism");
        return;
    }

    final FieldTemplate template = new MlStormFieldTemplate();
    final int numWorkers = Integer.valueOf(args[1]);
    final int windowSize = Integer.valueOf(args[2]);
    final int k = Integer.valueOf(args[3]);
    final int parallelism = Integer.valueOf(args[4]);
    final StateUpdater stateUpdater = new KmeansClusterUpdater(template);
    final StateFactory stateFactory = new MlStormClustererFactory.KmeansClustererFactory(k, windowSize, template);
    final QueryFunction<KmeansClustererState, String> queryFunction = new MlStormClustererQuery.KmeansClustererQuery();
    final QueryFunction<KmeansClustererState, String> parameterUpdateFunction = new MlStormClustererQuery.KmeansNumClustersUpdateQuery();
    final MlStormSpout features = new MddbFeatureExtractorSpout(args[0], template);
    final StormTopology stormTopology = buildTopology(features, template, parallelism, stateUpdater, stateFactory, queryFunction, parameterUpdateFunction, "kmeans", "kUpdate");

    if (numWorkers == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("kmeans", MlStormConfig.getDefaultMlStormConfig(numWorkers), stormTopology);
    } else {
        StormSubmitter.submitTopology("kmeans", MlStormConfig.getDefaultMlStormConfig(numWorkers), stormTopology);
    }
}
 
Developer: LakkiB, Project: mlstorm, Lines: 26, Source: KmeansClusteringTopology.java

Example 3: main

import storm.trident.state.QueryFunction; // import the required package/class
public static void main(String[] args) throws Exception {
    if (args.length < 4) {
        Logger.getAnonymousLogger().log(Level.SEVERE, " Where are all the arguments? -- use args -- file numWorkers windowSize parallelism");
        System.exit(-1);
    }

    final FieldTemplate template = new MlStormFieldTemplate();
    final int numWorkers = Integer.valueOf(args[1]);
    final int windowSize = Integer.valueOf(args[2]);
    final int parallelism = Integer.valueOf(args[3]);
    final StateUpdater stateUpdater = new BinaryClassifierStateUpdater(template);
    final StateFactory stateFactory = new BinaryClassifierFactory(WekaClassificationAlgorithms.svm.name(), windowSize, template, null /* weka.core.Utils.splitOptions("-C 1.0 -L 0.0010 -P 1.0E-12 -N 0 -V -1 -W 1 -K \"weka.classifiers.functions.supportVector.PolyKernel -C 250007 -E 1.0\"")*/);
    final QueryFunction<MlStormWekaState, Integer> queryFunction = new BinaryClassifierQuery.SvmQuery();
    final QueryFunction<KmeansClustererState, String> parameterUpdateFunction = null;
    final MlStormSpout features = new AustralianElectricityPricingSpout(args[0], template);
    final StormTopology stormTopology = WekaBaseLearningTopology.buildTopology(features, template, parallelism, stateUpdater, stateFactory, queryFunction, parameterUpdateFunction, "svm", "svmUpdate");

    if (numWorkers == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("SVM", MlStormConfig.getDefaultMlStormConfig(numWorkers), stormTopology);
    } else {
        StormSubmitter.submitTopology("SVM", MlStormConfig.getDefaultMlStormConfig(numWorkers), stormTopology);
    }
}
 
Developer: LakkiB, Project: mlstorm, Lines: 25, Source: SvmTopology.java

Example 4: stateQuery

import storm.trident.state.QueryFunction; // import the required package/class
public Stream stateQuery(TridentState state, Fields inputFields, QueryFunction function, Fields functionFields) {
    projectionValidation(inputFields);
    String stateId = state._node.stateInfo.id;
    Node n = new ProcessorNode(_topology.getUniqueStreamId(),
                    _name,
                    TridentUtils.fieldsConcat(getOutputFields(), functionFields),
                    functionFields,
                    new StateQueryProcessor(stateId, inputFields, function));
    _topology._colocate.get(stateId).add(n);
    return _topology.addSourcedNode(this, n);
}
 
Developer: zhangjunfang, Project: jstorm-0.9.6.3-, Lines: 12, Source: Stream.java
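
For context, a typical call site of this method wires a DRPC stream to a previously persisted TridentState. The following minimal sketch uses the built-in storm.trident.operation.builtin.MapGet query function; the "count-for-word" function name and the wordCounts state are placeholders, not taken from the examples above, and real code would usually split the DRPC args into individual keys first.

import backtype.storm.tuple.Fields;
import storm.trident.TridentState;
import storm.trident.TridentTopology;
import storm.trident.operation.builtin.MapGet;

public class StateQueryUsageSketch {
    // wordCounts is assumed to be a map-backed TridentState built elsewhere with partitionPersist or persistentAggregate.
    public static void addDrpcQuery(TridentTopology topology, TridentState wordCounts) {
        topology.newDRPCStream("count-for-word")
                .stateQuery(wordCounts, new Fields("args"), new MapGet(), new Fields("count"))
                .project(new Fields("count"));
    }
}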

Example 5: stateQuery

import storm.trident.state.QueryFunction; // import the required package/class
public Stream stateQuery(TridentState state, Fields inputFields, QueryFunction function, Fields functionFields) {
    return _stream.partitionBy(_groupFields)
                  .stateQuery(state,
                     inputFields,
                     function,
                     functionFields);
}
 
Developer: zhangjunfang, Project: jstorm-0.9.6.3-, Lines: 8, Source: GroupedStream.java

Example 6: stateQuery

import storm.trident.state.QueryFunction; // import the required package/class
public Stream stateQuery(TridentState state, Fields inputFields, QueryFunction function, Fields functionFields) {
    projectionValidation(inputFields);
    String stateId = state._node.stateInfo.id;
    Node n =
            new ProcessorNode(_topology.getUniqueStreamId(), _name, TridentUtils.fieldsConcat(getOutputFields(), functionFields), functionFields,
                    new StateQueryProcessor(stateId, inputFields, function));
    _topology._colocate.get(stateId).add(n);
    return _topology.addSourcedNode(this, n);
}
 
Developer: kkllwww007, Project: jstrom, Lines: 10, Source: Stream.java

Example 7: assertArguments

import storm.trident.state.QueryFunction; // import the required package/class
private static void assertArguments(IRichSpout spout, int parallelism, List<StateUpdater> stateUpdaters, List<StateFactory> stateFactories, List<QueryFunction> queryFunctions, List<String> drpcQueryFunctionNames, ReducerAggregator drpcPartitionResultAggregator, StateFactory metaStateFactory, StateUpdater metaStateUpdater, QueryFunction metaQueryFunction) {
    assert spout != null;
    assert parallelism != 0;
    assert stateFactories != null;
    assert queryFunctions != null;
    assert drpcPartitionResultAggregator != null;
    assert stateUpdaters != null;
    assert drpcQueryFunctionNames != null;
    assert metaQueryFunction != null;
    assert metaStateFactory != null;
    assert metaStateUpdater != null;
}
 
Developer: LakkiB, Project: mlstorm, Lines: 13, Source: EnsembleLearnerTopologyBuilder.java

Example 8: StateQueryProcessor

import storm.trident.state.QueryFunction; // import the required package/class
public StateQueryProcessor(String stateId, Fields inputFields, QueryFunction function) {
    _stateId = stateId;
    _function = function;
    _inputFields = inputFields;
}
 
Developer: zhangjunfang, Project: jstorm-0.9.6.3-, Lines: 6, Source: StateQueryProcessor.java

Example 9: stateQuery

import storm.trident.state.QueryFunction; // import the required package/class
public Stream stateQuery(TridentState state, Fields inputFields, QueryFunction function, Fields functionFields) {
    return _stream.partitionBy(_groupFields).stateQuery(state, inputFields, function, functionFields);
}
 
Developer: kkllwww007, Project: jstrom, Lines: 4, Source: GroupedStream.java

Example 10: buildTopology

import storm.trident.state.QueryFunction; // import the required package/class
/**
 * @param spout                   an MlStorm spout that emits feature vectors
 * @param fieldTemplate           a FieldTemplate describing the key and feature-vector fields
 * @param parallelism             parallelism hint applied to the feature stream and the persisted state
 * @param stateUpdater            updater that trains the Weka classifier/clusterer from incoming feature vectors
 * @param stateFactory            factory that creates the Weka model state
 * @param queryFunction           DRPC query that reads the per-partition model results
 * @param parameterUpdateFunction optional DRPC query that updates model parameters (e.g. k for k-means); may be null
 * @param drpcFunctionName        name of the DRPC function used to query the model
 * @param drpcUpdateFunction      name of the DRPC function used to push parameter updates
 * @return                        the assembled StormTopology
 */
protected static StormTopology buildTopology(final MlStormSpout spout,
                                             final FieldTemplate fieldTemplate,
                                             final int parallelism,
                                             final StateUpdater stateUpdater,
                                             final StateFactory stateFactory,
                                             final QueryFunction queryFunction,
                                             final QueryFunction parameterUpdateFunction,
                                             final String drpcFunctionName,
                                             final String drpcUpdateFunction) {
    final TridentTopology topology = new TridentTopology();
    final Stream featuresStream = topology.newStream("featureVectors", spout);

    /**
     * Stream the feature vectors using the given spout.
     * Use the feature vectors to update a weka classifier/clusterer
     */
    TridentState state =
            featuresStream
                    .broadcast()
                    .parallelismHint(parallelism)
                    .partitionPersist(stateFactory, new Fields(fieldTemplate.getKeyField(), fieldTemplate.getFeatureVectorField()), stateUpdater)
                    .parallelismHint(parallelism);

    // This queries the partition for partitionId and cluster distribution.
    topology.newDRPCStream(drpcFunctionName)
            .broadcast()
            .stateQuery(state, new Fields(FieldTemplate.FieldConstants.ARGS), queryFunction, new Fields(FieldTemplate.FieldConstants.PARTITION, FieldTemplate.FieldConstants.RESULT))
            .toStream()
            .project(new Fields(FieldTemplate.FieldConstants.RESULT))
    ;

    /*
     * The human feedback controller can look at the cluster distributions and later update the parameters (k for k-means)
     * via a DRPC query of the form <partitionId, newK>, e.g. args = "1, 20". The partitionId is the value returned by the previous query.
     *
     * Notice that this function is generic, so one could inject any parameter update function.
     */

    if (parameterUpdateFunction != null) {
        topology.newDRPCStream(drpcUpdateFunction)
                .broadcast()
                .stateQuery(state, new Fields(FieldTemplate.FieldConstants.ARGS), parameterUpdateFunction, new Fields(FieldTemplate.FieldConstants.RESULT))
                .each(new Fields(FieldTemplate.FieldConstants.RESULT), new Printer())
        ;
    }
    return topology.build();
}
 
Developer: LakkiB, Project: mlstorm, Lines: 60, Source: WekaBaseLearningTopology.java
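
The DRPC functions registered by buildTopology are meant to be driven from an external client, for example the human feedback controller mentioned in the comments above. A minimal sketch of such a client, assuming the pre-1.0 backtype.storm.utils.DRPCClient constructor and the "kmeans"/"kUpdate" function names from Example 2, might look like this; the host name is a placeholder.

import backtype.storm.utils.DRPCClient;

public class ParameterUpdateClientSketch {
    public static void main(String[] args) throws Exception {
        // Host and port are placeholders; 3772 is Storm's default DRPC port.
        DRPCClient client = new DRPCClient("drpc-host", 3772);
        // Query each partition for its cluster distribution.
        System.out.println(client.execute("kmeans", "query"));
        // Ask partition 1 to update k to 20, matching the <partitionId, newK> format described above.
        System.out.println(client.execute("kUpdate", "1, 20"));
    }
}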

Example 11: main

import storm.trident.state.QueryFunction; // import the required package/class
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    if (args.length < 5) {
        System.err.println(" Where are all the arguments? -- use args -- folder numWorkers windowSize k parallelism");
        return;
    }

    /* The fields our spout is going to emit. These field names are used by the State updaters, so edit with caution */
    final String drpcFunctionName = "ClassifierEnsemble";
    final FieldTemplate template = new MlStormFieldTemplate();

    final int numWorkers = Integer.valueOf(args[1]);
    final int windowSize = Integer.valueOf(args[2]);
    final int parallelism = Integer.valueOf(args[3]);

    final StateUpdater stateUpdater = new BinaryClassifierStateUpdater.BinaryMetaClassifierStateUpdater(template);
    /* All the weak learners and meta learner are batch (window) learners */
    final StateFactory metaFactory = new BinaryClassifierFactory(WekaClassificationAlgorithms.svm.name(), windowSize, template, null /* additional options to this weka algorithm */);
    final QueryFunction metaQueryFunction = new BinaryClassifierQuery.MetaQuery();
    final ReducerAggregator drpcPartitionResultAggregator = new EnsembleLabelDistributionPairAggregator();
    final Aggregator metaFeatureVectorBuilder = new MetaFeatureVectorBuilder();
    final QueryFunction<MlStormWekaState, Map.Entry<Integer, double[]>> queryFunction = new BinaryClassifierQuery();

    final List<StateUpdater> stateUpdaters = new ArrayList<StateUpdater>();
    final List<StateFactory> factories = new ArrayList<StateFactory>();
    final List<QueryFunction> queryFunctions = new ArrayList<QueryFunction>();
    final List<String> queryFunctionNames = new ArrayList<String>();

    for (WekaClassificationAlgorithms alg : WekaClassificationAlgorithms.values()) {
        factories.add(new BinaryClassifierFactory(alg.name(), windowSize, template, null));
        stateUpdaters.add(stateUpdater);
        queryFunctions.add(queryFunction);
        queryFunctionNames.add(drpcFunctionName);
    }

    final MlStormSpout features = new AustralianElectricityPricingSpout(args[0], template);

    /*
    *  This is where we actually build our concrete topology
    *  Take a look at the utils.Base class for detailed description of the arguments and the topology construction details
    */

    final StormTopology stormTopology = buildTopology(features, parallelism, template, stateUpdaters, factories,
            queryFunctions, queryFunctionNames, drpcPartitionResultAggregator, metaFactory, stateUpdater, metaQueryFunction, metaFeatureVectorBuilder);

    if (numWorkers == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(drpcFunctionName, MlStormConfig.getEnsembleMlStormConfig(numWorkers), stormTopology);
    } else {
        StormSubmitter.submitTopology(drpcFunctionName, MlStormConfig.getEnsembleMlStormConfig(numWorkers), stormTopology);
    }
}
 
Developer: LakkiB, Project: mlstorm, Lines: 52, Source: EnsembleClassifierTopology.java

Example 12: main

import storm.trident.state.QueryFunction; // import the required package/class
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    if (args.length < 5) {
        System.err.println(" Where are all the arguments? -- use args -- trainingFile numWorkers windowSize k parallelism");
        return;
    }

    final int numWorkers = Integer.valueOf(args[1]);
    final int windowSize = Integer.valueOf(args[2]);
    final int parallelism = Integer.valueOf(args[3]);


    final String drpcFunctionName = "OnlineClassifierEnsemble";
    final FieldTemplate template = new MlStormFieldTemplate();
    final StateUpdater stateUpdater = new BinaryClassifierStateUpdater.BinaryMetaClassifierStateUpdater(template);
    /* All the weak learners and meta learner are Updateable/online learners */
    final StateFactory metaFactory = new BinaryClassifierFactory.OnlineBinaryClassifierFactory(WekaOnlineClassificationAlgorithms.onlineDecisionTree.name(), windowSize, null /* additional options to this weka algorithm */);
    final QueryFunction metaQueryFunction = new BinaryClassifierQuery.MetaQuery();
    final Aggregator metaFeatureVectorBuilder = new MetaFeatureVectorBuilder();
    final ReducerAggregator drpcPartitionResultAggregator = new EnsembleLabelDistributionPairAggregator();
    final QueryFunction<MlStormWekaState, Map.Entry<Integer, double[]>> queryFunction = new BinaryClassifierQuery();

    final List<StateUpdater> stateUpdaters = new ArrayList<StateUpdater>();
    final List<StateFactory> stateFactories = new ArrayList<StateFactory>();
    final List<QueryFunction> queryFunctions = new ArrayList<QueryFunction>();
    final List<String> queryFunctionNames = new ArrayList<String>();

    // Build an ensemble of all the classifiers available in WekaOnlineClassificationAlgorithms
    for (WekaOnlineClassificationAlgorithms alg : WekaOnlineClassificationAlgorithms.values()) {
        stateFactories.add(new BinaryClassifierFactory.OnlineBinaryClassifierFactory(alg.name(), windowSize, null));
        stateUpdaters.add(stateUpdater);
        queryFunctions.add(queryFunction);
        queryFunctionNames.add(drpcFunctionName);
    }

    final MlStormSpout features = new AustralianElectricityPricingSpout(args[0], template);
    /*
    *  This is where we actually build our concrete topology
    *  Take a look at the utils.Base class for detailed description of the arguments and the topology construction details
    */
    final StormTopology stormTopology = EnsembleLearnerTopologyBuilder.buildTopology(features, parallelism, template, stateUpdaters, stateFactories, queryFunctions, queryFunctionNames, drpcPartitionResultAggregator, metaFactory, stateUpdater, metaQueryFunction, metaFeatureVectorBuilder);

    if (numWorkers == 1) {
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(drpcFunctionName, MlStormConfig.getDefaultMlStormConfig(numWorkers), stormTopology);
    } else {
        StormSubmitter.submitTopology(drpcFunctionName, MlStormConfig.getDefaultMlStormConfig(numWorkers), stormTopology);
    }
}
 
Developer: LakkiB, Project: mlstorm, Lines: 49, Source: OnlineEnsembleClassifierTopology.java

Example 13: main

import storm.trident.state.QueryFunction; // import the required package/class
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    if (args.length < 5) {
        Logger.getAnonymousLogger().log(Level.SEVERE, " Where are all the arguments? -- use args -- bpti_folder numWorkers windowSize k parallelism");
        System.exit(-1);
    }

    final FieldTemplate template = new MlStormFieldTemplate();

    final int numWorkers = Integer.valueOf(args[1]);
    final int windowSize = Integer.valueOf(args[2]);
    final int k = Integer.valueOf(args[3]);
    final int parallelism = Integer.valueOf(args[4]);

    final StateUpdater stateUpdater = new ClustererUpdater(template);
    final QueryFunction<ClustererState, Map.Entry<Integer, double[]>> queryFunction = new MlStormClustererQuery.ClustererQuery();

    final List<StateUpdater> stateUpdaters = new ArrayList<StateUpdater>();
    final List<StateFactory> factories = new ArrayList<StateFactory>();
    final List<QueryFunction> queryFunctions = new ArrayList<QueryFunction>();
    final List<String> queryFunctionNames = new ArrayList<String>();

    final ReducerAggregator drpcPartitionResultAggregator = new EnsembleLabelDistributionPairAggregator();
    final StateUpdater metaStateUpdater = new ClustererUpdater(template);
    final StateFactory metaStateFactory = new MlStormClustererFactory.ClustererFactory(k, windowSize, WekaClusterers.densityBased.name(), false, template, null /* additional options to this weka algorithm */);
    final QueryFunction metaQueryFunction = new MlStormClustererQuery.MetaClustererQuery();
    final Aggregator metaFeatureVectorBuilder = new MetaFeatureVectorBuilder();

    for (WekaClusterers alg : WekaClusterers.values()) {
        factories.add(new MlStormClustererFactory.ClustererFactory(k, windowSize, alg.name(), true, template, null));
        stateUpdaters.add(stateUpdater);
        queryFunctions.add(queryFunction);
        queryFunctionNames.add(FieldTemplate.FieldConstants.CONSENSUS.CONSENSUS_DRPC);
    }

    final MlStormSpout spout = new MddbFeatureExtractorSpout(args[0], template);
    /*
    *  This is where we actually build our concrete topology
    *  Take a look at the utils.Base class for detailed description of the arguments and the topology construction details
    */
    final StormTopology stormTopology = buildTopology(spout, parallelism, template, stateUpdaters, factories,
            queryFunctions, queryFunctionNames, drpcPartitionResultAggregator, metaStateFactory, metaStateUpdater, metaQueryFunction, metaFeatureVectorBuilder);

    if (numWorkers == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(FieldTemplate.FieldConstants.CONSENSUS.CONSENSUS_DRPC, MlStormConfig.getMddbStormConfig(numWorkers), stormTopology);
    } else {
        StormSubmitter.submitTopology(FieldTemplate.FieldConstants.CONSENSUS.CONSENSUS_DRPC, MlStormConfig.getMddbStormConfig(numWorkers), stormTopology);
    }
}
 
Developer: LakkiB, Project: mlstorm, Lines: 50, Source: EnsembleClustererTopology.java


Note: The storm.trident.state.QueryFunction examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.