

Java SADomain.setModel Method Code Examples

This article collects typical usage examples of the Java method burlap.mdp.singleagent.SADomain.setModel, drawn from open-source projects. If you are unsure what SADomain.setModel does, how to call it, or want to see it used in context, the curated examples below should help. You can also explore further usage examples of the enclosing class, burlap.mdp.singleagent.SADomain.


Eight code examples of the SADomain.setModel method are shown below, ordered by popularity by default.
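
All eight examples follow the same pattern: construct an SADomain, register action types, and attach a model via setModel. The minimal sketch below distills that pattern. It is a hedged illustration, not taken from any of the projects cited here: the class name MinimalSetModelSketch, the "noop" action, the identity state model, and the constant reward/terminal lambdas are placeholder assumptions, and the package paths assume the BURLAP 3 layout.

import burlap.mdp.core.TerminalFunction;
import burlap.mdp.core.action.UniversalActionType;
import burlap.mdp.singleagent.SADomain;
import burlap.mdp.singleagent.model.FactoredModel;
import burlap.mdp.singleagent.model.RewardFunction;
import burlap.mdp.singleagent.model.statemodel.SampleStateModel;

public class MinimalSetModelSketch {

    public SADomain generateDomain() {
        SADomain domain = new SADomain();

        // Register at least one action type so the domain is usable.
        domain.addActionTypes(new UniversalActionType("noop"));

        // Placeholder dynamics: every action leaves the state unchanged.
        SampleStateModel smodel = (state, action) -> state;

        // Placeholder reward and termination, written as lambdas as in Example 2.
        RewardFunction rf = (s, a, sprime) -> 0.;
        TerminalFunction tf = s -> false;

        // The step all eight examples share: bundle the state model, reward
        // function, and terminal function into a FactoredModel and attach it.
        domain.setModel(new FactoredModel(smodel, rf, tf));

        return domain;
    }
}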

Example 1: generateDomain

import burlap.mdp.singleagent.SADomain; // import the package/class this method depends on
@Override
public SADomain generateDomain() {

    SADomain domain = new SADomain();


    domain.addActionTypes(
            new UniversalActionType(ACTION_NORTH),
            new UniversalActionType(ACTION_SOUTH),
            new UniversalActionType(ACTION_EAST),
            new UniversalActionType(ACTION_WEST));

    GridWorldStateModel smodel = new GridWorldStateModel();
    RewardFunction rf = new ExampleRF();
    TerminalFunction tf = new ExampleTF();

    domain.setModel(new FactoredModel(smodel, rf, tf));

    return domain;
}
 
Developer: honzaMaly, Project: kusanagi, Lines: 21, Source: ExampleGridWorld.java

Example 2: generateDomain

import burlap.mdp.singleagent.SADomain; // import the package/class this method depends on
@Override
public SADomain generateDomain() {
    SADomain domain = new SADomain();
    domain.addActionTypes(
            new UniversalActionType(NextActionEnumerations.YES.name()),
            new UniversalActionType(NextActionEnumerations.NO.name()));

    //unknown reward
    RewardFunction rf = (state, action, state1) -> defaultReward;

    //no terminal state
    TerminalFunction tf = state -> false;

    domain.setModel(new FactoredModel(model, rf, tf));

    return domain;
}
 
Developer: honzaMaly, Project: kusanagi, Lines: 18, Source: DecisionDomainGenerator.java
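
As the comments indicate, RewardFunction and TerminalFunction are single-abstract-method interfaces, so this example supplies them as lambdas: a placeholder constant reward for every transition and a terminal function that never fires.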

Example 3: generateDomain

import burlap.mdp.singleagent.SADomain; // import the package/class this method depends on
@Override
public SADomain generateDomain() {

	SADomain domain = new SADomain();


	domain.addActionTypes(
			new UniversalActionType(ACTION_NORTH),
			new UniversalActionType(ACTION_SOUTH),
			new UniversalActionType(ACTION_EAST),
			new UniversalActionType(ACTION_WEST));

	GridWorldStateModel smodel = new GridWorldStateModel();
	RewardFunction rf = new ExampleRF(this.goalx, this.goaly);
	TerminalFunction tf = new ExampleTF(this.goalx, this.goaly);

	domain.setModel(new FactoredModel(smodel, rf, tf));

	return domain;
}
 
Developer: jmacglashan, Project: burlap_examples, Lines: 21, Source: ExampleGridWorld.java
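
For context, the burlap_examples tutorial that this class comes from pairs the generated domain with a companion state class and a simulated environment. The sketch below shows that typical usage; setGoalLocation and EXGridState are assumed from that tutorial and do not appear in the code above, so treat them as hedged assumptions.

import burlap.mdp.singleagent.SADomain;
import burlap.mdp.singleagent.environment.SimulatedEnvironment;

public class GridWorldUsageSketch {
    public static void main(String[] args) {
        // Assumed from the burlap_examples tutorial (not shown above):
        // setGoalLocation on ExampleGridWorld, and EXGridState(x, y) as
        // its companion state class.
        ExampleGridWorld gen = new ExampleGridWorld();
        gen.setGoalLocation(10, 10);
        SADomain domain = gen.generateDomain();

        // The environment can now be handed to any BURLAP learning agent.
        SimulatedEnvironment env =
                new SimulatedEnvironment(domain, new EXGridState(0, 0));
    }
}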

Example 4: generateDomain

import burlap.mdp.singleagent.SADomain; // import the package/class this method depends on
@Override
public SADomain generateDomain() {
	
	SADomain domain = new SADomain();


	IPPhysicsParams cphys = this.physParams.copy();
	IPModel smodel = new IPModel(cphys);

	RewardFunction rf = this.rf;
	TerminalFunction tf = this.tf;

	if(rf == null){
		rf = new InvertedPendulumRewardFunction();
	}
	if(tf == null){
		tf = new InvertedPendulumTerminalFunction();
	}

	FactoredModel model = new FactoredModel(smodel, rf, tf);
	domain.setModel(model);

	domain.addActionType(new UniversalActionType(ACTION_LEFT))
			.addActionType(new UniversalActionType(ACTION_RIGHT))
			.addActionType(new UniversalActionType(ACTION_NO_FORCE));

	
	return domain;
}
 
Developer: jmacglashan, Project: burlap, Lines: 30, Source: InvertedPendulum.java
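
Because generateDomain only installs the default reward and terminal functions when rf and tf are null, a caller can preset them before generating the domain. The subclass sketch below illustrates the idea; it assumes the rf and tf fields are visible to subclasses, which the null-checks above suggest but do not prove.

import burlap.domain.singleagent.cartpole.InvertedPendulum;

public class CustomPendulum extends InvertedPendulum {
    public CustomPendulum() {
        // Assumption: rf and tf are assignable from a subclass. A constant
        // step cost replaces the default InvertedPendulumRewardFunction,
        // and the episode never terminates.
        // Usage: SADomain domain = new CustomPendulum().generateDomain();
        this.rf = (s, a, sprime) -> -1.;
        this.tf = s -> false;
    }
}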

Example 5: generateDomain

import burlap.mdp.singleagent.SADomain; // import the package/class this method depends on
@Override
public SADomain generateDomain() {
	
	SADomain domain = new SADomain();


	MCModel smodel = new MCModel(this.physParams.copy());
	if(tf == null){
		tf = new ClassicMCTF(physParams.xmax);
	}
	if(rf == null){
		rf = new GoalBasedRF(tf, 100, 0);
	}

	FactoredModel model = new FactoredModel(smodel, rf, tf);

	domain.setModel(model);

	domain.addActionType(new UniversalActionType(ACTION_FORWARD))
			.addActionType(new UniversalActionType(ACTION_BACKWARDS))
			.addActionType(new UniversalActionType(ACTION_COAST));

	
	
	return domain;
}
 
Developer: jmacglashan, Project: burlap, Lines: 27, Source: MountainCar.java
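
Here GoalBasedRF is built from the terminal function: transitions into a state that ClassicMCTF marks terminal (the car reaching xmax) earn a reward of 100, and every other transition earns 0, so the agent is rewarded only for escaping the valley.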

Example 6: generateDomain

import burlap.mdp.singleagent.SADomain; // import the package/class this method depends on
@Override
public SADomain generateDomain() {
	
	SADomain domain = new SADomain();

	Map<Integer, Map<Integer, Set<NodeTransitionProbability>>> ctd = this.copyTransitionDynamics();

	GraphStateModel stateModel = new GraphStateModel(ctd);
	FactoredModel model = new FactoredModel(stateModel, rf, tf);

	domain.setModel(model);

	for(int i = 0; i < this.maxActions; i++){
		domain.addActionType(new GraphActionType(i, ctd));
	}
	
	
	return domain;
}
 
Developer: jmacglashan, Project: burlap, Lines: 20, Source: GraphDefinedDomain.java
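
Note that the loop registers one GraphActionType per action index up to maxActions, and each is built over the same copied transition dynamics (ctd) that back the state model, so the action definitions and the model stay consistent.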

Example 7: generateDomain

import burlap.mdp.singleagent.SADomain; // import the package/class this method depends on
@Override
public SADomain generateDomain() {
	
	SADomain domain = new SADomain();


	for(ActionType mdpActionType : this.podomain.getActionTypes()){
		domain.addActionType(mdpActionType);
	}

	domain.setModel(new BeliefModel(podomain));
	
	return domain;
}
 
Developer: jmacglashan, Project: burlap, Lines: 15, Source: BeliefMDPGenerator.java
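
Unlike the other examples, this generator does not build a FactoredModel: it copies the action types of the underlying PODomain unchanged and installs a BeliefModel, which reinterprets transitions and rewards over belief states rather than the hidden states of the original POMDP.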

Example 8: generateDomain

import burlap.mdp.singleagent.SADomain; // import the package/class this method depends on
@Override
public SADomain generateDomain() {
	
	SADomain domain = new SADomain();

	CPPhysicsParams cphys = this.physParams.copy();

	RewardFunction rf = this.rf;
	TerminalFunction tf = this.tf;

	if(rf == null){
		rf = new CartPoleRewardFunction();
	}
	if(tf == null){
		tf = new CartPoleTerminalFunction();
	}

	FullStateModel smodel = cphys.useCorrectModel ? new CPCorrectModel(cphys) : new CPClassicModel(cphys);

	FactoredModel model = new FactoredModel(smodel, rf, tf);
	domain.setModel(model);


	domain.addActionType(new UniversalActionType(ACTION_LEFT))
			.addActionType(new UniversalActionType(ACTION_RIGHT));


	return domain;
}
 
Developer: jmacglashan, Project: burlap, Lines: 30, Source: CartPoleDomain.java
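
The useCorrectModel flag in the physics parameters chooses between two state models: CPCorrectModel, BURLAP's corrected cart-pole dynamics, and CPClassicModel, the classic approximation used in the original cart-pole literature.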


Note: The burlap.mdp.singleagent.SADomain.setModel examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from projects contributed by open-source developers, and copyright in the source code remains with the original authors; consult each project's License before distributing or reusing the code, and do not republish without permission.