本文整理汇总了Java中burlap.mdp.singleagent.SADomain.setModel方法的典型用法代码示例。如果您正苦于以下问题:Java SADomain.setModel方法的具体用法?Java SADomain.setModel怎么用?Java SADomain.setModel使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类burlap.mdp.singleagent.SADomain
的用法示例。
在下文中一共展示了SADomain.setModel方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: generateDomain
import burlap.mdp.singleagent.SADomain; //导入方法依赖的package包/类
@Override
public SADomain generateDomain() {
    // Assemble the single-agent grid-world domain: four movement actions plus
    // a factored model combining transition dynamics, reward, and termination.
    SADomain domain = new SADomain();

    domain.addActionTypes(
            new UniversalActionType(ACTION_NORTH),
            new UniversalActionType(ACTION_SOUTH),
            new UniversalActionType(ACTION_EAST),
            new UniversalActionType(ACTION_WEST));

    // State transitions come from the grid-world model; reward and terminal
    // functions are the example defaults.
    GridWorldStateModel transitionModel = new GridWorldStateModel();
    domain.setModel(new FactoredModel(transitionModel, new ExampleRF(), new ExampleTF()));

    return domain;
}
示例2: generateDomain
import burlap.mdp.singleagent.SADomain; //导入方法依赖的package包/类
@Override
public SADomain generateDomain() {
    // Two-action (YES/NO) domain backed by the externally supplied state model.
    SADomain domain = new SADomain();
    domain.addActionTypes(
            new UniversalActionType(NextActionEnumerations.YES.name()),
            new UniversalActionType(NextActionEnumerations.NO.name()));

    // The true reward is unknown: every transition yields the default reward.
    RewardFunction reward = (s, a, sPrime) -> defaultReward;
    // No state is terminal.
    TerminalFunction terminal = s -> false;

    domain.setModel(new FactoredModel(model, reward, terminal));
    return domain;
}
示例3: generateDomain
import burlap.mdp.singleagent.SADomain; //导入方法依赖的package包/类
@Override
public SADomain generateDomain() {
    // Grid-world domain whose reward and terminal functions are parameterized
    // by the configured goal coordinates.
    SADomain domain = new SADomain();

    domain.addActionTypes(
            new UniversalActionType(ACTION_NORTH),
            new UniversalActionType(ACTION_SOUTH),
            new UniversalActionType(ACTION_EAST),
            new UniversalActionType(ACTION_WEST));

    // Bundle transition dynamics with the goal-aware reward/terminal functions.
    GridWorldStateModel transitionModel = new GridWorldStateModel();
    domain.setModel(new FactoredModel(
            transitionModel,
            new ExampleRF(this.goalx, this.goaly),
            new ExampleTF(this.goalx, this.goaly)));

    return domain;
}
示例4: generateDomain
import burlap.mdp.singleagent.SADomain; //导入方法依赖的package包/类
@Override
public SADomain generateDomain() {
    // Inverted-pendulum domain built from a defensive copy of the physics
    // parameters so later mutation of this generator does not affect the domain.
    SADomain domain = new SADomain();
    IPModel stateModel = new IPModel(this.physParams.copy());

    // Use the configured reward/terminal functions, falling back to the
    // pendulum defaults when none were provided.
    RewardFunction reward = (this.rf != null) ? this.rf : new InvertedPendulumRewardFunction();
    TerminalFunction terminal = (this.tf != null) ? this.tf : new InvertedPendulumTerminalFunction();

    domain.setModel(new FactoredModel(stateModel, reward, terminal));

    domain.addActionType(new UniversalActionType(ACTION_LEFT))
          .addActionType(new UniversalActionType(ACTION_RIGHT))
          .addActionType(new UniversalActionType(ACTION_NO_FORCE));

    return domain;
}
示例5: generateDomain
import burlap.mdp.singleagent.SADomain; //导入方法依赖的package包/类
@Override
public SADomain generateDomain() {
    // Mountain-car domain. NOTE: this method lazily initializes the stored
    // tf/rf fields when they are null — that side effect is intentional and
    // preserved from the original.
    SADomain domain = new SADomain();
    MCModel stateModel = new MCModel(this.physParams.copy());

    if (this.tf == null) {
        // Default terminal function keyed on the configured x-position bound.
        this.tf = new ClassicMCTF(this.physParams.xmax);
    }
    if (this.rf == null) {
        // Goal-based reward built from the terminal function (parameters 100, 0).
        this.rf = new GoalBasedRF(this.tf, 100, 0);
    }

    domain.setModel(new FactoredModel(stateModel, this.rf, this.tf));

    domain.addActionType(new UniversalActionType(ACTION_FORWARD))
          .addActionType(new UniversalActionType(ACTION_BACKWARDS))
          .addActionType(new UniversalActionType(ACTION_COAST));

    return domain;
}
示例6: generateDomain
import burlap.mdp.singleagent.SADomain; //导入方法依赖的package包/类
@Override
public SADomain generateDomain() {
    // Graph-world domain driven by a copied transition-dynamics table so the
    // generated domain is decoupled from this generator's internal state.
    SADomain domain = new SADomain();
    Map<Integer, Map<Integer, Set<NodeTransitionProbability>>> dynamics =
            this.copyTransitionDynamics();

    domain.setModel(new FactoredModel(new GraphStateModel(dynamics), this.rf, this.tf));

    // Register one action type per action index, each sharing the same dynamics.
    for (int action = 0; action < this.maxActions; action++) {
        domain.addActionType(new GraphActionType(action, dynamics));
    }

    return domain;
}
示例7: generateDomain
import burlap.mdp.singleagent.SADomain; //导入方法依赖的package包/类
@Override
public SADomain generateDomain() {
    // Belief-space domain: reuses the underlying POMDP's action types verbatim
    // and wraps its dynamics in a belief model.
    SADomain domain = new SADomain();
    for (ActionType actionType : this.podomain.getActionTypes()) {
        domain.addActionType(actionType);
    }
    domain.setModel(new BeliefModel(this.podomain));
    return domain;
}
示例8: generateDomain
import burlap.mdp.singleagent.SADomain; //导入方法依赖的package包/类
@Override
public SADomain generateDomain() {
    // Cart-pole domain built from a defensive copy of the physics parameters.
    SADomain domain = new SADomain();
    CPPhysicsParams cphys = this.physParams.copy();

    // Fall back to the cart-pole defaults when no reward/terminal function
    // was configured on this generator.
    RewardFunction rf = this.rf;
    TerminalFunction tf = this.tf;
    if (rf == null) {
        rf = new CartPoleRewardFunction();
    }
    if (tf == null) {
        tf = new CartPoleTerminalFunction();
    }

    // BUG FIX: the original ternary constructed CPClassicModel in BOTH branches,
    // silently ignoring the useCorrectModel flag. Select the corrected-physics
    // model when the flag is set, matching BURLAP's CartPoleDomain.
    FullStateModel smodel = cphys.useCorrectModel
            ? new CPCorrectModel(cphys)
            : new CPClassicModel(cphys);
    domain.setModel(new FactoredModel(smodel, rf, tf));

    domain.addActionType(new UniversalActionType(ACTION_LEFT))
          .addActionType(new UniversalActionType(ACTION_RIGHT));

    return domain;
}