本文整理汇总了Java中burlap.mdp.core.state.State类的典型用法代码示例。如果您正苦于以下问题:Java State类的具体用法?Java State怎么用?Java State使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
State类属于burlap.mdp.core.state包,在下文中一共展示了State类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: sample
import burlap.mdp.core.state.State; //导入依赖的package包/类
@Override
public DecisionState sample(State state, Action action) {
    List<StateTransitionProb> reachableStates;
    try {
        reachableStates = stateTransitions(state, action);
    } catch (NullPointerException e) {
        // No transition model entry for this state/action pair: treat it as an
        // absorbing dead-end state with probability 1.
        reachableStates = Collections.singletonList(new StateTransitionProb(deadEnd, 1.0));
    }
    // Sample from the categorical distribution by walking the cumulative sum.
    // (No shuffle needed: CDF sampling is independent of outcome order.)
    double randomThreshold = Math.random();
    double sumOfProbability = 0.;
    for (StateTransitionProb reachableState : reachableStates) {
        sumOfProbability += reachableState.p;
        if (randomThreshold <= sumOfProbability) {
            return ((DecisionState) reachableState.s).copy();
        }
    }
    // Floating-point rounding can leave the cumulative sum fractionally below
    // 1.0 even for a well-formed distribution; fall back to the last outcome
    // instead of failing the sample.
    if (!reachableStates.isEmpty()) {
        return ((DecisionState) reachableStates.get(reachableStates.size() - 1).s).copy();
    }
    throw new IllegalStateException("No state found!");
}
示例2: basicState
import burlap.mdp.core.state.State; //导入依赖的package包/类
/**
* Creates a grid world state with the agent in (0,0) and various different grid cell types scattered about.
* @return a grid world state with the agent in (0,0) and various different grid cell types scattered about.
*/
/**
 * Creates a grid world state with the agent in (0,0) and various different grid cell types scattered about.
 * @return a grid world state with the agent in (0,0) and various different grid cell types scattered about.
 */
protected State basicState(){
    // Agent starts in the bottom-left corner. The four corners carry the
    // distinct location types 1-4; the remaining five locations are type 0.
    return new GridWorldState(
            new GridAgent(0, 0),
            new GridLocation(0, 0, 1, "loc0"),
            new GridLocation(0, 4, 2, "loc1"),
            new GridLocation(4, 4, 3, "loc2"),
            new GridLocation(4, 0, 4, "loc3"),
            new GridLocation(1, 0, 0, "loc4"),
            new GridLocation(1, 2, 0, "loc5"),
            new GridLocation(1, 4, 0, "loc6"),
            new GridLocation(3, 1, 0, "loc7"),
            new GridLocation(3, 3, 0, "loc8")
    );
}
示例3: qValue
import burlap.mdp.core.state.State; //导入依赖的package包/类
@Override
public double qValue(State s, Action a) {
    // A terminal state yields no future return by definition.
    if (this.model.terminal(s)) {
        return 0.;
    }
    // Enumerate every stochastic outcome of applying a in s.
    List<TransitionProb> outcomes = ((FullModel) this.model).transitions(s, a);
    // Bellman backup: expected immediate reward plus discounted next-state
    // value, weighted by each outcome's transition probability.
    double expectedReturn = 0.;
    for (TransitionProb outcome : outcomes) {
        double reward = outcome.eo.r;
        double nextStateValue = this.valueFunction.get(this.hashingFactory.hashState(outcome.eo.op));
        expectedReturn += outcome.p * (reward + this.gamma * nextStateValue);
    }
    return expectedReturn;
}
示例4: planFromState
import burlap.mdp.core.state.State; //导入依赖的package包/类
@Override
public GreedyQPolicy planFromState(State initialState) {
    HashableState hashedInitial = this.hashingFactory.hashState(initialState);
    // A value-function entry means planning already covered this state.
    if (this.valueFunction.containsKey(hashedInitial)) {
        return new GreedyQPolicy(this);
    }
    // New state: first enumerate everything reachable from it.
    this.performReachabilityFrom(initialState);
    // Then run synchronous value-iteration sweeps over the known state space,
    // replacing each state's value with its Bellman max-Q backup.
    for (int sweep = 0; sweep < this.numIterations; sweep++) {
        for (HashableState hashed : this.valueFunction.keySet()) {
            this.valueFunction.put(hashed, QProvider.Helper.maxQ(this, hashed.s()));
        }
    }
    return new GreedyQPolicy(this);
}
示例5: qValues
import burlap.mdp.core.state.State; //导入依赖的package包/类
@Override
public List<QValue> qValues(State s) {
    // Look up any cached Q-values under the state's hash.
    HashableState hashed = this.hashingFactory.hashState(s);
    List<QValue> stored = this.qValues.get(hashed);
    if (stored != null) {
        return stored;
    }
    // First visit to this state: build one Q-value per applicable action,
    // seeded from the qinit initialization function, and cache the list.
    List<Action> actions = this.applicableActions(s);
    stored = new ArrayList<QValue>(actions.size());
    for (Action a : actions) {
        stored.add(new QValue(s, a, this.qinit.qValue(s, a)));
    }
    this.qValues.put(hashed, stored);
    return stored;
}
示例6: IPSS
import burlap.mdp.core.state.State; //导入依赖的package包/类
/**
 * Runs sparse-sampling planning on a noise-free inverted pendulum and
 * visualizes one 500-step greedy rollout.
 */
public static void IPSS(){
    InvertedPendulum pendulum = new InvertedPendulum();
    pendulum.physParams.actionNoise = 0.;
    // Reward/terminate once the pole tilts past pi/8 radians.
    RewardFunction rf = new InvertedPendulum.InvertedPendulumRewardFunction(Math.PI/8.);
    TerminalFunction tf = new InvertedPendulum.InvertedPendulumTerminalFunction(Math.PI/8.);
    pendulum.setRf(rf);
    pendulum.setTf(tf);
    SADomain domain = pendulum.generateDomain();
    State initialState = new InvertedPendulumState();
    // Sparse sampling with horizon 10 and 1 sample per state-action.
    SparseSampling planner = new SparseSampling(domain, 1, new SimpleHashableStateFactory(), 10, 1);
    planner.setForgetPreviousPlanResults(true);
    planner.toggleDebugPrinting(false);
    Policy policy = new GreedyQPolicy(planner);
    Episode episode = PolicyUtils.rollout(policy, initialState, domain.getModel(), 500);
    System.out.println("Num steps: " + episode.maxTimeStep());
    Visualizer visualizer = CartPoleVisualizer.getCartPoleVisualizer();
    new EpisodeSequenceVisualizer(visualizer, domain, Arrays.asList(episode));
}
示例7: main
import burlap.mdp.core.state.State; //导入依赖的package包/类
public static void main(String[] args) {
    // Build the classic 11x11 four-rooms grid world with stochastic moves
    // that succeed 80% of the time.
    GridWorldDomain gridWorld = new GridWorldDomain(11, 11);
    gridWorld.setMapToFourRooms();
    gridWorld.setProbSucceedTransitionDynamics(0.8);
    SADomain domain = gridWorld.generateDomain();
    // Agent in the bottom-left corner, goal location in the top-right.
    State initial = new GridWorldState(new GridAgent(0, 0), new GridLocation(10, 10, "loc0"));
    // Launch an interactive explorer driven by the w/a/s/d keys.
    Visualizer visualizer = GridWorldVisualizer.getVisualizer(gridWorld.getMap());
    VisualExplorer explorer = new VisualExplorer(domain, visualizer, initial);
    explorer.addKeyAction("w", GridWorldDomain.ACTION_NORTH, "");
    explorer.addKeyAction("s", GridWorldDomain.ACTION_SOUTH, "");
    explorer.addKeyAction("a", GridWorldDomain.ACTION_WEST, "");
    explorer.addKeyAction("d", GridWorldDomain.ACTION_EAST, "");
    explorer.initGUI();
}
示例8: main
import burlap.mdp.core.state.State; //导入依赖的package包/类
public static void main(String [] args){
    // Example grid world with the goal cell at (10, 10).
    ExampleGridWorld generator = new ExampleGridWorld();
    generator.setGoalLocation(10, 10);
    SADomain domain = generator.generateDomain();
    State initialState = new EXGridState(0, 0);
    // Wrap the domain in a simulated environment and explore it visually
    // with the w/a/s/d movement keys.
    SimulatedEnvironment environment = new SimulatedEnvironment(domain, initialState);
    Visualizer visualizer = generator.getVisualizer();
    VisualExplorer explorer = new VisualExplorer(domain, environment, visualizer);
    explorer.addKeyAction("w", ACTION_NORTH, "");
    explorer.addKeyAction("s", ACTION_SOUTH, "");
    explorer.addKeyAction("d", ACTION_EAST, "");
    explorer.addKeyAction("a", ACTION_WEST, "");
    explorer.initGUI();
}
示例9: validate
import burlap.mdp.core.state.State; //导入依赖的package包/类
/**
 * Validates that the agent in the given state has non-negative coordinates.
 * @param s the OO-state containing a BCAgent object
 * @throws IllegalStateException if any agent coordinate is negative
 */
public static void validate(State s) {
    BCAgent agent = (BCAgent) ((OOState) s).object(HelperNameSpace.CLASS_AGENT);
    if (agent.x < 0) {
        throw new IllegalStateException("Invalid agent x: " + agent.x);
    }
    if (agent.y < 0) {
        throw new IllegalStateException("Invalid agent y: " + agent.y);
    }
    if (agent.z < 0) {
        throw new IllegalStateException("Invalid agent z: " + agent.z);
    }
}
示例10: executeAction
import burlap.mdp.core.state.State; //导入依赖的package包/类
@Override
public EnvironmentOutcome executeAction(Action a) {
    // Observe the environment before acting so the outcome records the
    // true pre-action state.
    State startState = this.currentObservation();
    ActionController ac = this.actionControllerMap.get(a.actionName());
    // The controller reports how long (ms) to wait for the action to settle.
    int delay = ac.executeAction(a);
    if (delay > 0) {
        try {
            Thread.sleep(delay);
        } catch (InterruptedException e) {
            // Restore the interrupt status so callers can observe it; we
            // still report the outcome from whatever state we observe now.
            Thread.currentThread().interrupt();
        }
    }
    State finalState = this.currentObservation();
    this.lastReward = this.rewardFunction.reward(startState, a, finalState);
    return new EnvironmentOutcome(startState, a, finalState, this.lastReward, this.isInTerminalState());
}
示例11: isTerminal
import burlap.mdp.core.state.State; //导入依赖的package包/类
@Override
public boolean isTerminal(State s) {
    OOState os = (OOState) s;
    BCAgent a = (BCAgent) os.object(CLASS_AGENT);
    HelperGeometry.Pose agentPose = HelperGeometry.Pose.fromXyz(a.x, a.y, a.z);
    HelperGeometry.Pose goalPose = getGoalPose(s);
    // getGoalPose returns null when the state contains no gold block; guard
    // against the NPE that goalPose.distance(...) would otherwise throw.
    // With no goal present the episode cannot terminate by reaching it.
    if (goalPose == null) {
        return false;
    }
    // Terminal once the agent is within half a block of the goal pose.
    return goalPose.distance(agentPose) < 0.5;
}
示例12: getGoalPose
import burlap.mdp.core.state.State; //导入依赖的package包/类
/**
 * Find the gold block and return its pose.
 * @param s the state
 * @return the pose of the agent being one unit above the gold block,
 *         or {@code null} if the state contains no gold block.
 */
public static HelperGeometry.Pose getGoalPose(State s) {
    List<ObjectInstance> blocks = ((OOState) s).objectsOfClass(HelperNameSpace.CLASS_BLOCK);
    for (ObjectInstance candidate : blocks) {
        BCBlock block = (BCBlock) candidate;
        // Type id 41 identifies the gold block (see javadoc above).
        if (block.type != 41) {
            continue;
        }
        // Goal pose is one unit above the gold block.
        return HelperGeometry.Pose.fromXyz(block.x, block.y + 1, block.z);
    }
    return null;
}
示例13: allApplicableActions
import burlap.mdp.core.state.State; //导入依赖的package包/类
@Override
public List<Action> allApplicableActions(State s) {
    BCAgent agent = (BCAgent) ((GenericOOState) s).object(CLASS_AGENT);
    List<ObjectInstance> blocks = ((OOState) s).objectsOfClass(HelperNameSpace.CLASS_BLOCK);
    for (ObjectInstance obj : blocks) {
        BCBlock block = (BCBlock) obj;
        if (!HelperActions.blockIsOneOf(Block.getBlockById(block.type), HelperActions.dangerBlocks)) {
            continue;
        }
        // The action is unavailable when a danger block occupies either the
        // agent's own cell or the cell directly beneath it.
        boolean sameColumn = agent.x == block.x && agent.z == block.z;
        boolean atFeet = agent.y - 1 == block.y;
        boolean atHead = agent.y == block.y;
        if (sameColumn && (atFeet || atHead)) {
            return new ArrayList<Action>();
        }
    }
    //otherwise we pass check
    return Arrays.<Action>asList(new SimpleAction(typeName));
}
示例14: processCommand
import burlap.mdp.core.state.State; //导入依赖的package包/类
@Override
public void processCommand(ICommandSender p_71515_1_, String[] p_71515_2_) {
    // Build a Minecraft MDP domain and enumerate every state reachable from
    // the current dungeon state.
    MinecraftDomainGenerator generator = new MinecraftDomainGenerator();
    SADomain domain = generator.generateDomain();
    State current = MinecraftStateGeneratorHelper.getCurrentState(BurlapCraft.currentDungeon);
    List<State> reachable = StateReachability.getReachableStates(current, domain, new SimpleHashableStateFactory());
    // Dump each reachable agent configuration, then the total count.
    for (State reachableState : reachable) {
        BCAgent a = (BCAgent) ((OOState) reachableState).object(CLASS_AGENT);
        System.out.println(a.x + ", " + a.y + ", " + a.z + ", " + a.rdir + ", "+ a.vdir + ", " + a.selected);
    }
    System.out.println(reachable.size());
}
示例15: features
/**
 * Returns a feature vector represented as a double array for a given input state.
 *
 * @param s the input state to turn into a feature vector.
 * @return the feature vector represented as a double array.
 */
@Override
public double[] features(State s) {
    double[] baseFeatures = delegate.features(s);
    // One slot for the intercept plus `order` powers of each base feature.
    double[] polynomial = new double[baseFeatures.length * order + 1];
    polynomial[0] = intercept; // slot 0 always holds the intercept
    int next = 1;
    for (double value : baseFeatures) {
        // Expand each base feature into value^1 .. value^order.
        for (int exponent = 1; exponent <= order; exponent++) {
            polynomial[next++] = Math.pow(value, exponent);
        }
    }
    assert next == polynomial.length;
    return polynomial;
}