This article collects typical usage examples of the C# State.getCells method. If you are wondering how to call State.getCells in C#, how it is used, or what working examples look like, the curated code examples below may help. You can also explore further usage examples of the containing class, State.
Four code examples of State.getCells are shown below, sorted by popularity by default.
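Before the examples, here is a minimal sketch of the State&lt;T&gt;/Cell&lt;T&gt; surface they rely on, inferred purely from how the snippets below call it. It is an assumption for readability; the real CS8803AGA.PsychSim classes may differ in representation, return types, and fill order.
using System.Collections.Generic;

// Sketch only: inferred from the calls in the examples below, not the actual library source.
public class Cell<T>
{
    private readonly int x;
    private readonly int y;
    private T content;

    public Cell(int x, int y, T content) { this.x = x; this.y = y; this.content = content; }

    public int getX() { return x; }                      // 1-based column index
    public int getY() { return y; }                      // 1-based row index
    public T getContent() { return content; }
    public void setContent(T content) { this.content = content; }
}

public class State<T>
{
    private readonly Cell<T>[,] cells;

    // Creates an xSize-by-ySize grid with every cell holding defaultContent.
    public State(int xSize, int ySize, T defaultContent)
    {
        cells = new Cell<T>[xSize, ySize];
        for (int x = 1; x <= xSize; x++)
            for (int y = 1; y <= ySize; y++)
                cells[x - 1, y - 1] = new Cell<T>(x, y, defaultContent);
    }

    // Returns every cell of the grid; this is the collection handed to MDP in the examples.
    public ISet<Cell<T>> getCells()
    {
        var set = new HashSet<Cell<T>>();
        foreach (var c in cells) set.Add(c);
        return set;
    }

    // 1-based lookup, matching the getCellAt(1, 1) calls below.
    public Cell<T> getCellAt(int x, int y) { return cells[x - 1, y - 1]; }

    // Fills the grid from a flat array (used by Example 2); the real fill order may differ.
    public void setContent(T[] values)
    {
        int i = 0;
        foreach (var c in cells) c.setContent(values[i++]);
    }
}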
Example 1: createMDP
public static MDP createMDP(State<Double> state)
{
    return new MDP(state.getCells(),
                   state.getCellAt(1, 1), new ActionsFunction(state),
                   new TransitionProbabilityFunction(state),
                   new RewardFunction());
}
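A hypothetical caller might combine this factory with the ValueIteration solver shown in Example 3. The grid size, discount factor, and epsilon below mirror the other examples on this page and are assumptions, not part of the original snippet.
// Sketch only: build a 3x3 state, derive the MDP via createMDP, and solve it
// with the ValueIteration API used in Example 3.
State<Double> state = new State<Double>(3, 3, 0.0);
MDP mdp = createMDP(state);
ValueIteration vi = new ValueIteration(0.99);
Dictionary<Cell<Double>, Double> utilities = vi.valueIteration(mdp, 0.00001);
foreach (var entry in utilities)
{
    Console.WriteLine(entry.Key.getX() + " " + entry.Key.getY() + ": " + entry.Value);
}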
Example 2: MDPPolicyIteration
public MDPPolicyIteration()
{
    S = new State<Double>(3, 3, 0);
    Double[] val = { 1, 0, -1, 2, -1, -2, 3, -2, -3 };
    S.setContent(val);
    mdp = new MDP(S.getCells(), S.getCellAt(1, 1), new ActionsFunction(S),
                  new TransitionProbabilityFunction(S),
                  new RewardFunction());
    double epsilon = 0.00001;
    PolicyEvaluation pev = new PolicyEvaluation(1000, epsilon);
    pi = new PolicyIteration(pev);
    policy = pi.policyIteration(mdp);
}
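As a follow-up, the policy computed by this constructor could be inspected cell by cell. This sketch assumes the object returned by PolicyIteration.policyIteration exposes the same action(cell) accessor and Action field a.i that Example 4 uses on its LookupPolicy; that is an assumption, not something shown in this example.
// Hypothetical inspection of the policy built above; action(cell) and a.i are
// borrowed from Example 4's usage and may not apply to this policy type.
foreach (var cell in S.getCells())
{
    try
    {
        CS8803AGA.PsychSim.State.Action a = policy.action(cell);
        Console.WriteLine(cell.getX() + " " + cell.getY() + ": " + a.i);
    }
    catch (Exception)
    {
        // Cells without an assigned action are skipped.
    }
}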
Example 3: values
public static void values()
{
    State<Double> S = new State<Double>(3, 3, -1.0);
    double r = -100;
    double epsilon = 0.00001;
    S.getCellAt(1, 3).setContent(r);
    S.getCellAt(3, 3).setContent(10.0);
    MDP mdp = new MDP(S.getCells(), S.getCellAt(1, 3), new ActionsFunction(S),
                      new TransitionProbabilityFunction(S),
                      new RewardFunction());
    ValueIteration vi = new ValueIteration(0.99);
    Dictionary<Cell<Double>, Double> map = vi.valueIteration(mdp, epsilon);
    // Print the computed utility of every cell.
    foreach (var c in map)
    {
        Console.Write(c.Key.getX() + " " + c.Key.getY() + ": ");
        Console.WriteLine(c.Value);
        Console.WriteLine();
    }
}
Example 4: values
public static void values()
{
    LookupPolicy policy = null;

    // Base 3x3 state with every cell at 0.
    State<Double> S = new State<Double>(3, 3, 0);
    MDP mdp1 = new MDP(S.getCells(), S.getCellAt(1, 1), new ActionsFunction(S),
                       new TransitionProbabilityFunction(S),
                       new RewardFunction());

    // Second state with per-cell contents set explicitly.
    State<Double> S1 = new State<Double>(3, 3, 0);
    S1.getCellAt(1, 1).setContent(-1);
    S1.getCellAt(1, 2).setContent(0);
    S1.getCellAt(1, 3).setContent(-1);
    S1.getCellAt(2, 1).setContent(-2);
    S1.getCellAt(2, 2).setContent(1);
    S1.getCellAt(2, 3).setContent(-2);
    S1.getCellAt(3, 1).setContent(-3);
    S1.getCellAt(3, 2).setContent(2);
    S1.getCellAt(3, 3).setContent(-3);
    MDP mdp2 = new MDP(S1.getCells(), S1.getCellAt(1, 1), new ActionsFunction(S1),
                       new TransitionProbabilityFunction(S1),
                       new RewardFunction());

    // Third state with a different content layout.
    State<Double> S2 = new State<Double>(3, 3, 0);
    // double r = -100;
    double epsilon = 0.00001;
    S2.getCellAt(1, 1).setContent(1);
    S2.getCellAt(1, 2).setContent(0);
    S2.getCellAt(1, 3).setContent(-1);
    S2.getCellAt(2, 1).setContent(2);
    S2.getCellAt(2, 2).setContent(-1);
    S2.getCellAt(2, 3).setContent(-2);
    S2.getCellAt(3, 1).setContent(3);
    S2.getCellAt(3, 2).setContent(-2);
    S2.getCellAt(3, 3).setContent(-3);
    MDP mdp = new MDP(S2.getCells(), S2.getCellAt(1, 1), new ActionsFunction(S2),
                      new TransitionProbabilityFunction(S2),
                      new RewardFunction());

    PolicyEvaluationRecursive pev = new PolicyEvaluationRecursive(1000, epsilon);
    PolicyIterationRecursive pi = new PolicyIterationRecursive(pev);
    policy = pi.policyIteration(mdp, mdp1, mdp2);

    // Print the chosen action for every cell of the base state.
    foreach (var s in S.getCells())
    {
        try
        {
            CS8803AGA.PsychSim.State.Action a = policy.action(s);
            Console.Write(s.getX() + " " + s.getY() + ": ");
            Console.WriteLine(a.i);
            Console.WriteLine();
        }
        catch (Exception)
        {
            // Cells without an assigned action are skipped.
        }
    }
}