本文整理汇总了Golang中go-glue.googlecode.com/hg/rlglue.Observation类的典型用法代码示例。如果您正苦于以下问题:Golang Observation类的具体用法?Golang Observation怎么用?Golang Observation使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Observation类的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: AgentStart
// AgentStart handles the first observation of an episode: it resets the
// planner step counter, records the initial discrete state, runs the
// planner, and returns the chosen action.
func (ra *RmaxFSSSAgent) AgentStart(obs rlglue.Observation) (act rlglue.Action) {
	ra.stepsWithPlanner = 0
	// Map the raw observation onto the agent's discrete state index.
	ra.lastState = discrete.State(ra.task.Obs.Ints.Index(obs.Ints()))
	ra.Plan()
	// Translate the planned action's hashcode back into an rlglue action.
	chosen := ra.GetAction().Hashcode()
	act = rlglue.NewAction(ra.task.Act.Ints.Values(chosen), []float64{}, []byte{})
	ra.lastAction = discrete.Action(ra.task.Act.Ints.Index(act.Ints()))
	return
}
示例2: AgentStart
// AgentStart handles the first observation of an episode: it resets the
// planner step counter, records the initial discrete state, runs the
// planner, and returns the selected action.
//
// Receiver renamed from `this` to `a` — Go convention uses a short
// receiver name, never `this`. `mdp.GetTask()` is hoisted into a local
// so it is evaluated once instead of twice.
func (a *Agent) AgentStart(obs rlglue.Observation) (act rlglue.Action) {
	a.stepsWithPlanner = 0
	task := a.mdp.GetTask()
	a.lastState = discrete.State(task.Obs.Ints.Index(obs.Ints()))
	a.Plan()
	act = rlglue.NewAction(task.Act.Ints.Values(a.GetAction()), []float64{}, []byte{})
	a.lastAction = discrete.Action(task.Act.Ints.Index(act.Ints()))
	return
}
示例3: AgentStep
// AgentStep consumes the reward and next observation, feeds the transition
// to the learned model, re-solves the value function when the model
// changed, and returns the greedy action for the new state.
func (ra *BebAgent) AgentStep(reward float64, obs rlglue.Observation) (act rlglue.Action) {
	next := discrete.State(ra.task.Obs.Ints.Index(obs.Ints()))
	// Re-run value iteration only when this transition changed the model.
	if ra.rmdp.Observe(ra.lastState, ra.lastAction, next, reward) {
		vi.ValueIteration(ra.qt, ra.rmdp, ra.Cfg.Epsilon)
	}
	ra.lastState = next
	greedy := ra.qt.Pi(ra.lastState).Hashcode()
	act = rlglue.NewAction(ra.task.Act.Ints.Values(greedy), []float64{}, []byte{})
	ra.lastAction = discrete.Action(ra.task.Act.Ints.Index(act.Ints()))
	return
}
示例4: AgentStep
// AgentStep builds a (state | outcome | reward) training row from the
// previous and current observations, inserts it into the posterior for the
// last action taken, records the current observation, and returns the next
// action.
//
// Receiver renamed from `this` to `ra` per Go convention.
func (ra *ROARAgent) AgentStep(reward float64, obs rlglue.Observation) rlglue.Action {
	last := matrix.MakeDenseMatrix(ra.LastObs.Doubles(), ra.numFeatures, 1)
	current := matrix.MakeDenseMatrix(obs.Doubles(), ra.numFeatures, 1)
	rm := matrix.MakeDenseMatrix([]float64{reward}, 1, 1)
	// outcome = current - last: the observed state delta. The errors from
	// MinusDense/Augment are ignored because all operands are constructed
	// above with matching dimensions (numFeatures x 1 and 1 x 1).
	outcome, _ := current.MinusDense(last)
	sor, _ := last.Augment(outcome)
	sor, _ = sor.Augment(rm)
	actionIndex := ra.task.Act.Ints.Index(ra.LastAct.Ints())
	ra.rpost[actionIndex].Insert(sor)
	ra.LastObs = obs
	// NOTE(review): LastAct is read but not updated here — presumably
	// GetAction() records it; confirm against that method.
	return ra.GetAction()
}
示例5: AgentStep
// AgentStep records the observed transition, discards cached planning data
// when the model changed, re-plans from the new state, and returns the
// resulting action.
func (ra *RmaxFSSSAgent) AgentStep(reward float64, obs rlglue.Observation) (act rlglue.Action) {
	ra.stepsWithPlanner++
	next := discrete.State(ra.task.Obs.Ints.Index(obs.Ints()))
	// Invalidate planner state whenever the observation changed the model.
	if ra.rmdp.Observe(ra.lastState, ra.lastAction, next, reward) {
		ra.Forget()
	}
	ra.lastState = next
	ra.Plan()
	chosen := ra.GetAction().Hashcode()
	act = rlglue.NewAction(ra.task.Act.Ints.Values(chosen), []float64{}, []byte{})
	ra.lastAction = discrete.Action(ra.task.Act.Ints.Index(act.Ints()))
	return
}
示例6: AgentStart
// AgentStart records the episode's initial discrete state and returns the
// greedy action for it under the current Q-table.
func (ra *BebAgent) AgentStart(obs rlglue.Observation) (act rlglue.Action) {
	ra.lastState = discrete.State(ra.task.Obs.Ints.Index(obs.Ints()))
	greedy := ra.qt.Pi(ra.lastState).Hashcode()
	act = rlglue.NewAction(ra.task.Act.Ints.Values(greedy), []float64{}, []byte{})
	ra.lastAction = discrete.Action(ra.task.Act.Ints.Index(act.Ints()))
	return
}
示例7: getStateIndex
// getStateIndex maps an rlglue observation onto the agent's discrete state
// index via the task's observation space.
//
// Receiver renamed from `this` to `ba` per Go convention.
func (ba *BFS3Agent) getStateIndex(state rlglue.Observation) (index uint64) {
	return ba.task.Obs.Ints.Index(state.Ints())
}
示例8: AgentStep
// AgentStep looks up the greedy action for the observed state under the
// current Q-table and returns it. Unlike the other agents in this file, no
// lastState/lastAction bookkeeping is performed; the reward is unused.
//
// Receiver renamed from `this` to `oa` per Go convention.
// NOTE(review): the action is encoded directly as []int32{int32(a)} rather
// than via task.Act.Ints.Values(...) as the sibling agents do — confirm
// the two encodings agree for this task.
func (oa *OptAgent) AgentStep(reward float64, obs rlglue.Observation) (act rlglue.Action) {
	s := discrete.State(oa.task.Obs.Ints.Index(obs.Ints()))
	a := oa.qt.Pi(s)
	act = rlglue.NewAction([]int32{int32(a)}, []float64{}, []byte{})
	return
}