This article collects typical usage examples of the setField method of the Python class pybrain.datasets.SequentialDataSet. If you have been wondering what SequentialDataSet.setField does, how to call it, and where it is useful, the curated example below may help. You can also read further about the containing class, pybrain.datasets.SequentialDataSet.
The following presents 1 code example of the SequentialDataSet.setField method.
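Before the full example, here is a minimal sketch of what setField does: it replaces an entire field of the dataset ('input' or 'target') with a 2-D array whose column count matches that field's dimension. The shapes and values below are illustrative, not taken from the example:

from numpy import array
from pybrain.datasets import SequentialDataSet

ds = SequentialDataSet(2, 1)                # 2-D inputs, 1-D targets
ds.setField('input', array([[0.0, 1.0],
                            [2.0, 3.0]]))   # two rows, one column per input dim
ds.setField('target', array([[0.5],
                             [1.5]]))       # matching rows of 1-D targets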
Example 1: ModelExperiment
# Required import: from pybrain.datasets import SequentialDataSet [as alias]
# Or: from pybrain.datasets.SequentialDataSet import setField [as alias]

# Imports the example relies on (EpisodicExperiment and GaussianProcess come
# from PyBrain; the array helpers come from NumPy):
from numpy import array, c_, r_, reshape, mgrid, floor
from pybrain.datasets import SequentialDataSet
from pybrain.rl.experiments import EpisodicExperiment
from pybrain.auxiliary.gaussprocess import GaussianProcess
class ModelExperiment(EpisodicExperiment):
    """ An experiment that learns a model of its (action, state) pairs
        with a Gaussian Process for each dimension of the state.
    """

    def __init__(self, task, agent):
        EpisodicExperiment.__init__(self, task, agent)

        # create model and training set (action dimension + 1 for time)
        self.modelds = SequentialDataSet(self.task.indim + 1, 1)
        self.model = [GaussianProcess(indim=self.modelds.getDimension('input'),
                                      start=(-10, -10, 0), stop=(10, 10, 300),
                                      step=(5, 5, 100))
                      for _ in range(self.task.outdim)]

        # change hyperparameters for all GPs
        for m in self.model:
            m.hyper = (20, 2.0, 0.01)
            # m.autonoise = True
    def doEpisodes(self, number=1):
        """ Returns the rewards of each step as a list per rollout and
            retrains the model on the agent's history afterwards.
        """
        all_rewards = []

        for dummy in range(number):
            self.stepid = 0
            rewards = []
            # the agent is informed of the start of the episode
            self.agent.newEpisode()
            self.task.reset()
            while not self.task.isFinished():
                r = self._oneInteraction()
                rewards.append(r)
            all_rewards.append(rewards)

        # clear the model dataset; retraining on the empty set resets the GPs
        # before the history is added back in below
        self.modelds.clear()
        print("retrain gp")
        for m in self.model:
            m.trainOnDataset(self.modelds)

        for i in range(self.agent.history.getNumSequences()):
            seq = self.agent.history.getSequence(i)
            state, action, _, _ = seq

            # subsample 5 evenly spaced steps of the rollout and append the
            # time index as an extra input column
            l = len(action)
            index = [int(floor(x)) for x in mgrid[0:l - 1:5j]]
            action = action[index, :]
            inp = c_[action, array([index]).T]
            self.modelds.setField('input', inp)

            # add training data to all Gaussian Processes, one per state dim
            for dim, m in enumerate(self.model):
                tar = state[index, dim]
                self.modelds.setField('target', array([tar]).T)
                m.addDataset(self.modelds)

        # print("updating GPs...")
        # [m._calculate() for m in self.model]
        # print("done.")
        return all_rewards
    def _oneInteraction(self):
        self.stepid += 1
        obs = self.task.getObservation()
        self.agent.integrateObservation(obs)
        action = self.agent.getAction()
        self.task.performAction(action)

        # predict with model
        # modelobs = array([0, 0, 0])
        # time dimension
        # if self.stepid < self.model[0].stop:
        #     steps = self.model[0].step
        #
        #     # linear interpolation between two adjacent gp states
        #     try:
        #         modelobs = [(1.0 - float(self.stepid % steps) / steps) * self.model[i].pred_mean[int(floor(float(self.stepid) / steps))] +
        #                     (float(self.stepid % steps) / steps) * self.model[i].pred_mean[int(ceil(float(self.stepid) / steps))]
        #                     for i in range(self.task.outdim)]
        #     except IndexError:

        # append the time step as an extra input dimension; the reshape to
        # (1, 3) assumes a task with indim == 2
        action = r_[action, array([self.stepid])]
        action = reshape(action, (1, 3))
        modelobs = [self.model[i].testOnArray(action) for i in range(self.task.outdim)]

        # tell the environment about the model observation
        self.task.env.model = [modelobs]

        reward = self.task.getReward()
        self.agent.giveReward(reward)
        return reward
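To put the class in context, a hypothetical driver could look like the sketch below. MyEpisodicTask and MyLearningAgent are placeholder names, since the example does not specify a concrete task or agent; any PyBrain episodic task/agent pair with a 2-D action space (to match the reshape above) would fit:

# Hypothetical usage sketch -- MyEpisodicTask and MyLearningAgent are
# placeholders, not part of the example above.
task = MyEpisodicTask()        # assumed to expose indim == 2 and outdim
agent = MyLearningAgent()
experiment = ModelExperiment(task, agent)
all_rewards = experiment.doEpisodes(number=5)   # 5 rollouts, then GP retraining
print(sum(len(r) for r in all_rewards), "steps collected")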