This page collects typical usage examples of chainer.optimizers.AdaDelta in Python. If you have been wondering what optimizers.AdaDelta does, how it is used, or what working code that uses it looks like, the curated examples below may help. You can also explore the rest of the chainer.optimizers module, where this optimizer class is defined.
Four code examples of optimizers.AdaDelta are shown below, sorted by popularity by default.
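Before the collected examples, here is a minimal, self-contained sketch of the usual pattern: construct the optimizer, attach it to a link with setup(), accumulate gradients, and call update(). The toy model, data, and loss below are illustrative placeholders, not taken from the examples that follow.
import numpy as np

import chainer
import chainer.functions as F
from chainer import links, optimizers

model = links.Linear(2, 3)                      # toy model
optimizer = optimizers.AdaDelta(rho=0.95, eps=1e-6)
optimizer.setup(model)                          # attach the optimizer to the link

x = np.random.rand(4, 2).astype(np.float32)     # illustrative inputs
t = np.random.rand(4, 3).astype(np.float32)     # illustrative targets

model.cleargrads()                              # clear stale gradients
loss = F.mean_squared_error(model(x), t)
loss.backward()                                 # populate gradients
optimizer.update()                              # apply one AdaDelta step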
Example 1: setUp
# Required import: from chainer import optimizers  [as alias]
# Or: from chainer.optimizers import AdaDelta  [as alias]
import os
import tempfile

import chainer
from chainer import link, links, optimizers


def setUp(self):
    # create a temporary file for the test to serialize into
    fd, path = tempfile.mkstemp()
    os.close(fd)
    self.temp_file_path = path

    # build a two-level chain: a child link nested inside a parent link
    child = link.Chain()
    with child.init_scope():
        child.linear = links.Linear(2, 3)
        child.Wc = chainer.Parameter(shape=(2, 3))
    self.parent = link.Chain()
    with self.parent.init_scope():
        self.parent.child = child
        self.parent.Wp = chainer.Parameter(shape=(2, 3))

    # attach an AdaDelta optimizer and run one update to initialize its state
    self.optimizer = optimizers.AdaDelta()
    self.optimizer.setup(self.parent)
    self.parent.cleargrads()
    self.optimizer.update()  # init states
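The temporary file prepared by this setUp points to a serialization test. The test body itself is not reproduced on this page, so the round trip below is only a hedged guess at what such a test might exercise, re-created standalone rather than on the fixtures above:
import os
import tempfile

import chainer
from chainer import link, links, optimizers

parent = link.Chain()
with parent.init_scope():
    parent.linear = links.Linear(2, 3)
opt = optimizers.AdaDelta()
opt.setup(parent)
parent.cleargrads()
opt.update()                                   # creates per-parameter AdaDelta state

fd, path = tempfile.mkstemp()
os.close(fd)
chainer.serializers.save_npz(path, opt)        # write the optimizer state to disk

restored = optimizers.AdaDelta()
restored.setup(parent)
chainer.serializers.load_npz(path, restored)   # read it back into a fresh optimizer
os.remove(path)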
Example 2: create
# Required import: from chainer import optimizers  [as alias]
# Or: from chainer.optimizers import AdaDelta  [as alias]
from chainer import optimizers


def create(self):
    # build an AdaDelta optimizer with a custom epsilon
    return optimizers.AdaDelta(eps=1e-5)
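AdaDelta takes no learning-rate argument; in Chainer its only hyperparameters are rho (the decay rate of the running averages, default 0.95) and eps (a numerical-stability constant, default 1e-6), so a factory like this one typically only adjusts eps.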
Example 3: setUp
# Required import: from chainer import optimizers  [as alias]
# Or: from chainer.optimizers import AdaDelta  [as alias]
import os
import tempfile

import numpy
import six

import chainer
from chainer import link, links, optimizers


def setUp(self):
    # the fixture serializes either to a named file or to an in-memory buffer
    if self.file_type == 'filename':
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.file = path
    elif self.file_type == 'bytesio':
        self.file = six.BytesIO()
    else:
        assert False

    # build a two-level chain, as in Example 1
    child = link.Chain()
    with child.init_scope():
        child.linear = links.Linear(2, 3)
        child.Wc = chainer.Parameter(shape=(2, 3))
    self.parent = link.Chain()
    with self.parent.init_scope():
        self.parent.child = child
        self.parent.Wp = chainer.Parameter(shape=(2, 3))

    # attach an AdaDelta optimizer and run one update to initialize all states
    self.optimizer = optimizers.AdaDelta()
    self.optimizer.setup(self.parent)
    self.parent.cleargrads()
    self.optimizer.update()  # init all states

    # pick the numpy writer that matches the requested compression setting
    self.savez = numpy.savez_compressed if self.compress else numpy.savez
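Both branches of this setUp feed the same serializer interface: chainer.serializers.save_npz accepts either a file name or a file-like object, and its compression flag mirrors the numpy.savez_compressed / numpy.savez choice stored in self.savez. A standalone sketch under those assumptions (the chain re-created here is illustrative, not the actual test fixture):
import six

import chainer
from chainer import link, links, optimizers

chain = link.Chain()
with chain.init_scope():
    chain.linear = links.Linear(2, 3)
opt = optimizers.AdaDelta()
opt.setup(chain)
chain.cleargrads()
opt.update()

buf = six.BytesIO()                                       # the 'bytesio' branch
chainer.serializers.save_npz(buf, opt, compression=True)  # compressed, like savez_compressed
print(len(buf.getvalue()), 'bytes written')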
Example 4: set_params
# Required import: from chainer import optimizers  [as alias]
# Or: from chainer.optimizers import AdaDelta  [as alias]
import chainer
from chainer import optimizers


def set_params(self, params):
    self.gpu = params.get('gpu', False)
    self.learning_rate = params.get('learning_rate', 0.00025)
    self.decay_rate = params.get('decay_rate', 0.95)
    self.discount = params.get('discount', 0.95)
    self.clip_err = params.get('clip_err', False)
    self.target_net_update = params.get('target_net_update', 10000)
    self.double_DQN = params.get('double_DQN', False)

    # set up one of the supported gradient update algorithms
    opt = params.get('optim_name', 'ADAM')
    if opt == 'RMSprop':
        self.optimizer = optimizers.RMSprop(lr=self.learning_rate, alpha=self.decay_rate)
    elif opt == 'ADADELTA':
        print("Supplied learning rate not used with ADADELTA gradient update method")
        self.optimizer = optimizers.AdaDelta()
    elif opt == 'ADAM':
        self.optimizer = optimizers.Adam(alpha=self.learning_rate)
    elif opt == 'SGD':
        self.optimizer = optimizers.SGD(lr=self.learning_rate)
    else:
        print('The requested optimizer is not supported!!!')
        exit()

    # optionally clip gradients; note that recent Chainer versions require
    # setup() to have been called on the optimizer before add_hook()
    if self.clip_err is not False:
        self.optimizer.add_hook(chainer.optimizer.GradientClipping(self.clip_err))

    self.optim_name = opt  # reuse the default above rather than a bare params['optim_name']
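For the 'ADADELTA' branch, the learning_rate and decay_rate entries are simply ignored: the optimizer that ends up attached to the network is a default AdaDelta, optionally wrapped with gradient clipping. Written out standalone (the Linear link and the clipping threshold are placeholders, not part of the original agent):
import chainer
from chainer import links, optimizers

model = links.Linear(4, 2)                     # placeholder network
optimizer = optimizers.AdaDelta()              # no lr argument; rho/eps defaults apply
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.GradientClipping(1.0))  # plays the role of clip_err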