This article collects typical usage examples of the Python method chainer.functions.mean_absolute_error. If you are unsure how functions.mean_absolute_error is used or what it is good for, the curated code examples below may help. You can also explore further usage examples from the chainer.functions module.
The following shows 15 code examples of functions.mean_absolute_error, sorted by popularity by default.
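For reference, the most basic call takes two arrays or Variables of the same shape and floating-point dtype and returns a scalar Variable holding the mean absolute difference. A minimal standalone sketch on random NumPy data (not drawn from any of the projects below):

import numpy as np
import chainer.functions as F

x0 = np.random.uniform(-1, 1, (4, 3)).astype(np.float32)
x1 = np.random.uniform(-1, 1, (4, 3)).astype(np.float32)

# Mean of |x0 - x1| over all elements, returned as a scalar chainer.Variable.
loss = F.mean_absolute_error(x0, x1)
print(loss.data)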
Example 1: calc_loss_style
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import mean_absolute_error [as an alias]
def calc_loss_style(hout_dict, hcomp_dict, hgt_dict):
    layers = hgt_dict.keys()
    for i, layer_name in enumerate(layers):
        B, C, H, W = hout_dict[layer_name].shape
        # Flatten each feature map to (B, C, H*W) so Gram matrices can be computed.
        hout = F.reshape(hout_dict[layer_name], (B, C, H * W))
        hcomp = F.reshape(hcomp_dict[layer_name], (B, C, H * W))
        hgt = F.reshape(hgt_dict[layer_name], (B, C, H * W))
        # Gram matrices of the raw output, the composited output, and the ground truth.
        hout_gram = F.batch_matmul(hout, hout, transb=True)
        hcomp_gram = F.batch_matmul(hcomp, hcomp, transb=True)
        hgt_gram = F.batch_matmul(hgt, hgt, transb=True)
        if i == 0:
            L_style_out = F.mean_absolute_error(hout_gram, hgt_gram) / (C * H * W)
            L_style_comp = F.mean_absolute_error(hcomp_gram, hgt_gram) / (C * H * W)
        else:
            L_style_out += F.mean_absolute_error(hout_gram, hgt_gram) / (C * H * W)
            L_style_comp += F.mean_absolute_error(hcomp_gram, hgt_gram) / (C * H * W)
    return L_style_out + L_style_comp
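A hedged usage sketch for Example 1: the three dictionaries map layer names to feature maps (presumably from a pretrained feature extractor in the original project); here they are filled with random Variables, and the layer name and shape are placeholders chosen only to show the expected call:

import numpy as np
import chainer

B, C, H, W = 2, 64, 32, 32   # illustrative feature-map shape

def random_features():
    return chainer.Variable(np.random.rand(B, C, H, W).astype(np.float32))

hout_dict = {'conv1_1': random_features()}    # network-output features
hcomp_dict = {'conv1_1': random_features()}   # composited-output features
hgt_dict = {'conv1_1': random_features()}     # ground-truth features

style_loss = calc_loss_style(hout_dict, hcomp_dict, hgt_dict)   # scalar Variable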
Example 2: gene_update_half
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import mean_absolute_error [as an alias]
def gene_update_half(self, a):
    # Pick the translation direction (A->B or B->A) depending on the flag.
    if a:
        itr_x = self.itr_a
        itr_y = self.itr_b
        gene_xy = self.generator_ab
        gene_yx = self.generator_ba
        disc = self.discriminator_b
        opt = self.opt_g_a
    else:
        itr_x = self.itr_b
        itr_y = self.itr_a
        gene_xy = self.generator_ba
        gene_yx = self.generator_ab
        disc = self.discriminator_a
        opt = self.opt_g_b
    x = Variable(self.converter(itr_x.next(), self.device))
    y = Variable(self.converter(itr_y.next(), self.device))
    xy = gene_xy(x)
    xyx = gene_yx(xy)
    yy = gene_xy(y)
    xy_disc = disc(xy)
    recon_loss = F.mean_absolute_error(x, xyx)   # cycle-consistency (L1)
    gan_loss = self.loss_hinge_gene(xy_disc)     # adversarial hinge loss
    ident_loss = F.mean_absolute_error(y, yy)    # identity-mapping (L1)
    loss_gene = recon_loss * 3.0 + gan_loss + ident_loss * 0.5
    gene_xy.cleargrads()
    loss_gene.backward()
    opt.update()
    chainer.reporter.report({
        'loss/g/recon': recon_loss,
        'loss/g/ident': ident_loss,
        'loss/g/gene': gan_loss})
Example 3: gene_update_full
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import mean_absolute_error [as an alias]
def gene_update_full(self):
    a = Variable(self.converter(self.itr_a.next(), self.device))
    b = Variable(self.converter(self.itr_b.next(), self.device))
    ab = self.generator_ab(a)
    ba = self.generator_ba(b)
    aba = self.generator_ba(ab)
    bab = self.generator_ab(ba)
    aa = self.generator_ba(a)
    bb = self.generator_ab(b)
    ab_disc = self.discriminator_b(ab)
    ba_disc = self.discriminator_a(ba)
    recon_loss = F.mean_absolute_error(a, aba) + F.mean_absolute_error(b, bab)   # cycle-consistency
    gan_loss = self.loss_hinge_gene(ab_disc) + self.loss_hinge_gene(ba_disc)     # adversarial
    ident_loss = F.mean_absolute_error(a, aa) + F.mean_absolute_error(b, bb)     # identity mapping
    loss_gene = recon_loss * 3.0 + gan_loss + ident_loss * 0.5
    self.generator_ab.cleargrads()
    self.generator_ba.cleargrads()
    loss_gene.backward()
    self.opt_g_a.update()
    self.opt_g_b.update()
    chainer.reporter.report({
        'loss/g/recon': recon_loss,
        'loss/g/ident': ident_loss,
        'loss/g/gene': gan_loss})
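Examples 2 and 3 combine three terms: an L1 cycle-consistency loss, an adversarial loss from loss_hinge_gene (defined elsewhere in that project), and an L1 identity loss, weighted 3.0 / 1.0 / 0.5. A sketch of the same arithmetic on dummy arrays, assuming loss_hinge_gene is the common -mean(d_fake) hinge form (the project's own definition may differ):

import numpy as np
import chainer.functions as F

def loss_hinge_gene(d_fake):
    # Assumed hinge-style generator loss; stand-in for the project's helper.
    return -F.mean(d_fake)

shape = (1, 3, 64, 64)
a = np.random.rand(*shape).astype(np.float32)       # real sample from domain A
aba = np.random.rand(*shape).astype(np.float32)     # A -> B -> A reconstruction
aa = np.random.rand(*shape).astype(np.float32)      # identity mapping of A
d_fake = np.random.randn(1, 1, 8, 8).astype(np.float32)   # discriminator output on the fake

recon_loss = F.mean_absolute_error(a, aba)
gan_loss = loss_hinge_gene(d_fake)
ident_loss = F.mean_absolute_error(a, aa)
loss_gene = recon_loss * 3.0 + gan_loss + ident_loss * 0.5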
Example 4: calc_loss_perceptual
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import mean_absolute_error [as an alias]
def calc_loss_perceptual(hout_dict, hcomp_dict, hgt_dict):
    layers = list(hout_dict.keys())
    layer_name = layers[0]
    # Per-layer L1 distance between the output / composited features and the ground-truth features.
    loss = F.mean_absolute_error(hout_dict[layer_name], hgt_dict[layer_name])
    loss += F.mean_absolute_error(hcomp_dict[layer_name], hgt_dict[layer_name])
    for layer_name in layers[1:]:
        loss += F.mean_absolute_error(hout_dict[layer_name], hgt_dict[layer_name])
        loss += F.mean_absolute_error(hcomp_dict[layer_name], hgt_dict[layer_name])
    return loss
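Example 4 consumes the same three feature dictionaries as Example 1 but sums plain per-layer L1 distances instead of Gram-matrix distances. A compact self-contained sketch (layer names and shapes are placeholders):

import numpy as np
import chainer

def feats():
    return chainer.Variable(np.random.rand(1, 8, 4, 4).astype(np.float32))

hout_dict = {'relu1': feats(), 'relu2': feats()}
hcomp_dict = {'relu1': feats(), 'relu2': feats()}
hgt_dict = {'relu1': feats(), 'relu2': feats()}

perceptual_loss = calc_loss_perceptual(hout_dict, hcomp_dict, hgt_dict)   # scalar Variable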
Example 5: calc_loss_tv
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import mean_absolute_error [as an alias]
def calc_loss_tv(Icomp, mask, xp=np):
    # Note: canvas aliases mask.data, so the in-place additions below also modify
    # the mask, and later lines read the already-updated values.
    canvas = mask.data
    canvas[:, :, :, :-1] += mask.data[:, :, :, 1:]  # mask left overlap
    canvas[:, :, :, 1:] += mask.data[:, :, :, :-1]  # mask right overlap
    canvas[:, :, :-1, :] += mask.data[:, :, 1:, :]  # mask up overlap
    canvas[:, :, 1:, :] += mask.data[:, :, :-1, :]  # mask bottom overlap
    # P region (hole mask: the hole region dilated by one pixel).
    P = Variable((xp.sign(canvas - 0.5) + 1.0) * 0.5)
    # Total-variation loss restricted to P: horizontal plus vertical L1 differences.
    return (F.mean_absolute_error(P[:, :, :, 1:] * Icomp[:, :, :, 1:],
                                  P[:, :, :, :-1] * Icomp[:, :, :, :-1])
            + F.mean_absolute_error(P[:, :, 1:, :] * Icomp[:, :, 1:, :],
                                    P[:, :, :-1, :] * Icomp[:, :, :-1, :]))
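A sketch exercising Example 5 with a random composited image and a binary mask. Here 1 marks the hole region, following the "hole mask" comment inside the function; the project's own mask convention may be inverted. Since calc_loss_tv writes into mask.data, a copy is passed so the original mask array stays intact:

import numpy as np
from chainer import Variable

Icomp = Variable(np.random.rand(1, 3, 16, 16).astype(np.float32))   # composited output image

m = np.zeros((1, 3, 16, 16), dtype=np.float32)
m[:, :, 4:8, 4:8] = 1.0        # 1 marks the hole region (assumed convention)
mask = Variable(m.copy())      # copy because the function mutates mask.data

tv_loss = calc_loss_tv(Icomp, mask, xp=np)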
Example 6: loss_enc
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import mean_absolute_error [as an alias]
def loss_enc(self, enc, x_out, t_out, y_out, lam1=100, lam2=1):
    batchsize, _, w, h = y_out.shape
    loss_rec = lam1 * F.mean_absolute_error(x_out, t_out)   # weighted L1 reconstruction loss
    loss_adv = lam2 * F.sum(F.softplus(-y_out)) / batchsize / w / h   # adversarial loss
    loss = loss_rec + loss_adv
    chainer.report({'loss': loss}, enc)
    return loss
Example 7: loss_dec
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import mean_absolute_error [as an alias]
def loss_dec(self, dec, x_out, t_out, y_out, lam1=100, lam2=1):
    batchsize, _, w, h = y_out.shape
    loss_rec = lam1 * F.mean_absolute_error(x_out, t_out)   # weighted L1 reconstruction loss
    loss_adv = lam2 * F.sum(F.softplus(-y_out)) / batchsize / w / h   # adversarial loss
    loss = loss_rec + loss_adv
    chainer.report({'loss': loss}, dec)
    return loss
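Examples 6 and 7 (and the near-identical Examples 11 and 12 below) implement a pix2pix-style objective: a weighted L1 reconstruction term plus a softplus adversarial term averaged over the discriminator's spatial output. The core arithmetic, reproduced on dummy arrays without the enc/dec links or the reporter:

import numpy as np
import chainer.functions as F

lam1, lam2 = 100, 1
x_out = np.random.rand(2, 3, 8, 8).astype(np.float32)    # generator output
t_out = np.random.rand(2, 3, 8, 8).astype(np.float32)    # ground-truth image
y_out = np.random.randn(2, 1, 8, 8).astype(np.float32)   # discriminator patch output

batchsize, _, w, h = y_out.shape
loss_rec = lam1 * F.mean_absolute_error(x_out, t_out)
loss_adv = lam2 * F.sum(F.softplus(-y_out)) / batchsize / w / h
loss = loss_rec + loss_adv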
Example 8: forward
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import mean_absolute_error [as an alias]
def forward(self, inputs, device):
    x0, x1 = inputs
    loss = functions.mean_absolute_error(x0, x1)
    return loss,
Example 9: test_invalid_dtype1
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import mean_absolute_error [as an alias]
def test_invalid_dtype1(self):
    # Integer inputs are rejected by the type checker.
    x0 = chainer.Variable(
        numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.int32))
    x1 = chainer.Variable(
        numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.int32))
    with self.assertRaises(type_check.InvalidType):
        functions.mean_absolute_error(x0, x1)
Example 10: test_invalid_dtype2
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import mean_absolute_error [as an alias]
def test_invalid_dtype2(self):
    # Mismatched float32 / float16 inputs are rejected as well.
    x0 = chainer.Variable(
        numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32))
    x1 = chainer.Variable(
        numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float16))
    with self.assertRaises(type_check.InvalidType):
        functions.mean_absolute_error(x0, x1)

# See chainer#6702.
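Examples 9 and 10 show that the function's type checker rejects integer inputs and mismatched floating dtypes. Outside a test harness, the same behaviour can be observed directly (a sketch; the exception type is taken from the tests above):

import numpy
import chainer
from chainer import functions
from chainer.utils import type_check

x0 = chainer.Variable(numpy.zeros((4, 3), dtype=numpy.int32))
x1 = chainer.Variable(numpy.zeros((4, 3), dtype=numpy.int32))
try:
    functions.mean_absolute_error(x0, x1)
except type_check.InvalidType:
    print('int32 inputs are rejected')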
Example 11: loss_enc
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import mean_absolute_error [as an alias]
def loss_enc(self, enc, x_out, t_out, y_out, lam1=100, lam2=1):
    # Identical to Example 6, except the shape is read from y_out.data.
    batchsize, _, w, h = y_out.data.shape
    loss_rec = lam1 * F.mean_absolute_error(x_out, t_out)
    loss_adv = lam2 * F.sum(F.softplus(-y_out)) / batchsize / w / h
    loss = loss_rec + loss_adv
    chainer.report({'loss': loss}, enc)
    return loss
Example 12: loss_dec
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import mean_absolute_error [as an alias]
def loss_dec(self, dec, x_out, t_out, y_out, lam1=100, lam2=1):
    # Identical to Example 7, except the shape is read from y_out.data.
    batchsize, _, w, h = y_out.data.shape
    loss_rec = lam1 * F.mean_absolute_error(x_out, t_out)
    loss_adv = lam2 * F.sum(F.softplus(-y_out)) / batchsize / w / h
    loss = loss_rec + loss_adv
    chainer.report({'loss': loss}, dec)
    return loss
Example 13: loss_func_rec_l1
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import mean_absolute_error [as an alias]
def loss_func_rec_l1(x_out, t):
    # L1 reconstruction loss.
    return F.mean_absolute_error(x_out, t)
Example 14: __call__
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import mean_absolute_error [as an alias]
def __call__(self, x):
    h = x
    # Encoder: convolutions followed by fully connected layers.
    h = F.leaky_relu(self.c0(h))
    h = F.leaky_relu(self.c1(h))
    h = F.leaky_relu(self.c2(h))
    h = F.leaky_relu(self.c3(h))
    h = F.leaky_relu(self.l4(h))
    # Decoder: reshape back to 4x4 feature maps and deconvolve.
    h = F.reshape(F.leaky_relu(self.l5(h)), (x.data.shape[0], self.ch, 4, 4))
    h = F.leaky_relu(self.dc3(h))
    h = F.leaky_relu(self.dc2(h))
    h = F.leaky_relu(self.dc1(h))
    h = F.tanh(self.dc0(h))
    # L1 reconstruction error between the autoencoder output and its input.
    return F.mean_absolute_error(h, x)
Example 15: _loss_predictor
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import mean_absolute_error [as an alias]
def _loss_predictor(self, predictor, output, target, d_fake):
    b, _, t = d_fake.data.shape
    # Reported as 'mse' in this project, but computed as a mean absolute error.
    loss_mse = F.mean_absolute_error(output, target)
    chainer.report({'mse': loss_mse}, predictor)
    loss_adv = F.sum(F.softplus(-d_fake)) / (b * t)
    chainer.report({'adversarial': loss_adv}, predictor)
    loss = self.loss_config.mse * loss_mse + self.loss_config.adversarial * loss_adv
    chainer.report({'loss': loss}, predictor)
    return loss
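Example 15 weights the term reported as 'mse' (an L1 term despite the name) against a softplus adversarial term using weights taken from self.loss_config. A sketch of the same arithmetic with a stand-in config object; SimpleNamespace, the weight values, and the array shapes are placeholders, not the project's own configuration:

from types import SimpleNamespace
import numpy as np
import chainer.functions as F

loss_config = SimpleNamespace(mse=1.0, adversarial=0.1)    # stand-in weights

output = np.random.rand(4, 1, 100).astype(np.float32)      # predictor output, shape (b, _, t)
target = np.random.rand(4, 1, 100).astype(np.float32)
d_fake = np.random.randn(4, 1, 100).astype(np.float32)     # discriminator output on fakes

b, _, t = d_fake.shape
loss_mse = F.mean_absolute_error(output, target)
loss_adv = F.sum(F.softplus(-d_fake)) / (b * t)
loss = loss_config.mse * loss_mse + loss_config.adversarial * loss_adv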