本文整理汇总了Python中breze.learn.mlp.Mlp.exprs["true_loss"]方法的典型用法代码示例。如果您正苦于以下问题:Python Mlp.exprs["true_loss"]方法的具体用法?Python Mlp.exprs["true_loss"]怎么用?Python Mlp.exprs["true_loss"]使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类breze.learn.mlp.Mlp
的用法示例。
在下文中一共展示了Mlp.exprs["true_loss"]方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: new_trainer
# 需要导入模块: from breze.learn.mlp import Mlp [as 别名]
# 或者: from breze.learn.mlp.Mlp import exprs["true_loss"] [as 别名]
def new_trainer(pars, data):
    """Build a climin ``Trainer`` for a softmax MLP classifier with L2 decay.

    :param pars: dict of hyperparameters; keys read here: ``'batch_size'``,
        ``'n_hidden'``, ``'hidden_transfers'``, ``'optimizer'``,
        ``'par_std'`` and ``'L2'``.
    :param data: dataset handed through to ``make_data_dict``.
    :returns: a configured ``Trainer`` instance, paused every epoch for
        reporting and stopped after 100 reporting periods or on SIGTERM.
    """
    # NOTE(review): an earlier comment claimed 132 hand-crafted features,
    # but the code uses 156 -- confirm which matches the current data.
    input_size = 156
    # 13 output classes (original comment: "13 as there are 12 fields",
    # presumably 12 fields plus one extra class -- verify against the data).
    output_size = 13
    batch_size = pars["batch_size"]
    m = Mlp(
        input_size,
        pars["n_hidden"],
        output_size,
        hidden_transfers=pars["hidden_transfers"],
        out_transfer="softmax",
        loss="cat_ce",
        batch_size=batch_size,
        optimizer=pars["optimizer"],
    )
    # Gaussian initialization of the full flat parameter vector.
    climin.initialize.randomize_normal(m.parameters.data, 0, pars["par_std"])

    # L2 penalty over all three weight matrices, scaled by the number of
    # samples in the symbolic input batch.
    weight_decay = (
        (m.parameters.in_to_hidden ** 2).sum()
        + (m.parameters.hidden_to_hidden_0 ** 2).sum()
        + (m.parameters.hidden_to_out ** 2).sum()
    )
    weight_decay /= m.exprs["inpt"].shape[0]

    # Keep the unregularized loss for reporting, then add the decay term
    # to the loss actually optimized.
    m.exprs["true_loss"] = m.exprs["loss"]
    c_wd = pars["L2"]
    m.exprs["loss"] = m.exprs["loss"] + c_wd * weight_decay

    # Length of dataset should be 270000 (for no time-integration).
    # Floor division keeps n_report an integer under Python 3 as well;
    # under Python 2 this is identical to the original `/`.
    n_report = 270000 // batch_size
    max_iter = n_report * 100

    interrupt = climin.stops.OnSignal()
    stop = climin.stops.Any(
        [
            climin.stops.AfterNIterations(max_iter),
            climin.stops.OnSignal(signal.SIGTERM),
            # climin.stops.NotBetterThanAfter(1e-1, 500, key='train_loss'),
        ]
    )
    pause = climin.stops.ModuloNIterations(n_report)
    reporter = KeyPrinter(["n_iter", "train_loss", "val_loss"])
    t = Trainer(m, stop=stop, pause=pause, report=reporter, interrupt=interrupt)
    make_data_dict(t, data)
    return t
示例2: 训练损失与评估表达式代码片段
# 需要导入模块: from breze.learn.mlp import Mlp [as 别名]
# 或者: from breze.learn.mlp.Mlp import exprs["true_loss"] [as 别名]
# NOTE(review): headless excerpt -- `m`, `max_iter`, `weights`, `X`, `Z`,
# `TX`, `TZ` and `train_labels` are defined before this fragment begins.
losses = []
print "max iter", max_iter
# Cast all arrays to the local device/dtype breze expects (e.g. GPU float32).
X, Z, TX, TZ = [breze.learn.base.cast_array_to_local_type(i) for i in (X, Z, TX, TZ)]
# Collect the weight matrix of every layer for the decay term below.
for layer in m.mlp.layers:
    weights.append(m.parameters[layer.weights])
# L2 penalty over the three weight matrices, normalized by batch size.
# assumes exactly 3 layers (weights[0..2]) -- TODO confirm architecture.
weight_decay = (weights[0] ** 2).sum() + (weights[1] ** 2).sum() + (weights[2] ** 2).sum()
weight_decay /= m.exprs["inpt"].shape[0]
# Preserve the unregularized loss for reporting, then regularize the
# training loss with a fixed decay coefficient of 0.1.
m.exprs["true_loss"] = m.exprs["loss"]
c_wd = 0.1
m.exprs["loss"] = m.exprs["loss"] + c_wd * weight_decay
# Mean absolute error per output dimension, with predictions un-standardized
# back to label scale (output * std + mean). Presumably targets are kept in
# original units -- verify against the data pipeline.
mae = T.abs_(
    (m.exprs["output"] * np.std(train_labels, axis=0) + np.mean(train_labels, axis=0)) - m.exprs["target"]
).mean(axis=0)
f_mae = m.function(["inpt", "target"], mae)
# Root-mean-squared error per output dimension, same un-standardization.
rmse = T.sqrt(
    T.square(
        (m.exprs["output"] * np.std(train_labels, axis=0) + np.mean(train_labels, axis=0)) - m.exprs["target"]
    ).mean(axis=0)
)
示例3: run_mlp
# 需要导入模块: from breze.learn.mlp import Mlp [as 别名]
# 或者: from breze.learn.mlp.Mlp import exprs["true_loss"] [as 别名]
def run_mlp(func, step, momentum, X, Z, TX, TZ, wd, opt, counter):
    """Train an MLP regressor with the given hyperparameters.

    NOTE(review): this is a truncated excerpt -- the function body continues
    past the visible portion (training loop omitted).

    :param func: hidden transfer function(s) for the MLP.
    :param step: optimizer step rate.
    :param momentum: optimizer momentum.
    :param X, Z: training inputs and targets.
    :param TX, TZ: test inputs and targets.
    :param wd: L2 weight-decay coefficient.
    :param opt: optimizer name passed to climin.
    :param counter: run index, used to name the parameter pickle file.
    """
    print func, step, momentum, wd, opt, counter
    # Fixed seed for reproducible initialization/data order.
    seed = 3453
    np.random.seed(seed)
    batch_size = 25
    # max_iter = max_passes * X.shape[ 0] / batch_size
    max_iter = 25000000
    # One report per epoch (Python 2 integer division).
    n_report = X.shape[0] / batch_size
    weights = []
    input_size = len(X[0])
    stop = climin.stops.AfterNIterations(max_iter)
    pause = climin.stops.ModuloNIterations(n_report)
    optimizer = opt, {"step_rate": step, "momentum": momentum}
    # Hard-coded model variant switch; only "plain" is active here.
    typ = "plain"
    if typ == "plain":
        m = Mlp(
            input_size,
            [400, 100],
            1,
            X,
            Z,
            hidden_transfers=func,
            out_transfer="identity",
            loss="squared",
            optimizer=optimizer,
            batch_size=batch_size,
            max_iter=max_iter,
        )
    elif typ == "fd":
        m = FastDropoutNetwork(
            2099,
            [400, 100],
            1,
            X,
            Z,
            TX,
            TZ,
            hidden_transfers=["tanh", "tanh"],
            out_transfer="identity",
            loss="squared",
            p_dropout_inpt=0.1,
            p_dropout_hiddens=0.2,
            optimizer=optimizer,
            batch_size=batch_size,
            max_iter=max_iter,
        )
    # climin.initialize.randomize_normal(m.parameters.data, 0, 1 / np.sqrt(m.n_inpt))
    # Transform the test data
    # TX = m.transformedData(TX)
    # Average 10 (presumably stochastic) transformations of the test set.
    # NOTE(review): transformedData looks dropout-specific -- confirm the
    # plain Mlp actually provides it.
    TX = np.array([m.transformedData(TX) for _ in range(10)]).mean(axis=0)
    print TX.shape
    losses = []
    print "max iter", max_iter
    m.init_weights()
    # Cast arrays to the local device/dtype breze expects.
    X, Z, TX, TZ = [breze.learn.base.cast_array_to_local_type(i) for i in (X, Z, TX, TZ)]
    # Collect every layer's weight matrix for the decay term.
    for layer in m.mlp.layers:
        weights.append(m.parameters[layer.weights])
    # L2 penalty over the three weight matrices, normalized by batch size.
    weight_decay = (weights[0] ** 2).sum() + (weights[1] ** 2).sum() + (weights[2] ** 2).sum()
    weight_decay /= m.exprs["inpt"].shape[0]
    # Keep unregularized loss for reporting; optimize the decayed loss.
    m.exprs["true_loss"] = m.exprs["loss"]
    c_wd = wd
    m.exprs["loss"] = m.exprs["loss"] + c_wd * weight_decay
    # MAE/RMSE with predictions un-standardized back to label scale.
    # NOTE(review): train_labels is read from enclosing scope -- confirm.
    mae = T.abs_((m.exprs["output"] * np.std(train_labels) + np.mean(train_labels)) - m.exprs["target"]).mean()
    f_mae = m.function(["inpt", "target"], mae)
    rmse = T.sqrt(
        T.square((m.exprs["output"] * np.std(train_labels) + np.mean(train_labels)) - m.exprs["target"]).mean()
    )
    f_rmse = m.function(["inpt", "target"], rmse)
    start = time.time()
    # Set up a nice printout.
    keys = "#", "seconds", "loss", "val loss", "mae_train", "rmse_train", "mae_test", "rmse_test"
    max_len = max(len(i) for i in keys)
    header = "\t".join(i for i in keys)
    print header
    print "-" * len(header)
    # Append the header to the shared results log as well.
    results = open("result_hp.txt", "a")
    results.write(header + "\n")
    results.write("-" * len(header) + "\n")
    results.close()
    # Per-run pickle path for the best parameters found.
    EXP_DIR = os.getcwd()
    base_path = os.path.join(EXP_DIR, "pars_hp" + str(counter) + ".pkl")
    n_iter = 0
#.........这里部分代码省略.........