This article collects typical usage examples of the Python method dataset.Dataset.load_pkl. If you are struggling with questions like: what exactly does Python's Dataset.load_pkl do? How is Dataset.load_pkl used? Are there examples of Dataset.load_pkl in practice? Then congratulations, the curated code examples here may help. You can also explore further usage examples of the class dataset.Dataset that this method belongs to.
Below, 2 code examples of the Dataset.load_pkl method are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
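Before the full examples, here is a minimal sketch of the call pattern. This is an illustration only; it assumes load_pkl is a classmethod that unpickles and returns a Dataset instance from the given path, as the examples below suggest:

from dataset import Dataset

# minimal sketch: load a previously pickled Dataset
# (the path and the dim() call are taken from the examples below)
dataset = Dataset.load_pkl("data/all_data.pkl")
print(dataset.dim())  # feature dimensionality, used when building the model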
Example 1: predict
# Required import: from dataset import Dataset [as alias]
# Or alternatively: from dataset.Dataset import load_pkl [as alias]
# pickle and numpy are assumed to be imported at module level
# in the original source; they are added here so the example runs.
import pickle
import numpy as np
from dataset import Dataset

def predict():
    preds = []
    models = []
    scalers = []
    dataset = Dataset.load_pkl("data/all_data.pkl")
    for i in [71, 72, 73, 74]:  # load 4 models, one per random seed
        print("load {}".format(i))
        with open("models/mlp_{}.pkl".format(i), "rb") as f:
            m = pickle.load(f)
            model, scaler = m[0], m[1]
            models.append(model)
            scalers.append(scaler)

    def callback(rec):
        # average the predictions of all loaded models
        feats = rec["coupon_feats"]
        pred = np.zeros(len(feats), dtype=np.float32)
        for i, m in enumerate(models):
            pred += m.predict(scalers[i].transform(feats))
        pred /= len(models)
        # rank this user's coupons by score and keep the top 10
        scores = zip(pred, rec["coupon_ids"])
        scores = sorted(scores, key=lambda score: -score[0])
        coupon_ids = " ".join(map(lambda score: str(score[1]), scores[0:10]))
        preds.append([rec["user_id"], coupon_ids])

    dataset.each_test(callback)
    preds = sorted(preds, key=lambda rec: rec[0])
    with open("submission_mlp.csv", "w") as fp:
        fp.write("USER_ID_hash,PURCHASED_COUPONS\n")
        for pred in preds:
            fp.write("%s,%s\n" % (pred[0], pred[1]))
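Note that predict relies on Dataset.each_test invoking the callback once per test user. A tiny stand-in, purely hypothetical and assuming each record is a dict with the three keys the callback reads (user_id, coupon_feats, coupon_ids), might look like this:

import numpy as np

class FakeDataset:
    # hypothetical stand-in for dataset.Dataset, for illustration only;
    # the real class is part of the project's own code
    def each_test(self, callback):
        rng = np.random.RandomState(0)
        for user_id in ["user_a", "user_b"]:
            callback({
                "user_id": user_id,
                "coupon_feats": rng.rand(5, 8).astype(np.float32),  # 5 coupons x 8 features
                "coupon_ids": [10, 11, 12, 13, 14],
            })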
Example 2: train
# Required import: from dataset import Dataset [as alias]
# Or alternatively: from dataset.Dataset import load_pkl [as alias]
# argparse, pickle, numpy, and sklearn's StandardScaler are assumed to be
# imported at module level in the original source. MLP3,
# pairwise_ranking_accuracy, eval_map, and the constants NEGA_WEIGHT,
# N_EPOCH, and BATCH_SIZE are defined elsewhere in the project.
import argparse
import pickle
import numpy as np
from sklearn.preprocessing import StandardScaler
from dataset import Dataset

def train():
    parser = argparse.ArgumentParser(description='nagadomi-coupon-purchase-prediction-solution')
    parser.add_argument('--seed', '-s', default=71, type=int,
                        help='Random seed')
    parser.add_argument('--validation', '-v', action="store_true",
                        help='Validation mode')
    args = parser.parse_args()
    model_name = "mlp"
    if args.validation:
        dataset = Dataset.load_pkl("data/valid_28.pkl")
        model_name = model_name + "_valid"
    else:
        dataset = Dataset.load_pkl("data/all_data.pkl")
    np.random.seed(args.seed)
    model = MLP3({"input": dataset.dim(),
                  "lr": 0.01,
                  "h1": 512, "h2": 32,
                  "dropout1": 0.5,
                  "dropout2": 0.1,
                  })
    scaler = StandardScaler()
    # estimate mean/std for feature scaling
    x, y = dataset.gen_train_data(num_nega=NEGA_WEIGHT)
    scaler.fit(x)
    if args.validation:
        x0_test, x1_test = dataset.gen_valid_data_pairwise(num_nega=20)
        x0_test = scaler.transform(x0_test)
        x1_test = scaler.transform(x1_test)
    # learning loop (range replaces Python 2's xrange)
    for epoch in range(1, N_EPOCH + 1):
        print('**** epoch {}/{}'.format(epoch, N_EPOCH))
        if epoch == 100:
            model.learning_rate_decay(0.5)
        # resample the training dataset each epoch
        x, y = dataset.gen_train_data(num_nega=NEGA_WEIGHT)
        x = scaler.transform(x)
        # update
        model.train(x, y, batchsize=BATCH_SIZE, verbose=True)
        # evaluate
        if args.validation:
            acc = pairwise_ranking_accuracy(model, x0_test, x1_test)
            print("valid pairwise ranking accuracy: {}".format(float(acc.data)))
            if epoch % 10 == 0:
                eval_map(model, scaler, dataset, k=10)
        if epoch % 10 == 0:
            # save a per-epoch snapshot and overwrite the latest model
            with open("models/{}_{}_epoch_{}.pkl".format(model_name, args.seed, epoch), "wb") as f:
                pickle.dump([model, scaler], f)
            with open("models/{}_{}.pkl".format(model_name, args.seed), "wb") as f:
                pickle.dump([model, scaler], f)
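Assuming train is wired up as the entry point of a script (the file name train.py here is hypothetical), a validation run and a full training run could be launched like this:

# validation mode: uses data/valid_28.pkl and reports ranking accuracy
python train.py --validation --seed 71
# full training on data/all_data.pkl; seeds 71-74 produce the four
# models that predict() in Example 1 loads
python train.py --seed 71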