This article collects typical usage examples of LightFM.item_biases from the Python module lightfm.lightfm. If you are wondering what LightFM.item_biases is and how to use it, the curated examples below may help. Note that item_biases is an attribute of a fitted model (a 1-D NumPy array of per-item bias terms), not a method; see the enclosing class lightfm.lightfm.LightFM
for more details.
Two code examples involving LightFM.item_biases are shown below.
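Before the examples, here is a minimal sketch of where item_biases comes from (the random interaction matrix and hyperparameters are illustrative assumptions, not part of the examples below): after fitting, a LightFM model exposes one learned bias per item.

import scipy.sparse as sp
from lightfm import LightFM

# Illustrative random implicit-feedback interactions (10 users x 100 items).
interactions = sp.rand(10, 100, density=0.1, format="csr", random_state=0)

model = LightFM(loss="bpr", no_components=16)
model.fit(interactions, epochs=5)

# item_biases is a 1-D float array with one learned bias per item.
print(model.item_biases.shape)  # (100,)
print(model.item_biases[:5])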
Example 1: test_precision_at_k_with_ties
# Required import: from lightfm.lightfm import LightFM [as alias]
# Attribute under test: LightFM.item_biases
import numpy as np
from lightfm import evaluation
from lightfm.lightfm import LightFM


def test_precision_at_k_with_ties():
    no_users, no_items = (10, 100)
    # _generate_data is a helper from the lightfm test suite that builds
    # random train/test interaction matrices.
    train, test = _generate_data(no_users, no_items)

    model = LightFM(loss="bpr")
    model.fit_partial(train)

    # Zero out every parameter array so that all predictions are equal (ties).
    model.user_embeddings = np.zeros_like(model.user_embeddings)
    model.item_embeddings = np.zeros_like(model.item_embeddings)
    model.user_biases = np.zeros_like(model.user_biases)
    model.item_biases = np.zeros_like(model.item_biases)

    k = 10
    precision = evaluation.precision_at_k(model, test, k=k)

    # Ties are resolved pessimistically, so precision collapses to zero.
    assert precision.mean() == 0.0
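For comparison with the tie-breaking case above, here is a minimal sketch of a routine precision_at_k call on a fitted model with non-trivial parameters (the random train/test matrices are illustrative assumptions):

import scipy.sparse as sp
from lightfm import LightFM
from lightfm.evaluation import precision_at_k

# Illustrative random splits; real data would come from user-item interaction logs.
train = sp.rand(10, 100, density=0.2, format="csr", random_state=1)
test = sp.rand(10, 100, density=0.2, format="csr", random_state=2)

model = LightFM(loss="bpr")
model.fit(train, epochs=10)

# One precision score per user; average for a single number. In practice you
# would usually also pass train_interactions=train to exclude already-seen items.
print(precision_at_k(model, test, k=10).mean())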
Example 2: test_predict_ranks
# Required import: from lightfm.lightfm import LightFM [as alias]
# Attribute under test: LightFM.item_biases
import numpy as np
import pytest
import scipy.sparse as sp
from lightfm.lightfm import LightFM


def test_predict_ranks():
    no_users, no_items = (10, 100)
    # Sparse random train interactions.
    train = sp.rand(no_users, no_items, format="csr", random_state=42)

    model = LightFM()
    model.fit_partial(train)

    # Compute ranks for all items.
    rank_input = sp.csr_matrix(np.ones((no_users, no_items)))
    ranks = model.predict_rank(rank_input, num_threads=2).todense()

    assert np.all(ranks.min(axis=1) == 0)
    assert np.all(ranks.max(axis=1) == no_items - 1)
    for row in range(no_users):
        assert np.all(np.sort(ranks[row]) == np.arange(no_items))

    # Train set exclusions: if the train interactions are dense,
    # every rank should be zero.
    ranks = model.predict_rank(
        rank_input, train_interactions=rank_input, check_intersections=False
    ).todense()
    assert np.all(ranks == 0)

    # The max rank in each row should be no_items - 1 minus the number
    # of train positives in that row.
    ranks = model.predict_rank(
        rank_input, train_interactions=train, check_intersections=False
    ).todense()
    assert np.all(
        np.squeeze(np.array(ranks.max(axis=1)))
        == no_items - 1 - np.squeeze(np.array(train.getnnz(axis=1)))
    )

    # An error is raised when train and test have interactions in common...
    with pytest.raises(ValueError):
        model.predict_rank(train, train_interactions=train, check_intersections=True)

    # ...but not when the intersection check is disabled.
    model.predict_rank(train, train_interactions=train, check_intersections=False)

    # No error is raised when train and test share no interactions.
    not_train = sp.rand(no_users, no_items, format="csr", random_state=43) - train
    not_train.data[not_train.data < 0] = 0
    not_train.eliminate_zeros()
    model.predict_rank(not_train, train_interactions=train, check_intersections=True)

    # Ranks are computed pessimistically when there are ties: equal
    # predictions for every item assign the maximum rank to each.
    model.user_embeddings = np.zeros_like(model.user_embeddings)
    model.item_embeddings = np.zeros_like(model.item_embeddings)
    model.user_biases = np.zeros_like(model.user_biases)
    model.item_biases = np.zeros_like(model.item_biases)

    ranks = model.predict_rank(rank_input, num_threads=2).todense()

    assert np.all(ranks.min(axis=1) == no_items - 1)
    assert np.all(ranks.max(axis=1) == no_items - 1)

    # Wrong input dimensions should raise an error.
    with pytest.raises(ValueError):
        model.predict_rank(sp.csr_matrix((5, 5)), num_threads=2)
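To tie these tests back to item_biases itself, here is a minimal sketch (the random data and hyperparameters are illustrative assumptions) of how the learned parameters combine into a prediction: LightFM scores a user-item pair as the dot product of their embeddings plus the user and item biases, which is why zeroing all four parameter arrays in the tests above makes every score identical and every rank pessimistic.

import numpy as np
import scipy.sparse as sp
from lightfm import LightFM

interactions = sp.rand(10, 100, density=0.1, format="csr", random_state=0)

model = LightFM(loss="bpr", no_components=8)
model.fit(interactions, epochs=5)

user_id, item_id = 3, 42

# Recombine the learned parameters by hand: score = <u, i> + b_u + b_i.
manual_score = (
    model.user_embeddings[user_id] @ model.item_embeddings[item_id]
    + model.user_biases[user_id]
    + model.item_biases[item_id]
)

predicted = model.predict(user_id, np.array([item_id]))[0]

# The two scores should agree up to float32 rounding.
print(manual_score, predicted)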