# Example of how to benchmark multiple bandit algorithms to generate top-k recommendations
from mab2rec import BanditRecommender, LearningPolicy
from mab2rec.pipeline import benchmark
from jurity.recommenders import BinaryRecoMetrics, RankingRecoMetrics
# Recommenders (many more available)
recommenders = {"Random": BanditRecommender(LearningPolicy.Random()),
"Popularity": BanditRecommender(LearningPolicy.Popularity()),
"LinGreedy": BanditRecommender(LearningPolicy.LinGreedy(epsilon=0.1))}
# Column names for the response, user, and item id columns
metric_params = {'click_column': 'score', 'user_id_column': 'user_id', 'item_id_column': 'item_id'}
# Performance metrics for benchmarking (many more available)
metrics = []
for top_k in [3, 5, 10]:
    metrics.append(BinaryRecoMetrics.CTR(**metric_params, k=top_k))
    metrics.append(RankingRecoMetrics.NDCG(**metric_params, k=top_k))
# Benchmarking with a collection of recommenders and metrics
# This returns two dictionaries:
# reco_to_results: recommendations for each algorithm on cross-validation data
# reco_to_metrics: evaluation metrics for each algorithm
reco_to_results, reco_to_metrics = benchmark(recommenders,
                                             metrics=metrics,
                                             train_data="data/data_train.csv",
                                             cv=5,
                                             user_features="data/features_user.csv")
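
# Illustrative sketch (not part of the original example): one way to look at the
# returned evaluation metrics. The exact nesting of reco_to_metrics may vary with
# the mab2rec version and the cv setting, so treat this loop as a rough guide.
for algorithm, metric_to_value in reco_to_metrics.items():
    print(algorithm, metric_to_value)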