diff --git a/CoRML Demonstration.ipynb b/CoRML Demonstration.ipynb
index 5c49b47..615a9fb 100644
--- a/CoRML Demonstration.ipynb
+++ b/CoRML Demonstration.ipynb
@@ -310,8 +310,8 @@
     "\n",
     "        self.lambda_ = config[\"lambda\"]  # Weights for H and G in preference scores\n",
     "        self.rho = config[\"dual_step_length\"]  # Dual step length of ADMM\n",
-    "        self.theta = config[\"l2_regularization\"]  # L2-regularization for learning weight matrix G\n",
-    "        self.norm_di = 2 * config[\"item_degree_norm\"]  # Item degree norm for learning weight matrix G\n",
+    "        self.theta = config[\"l2_regularization\"]  # L2-regularization for learning weight matrix H\n",
+    "        self.norm_di = 2 * config[\"item_degree_norm\"]  # Item degree norm for learning weight matrix H\n",
     "        self.eps = np.power(10, config[\"global_scaling\"])  # Global scaling in approximated ranking weights (in logarithm scale)\n",
     "\n",
     "        self.sparse_approx = config[\"sparse_approx\"]  # Sparse approximation to reduce storage size of H\n",
@@ -359,7 +359,7 @@
     "        self.H_indices = []\n",
     "        self.H_values = []\n",
     "\n",
-    "        for ilist in tqdm(item_list, desc=\"Partition\", bar_format=\"{elapsed}\"):\n",
+    "        for ilist in tqdm(item_list, desc=\"Partition\", bar_format=\"{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}]\"):\n",
     "            H_triu = self.update_H_part(ilist)\n",
    "            H_triu = torch.where(H_triu >= 5e-4, H_triu, 0).to_sparse_coo()\n",
     "            self.H_indices.append(ilist[H_triu.indices()])\n",
diff --git a/CoRML/model.py b/CoRML/model.py
index 19e0970..39223d1 100644
--- a/CoRML/model.py
+++ b/CoRML/model.py
@@ -132,8 +132,8 @@ def __init__(self, config, dataset):
 
         self.lambda_ = config["lambda"]  # Weights for H and G in preference scores
         self.rho = config["dual_step_length"]  # Dual step length of ADMM
-        self.theta = config["l2_regularization"]  # L2-regularization for learning weight matrix G
-        self.norm_di = 2 * config["item_degree_norm"]  # Item degree norm for learning weight matrix G
+        self.theta = config["l2_regularization"]  # L2-regularization for learning weight matrix H
+        self.norm_di = 2 * config["item_degree_norm"]  # Item degree norm for learning weight matrix H
         self.eps = np.power(10, config["global_scaling"])  # Global scaling in approximated ranking weights (in logarithm scale)
 
         self.sparse_approx = config["sparse_approx"]  # Sparse approximation to reduce storage size of H