# <show_messages.py>
#
# @Author: Lucas Pascotti Valem <[email protected]>
#
#-------------------------------------------------------------------------------
#
# This file is part of Unsupervised Selective Rank Fusion Framework (USRF).
#
# USRF is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# USRF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with USRF. If not, see <http://www.gnu.org/licenses/>.
#
from pprint import pprint


def show_usraf_header():
    """Print the USRAF welcome banner."""
    print(" Welcome to USRAF!\n")


def show_settings(parameters, dataset):
    """Print the general parameters and the dataset information in use."""
    print("---------------------------------")
    print(" General parameters:")
    print("\tSelection Mode:", parameters["selection_mode"])
    print("\tEffectiveness Estimation Measure:",
          parameters["effectiveness_estimation_measure"])
    print("\tCorrelation Measure:", parameters["correlation_measure"])
    print("\tBeta (cor. coef.):", parameters["beta"])
    print("\tTop K:", parameters["top_k"])
    print("\tEffectiveness Measure:", parameters["supervised_effectiveness"])
    print("\tTop Tuples to Fuse:", parameters["top_tuples_fusion"])
    print("\tTop Tuples for Intersection:",
          parameters["top_tuples_intersection"])
    print("\tMax Tuple Size:", parameters["max_tuple_size"])
    print("\tPerform Fusion:", parameters["perform_fusion"])
    print("\tPerform Evaluation:", parameters["perform_evaluation"])
    print("\tMultithreading Pools:", parameters["multithreading_pools"])
    print(" Dataset info:")
    print("\tDataset name:", dataset["name"])
    print("\tDataset size:", dataset["size"])
    print("\tRanked lists path:", dataset["path_ranked_lists"])
    print("\tLists file path:", dataset["path_lists_file"])
    print("\tClasses file path:", dataset["path_classes_file"])
    print("---------------------------------")


def show_available_descriptors(descriptors):
    """Print the available descriptors and their total count."""
    print("\n Available descriptors:")
    print("\t(", len(descriptors), "descriptors in total )")
    print(end="\t")
    pprint(descriptors, indent=8)


def show_computed_pairs(pairs):
    """Print every computed descriptor pair and the total count."""
    print("\n Possible combinations (pairs):")
    print("\t(", len(pairs), "pairs in total )")
    pprint(pairs, indent=8)


def show_computed_combinations(combinations):
    """Print every computed descriptor combination and the total count."""
    print("\n Possible combinations:")
    print("\t(", len(combinations), "combinations in total )")
    pprint(combinations, indent=8)


def show_effectiveness_results(effectiveness):
    """Print the effectiveness estimation results."""
    print("\n Effectiveness estimation results:")
    pprint(effectiveness, indent=8)


def show_correlation_results(correlations):
    """Print the correlation measure computed for each pair."""
    print("\n Correlation for each pair:")
    pprint(correlations, indent=8)


def show_pairs_selection_results(selection_pairs):
    """Print the selection score computed for each pair."""
    print("\n Selection score for each pair:")
    pprint(selection_pairs, indent=8)


def show_tuples_selection_results(selection_tuples):
    """Print the selection score computed for each tuple."""
    print("\n Selection score for each tuple:")
    pprint(selection_tuples, indent=8)


def show_pairs_map_results(parameters, pairs_map):
    """Print the supervised effectiveness of each pair after CPRR fusion."""
    print("\n", parameters["supervised_effectiveness"].upper(),
          "results for each pair after CPRR fusion:")
    pprint(pairs_map, indent=8)


def show_descriptors_map_results(parameters, descriptors_map):
    """Print the supervised effectiveness of each individual descriptor."""
    print("\n", parameters["supervised_effectiveness"].upper(),
          "results for each descriptor:")
    pprint(descriptors_map, indent=8)


def show_avg_topk_lists(parameters,
                        selection_avg_topk,
                        best_avg_topk,
                        average_avg_topk,
                        worst_avg_topk):
    """Print the accumulated average effectiveness up to the top positions
    for the selection, best, average, and worst cases."""

    # Float subclass used only to shorten the printed representation.
    class prettyfloat(float):
        def __repr__(self):
            return "%0.4f" % self

    n_values_to_show = 10
    print("\n Accumulated average",
          parameters["supervised_effectiveness"].upper(),
          "until top-" + str(n_values_to_show) + ":")
    print(" USRAF =",
          list(map(prettyfloat, selection_avg_topk))[:n_values_to_show])
    print(" Best =",
          list(map(prettyfloat, best_avg_topk))[:n_values_to_show])
    print(" Avg =",
          list(map(prettyfloat, average_avg_topk))[:n_values_to_show])
    print(" Worst =",
          list(map(prettyfloat, worst_avg_topk))[:n_values_to_show])
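

# ------------------------------------------------------------------------------
# Minimal usage sketch (not part of the original USRF/USRAF module): it only
# illustrates the dictionary keys that show_settings() and show_avg_topk_lists()
# read. Every concrete value below (measure names, paths, dataset name, scores)
# is a hypothetical placeholder, not taken from the framework's configuration.
if __name__ == "__main__":
    example_parameters = {
        "selection_mode": "pairs",                        # assumed value
        "effectiveness_estimation_measure": "authority",  # assumed value
        "correlation_measure": "rbo",                     # assumed value
        "beta": 0.5,
        "top_k": 20,
        "supervised_effectiveness": "map",
        "top_tuples_fusion": 5,
        "top_tuples_intersection": 3,
        "max_tuple_size": 4,
        "perform_fusion": True,
        "perform_evaluation": True,
        "multithreading_pools": 4,
    }
    example_dataset = {
        "name": "example-dataset",             # placeholder
        "size": 1000,                          # placeholder
        "path_ranked_lists": "ranked_lists/",  # placeholder path
        "path_lists_file": "lists.txt",        # placeholder path
        "path_classes_file": "classes.txt",    # placeholder path
    }

    show_usraf_header()
    show_settings(example_parameters, example_dataset)
    # The accumulated-average curves are plain lists of floats; these numbers
    # are made up purely to exercise the formatting.
    show_avg_topk_lists(example_parameters,
                        [0.91, 0.90, 0.89],
                        [0.93, 0.92, 0.91],
                        [0.85, 0.84, 0.83],
                        [0.70, 0.69, 0.68])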