# baseline.py
def baseline():
    import os
    from os.path import join, isfile
    import pandas
    import numpy
    from collections import defaultdict
    from sklearn import metrics
    from errorAnalysis import meanError, setError

    myPath = 'trainAll/'
    # One ratings file per user; keep files only, skipping directories.
    listUsers = [f for f in os.listdir(myPath) if isfile(join(myPath, f))]
    # listUsers.remove('.DS_Store')
    dictResult = defaultdict(float)
    averageError = 0.0
    errorSet = 0
    listOfPredictions = []
    for user in listUsers:
        dictCount = defaultdict(int)

        # Star ratings are stored normalized (in [0, 1]); rescale to
        # integer star values with int(val * 5).
        trainRank = numpy.array(pandas.read_csv('trainAll/stars/' + user, header=None))
        trainRank = [int(val * 5) for sublist in trainRank for val in sublist]
        validationRank = numpy.array(pandas.read_csv('validationAll/stars/' + user, header=None))
        validationRank = [int(val * 5) for sublist in validationRank for val in sublist]
        testRank = numpy.array(pandas.read_csv('testAll/stars/' + user, header=None))
        testRank = [int(val * 5) for sublist in testRank for val in sublist]

        # Count how often each rating occurs in train + validation.
        for x in trainRank:
            dictCount[x] += 1
        for x in validationRank:
            dictCount[x] += 1
        # Majority class: first key reaching the maximum count (same
        # tie-breaking as scanning the dict and stopping at the max).
        majority = max(dictCount, key=dictCount.get)

        # Predict the majority class for every test review of this user.
        prediction = [majority] * len(testRank)
        dictResult[user] = metrics.accuracy_score(testRank, prediction)
        averageError += meanError(prediction, testRank)
        errorSet += setError(prediction, testRank)
        # Collect the predictions to run the t-test later.
        listOfPredictions.append(prediction)

    # Average the per-user accuracies.
    accuracy = sum(dictResult.values()) / len(listUsers)
    print(accuracy)
    print('=============ERROR=========')
    print(averageError / len(listUsers))  # => -0.15346161033753716
    print(errorSet)  # => 736
    return listOfPredictions
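
# meanError and setError come from this repo's errorAnalysis module, which is
# not shown here. For readers without it, the commented block below is a
# hypothetical sketch inferred purely from how the helpers are used above
# (a signed per-user mean error that is then averaged, and an integer
# misprediction count that is summed); the actual implementations may differ.
#
#     def meanError(prediction, truth):
#         # Assumed: signed mean difference between predicted and true
#         # ratings; a negative value would mean the baseline
#         # under-predicts on average.
#         return sum(p - t for p, t in zip(prediction, truth)) / len(truth)
#
#     def setError(prediction, truth):
#         # Assumed: number of test items whose predicted rating is wrong.
#         return sum(1 for p, t in zip(prediction, truth) if p != t)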
# trainAll, minReview 20: 0.441929922377 -> test same
# trainAll, minReview 50: 0.449429999815 -> test 0.454907309474
# Prediction with the majority class of the dataset: 0.423

if __name__ == '__main__':
    baseline()
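
# listOfPredictions is returned so the per-user predictions can later be
# compared against another model with a paired t-test. The commented block
# below is a minimal sketch of that comparison; other_model, truths (the
# per-user testRank lists), and the per-user accuracy pairing are
# illustrative assumptions, not part of this repo.
#
#     from scipy import stats
#     from sklearn import metrics
#
#     baseline_preds = baseline()
#     other_preds = other_model()  # hypothetical: same users, same order
#     base_acc = [metrics.accuracy_score(t, p) for t, p in zip(truths, baseline_preds)]
#     other_acc = [metrics.accuracy_score(t, p) for t, p in zip(truths, other_preds)]
#     # Paired t-test over the per-user accuracy pairs.
#     t_stat, p_value = stats.ttest_rel(base_acc, other_acc)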