baselineV2.py
__author__ = 'vittorioselo'

import os
from os.path import isfile, join
from collections import defaultdict

import numpy
import pandas
from sklearn import metrics

from errorAnalysis import meanError, setError


def baseline(rank):
    # Majority-vote baseline: for each user, predict the most frequent star
    # rating seen in that user's train + validation data for every test review.
    myPath = 'trainPerRank/'
    # One file per user; each file holds that user's star ratings.
    listUsers = [f for f in os.listdir(myPath) if isfile(join(myPath, f))]
    # listUsers.remove('.DS_Store')
    dictResult = defaultdict(float)  # per-user accuracy on the test set
    averageError = 0.0               # accumulated meanError over users
    errorSet = 0                     # accumulated setError over users
    for user in listUsers:
        dictCount = defaultdict(int)
        # Each split is a headerless CSV per user; take column 0 (the star
        # rating) as a flat list.
        trainRank = list(map(lambda x: x[0], numpy.array(pandas.read_csv('trainPerRank/' + rank + '/stars/' + user, header=None))))
        # trainRank = [val for sublist in trainRank for val in sublist]
        # trainRank = list(map(lambda x: int(x * 5), trainRank))
        validationRank = list(map(lambda x: x[0], numpy.array(pandas.read_csv('validationPerRank/' + rank + '/stars/' + user, header=None))))
        # validationRank = [val for sublist in validationRank for val in sublist]
        # validationRank = list(map(lambda x: int(x * 5), validationRank))
        testRank = list(map(lambda x: x[0], numpy.array(pandas.read_csv('testPerRank/' + rank + '/stars/' + user, header=None))))
        # testRank = [val for sublist in testRank for val in sublist]
        # testRank = list(map(lambda x: int(x * 5), testRank))
        # Count how often each rating occurs in train + validation.
        for x in trainRank:
            dictCount[x] += 1
        for x in validationRank:
            dictCount[x] += 1
        # The most frequent rating is the constant prediction for this user.
        majority = max(dictCount, key=dictCount.get)
        prediction = [majority] * len(testRank)
        dictResult[user] = metrics.accuracy_score(testRank, prediction)
        averageError += meanError(prediction, testRank)
        errorSet += setError(prediction, testRank)
    # Macro-average the per-user test accuracies.
    accuracy = sum(dictResult.values()) / len(listUsers)
    print(accuracy)
    print('=============ERROR=========')
    print(averageError / len(listUsers))  # => -0.15346161033753716
    print(errorSet)  # => 736
    # trainAll, minReview 20: 0.441929922377 -> test same
    # trainAll, minReview 50: 0.449429999815 -> test 0.454907309474

baseline('1-2')  # 0.917
baseline('3')    # 0.768
baseline('4-5')  # 0.463
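
# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): meanError and setError are
# imported from the repo's errorAnalysis module, whose source is not shown
# here. Given how they are used above (a signed float accumulated per user and
# an integer total), they plausibly behave like the helpers below; the names
# and bodies are assumptions for illustration only.
def _meanError_sketch(prediction, truth):
    # Mean signed difference between predicted and true ratings.
    return sum(p - t for p, t in zip(prediction, truth)) / len(truth)

def _setError_sketch(prediction, truth):
    # Number of test reviews whose prediction differs from the true rating.
    return sum(1 for p, t in zip(prediction, truth) if p != t)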