top.py
import datetime
import os
import re
import xml.dom.minidom
import xml.etree.ElementTree as Et

import pytz
from wordsegment import load, segment
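
# Summarise contributor activity under "Codes/":
#   * count each user's completed solutions,
#   * record each user's most recent commit time (via `git log`),
#   * rewrite the Top-10 tables in README.md / README-zh.md,
#   * regenerate the IDE spelling dictionary with contributor names.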
code_path = "Codes"
users = os.listdir(code_path)


def last_commit_date(file_path):
    # shell command that prints the last-commit timestamp (epoch seconds) of the given file
    return "git log -1 --pretty=format:'%ct' '" + file_path + "'"


active = {}
complete = {}
for user in users:
    # skip .DS_Store / .gitignore etc
    if user.startswith("."):
        continue
    w = os.walk(os.path.join(code_path, user))
    latest_active_date = -1
    complete_count = 0
    for parent, directories, files in w:
        for file in files:
            if file.startswith(".") or file.lower().startswith("readme"):
                continue
            complete_count += 1
            f = os.path.join(parent, file)
            t = int(os.popen(last_commit_date(f)).readlines()[0])
            if t > latest_active_date:
                latest_active_date = t
    active[user] = latest_active_date
    complete[user] = complete_count


def top_n(m, n):
    # top n entries of dict m, sorted by value in descending order
    return sorted(m.items(), key=lambda kv: kv[1], reverse=True)[:n]


top_10_active = top_n(active, 10)
top_10_complete = top_n(complete, 10)


def flush_readme(readme_file, local=False):
    # rewrite the two Top-10 tables ("Completed" and "Last Active") in the README in place
    lines = []
    count = 0
    index = -1
    with open(readme_file) as readme:
        for line in readme:
            if "| User | Completed |" in line:
                index = count
            lines.append(line)
            count += 1
    # skip header
    index += 2
    top3 = 3
    for k, v in top_10_complete:
        k = user_url(k)
        if top3 > 0:
            row = "| " + "**" + k + "**" + " | " + str(v) + " |\n"
        else:
            row = "| " + k + " | " + str(v) + " |\n"
        lines[index] = row
        index += 1
        top3 -= 1
    # skip header
    index += 5
    top3 = 3
    for k, v in top_10_active:
        k = user_url(k)
        dt = datetime.datetime.utcfromtimestamp(v)
        if local:
            dt = dt.replace(tzinfo=pytz.utc).astimezone(pytz.timezone("Asia/Shanghai"))
        date_stamp = dt.strftime("%Y-%m-%d %H:%M:%S")
        if top3 > 0:
            row = "| " + "**" + k + "**" + " | " + date_stamp + " |\n"
        else:
            row = "| " + k + " | " + date_stamp + " |\n"
        lines[index] = row
        index += 1
        top3 -= 1
    with open(readme_file, "w") as readme:
        readme.write("".join(lines))


def user_url(k):
    # render the user name as a Markdown link to their folder in this repository
    return "[%s](https://github.com/asdf2014/algorithm/tree/master/Codes/%s)" % (k, k)


def expand_dict():
    # init: seed the word set with user names and project keywords
    all_words = set(active.keys())
    all_words.add("leetcode")
    all_words.add("yuzhouwan")
    # split: break user names into dictionary words
    load()
    for word in active.keys():
        for seg in segment(word):
            all_words.add(seg)
        match = re.match(r"([a-z]+)([0-9]+)", word, re.I)
        if match:
            items = match.groups()
            for item in items:
                all_words.add(item)
    all_words = sorted(all_words)
    # build: assemble the IntelliJ IDEA ProjectDictionaryState XML
    component = Et.Element("component")
    component.set("name", "ProjectDictionaryState")
    dictionary = Et.SubElement(component, "dictionary")
    dictionary.set("name", "yuzhouwan")
    words = Et.SubElement(dictionary, "words")
    for word in all_words:
        if len(word) < 2:
            continue
        Et.SubElement(words, "w").text = word
    data = Et.tostring(component).decode("utf-8")
    # write: pretty-print the XML into the project dictionary file
    with open(".idea/dictionaries/yuzhouwan.xml", "w") as dict_xml:
        dict_xml.write(xml.dom.minidom.parseString(data).toprettyxml())


# expand the names of all contributors into the spell-check dictionary
expand_dict()

# flush README: the English table keeps UTC timestamps, the Chinese one uses Asia/Shanghai local time
flush_readme("README.md", False)
flush_readme("README-zh.md", True)