-
Notifications
You must be signed in to change notification settings - Fork 53
/
Copy pathbase.py
441 lines (347 loc) · 11.9 KB
/
base.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
import aiohttp
import asyncio
import hashlib
import os
import re
import shutil
import Hx_config
from ARL.ArlScan import Scan
from CScan import CScan
from JSmessage.jsfinder import JSFinder
from OneForAll import oneforallMain
from ServerJiang.jiangMain import SendNotice
from Subfinder import subfinderMain
from Sublist3r import Sublist3rMain
from subDomainsBrute import subDomainsBruteMain
'''
init() 扫描初始化函数
功能:
初始化保存文件目录
'''
def init():
    """Create every output directory the scanner writes to.

    Bug fixed: the original only called makedirs when one of three probe
    paths was missing, then unconditionally created *all* directories --
    raising FileExistsError whenever the tree was partially initialised.
    Creating each path with exist_ok=True is idempotent and repairs a
    partial layout too.  On failure the process now exits with a
    non-zero status (the original used exit(0), signalling success).
    """
    dirs = (
        Hx_config.Save_path,
        Hx_config.Xray_report_path,
        Hx_config.Xray_temp_report_path,
        Hx_config.CScan_report_path,
        Hx_config.Sub_report_path,
        Hx_config.Temp_path,
        Hx_config.JS_report_path,
        Hx_config.ARL_save_path,
        Hx_config.Crawlergo_save_path,
    )
    try:
        for path in dirs:
            os.makedirs(path, exist_ok=True)
    except Exception as e:
        print(e)
        exit(1)  # initialisation failure is an error, not success
    print(f"{Hx_config.red}初始化完成{Hx_config.end}")
    return
'''
cleanTempXrayReport()函数
功能:删除xray临时报告目录下的全部文件
'''
def cleanTempXrayReport():
    """Empty the xray temporary report directory.

    Deletes the whole tree, then recreates it as an empty folder so the
    next scan starts with a clean slate.
    """
    temp_dir = Hx_config.Xray_temp_report_path
    shutil.rmtree(temp_dir)
    os.mkdir(temp_dir)
    return
'''
函数 checkXrayVersion()
功能:
检测xray为社区版还是专业版
专业版返回 true
社区版返回 false
'''
def checkXrayVersion(content):
    """Return True for the advanced (pro) xray edition, False for community.

    Community-edition report text embeds the word "snapshot"; its
    absence is taken to mean the advanced edition.
    """
    return "snapshot" not in content
'''
函数 advancedMergeReport(resultList)
功能:
xray 专业版报告合并函数
'''
def advancedMergeReport(resultList):
    """Merge xray advanced-edition results into one HTML report string.

    Vulnerabilities are deduplicated by the MD5 of the HTTP request
    recorded in each result; only the first occurrence is appended
    (the dedup guard now gates the append itself, so duplicates are
    skipped entirely).

    :param resultList: list of JS-object strings captured from the
        per-target xray HTML reports.
    :return: complete HTML document text ready to write to disk.
    """
    context = ""
    requestMd5Set = set()
    # Report skeleton (CSS/JS shell) shipped alongside this tool.
    with open(os.path.join(Hx_config.Root_Path, "advancedModelFile.html"), 'r', encoding='utf-8') as f:
        context += f.read()
    for result in resultList:
        # NOTE(security): eval() executes arbitrary code from the report
        # files; tolerable only because the files are produced locally
        # by xray.  Never feed untrusted files through this path.
        tempResultDict = eval(result)
        tempDetailRequest = tempResultDict["detail"]["request"]
        tempRequestMd5 = hashlib.md5(tempDetailRequest.encode('utf-8')).hexdigest()
        if tempRequestMd5 not in requestMd5Set:
            requestMd5Set.add(tempRequestMd5)
            context += "<script class='web-vulns'>webVulns.push({})</script>".format(result)
    return context
'''
函数 communityMergeReport(resultList)
功能:
xray 社区版报告合并函数
'''
def communityMergeReport(resultList):
    """Merge xray community-edition results into one HTML report string.

    Mirrors advancedMergeReport() but the request text lives under
    detail.snapshot[0][0] in community-edition output.  Duplicate
    vulnerabilities (same request MD5) are skipped entirely -- the
    dedup guard now gates the append as well.

    :param resultList: list of JS-object strings captured from the
        per-target xray HTML reports.
    :return: complete HTML document text ready to write to disk.
    """
    context = ""
    requestMd5Set = set()
    # Report skeleton (CSS/JS shell) shipped alongside this tool.
    with open(os.path.join(Hx_config.Root_Path, "communityModelFile.html"), 'r', encoding='utf-8') as f:
        context += f.read()
    for result in resultList:
        # NOTE(security): eval() executes arbitrary code from the report
        # files; tolerable only because the files are produced locally.
        tempResultDict = eval(result)
        tempDetailRequest = tempResultDict["detail"]["snapshot"][0][0]
        tempRequestMd5 = hashlib.md5(tempDetailRequest.encode('utf-8')).hexdigest()
        if tempRequestMd5 not in requestMd5Set:
            requestMd5Set.add(tempRequestMd5)
            context += "<script class='web-vulns'>webVulns.push({})</script>".format(result)
    return context
'''
mergeReport()函数
功能:合并报告
传入参数:目标保存文件名 filename
其中需要使用集合这种数据结构去除重复漏洞,其判断依据为:xray Request md5值
'''
def mergeReport(filename):
    """Merge every temporary xray report into one deduplicated HTML file.

    Extracts all webVulns.push(...) payloads from the files under
    Hx_config.Xray_temp_report_path, detects the xray edition from the
    first payload, delegates to the matching merge routine and writes
    Xray_report_path/<filename>.html.  The temp directory is emptied
    afterwards.

    Bug fixed: when reports existed but contained no vulnerabilities,
    resultList[0] raised IndexError; that case now cleans up and exits.

    :param filename: basename (md5 of the target) for the merged report.
    """
    reportList = os.listdir(Hx_config.Xray_temp_report_path)
    print(reportList)
    if len(reportList) == 0:
        return
    resultList = []
    pattern = re.compile(r'<script class=\'web-vulns\'>webVulns.push\((.*?)\)</script>')
    for report in reportList:
        tempReport = os.path.join(Hx_config.Xray_temp_report_path, report)
        with open(tempReport, 'r', encoding='utf-8') as f:
            resultList += pattern.findall(f.read())
    if not resultList:
        # Reports present but vuln-free: still clear the temp directory
        # so stale files don't leak into the next scan.
        cleanTempXrayReport()
        return
    # Community-edition payloads carry a "snapshot" key under detail.
    tempResult = eval(resultList[0])
    if 'snapshot' in tempResult["detail"]:
        context = communityMergeReport(resultList)
    else:
        context = advancedMergeReport(resultList)
    with open(os.path.join(Hx_config.Xray_report_path, "{}.html".format(filename)), 'w', encoding='utf-8') as f:
        f.write(context)
    cleanTempXrayReport()
    return
'''
transferJSFinder(url,filename)函数
参数:
url 待扫描的URL
filename 实际上为待扫描URL的MD5值,作为输出文件名的一部分
作用:
调用并魔改JSFinder代码
输出:
从JS中获取到的URL和subdomain
输出文件名为:
output_url_filename="url_"+outputfilename
output_subdomain_filename="subdomain"+outputfilename
'''
def transferJSFinder(url, filename):
    """Run the bundled (patched) JSFinder crawler against *url*.

    JSFinder itself writes the discovered URLs and subdomains to files
    derived from *filename* (the md5 of the target).  Failures are
    printed and swallowed -- this step is best-effort.
    """
    try:
        found = JSFinder.find_by_url(url)
        JSFinder.giveresult(found, url, filename)
    except Exception as err:
        print(f"{Hx_config.red}JSFinder ERROR!{Hx_config.end}")
        print(err)
'''
transferCScan(url,filename) 函数
'''
def transferCScan(url, filename):
    """Run the C-segment (class-C subnet) scan for *url*.

    Results are written by CScan under *filename*; errors are printed
    and swallowed so the pipeline keeps going.
    """
    try:
        CScan.CScanConsole(url, filename)
    except Exception as err:
        print(f"{Hx_config.red}C段扫描出错!{Hx_config.end}")
        print(err)
'''
subScan(target) 函数
参数:
target 待扫描的URL
filename 扫描目标 target 的对应md5之后的十六进制
作用:
对输入的target进行子域名的收集,并将结果存储到队列sub_queue里
输出:
结果保存在队列sub_queue里面,传递给队列去重函数
子域名收集整合模块:
OneForAll
Knock
subDomainsBrute
Subfinder
Sublist3r
...(可根据自己需要自行添加
'''
def subScan(target, filename):
    """Collect subdomains for *target* and hand them to deduplication.

    Runs four independent collectors (OneForAll, subDomainsBrute,
    Sublist3r, subfinder); each pushes findings onto
    Hx_config.sub_queue.  queueDeduplication() then deduplicates,
    probes for live hosts and fills Hx_config.target_queue.  If a
    previous result file already exists the collection phase is
    skipped entirely.

    :param target: domain to enumerate.
    :param filename: md5 hex digest of the target, used as file basename.
    """
    Sub_report_path = Hx_config.Sub_report_path + filename + ".txt"  # saveSub/<md5>.txt
    if os.path.exists(Sub_report_path):
        print(f"{Hx_config.red}savesub/(unknown).txt文件存在, 跳过资产扫描{Hx_config.end}")
        queueDeduplication(filename)
        return  # existing result file -> skip the collectors below
    # One (runner, error-label) pair per collector; a failure in any
    # single collector must not stop the others.
    collectors = (
        (lambda: oneforallMain.OneForAllScan(target), 'OneForAllScan error :'),
        (lambda: subDomainsBruteMain.subDomainsBruteScan(target, filename), 'subDomainsBruteScan error :'),
        (lambda: Sublist3rMain.Sublist3rScan(target), 'Sublist3rScan error :'),
        (lambda: subfinderMain.subfinderScan(target, filename), 'subfinderScan error:'),
    )
    for run, label in collectors:
        try:
            run()
        except Exception as e:
            print(f'{Hx_config.red}{label}{Hx_config.end}', e)
    try:
        queueDeduplication(filename)
    except Exception as e:
        print(f'{Hx_config.red}queueDeduplication error:{Hx_config.end}', e)
'''
urlCheck(url, f) 函数
参数:
url 需要检测存活性的URL
f 打开的文件流
作用:
url存活性检测
输出:
返回是否的布尔值
'''
async def urlCheck(target, f):
    """Probe *target* with an async GET and record it if it is alive.

    A response with status < 400 counts as alive: the URL is pushed
    onto Hx_config.target_queue and appended to the open file *f*.
    Network errors are swallowed -- liveness checking is best-effort.
    """
    print(f"{Hx_config.blue}now url live check: {target}{Hx_config.end}")
    async with aiohttp.ClientSession() as session:
        try:
            async with session.get(target, headers=Hx_config.GetHeaders()) as resp:
                if resp.status >= 400:
                    return
                Hx_config.target_queue.put(target)  # alive URL
                print(f"{Hx_config.green}now save :{target}{Hx_config.end}")
                f.write(f"{target}\n")
        except Exception:
            return
    return
def urlCheck_threads(__list, f):
    """Run the async liveness check for every URL in *__list* concurrently.

    Modernised: asyncio.run() + gather() replaces the deprecated
    get_event_loop()/create_task()/wait() pattern.  asyncio.wait() also
    raises on an empty task set, so an empty input is now a no-op.

    :param __list: URLs to probe.
    :param f: open file handle passed through to urlCheck().
    """
    if not __list:
        return

    async def _check_all():
        await asyncio.gather(*(urlCheck(url, f) for url in __list))

    asyncio.run(_check_all())
'''
queueDeduplication(filename) 队列去重函数
参数:
filename 扫描目标 target 的对应md5之后的十六进制
作用:
对子域名队列sub_queue里面的元素进行去重、验活处理
输出:
结果保存在target_queue队列里面,存储到saveSub文件夹下对应filename.txt中并且成为待扫描的目标
'''
def queueDeduplication(filename):
    """Deduplicate collected subdomains and probe them for liveness.

    Drains Hx_config.sub_queue into a set, then either reloads a
    previously saved result file into target_queue (when the file
    exists and already has content) or runs the concurrent liveness
    check, which writes live URLs to saveSub/<filename>.txt and fills
    Hx_config.target_queue.

    :param filename: md5 hex digest of the scan target (file basename).
    """
    Sub_report_path = Hx_config.Sub_report_path + filename + ".txt"  # saveSub/<md5>.txt
    sub_set = set()
    # Drain the queue; the set removes duplicate subdomains.
    while not Hx_config.sub_queue.empty():
        target = Hx_config.sub_queue.get()
        sub_set.add(target)
    length = len(sub_set)
    if os.path.exists(Sub_report_path):
        with open(Sub_report_path, 'r+') as f:
            lines = f.readlines()
            if len(lines) > 1:  # file already has content: reuse it
                for line in lines:
                    if line.strip() not in ['\n\r', '\n', '']:
                        Hx_config.target_queue.put(line.strip())  # previously verified live URL
                print(f"{Hx_config.yellow}queueDeduplication End~{Hx_config.end}")
                print(
                    f"{Hx_config.green}信息收集子域名搜集完毕,数量:{Hx_config.target_queue.qsize()},保存文件名:(unknown){Hx_config.end}")
                SendNotice(f"信息收集子域名搜集完毕,数量:{length},保存文件名:(unknown)")  # ServerChan push notification
                return
    with open(Sub_report_path, 'a+') as f:
        if len(sub_set) != 0:
            urlCheck_threads(list(sub_set), f)  # concurrent liveness check writes live URLs to f
    print(f"{Hx_config.yellow}queueDeduplication End~{Hx_config.end}")
    SendNotice("信息收集子域名搜集完毕,数量:{},保存文件名:{}".format(length, filename))
    return
'''
对没有添加http的url添加http
'''
def addHttpHeader(target):
    """Return *target* stripped, prefixed with "https://" if schemeless.

    The check is a bare "^http" prefix match, so anything already
    starting with "http" (including "https://...") is left untouched.
    """
    cleaned = target.strip()
    if re.match(r'^http', cleaned):
        return cleaned
    return "https://" + cleaned
'''
checkBlackList(url)
检测目标URL是否在黑名单列表中
'''
def checkBlackList(url):
    """Return False when *url* contains any blacklisted substring, else True."""
    return not any(entry in url for entry in Hx_config.blacklist)
'''
ARL扫描
'''
def ArlScan(name='', target=''):
    """Queue an ARL (Asset Reconnaissance Lighthouse) scan task for *target*."""
    print(f"{Hx_config.yellow}This is ArlScan ~{Hx_config.end}")
    scanner = Scan(name, target)
    scanner.add_task()
'''
将队列变成列表
'''
def from_queue_to_list(_queue):
    """Snapshot a queue's items into a list without losing them.

    Bug fixed: the original tested ``_queue.empty()`` but drained and
    refilled ``Hx_config.target_queue``, so passing any other queue
    looped forever (or corrupted target_queue).  Items are now taken
    from -- and restored to -- the queue actually passed in, so the
    crawlergo stage can still consume it afterwards.

    :param _queue: queue to snapshot (e.g. Hx_config.target_queue).
    :return: list of stripped items, in queue order.
    """
    result = []
    while not _queue.empty():
        item = _queue.get()  # queue is emptied here...
        result.append(item.strip())
    for item in result:
        _queue.put(item)  # ...and refilled (stripped) for later reuse
    return result
'''
将http去除
oneforall的保存文件不带http。如果不进行过滤则无法打开文件
'''
def url_http_delete(url):
    """Strip a leading "http://" or "https://" scheme from *url*.

    oneforall names its output files without the scheme, so the scheme
    must be removed before looking the file up.

    Bug fixed: the original used substring tests (``'https://' in url``)
    and then unconditionally chopped the first 7/8 characters, mangling
    any URL that merely *contains* a scheme later in the string.
    Prefix checks only strip an actual leading scheme.

    :param url: URL possibly starting with a scheme.
    :return: the URL without its leading scheme.
    """
    if url.startswith('https://'):
        return url[len('https://'):]
    if url.startswith('http://'):
        return url[len('http://'):]
    return url
'''
终极搜索文件方法,解决扫描的时候oneforall找文件的问题
'''
def get_filename(abs_path, name):
    """Recursively search *abs_path* for a file whose stem occurs in *name*.

    Works around oneforall's unpredictable output naming: a file
    matches when its name minus the last four characters (e.g. ".csv")
    is a substring of *name*.

    :param abs_path: directory tree to walk.
    :param name: target string the file stem must appear in.
    :return: the first matching file name, or False when none matches.
    """
    for _dirpath, _dirnames, filenames in os.walk(abs_path):
        for candidate in filenames:
            stem = candidate[0:-4]  # drop the 4-char extension, e.g. ".csv"
            if stem in name:
                return candidate
    return False
'''
保存文件
'''
def save(__list, filepath='abs\\xxx.txt', host=''):
    """Append items to *filepath*, one per line, skipping the host itself.

    Entries equal to *host* (with or without a trailing slash) are
    dropped; every other entry is stripped and written on its own line.
    The file is opened in append mode, so existing content is kept.
    """
    skip = (host, host + '/')
    with open(filepath, 'a+') as out:
        for entry in __list:
            if entry in skip:
                continue
            out.write(entry.strip() + '\n')
def main():
    """Tiny manual smoke test: print a two-element set as a list."""
    numbers = set()
    for value in (1, 2):
        numbers.add(value)
    print(list(numbers))
    return
# Script entry point: exercises main() only when run directly.
if __name__ == '__main__':
    main()