diff --git a/.gitignore b/.gitignore
index 60b358f..3f386a3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,7 +7,7 @@ __pycache__/
 # C extensions
 *.so
 /venv
-
+/save
 debug.log
 # Distribution / packaging
 .Python
diff --git a/scan.py b/scan.py
index eda0a5b..7de1c82 100644
--- a/scan.py
+++ b/scan.py
@@ -124,8 +124,9 @@ def foxScan(target):
             # scan each collected target in turn
             req_pool = crawlergoMain.crawlergoGet(current_target)
             req_pool.add(current_target)
+            tempFilename = hashlib.md5(current_target.encode("utf-8")).hexdigest()
             # crawlergoGet dynamically crawls the target's page URLs; they are stored in the req_pool set
-            threadPoolScan(req_pool, filename, target)
+            threadPoolScan(req_pool, tempFilename, target)
 
     print("InPuT T4rGet {} Sc3n EnD#".format(target))
     return
diff --git a/test.py b/test.py
index 23f1c40..5dd452e 100644
--- a/test.py
+++ b/test.py
@@ -1,23 +1,56 @@
-import re
-import shutil
-
-import requests
-from subDomainsBrute import subDomainsBruteMain
-from Sublist3r import Sublist3rMain
-from Subfinder import subfinderMain
-from OneForAll import oneforallMain
-from CScan import CScan
-from JSmessage.jsfinder import JSFinder
+import hashlib
+from crawlergo import crawlergoMain
+from Xray import pppXray
 import config
+import sys
+import getopt
+import base
 from ServerJiang.jiangMain import SendNotice
-import os
-import hashlib
+from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
+def threadPoolScan(req_pool, filename, target):
+    print("req_pool num is {}".format(len(req_pool)))
+    thread = ThreadPoolExecutor(max_workers=config.ThreadNum)
+    i = 0
+    all_task = []
+    while len(req_pool) != 0:
+        # pop the URLs out of req_pool one by one and scan them
+        temp_url = req_pool.pop()
+        current_filename = hashlib.md5(temp_url.encode("utf-8")).hexdigest()
+        # invoke xray to scan the URL and save the result
+        # pppXray.xrayScan(temp_url, current_filename)
+        i += 1
+        one_t = thread.submit(pppXray.xrayScan, temp_url, current_filename)
+        all_task.append(one_t)
+        if i == 5 or len(req_pool) == 0:
+            i = 0
+            wait(all_task, return_when=ALL_COMPLETED)
+            all_task = []
+    base.mergeReport(filename)
+    SendNotice("{} 花溪九尾扫描完毕".format(target))
+def foxScan():
+    # subdomain collection (stubbed here with two fixed test targets)
+    config.target_queue.put('127.0.0.1')
+    config.target_queue.put('http://testphp.vulnweb.com/')
+
+    while not config.target_queue.empty():
+        current_target = config.target_queue.get()
+        if base.checkBlackList(current_target):
+            # scan each collected target in turn
+            req_pool = crawlergoMain.crawlergoGet(current_target)
+            req_pool.add(current_target)
+            tempFilename = hashlib.md5(current_target.encode("utf-8")).hexdigest()
+            # crawlergoGet dynamically crawls the target's page URLs; they are stored in the req_pool set
+            threadPoolScan(req_pool, tempFilename, "aa")
+    print("InPuT T4rGet {} Sc3n EnD#".format("aa"))
+    return
 
 
 def main():
-    subDomainsBruteMain.subDomainsBruteScan('wkj.work',"aa")
+    base.init()
+    foxScan()
+    # subDomainsBruteMain.subDomainsBruteScan('wkj.work',"aa")
     return
 
 
 if __name__ == '__main__':
     main()
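
Reviewer note, a minimal sketch rather than part of the patch: the new threadPoolScan waits after every five submissions until that whole batch finishes, so one slow URL idles the remaining workers. ThreadPoolExecutor already caps concurrency at max_workers, so submitting every URL and letting the pool drain once gives the same concurrency bound with less bookkeeping. All module and function names here (config.ThreadNum, pppXray.xrayScan, base.mergeReport, SendNotice) are taken from the diff above.

    import hashlib
    from concurrent.futures import ThreadPoolExecutor

    import base
    import config
    from ServerJiang.jiangMain import SendNotice
    from Xray import pppXray


    def threadPoolScan(req_pool, filename, target):
        print("req_pool num is {}".format(len(req_pool)))
        # leaving the with-block shuts the pool down and blocks until
        # every submitted xray scan has finished
        with ThreadPoolExecutor(max_workers=config.ThreadNum) as pool:
            for temp_url in req_pool:
                # one report per URL, named by the URL's md5, as in the patch
                current_filename = hashlib.md5(temp_url.encode("utf-8")).hexdigest()
                pool.submit(pppXray.xrayScan, temp_url, current_filename)
        base.mergeReport(filename)
        SendNotice("{} scan finished".format(target))

With no batching, the destructive pop() is unnecessary; iterating the req_pool set directly is enough.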