From 3eda6a1ca9c3d7b6e9b3bb543f54e3c5253aa3c1 Mon Sep 17 00:00:00 2001 From: George Hughey Date: Mon, 2 Dec 2019 23:22:47 -0800 Subject: [PATCH 01/15] Initial Commit of Windows port --- actions/utils.py | 4 + engine.py | 290 ++++++++++++++++------------------------------- requirements.txt | 2 +- 3 files changed, 102 insertions(+), 194 deletions(-) diff --git a/actions/utils.py b/actions/utils.py index 5bad6ee..97585e5 100644 --- a/actions/utils.py +++ b/actions/utils.py @@ -221,6 +221,8 @@ def get_interface(): """ Chooses an interface on the machine to use for socket testing. """ + """ + TODO: FIX ifaces = netifaces.interfaces() for iface in ifaces: if "lo" in iface: @@ -229,3 +231,5 @@ def get_interface(): # Filter for IPv4 addresses if netifaces.AF_INET in info: return iface + """ + return "Wi-Fi" diff --git a/engine.py b/engine.py index 7f45e08..a8f13e1 100644 --- a/engine.py +++ b/engine.py @@ -4,7 +4,6 @@ Given a strategy and a server port, the engine configures NFQueue so the strategy can run on the underlying connection. """ - import argparse import logging logging.getLogger("scapy.runtime").setLevel(logging.ERROR) @@ -14,11 +13,13 @@ import threading import time -import netfilterqueue - from scapy.layers.inet import IP from scapy.utils import wrpcap from scapy.config import conf +from scapy.all import send, Raw + +import pydivert #TODO +from pydivert.consts import Direction socket.setdefaulttimeout(1) @@ -52,224 +53,131 @@ def __init__(self, server_port, string_strategy, environment_id=None, output_dir # Used for conditional context manager usage self.strategy = actions.utils.parse(string_strategy, self.logger) - # Setup variables used by the NFQueue system - self.out_nfqueue_started = False - self.in_nfqueue_started = False - self.running_nfqueue = False - self.out_nfqueue = None - self.in_nfqueue = None - self.out_nfqueue_socket = None - self.in_nfqueue_socket = None - self.out_nfqueue_thread = None - self.in_nfqueue_thread = None + + # Instantialize a PyDivert channel, which we will use to redirect packets + self.divert = None + self.divert_thread = None + self.divert_thread_started = False + self.censorship_detected = False # Specifically define an L3Socket to send our packets. This is an optimization # for scapy to send packets more quickly than using just send(), as under the hood # send() creates and then destroys a socket each time, imparting a large amount # of overhead. - self.socket = conf.L3socket(iface=actions.utils.get_interface()) + self.socket = conf.L3socket(iface=actions.utils.get_interface()) # TODO: FIX - def __enter__(self): + def initialize_divert(self): """ - Allows the engine to be used as a context manager; simply launches the - engine. 
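        A minimal usage sketch of this context-manager pattern (the port and
        strategy string are illustrative, and run_client() is a hypothetical
        helper that drives traffic on that port):

            with Engine(80, "[TCP:flags:PA]-fragment{tcp:-1:True}-|") as eng:
                run_client()
            # leaving the block shuts the engine down again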
+ Initializes Divert such that all packets for the connection will come through us """ - self.initialize_nfqueue() - return self - def __exit__(self, exc_type, exc_value, tb): - """ - Allows the engine to be used as a context manager; simply stops the engine - """ - self.shutdown_nfqueue() + self.logger.debug("Engine created with strategy %s (ID %s) to port %s", + str(self.strategy).strip(), self.environment_id, self.server_port) + + self.logger.debug("Initializing Divert") + + self.divert = pydivert.WinDivert("tcp.DstPort == %d || tcp.SrcPort == %d" % (int(self.server_port), int(self.server_port))) + self.divert.open() + self.divert_thread = threading.Thread(target=self.run_divert) + self.divert_thread.start() + + maxwait = 100 # 100 time steps of 0.01 seconds for a max wait of 10 seconds + i = 0 + # Give Divert time to startup, since it's running in background threads + # Block the main thread until this is done + while not self.divert_thread_started and i < maxwait: + time.sleep(0.1) + i += 1 + self.logger.debug("Divert Initialized after %d", int(i)) - def mysend(self, packet): + return + + def shutdown_divert(self): """ - Helper scapy sending method. Expects a Geneva Packet input. + Closes the divert connection """ - try: - self.logger.debug("Sending packet %s", str(packet)) - self.socket.send(packet.packet) - except Exception: - self.logger.exception("Error in engine mysend.") + if self.divert: + self.divert.close() + self.divert = None + + return - def delayed_send(self, packet, delay): + def run_divert(self): + """ + Runs actions on packets """ - Method to be started by a thread to delay the sending of a packet without blocking the main thread. - """ - self.logger.debug("Sleeping for %f seconds." % delay) - time.sleep(delay) - self.mysend(packet) + if self.divert: + self.divert_thread_started = True + + for packet in self.divert: + if packet.is_outbound: + # Send to outbound action tree, if any + self.handle_outbound_packet(packet) - def run_nfqueue(self, nfqueue, nfqueue_socket, direction): + elif packet.is_inbound: + # Send to inbound action tree, if any + self.handle_inbound_packet(packet) + + return + + def __enter__(self): """ - Handles running the outbound nfqueue socket with the socket timeout. 
+ TODO """ - try: - while self.running_nfqueue: - try: - if direction == "out": - self.out_nfqueue_started = True - else: - self.in_nfqueue_started = True - - nfqueue.run_socket(nfqueue_socket) - except socket.timeout: - pass - except Exception: - self.logger.exception("Exception out of run_nfqueue() (direction=%s)", direction) + return self - def configure_iptables(self, remove=False): + def __exit__(self, exc_type, exc_value, tb): """ - Handles setting up ipables for this run + TODO """ - self.logger.debug("Configuring iptables rules") - - port1, port2 = "dport", "sport" - - out_chain = "OUTPUT" - in_chain = "INPUT" - - # Switch whether the command should add or delete the rules - add_or_remove = "A" - if remove: - add_or_remove = "D" - cmds = [] - for proto in ["tcp", "udp"]: - cmds += ["iptables -%s %s -p %s --%s %d -j NFQUEUE --queue-num 1" % - (add_or_remove, out_chain, proto, port1, self.server_port), - "iptables -%s %s -p %s --%s %d -j NFQUEUE --queue-num 2" % - (add_or_remove, in_chain, proto, port2, self.server_port)] - - for cmd in cmds: - self.logger.debug(cmd) - # If we're logging at DEBUG mode, keep stderr/stdout piped to us - # Otherwise, pipe them both to DEVNULL - if actions.utils.get_console_log_level() == logging.DEBUG: - subprocess.check_call(cmd.split(), timeout=60) - else: - subprocess.check_call(cmd.split(), stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL, timeout=60) - return cmds - - def initialize_nfqueue(self): + return + + def mysend(self, packet, dir): """ - Initializes the nfqueue for input and output forests. + Helper scapy sending method. Expects a Geneva Packet input. """ - self.logger.debug("Engine created with strategy %s (ID %s) to port %s", - str(self.strategy).strip(), self.environment_id, self.server_port) - self.configure_iptables() - - self.out_nfqueue_started = False - self.in_nfqueue_started = False - self.running_nfqueue = True - # Create our NFQueues - self.out_nfqueue = netfilterqueue.NetfilterQueue() - self.in_nfqueue = netfilterqueue.NetfilterQueue() - # Bind them - self.out_nfqueue.bind(1, self.out_callback) - self.in_nfqueue.bind(2, self.in_callback) - # Create our nfqueue sockets to allow for non-blocking usage - self.out_nfqueue_socket = socket.fromfd(self.out_nfqueue.get_fd(), - socket.AF_UNIX, - socket.SOCK_STREAM) - self.in_nfqueue_socket = socket.fromfd(self.in_nfqueue.get_fd(), - socket.AF_UNIX, - socket.SOCK_STREAM) - # Create our handling threads for packets - self.out_nfqueue_thread = threading.Thread(target=self.run_nfqueue, - args=(self.out_nfqueue, self.out_nfqueue_socket, "out")) - - self.in_nfqueue_thread = threading.Thread(target=self.run_nfqueue, - args=(self.in_nfqueue, self.in_nfqueue_socket, "in")) - # Start each thread - self.in_nfqueue_thread.start() - self.out_nfqueue_thread.start() + try: + self.logger.debug("Sending packet %s", str(packet)) - maxwait = 100 # 100 time steps of 0.01 seconds for a max wait of 10 seconds - i = 0 - # Give NFQueue time to startup, since it's running in background threads - # Block the main thread until this is done - while (not self.in_nfqueue_started or not self.out_nfqueue_started) and i < maxwait: - time.sleep(0.1) - i += 1 - self.logger.debug("NFQueue Initialized after %d", int(i)) + #Convert packet to pydivert - def shutdown_nfqueue(self): - """ - Shutdown nfqueue. 
- """ - self.logger.debug("Shutting down NFQueue") - self.out_nfqueue_started = False - self.in_nfqueue_started = False - self.running_nfqueue = False - # Give the handlers two seconds to leave the callbacks before we forcibly unbind - # the queues. - time.sleep(2) - if self.in_nfqueue: - self.in_nfqueue.unbind() - if self.out_nfqueue: - self.out_nfqueue.unbind() - self.configure_iptables(remove=True) - - packets_path = os.path.join(BASEPATH, - self.output_directory, - "packets", - "original_%s.pcap" % self.environment_id) - - # Write to disk the original packets we captured - wrpcap(packets_path, [p.packet for p in self.seen_packets]) - - # If the engine exits before it initializes for any reason, these threads may not be set - # Only join them if they are defined - if self.out_nfqueue_thread: - self.out_nfqueue_thread.join() - if self.in_nfqueue_thread: - self.in_nfqueue_thread.join() - - # Shutdown the logger - actions.utils.close_logger(self.logger) - - def out_callback(self, nfpacket): + #print(bytes(Raw(packet.packet))) + #print(packet.packet) + pack = bytes(packet.packet) + pack2 = bytearray(pack) + #print(pack2[0]) + #send(IP(packet.packet), iface="Wi-Fi") + #pack = bytearray(bytes(Raw(packet.packet)), "UTF-8") + #print(pack) + self.divert.send(pydivert.Packet(memoryview(pack2), (12, 0), dir), recalculate_checksum=False) # TODO: FIX + + except Exception: + self.logger.exception("Error in engine mysend.") + + def handle_outbound_packet(self, divert_packet): """ - Callback bound to the outgoing nfqueue rule to run the outbound strategy. + Handles outbound packets by sending them the the strategy """ - if not self.running_nfqueue: - return - - packet = actions.packet.Packet(IP(nfpacket.get_payload())) + #print(divert_packet) + packet = actions.packet.Packet(IP(divert_packet.raw.tobytes())) + #print(packet.show2()) self.logger.debug("Received outbound packet %s", str(packet)) - # Record this packet for a .pacp later + # Record this packet for a .pcap later self.seen_packets.append(packet) - # Drop the packet in NFQueue so the strategy can handle it - nfpacket.drop() - - self.handle_packet(packet) - - def handle_packet(self, packet): - """ - Handles processing an outbound packet through the engine. - """ packets_to_send = self.strategy.act_on_packet(packet, self.logger, direction="out") # Send all of the packets we've collected to send for out_packet in packets_to_send: - # If the strategy requested us to sleep before sending this packet, do so here - if out_packet.sleep: - # We can't block the main sending thread, so instead spin off a new thread to handle sleeping - threading.Thread(target=self.delayed_send, args=(out_packet, out_packet.sleep)).start() - else: - self.mysend(out_packet) - - def in_callback(self, nfpacket): + self.mysend(out_packet, Direction.OUTBOUND) + + def handle_inbound_packet(self, divert_packet): """ - Callback bound to the incoming nfqueue rule. Since we can't - manually send packets to ourself, process the given packet here. + Handles inbound packets. Process the packet and forward it to the strategy if needed. """ - if not self.running_nfqueue: - return - packet = actions.packet.Packet(IP(nfpacket.get_payload())) + + packet = actions.packet.Packet(IP(divert_packet.raw.tobytes())) self.seen_packets.append(packet) @@ -284,20 +192,16 @@ def in_callback(self, nfpacket): self.censorship_detected = True # Branching is disabled for the in direction, so we can only ever get - # back 1 or 0 packets. If zero, drop the packet. + # back 1 or 0 packets. 
If zero, return and do not send packet. if not packets: - nfpacket.drop() return - # Otherwise, overwrite this packet with the packet the action trees gave back - nfpacket.set_payload(bytes(packets[0])) - # If the strategy requested us to sleep before accepting on this packet, do so here if packets[0].sleep: time.sleep(packets[0].sleep) # Accept the modified packet - nfpacket.accept() + self.mysend(packets[0], Direction.INBOUND) def get_args(): @@ -327,11 +231,11 @@ def main(args): environment_id=args.get("environment_id"), output_directory = args.get("output_directory"), log_level=args["log"]) - eng.initialize_nfqueue() + eng.initialize_divert() while True: time.sleep(0.5) finally: - eng.shutdown_nfqueue() + eng.shutdown_divert() if __name__ == "__main__": diff --git a/requirements.txt b/requirements.txt index 42fc3fe..3f2872a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ scapy==2.4.3 requests netifaces -netfilterqueue cryptography==2.5 requests anytree +pydivert \ No newline at end of file From 90edaff6baf67314bcaa74b6a8e9f5fdbf7e9c08 Mon Sep 17 00:00:00 2001 From: George Hughey Date: Wed, 4 Dec 2019 00:18:38 -0800 Subject: [PATCH 02/15] First round of cleaning up, made engine more useable --- actions/utils.py | 24 +-- engine.py | 333 ++++++++++++++++++++++++++++++++++----- requirements.txt | 4 +- requirements_linux.txt | 7 + requirements_windows.txt | 7 + 5 files changed, 323 insertions(+), 52 deletions(-) create mode 100644 requirements_linux.txt create mode 100644 requirements_windows.txt diff --git a/actions/utils.py b/actions/utils.py index 97585e5..5520846 100644 --- a/actions/utils.py +++ b/actions/utils.py @@ -221,15 +221,15 @@ def get_interface(): """ Chooses an interface on the machine to use for socket testing. """ - """ - TODO: FIX - ifaces = netifaces.interfaces() - for iface in ifaces: - if "lo" in iface: - continue - info = netifaces.ifaddresses(iface) - # Filter for IPv4 addresses - if netifaces.AF_INET in info: - return iface - """ - return "Wi-Fi" + if os.name == 'nt': + # Windows code + return # TODO: Fix this + else: + ifaces = netifaces.interfaces() + for iface in ifaces: + if "lo" in iface: + continue + info = netifaces.ifaddresses(iface) + # Filter for IPv4 addresses + if netifaces.AF_INET in info: + return iface diff --git a/engine.py b/engine.py index a8f13e1..5c29029 100644 --- a/engine.py +++ b/engine.py @@ -18,9 +18,6 @@ from scapy.config import conf from scapy.all import send, Raw -import pydivert #TODO -from pydivert.consts import Direction - socket.setdefaulttimeout(1) import actions.packet @@ -29,8 +26,16 @@ BASEPATH = os.path.dirname(os.path.abspath(__file__)) +if os.name == 'nt': + WINDOWS = True +else: + WINDOWS = False + +if WINDOWS: + import pydivert + from pydivert.consts import Direction -class Engine(): +class Engine: def __init__(self, server_port, string_strategy, environment_id=None, output_directory="trials", log_level="info"): self.server_port = server_port self.seen_packets = [] @@ -53,18 +58,16 @@ def __init__(self, server_port, string_strategy, environment_id=None, output_dir # Used for conditional context manager usage self.strategy = actions.utils.parse(string_strategy, self.logger) - + self.censorship_detected = False + +class WindowsEngine(Engine): + def __init__(self, server_port, string_strategy, environment_id=None, output_directory="trials", log_level="info"): + super().__init__(server_port, string_strategy, environment_id=environment_id, output_directory=output_directory, log_level=log_level) # Instantialize 
a PyDivert channel, which we will use to redirect packets self.divert = None self.divert_thread = None self.divert_thread_started = False - - self.censorship_detected = False - # Specifically define an L3Socket to send our packets. This is an optimization - # for scapy to send packets more quickly than using just send(), as under the hood - # send() creates and then destroys a socket each time, imparting a large amount - # of overhead. - self.socket = conf.L3socket(iface=actions.utils.get_interface()) # TODO: FIX + self.interface = None # Using lazy evaluating as divert should know this def initialize_divert(self): """ @@ -99,8 +102,6 @@ def shutdown_divert(self): if self.divert: self.divert.close() self.divert = None - - return def run_divert(self): """ @@ -110,6 +111,8 @@ def run_divert(self): self.divert_thread_started = True for packet in self.divert: + if not self.interface: + self.interface = packet.interface if packet.is_outbound: # Send to outbound action tree, if any self.handle_outbound_packet(packet) @@ -122,14 +125,17 @@ def run_divert(self): def __enter__(self): """ - TODO + Allows the engine to be used as a context manager; simply launches the + engine. """ + self.initialize_divert() return self def __exit__(self, exc_type, exc_value, tb): """ - TODO + Allows the engine to be used as a context manager; simply stops the engine """ + self.shutdown_divert() return def mysend(self, packet, dir): @@ -138,19 +144,10 @@ def mysend(self, packet, dir): """ try: self.logger.debug("Sending packet %s", str(packet)) - - #Convert packet to pydivert - - #print(bytes(Raw(packet.packet))) - #print(packet.packet) - pack = bytes(packet.packet) - pack2 = bytearray(pack) - #print(pack2[0]) - #send(IP(packet.packet), iface="Wi-Fi") - #pack = bytearray(bytes(Raw(packet.packet)), "UTF-8") - #print(pack) - self.divert.send(pydivert.Packet(memoryview(pack2), (12, 0), dir), recalculate_checksum=False) # TODO: FIX - + # Convert the packet to a bytearray so memoryview can edit the underlying memory + pack = bytearray(bytes(packet.packet)) + # Don't recalculate checksum since sometimes we want to change it + self.divert.send(pydivert.Packet(memoryview(pack), self.interface, dir), recalculate_checksum=False) except Exception: self.logger.exception("Error in engine mysend.") @@ -158,9 +155,7 @@ def handle_outbound_packet(self, divert_packet): """ Handles outbound packets by sending them the the strategy """ - #print(divert_packet) packet = actions.packet.Packet(IP(divert_packet.raw.tobytes())) - #print(packet.show2()) self.logger.debug("Received outbound packet %s", str(packet)) # Record this packet for a .pcap later @@ -203,6 +198,257 @@ def handle_inbound_packet(self, divert_packet): # Accept the modified packet self.mysend(packets[0], Direction.INBOUND) +class LinuxEngine(Engine): + def __init__(self, server_port, string_strategy, environment_id=None, output_directory="trials", log_level="info"): + + super().__init__(server_port, string_strategy, environment_id=environment_id, output_directory=output_directory, log_level=log_level) + + # Setup variables used by the NFQueue system + self.out_nfqueue_started = False + self.in_nfqueue_started = False + self.running_nfqueue = False + self.out_nfqueue = None + self.in_nfqueue = None + self.out_nfqueue_socket = None + self.in_nfqueue_socket = None + self.out_nfqueue_thread = None + self.in_nfqueue_thread = None + + # Specifically define an L3Socket to send our packets. 
This is an optimization + # for scapy to send packets more quickly than using just send(), as under the hood + # send() creates and then destroys a socket each time, imparting a large amount + # of overhead. + self.socket = conf.L3socket(iface=actions.utils.get_interface()) + + def __enter__(self): + """ + Allows the engine to be used as a context manager; simply launches the + engine. + """ + self.initialize_nfqueue() + return self + + def __exit__(self, exc_type, exc_value, tb): + """ + Allows the engine to be used as a context manager; simply stops the engine + """ + self.shutdown_nfqueue() + + def mysend(self, packet): + """ + Helper scapy sending method. Expects a Geneva Packet input. + """ + try: + self.logger.debug("Sending packet %s", str(packet)) + self.socket.send(packet.packet) + except Exception: + self.logger.exception("Error in engine mysend.") + + def delayed_send(self, packet, delay): + """ + Method to be started by a thread to delay the sending of a packet without blocking the main thread. + """ + self.logger.debug("Sleeping for %f seconds." % delay) + time.sleep(delay) + self.mysend(packet) + + def run_nfqueue(self, nfqueue, nfqueue_socket, direction): + """ + Handles running the outbound nfqueue socket with the socket timeout. + """ + try: + while self.running_nfqueue: + try: + if direction == "out": + self.out_nfqueue_started = True + else: + self.in_nfqueue_started = True + + nfqueue.run_socket(nfqueue_socket) + except socket.timeout: + pass + except Exception: + self.logger.exception("Exception out of run_nfqueue() (direction=%s)", direction) + + def configure_iptables(self, remove=False): + """ + Handles setting up ipables for this run + """ + self.logger.debug("Configuring iptables rules") + + port1, port2 = "dport", "sport" + + out_chain = "OUTPUT" + in_chain = "INPUT" + + # Switch whether the command should add or delete the rules + add_or_remove = "A" + if remove: + add_or_remove = "D" + cmds = [] + for proto in ["tcp", "udp"]: + cmds += ["iptables -%s %s -p %s --%s %d -j NFQUEUE --queue-num 1" % + (add_or_remove, out_chain, proto, port1, self.server_port), + "iptables -%s %s -p %s --%s %d -j NFQUEUE --queue-num 2" % + (add_or_remove, in_chain, proto, port2, self.server_port)] + + for cmd in cmds: + self.logger.debug(cmd) + # If we're logging at DEBUG mode, keep stderr/stdout piped to us + # Otherwise, pipe them both to DEVNULL + if actions.utils.get_console_log_level() == logging.DEBUG: + subprocess.check_call(cmd.split(), timeout=60) + else: + subprocess.check_call(cmd.split(), stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL, timeout=60) + return cmds + + def initialize_nfqueue(self): + """ + Initializes the nfqueue for input and output forests. 
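        The interception configured here is roughly equivalent to the following
        iptables rules (shown for --server-port 80; configure_iptables() above
        emits the same pair for both tcp and udp):

            iptables -A OUTPUT -p tcp --dport 80 -j NFQUEUE --queue-num 1
            iptables -A INPUT  -p tcp --sport 80 -j NFQUEUE --queue-num 2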
+ """ + self.logger.debug("Engine created with strategy %s (ID %s) to port %s", + str(self.strategy).strip(), self.environment_id, self.server_port) + self.configure_iptables() + + self.out_nfqueue_started = False + self.in_nfqueue_started = False + self.running_nfqueue = True + # Create our NFQueues + self.out_nfqueue = netfilterqueue.NetfilterQueue() + self.in_nfqueue = netfilterqueue.NetfilterQueue() + # Bind them + self.out_nfqueue.bind(1, self.out_callback) + self.in_nfqueue.bind(2, self.in_callback) + # Create our nfqueue sockets to allow for non-blocking usage + self.out_nfqueue_socket = socket.fromfd(self.out_nfqueue.get_fd(), + socket.AF_UNIX, + socket.SOCK_STREAM) + self.in_nfqueue_socket = socket.fromfd(self.in_nfqueue.get_fd(), + socket.AF_UNIX, + socket.SOCK_STREAM) + # Create our handling threads for packets + self.out_nfqueue_thread = threading.Thread(target=self.run_nfqueue, + args=(self.out_nfqueue, self.out_nfqueue_socket, "out")) + + self.in_nfqueue_thread = threading.Thread(target=self.run_nfqueue, + args=(self.in_nfqueue, self.in_nfqueue_socket, "in")) + # Start each thread + self.in_nfqueue_thread.start() + self.out_nfqueue_thread.start() + + maxwait = 100 # 100 time steps of 0.01 seconds for a max wait of 10 seconds + i = 0 + # Give NFQueue time to startup, since it's running in background threads + # Block the main thread until this is done + while (not self.in_nfqueue_started or not self.out_nfqueue_started) and i < maxwait: + time.sleep(0.1) + i += 1 + self.logger.debug("NFQueue Initialized after %d", int(i)) + + def shutdown_nfqueue(self): + """ + Shutdown nfqueue. + """ + self.logger.debug("Shutting down NFQueue") + self.out_nfqueue_started = False + self.in_nfqueue_started = False + self.running_nfqueue = False + # Give the handlers two seconds to leave the callbacks before we forcibly unbind + # the queues. + time.sleep(2) + if self.in_nfqueue: + self.in_nfqueue.unbind() + if self.out_nfqueue: + self.out_nfqueue.unbind() + self.configure_iptables(remove=True) + + packets_path = os.path.join(BASEPATH, + self.output_directory, + "packets", + "original_%s.pcap" % self.environment_id) + + # Write to disk the original packets we captured + wrpcap(packets_path, [p.packet for p in self.seen_packets]) + + # If the engine exits before it initializes for any reason, these threads may not be set + # Only join them if they are defined + if self.out_nfqueue_thread: + self.out_nfqueue_thread.join() + if self.in_nfqueue_thread: + self.in_nfqueue_thread.join() + + # Shutdown the logger + actions.utils.close_logger(self.logger) + + def out_callback(self, nfpacket): + """ + Callback bound to the outgoing nfqueue rule to run the outbound strategy. + """ + if not self.running_nfqueue: + return + + packet = actions.packet.Packet(IP(nfpacket.get_payload())) + self.logger.debug("Received outbound packet %s", str(packet)) + + # Record this packet for a .pacp later + self.seen_packets.append(packet) + + # Drop the packet in NFQueue so the strategy can handle it + nfpacket.drop() + + self.handle_packet(packet) + + def handle_packet(self, packet): + """ + Handles processing an outbound packet through the engine. 
+ """ + packets_to_send = self.strategy.act_on_packet(packet, self.logger, direction="out") + + # Send all of the packets we've collected to send + for out_packet in packets_to_send: + # If the strategy requested us to sleep before sending this packet, do so here + if out_packet.sleep: + # We can't block the main sending thread, so instead spin off a new thread to handle sleeping + threading.Thread(target=self.delayed_send, args=(out_packet, out_packet.sleep)).start() + else: + self.mysend(out_packet) + + def in_callback(self, nfpacket): + """ + Callback bound to the incoming nfqueue rule. Since we can't + manually send packets to ourself, process the given packet here. + """ + if not self.running_nfqueue: + return + packet = actions.packet.Packet(IP(nfpacket.get_payload())) + + self.seen_packets.append(packet) + + self.logger.debug("Received packet: %s", str(packet)) + + # Run the given strategy + packets = self.strategy.act_on_packet(packet, self.logger, direction="in") + + # GFW will send RA packets to disrupt a TCP stream + if packet.haslayer("TCP") and packet.get("TCP", "flags") == "RA": + self.logger.debug("Detected GFW censorship - strategy failed.") + self.censorship_detected = True + + # Branching is disabled for the in direction, so we can only ever get + # back 1 or 0 packets. If zero, drop the packet. + if not packets: + nfpacket.drop() + return + + # Otherwise, overwrite this packet with the packet the action trees gave back + nfpacket.set_payload(bytes(packets[0])) + + # If the strategy requested us to sleep before accepting on this packet, do so here + if packets[0].sleep: + time.sleep(packets[0].sleep) + + # Accept the modified packet + nfpacket.accept() def get_args(): """ @@ -226,16 +472,27 @@ def main(args): Kicks off the engine with the given arguments. 
""" try: - eng = Engine(args["server_port"], - args["strategy"], - environment_id=args.get("environment_id"), - output_directory = args.get("output_directory"), - log_level=args["log"]) - eng.initialize_divert() + if WINDOWS: + eng = WindowsEngine(args["server_port"], + args["strategy"], + environment_id=args.get("environment_id"), + output_directory = args.get("output_directory"), + log_level=args["log"]) + eng.initialize_divert() + else: + eng = LinuxEngine(args["server_port"], + args["strategy"], + environment_id=args.get("environment_id"), + output_directory = args.get("output_directory"), + log_level=args["log"]) + eng.initialize_nfqueue() while True: time.sleep(0.5) finally: - eng.shutdown_divert() + if WINDOWS: + eng.shutdown_divert() + else: + eng.shutdown_nfqueue() if __name__ == "__main__": diff --git a/requirements.txt b/requirements.txt index 3f2872a..0840c7a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ scapy==2.4.3 requests netifaces +netfilterqueue cryptography==2.5 requests -anytree -pydivert \ No newline at end of file +anytree \ No newline at end of file diff --git a/requirements_linux.txt b/requirements_linux.txt new file mode 100644 index 0000000..0840c7a --- /dev/null +++ b/requirements_linux.txt @@ -0,0 +1,7 @@ +scapy==2.4.3 +requests +netifaces +netfilterqueue +cryptography==2.5 +requests +anytree \ No newline at end of file diff --git a/requirements_windows.txt b/requirements_windows.txt new file mode 100644 index 0000000..3f2872a --- /dev/null +++ b/requirements_windows.txt @@ -0,0 +1,7 @@ +scapy==2.4.3 +requests +netifaces +cryptography==2.5 +requests +anytree +pydivert \ No newline at end of file From b85fd72be04896ebd4982f21ba5ba02de605580d Mon Sep 17 00:00:00 2001 From: George Hughey Date: Wed, 4 Dec 2019 00:20:04 -0800 Subject: [PATCH 03/15] Readded import --- engine.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/engine.py b/engine.py index 5c29029..d3314c4 100644 --- a/engine.py +++ b/engine.py @@ -34,6 +34,8 @@ if WINDOWS: import pydivert from pydivert.consts import Direction +else: + import netfilterqueue class Engine: def __init__(self, server_port, string_strategy, environment_id=None, output_directory="trials", log_level="info"): From 97f398d3b60659c0cc8a8055576425ec85edcfb8 Mon Sep 17 00:00:00 2001 From: George Hughey Date: Mon, 9 Dec 2019 19:21:51 -0800 Subject: [PATCH 04/15] Removed requirements, update readme --- README.md | 14 +++++++++----- requirements.txt | 7 ------- 2 files changed, 9 insertions(+), 12 deletions(-) delete mode 100644 requirements.txt diff --git a/README.md b/README.md index e2e2804..e6a7302 100644 --- a/README.md +++ b/README.md @@ -8,17 +8,21 @@ This code release specifically contains the strategy engine used by Geneva, its ## Setup -Geneva has been developed and tested for Centos or Debian-based systems. Due to limitations of -netfilter and raw sockets, Geneva does not work on OS X or Windows at this time and requires *python3.6* (with more versions coming soon). +Geneva has been developed and tested for Centos or Debian-based systems. Windows support is currently in beta and requires more testing, but is available in this repository. Due to limitations of netfilter and raw sockets, Geneva does not work on OS X at this time and requires *python3.6* on Linux (with more versions coming soon). 
-Install netfilterqueue dependencies: +Install netfilterqueue dependencies (Linux): ``` # sudo apt-get install build-essential python-dev libnetfilter-queue-dev libffi-dev libssl-dev iptables python3-pip ``` -Install Python dependencies: +Install Python dependencies (Linux): ``` -# python3 -m pip install -r requirements.txt +# python3 -m pip install -r requirements_linux.txt +``` + +Install Python dependencies (Windows): +``` +# python3 -m pip install -r requirements_windows.txt ``` ## Running it diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 0840c7a..0000000 --- a/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -scapy==2.4.3 -requests -netifaces -netfilterqueue -cryptography==2.5 -requests -anytree \ No newline at end of file From 556f94380d62f6711c5d8f9e8ceedafce32765c5 Mon Sep 17 00:00:00 2001 From: George Hughey Date: Mon, 9 Dec 2019 23:36:56 -0800 Subject: [PATCH 05/15] Small cleanup --- engine.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/engine.py b/engine.py index d3314c4..582bc61 100644 --- a/engine.py +++ b/engine.py @@ -123,8 +123,6 @@ def run_divert(self): # Send to inbound action tree, if any self.handle_inbound_packet(packet) - return - def __enter__(self): """ Allows the engine to be used as a context manager; simply launches the @@ -138,7 +136,6 @@ def __exit__(self, exc_type, exc_value, tb): Allows the engine to be used as a context manager; simply stops the engine """ self.shutdown_divert() - return def mysend(self, packet, dir): """ @@ -148,7 +145,7 @@ def mysend(self, packet, dir): self.logger.debug("Sending packet %s", str(packet)) # Convert the packet to a bytearray so memoryview can edit the underlying memory pack = bytearray(bytes(packet.packet)) - # Don't recalculate checksum since sometimes we want to change it + # Don't recalculate checksum since sometimes we will have already changed it self.divert.send(pydivert.Packet(memoryview(pack), self.interface, dir), recalculate_checksum=False) except Exception: self.logger.exception("Error in engine mysend.") From 5c4a4ea08ddb865b72e55ce345cca637681b34b4 Mon Sep 17 00:00:00 2001 From: George Hughey Date: Thu, 12 Dec 2019 16:44:24 -0800 Subject: [PATCH 06/15] Add strategy num --- engine.py | 14 ++++++++++++-- library.py | 26 ++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 2 deletions(-) create mode 100644 library.py diff --git a/engine.py b/engine.py index 582bc61..71ca84f 100644 --- a/engine.py +++ b/engine.py @@ -18,6 +18,8 @@ from scapy.config import conf from scapy.all import send, Raw +from library import LIBRARY + socket.setdefaulttimeout(1) import actions.packet @@ -457,6 +459,7 @@ def get_args(): parser.add_argument('--server-port', type=int, action='store', required=True) parser.add_argument('--environment-id', action='store', help="ID of the current strategy under test. If not provided, one will be generated.") parser.add_argument('--strategy', action='store', help="Strategy to deploy") + parser.add_argument('--strategy-index', action='store', help="Strategy to deploy, specified by index in the library") parser.add_argument('--output-directory', default="trials", action='store', help="Where to output logs, captures, and results. Defaults to trials/.") parser.add_argument('--log', action='store', default="debug", choices=("debug", "info", "warning", "critical", "error"), @@ -471,16 +474,23 @@ def main(args): Kicks off the engine with the given arguments. 
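    This commit adds a --strategy-index flag so a canned strategy from
    library.py can be selected by index rather than passed as a full strategy
    string; an illustrative invocation:

        python3 engine.py --server-port 80 --strategy-index 0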
""" try: + if args["strategy"]: + strategy = args["strategy"] + elif args["strategy-index"]: + strategy = LIBRARY[int(args["strategy-index"])][0] + else: + # Default to first strategy + strategy = LIBRARY[0][0] if WINDOWS: eng = WindowsEngine(args["server_port"], - args["strategy"], + strategy, environment_id=args.get("environment_id"), output_directory = args.get("output_directory"), log_level=args["log"]) eng.initialize_divert() else: eng = LinuxEngine(args["server_port"], - args["strategy"], + strategy, environment_id=args.get("environment_id"), output_directory = args.get("output_directory"), log_level=args["log"]) diff --git a/library.py b/library.py new file mode 100644 index 0000000..f4dab66 --- /dev/null +++ b/library.py @@ -0,0 +1,26 @@ +LIBRARY = [ + ("[TCP:flags:PA]-duplicate(tamper{TCP:dataofs:replace:10}(tamper{TCP:chksum:corrupt},),)-|", 98%, 100%, 0%), + ("[TCP:flags:PA]-duplicate(tamper{TCP:dataofs:replace:10}(tamper{IP:ttl:replace:10},),)-|", 98%, 100%, 0%), + ("[TCP:flags:PA]-duplicate(tamper{TCP:dataofs:replace:10}(tamper{TCP:ack:corrupt},),)-|", 94%, 100%, 0%), + ("[TCP:flags:PA]-duplicate(tamper{TCP:options-wscale:corrupt}(tamper{TCP:dataofs:replace:8},),)-|", 98%, 100%, 0%), + ("[TCP:flags:PA]-duplicate(tamper{TCP:load:corrupt}(tamper{TCP:chksum:corrupt},),)-|", 80%, 100%, 0%), + ("[TCP:flags:PA]-duplicate(tamper{TCP:load:corrupt}(tamper{IP:ttl:replace:8},),)-|", 98%, 100%, 0%), + ("[TCP:flags:PA]-duplicate(tamper{TCP:load:corrupt}(tamper{TCP:ack:corrupt},),)-|", 87%, 100%, 0%), + ("[TCP:flags:S]-duplicate(,tamper{TCP:load:corrupt})-|", 3%, 100%, 0%), + ("[TCP:flags:PA]-duplicate(tamper{IP:len:replace:64},)-|", 3%, 0%, 100%), + ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:R}(tamper{TCP:chksum:corrupt},))-|", 95%, 0%, 0%), + ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:R}(tamper{IP:ttl:replace:10},))-|", 87%, 0%, 0%), + ("[TCP:flags:A]-duplicate(,tamper{TCP:options-md5header:corrupt}(tamper{TCP:flags:replace:R},))-|", 86%, 0%, 0%), + ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:RA}(tamper{TCP:chksum:corrupt},))-|", 80%, 0%, 0%), + ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:RA}(tamper{IP:ttl:replace:10},))-|", 94%, 0%, 0%), + ("[TCP:flags:A]-duplicate(,tamper{TCP:options-md5header:corrupt}(tamper{TCP:flags:replace:R},))-|", 94%, 0%, 0%), + ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:FRAPUEN}(tamper{TCP:chksum:corrupt},))-|", 89%, 0%, 0%), + ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:FREACN}(tamper{IP:ttl:replace:10},))-|", 96%, 0%, 0%), + ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:FRAPUN}(tamper{TCP:options-md5header:corrupt},))-|", 94%, 0%, 0%), + ("[TCP:flags:PA]-fragment{tcp:8:False}-| [TCP:flags:A]-tamper{TCP:seq:corrupt}-|", 94%, 100%, 100%), + ("[TCP:flags:PA]-fragment{tcp:8:True}(,fragment{tcp:4:True})-|", 98%, 100%, 100%), + ("[TCP:flags:PA]-fragment{tcp:-1:True}-|", 3%, 100%, 100%), + ("[TCP:flags:PA]-duplicate(tamper{TCP:flags:replace:F}(tamper{IP:len:replace:78},),)-|", 53%, 0%, 100%), + ("[TCP:flags:S]-duplicate(tamper{TCP:flags:replace:SA},)-|", 3%, 100%, 0%), + ("[TCP:flags:PA]-tamper{TCP:options-uto:corrupt}-|", 3%, 0%, 100%) +] \ No newline at end of file From 4f1f70e0b3718a41dedf279d66d106155d463be0 Mon Sep 17 00:00:00 2001 From: George Hughey Date: Thu, 12 Dec 2019 18:30:58 -0800 Subject: [PATCH 07/15] Made Engine more user friendly with ABC --- engine.py | 125 ++++++++++++++++++++++++++++++------------------------ 1 file changed, 69 insertions(+), 56 deletions(-) diff --git a/engine.py 
b/engine.py index 71ca84f..2fa34af 100644 --- a/engine.py +++ b/engine.py @@ -39,8 +39,31 @@ else: import netfilterqueue -class Engine: +from abc import ABC, abstractmethod + +def Engine(server_port, string_strategy, environment_id=None, output_directory="trials", log_level="info"): + # Factory function to dynamically choose which engine to use. + # Users should initialize an Engine using this. + if WINDOWS: + eng = WindowsEngine(server_port, + string_strategy, + environment_id=environment_id, + output_directory=output_directory, + log_level=log_level) + else: + eng = LinuxEngine(server_port, + string_strategy, + environment_id=environment_id, + output_directory=output_directory, + log_level=log_level) + + return eng + +class GenericEngine(ABC): + # Abstract Base Class defining an engine. + # Users should follow the contract laid out here to create custom engines. def __init__(self, server_port, string_strategy, environment_id=None, output_directory="trials", log_level="info"): + # Do common setup self.server_port = server_port self.seen_packets = [] # Set up the directory and ID for logging @@ -53,18 +76,42 @@ def __init__(self, server_port, string_strategy, environment_id=None, output_dir self.environment_id = environment_id # Set up a logger self.logger = actions.utils.get_logger(BASEPATH, - output_directory, - __name__, - "engine", - environment_id, - log_level=log_level) + output_directory, + __name__, + "engine", + environment_id, + log_level=log_level) self.output_directory = output_directory # Used for conditional context manager usage self.strategy = actions.utils.parse(string_strategy, self.logger) self.censorship_detected = False -class WindowsEngine(Engine): + @abstractmethod + def initialize(self): + # Initialize the Engine. Users should call this directly. + pass + + @abstractmethod + def shutdown(self): + # Clean up the Engine. Users should call this directly. + pass + + def __enter__(self): + """ + Allows the engine to be used as a context manager; simply launches the + engine. + """ + self.initialize() + return self + + def __exit__(self, exc_type, exc_value, tb): + """ + Allows the engine to be used as a context manager; simply stops the engine + """ + self.shutdown() + +class WindowsEngine(GenericEngine): def __init__(self, server_port, string_strategy, environment_id=None, output_directory="trials", log_level="info"): super().__init__(server_port, string_strategy, environment_id=environment_id, output_directory=output_directory, log_level=log_level) # Instantialize a PyDivert channel, which we will use to redirect packets @@ -73,7 +120,7 @@ def __init__(self, server_port, string_strategy, environment_id=None, output_dir self.divert_thread_started = False self.interface = None # Using lazy evaluating as divert should know this - def initialize_divert(self): + def initialize(self): """ Initializes Divert such that all packets for the connection will come through us """ @@ -99,7 +146,7 @@ def initialize_divert(self): return - def shutdown_divert(self): + def shutdown(self): """ Closes the divert connection """ @@ -124,21 +171,7 @@ def run_divert(self): elif packet.is_inbound: # Send to inbound action tree, if any self.handle_inbound_packet(packet) - - def __enter__(self): - """ - Allows the engine to be used as a context manager; simply launches the - engine. 
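        (After this refactor, callers should not pick a subclass themselves:
        the module-level Engine() factory above returns a WindowsEngine or a
        LinuxEngine based on os.name, and __enter__/__exit__ now live on the
        shared GenericEngine base class, delegating to initialize()/shutdown().)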
- """ - self.initialize_divert() - return self - - def __exit__(self, exc_type, exc_value, tb): - """ - Allows the engine to be used as a context manager; simply stops the engine - """ - self.shutdown_divert() - + def mysend(self, packet, dir): """ Helper scapy sending method. Expects a Geneva Packet input. @@ -199,11 +232,9 @@ def handle_inbound_packet(self, divert_packet): # Accept the modified packet self.mysend(packets[0], Direction.INBOUND) -class LinuxEngine(Engine): +class LinuxEngine(GenericEngine): def __init__(self, server_port, string_strategy, environment_id=None, output_directory="trials", log_level="info"): - super().__init__(server_port, string_strategy, environment_id=environment_id, output_directory=output_directory, log_level=log_level) - # Setup variables used by the NFQueue system self.out_nfqueue_started = False self.in_nfqueue_started = False @@ -221,19 +252,10 @@ def __init__(self, server_port, string_strategy, environment_id=None, output_dir # of overhead. self.socket = conf.L3socket(iface=actions.utils.get_interface()) - def __enter__(self): - """ - Allows the engine to be used as a context manager; simply launches the - engine. - """ - self.initialize_nfqueue() - return self - - def __exit__(self, exc_type, exc_value, tb): """ Allows the engine to be used as a context manager; simply stops the engine """ - self.shutdown_nfqueue() + self.shutdown() def mysend(self, packet): """ @@ -303,7 +325,7 @@ def configure_iptables(self, remove=False): subprocess.check_call(cmd.split(), stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL, timeout=60) return cmds - def initialize_nfqueue(self): + def initialize(self): """ Initializes the nfqueue for input and output forests. """ @@ -346,7 +368,7 @@ def initialize_nfqueue(self): i += 1 self.logger.debug("NFQueue Initialized after %d", int(i)) - def shutdown_nfqueue(self): + def shutdown(self): """ Shutdown nfqueue. """ @@ -473,36 +495,27 @@ def main(args): """ Kicks off the engine with the given arguments. 
""" + try: if args["strategy"]: strategy = args["strategy"] - elif args["strategy-index"]: - strategy = LIBRARY[int(args["strategy-index"])][0] + elif args["strategy_index"]: + strategy = LIBRARY[int(args["strategy_index"])][0] else: # Default to first strategy strategy = LIBRARY[0][0] - if WINDOWS: - eng = WindowsEngine(args["server_port"], + eng = Engine(args["server_port"], strategy, environment_id=args.get("environment_id"), output_directory = args.get("output_directory"), log_level=args["log"]) - eng.initialize_divert() - else: - eng = LinuxEngine(args["server_port"], - strategy, - environment_id=args.get("environment_id"), - output_directory = args.get("output_directory"), - log_level=args["log"]) - eng.initialize_nfqueue() + eng.initialize() while True: time.sleep(0.5) + except Exception as e: + print(e) finally: - if WINDOWS: - eng.shutdown_divert() - else: - eng.shutdown_nfqueue() - + eng.shutdown() if __name__ == "__main__": main(vars(get_args())) From abd3cdc2f526de3b2edbf49e3f5ccb9e3a372634 Mon Sep 17 00:00:00 2001 From: George Hughey Date: Thu, 12 Dec 2019 18:33:20 -0800 Subject: [PATCH 08/15] Fixed library --- library.py | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/library.py b/library.py index f4dab66..b95d6c3 100644 --- a/library.py +++ b/library.py @@ -1,26 +1,26 @@ LIBRARY = [ - ("[TCP:flags:PA]-duplicate(tamper{TCP:dataofs:replace:10}(tamper{TCP:chksum:corrupt},),)-|", 98%, 100%, 0%), - ("[TCP:flags:PA]-duplicate(tamper{TCP:dataofs:replace:10}(tamper{IP:ttl:replace:10},),)-|", 98%, 100%, 0%), - ("[TCP:flags:PA]-duplicate(tamper{TCP:dataofs:replace:10}(tamper{TCP:ack:corrupt},),)-|", 94%, 100%, 0%), - ("[TCP:flags:PA]-duplicate(tamper{TCP:options-wscale:corrupt}(tamper{TCP:dataofs:replace:8},),)-|", 98%, 100%, 0%), - ("[TCP:flags:PA]-duplicate(tamper{TCP:load:corrupt}(tamper{TCP:chksum:corrupt},),)-|", 80%, 100%, 0%), - ("[TCP:flags:PA]-duplicate(tamper{TCP:load:corrupt}(tamper{IP:ttl:replace:8},),)-|", 98%, 100%, 0%), - ("[TCP:flags:PA]-duplicate(tamper{TCP:load:corrupt}(tamper{TCP:ack:corrupt},),)-|", 87%, 100%, 0%), - ("[TCP:flags:S]-duplicate(,tamper{TCP:load:corrupt})-|", 3%, 100%, 0%), - ("[TCP:flags:PA]-duplicate(tamper{IP:len:replace:64},)-|", 3%, 0%, 100%), - ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:R}(tamper{TCP:chksum:corrupt},))-|", 95%, 0%, 0%), - ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:R}(tamper{IP:ttl:replace:10},))-|", 87%, 0%, 0%), - ("[TCP:flags:A]-duplicate(,tamper{TCP:options-md5header:corrupt}(tamper{TCP:flags:replace:R},))-|", 86%, 0%, 0%), - ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:RA}(tamper{TCP:chksum:corrupt},))-|", 80%, 0%, 0%), - ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:RA}(tamper{IP:ttl:replace:10},))-|", 94%, 0%, 0%), - ("[TCP:flags:A]-duplicate(,tamper{TCP:options-md5header:corrupt}(tamper{TCP:flags:replace:R},))-|", 94%, 0%, 0%), - ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:FRAPUEN}(tamper{TCP:chksum:corrupt},))-|", 89%, 0%, 0%), - ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:FREACN}(tamper{IP:ttl:replace:10},))-|", 96%, 0%, 0%), - ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:FRAPUN}(tamper{TCP:options-md5header:corrupt},))-|", 94%, 0%, 0%), - ("[TCP:flags:PA]-fragment{tcp:8:False}-| [TCP:flags:A]-tamper{TCP:seq:corrupt}-|", 94%, 100%, 100%), - ("[TCP:flags:PA]-fragment{tcp:8:True}(,fragment{tcp:4:True})-|", 98%, 100%, 100%), - ("[TCP:flags:PA]-fragment{tcp:-1:True}-|", 3%, 100%, 100%), - 
("[TCP:flags:PA]-duplicate(tamper{TCP:flags:replace:F}(tamper{IP:len:replace:78},),)-|", 53%, 0%, 100%), - ("[TCP:flags:S]-duplicate(tamper{TCP:flags:replace:SA},)-|", 3%, 100%, 0%), - ("[TCP:flags:PA]-tamper{TCP:options-uto:corrupt}-|", 3%, 0%, 100%) + ("[TCP:flags:PA]-duplicate(tamper{TCP:dataofs:replace:10}(tamper{TCP:chksum:corrupt},),)-|", 98, 100, 0), + ("[TCP:flags:PA]-duplicate(tamper{TCP:dataofs:replace:10}(tamper{IP:ttl:replace:10},),)-|", 98, 100, 0), + ("[TCP:flags:PA]-duplicate(tamper{TCP:dataofs:replace:10}(tamper{TCP:ack:corrupt},),)-|", 94, 100, 0), + ("[TCP:flags:PA]-duplicate(tamper{TCP:options-wscale:corrupt}(tamper{TCP:dataofs:replace:8},),)-|", 98, 100, 0), + ("[TCP:flags:PA]-duplicate(tamper{TCP:load:corrupt}(tamper{TCP:chksum:corrupt},),)-|", 80, 100, 0), + ("[TCP:flags:PA]-duplicate(tamper{TCP:load:corrupt}(tamper{IP:ttl:replace:8},),)-|", 98, 100, 0), + ("[TCP:flags:PA]-duplicate(tamper{TCP:load:corrupt}(tamper{TCP:ack:corrupt},),)-|", 87, 100, 0), + ("[TCP:flags:S]-duplicate(,tamper{TCP:load:corrupt})-|", 3, 100, 0), + ("[TCP:flags:PA]-duplicate(tamper{IP:len:replace:64},)-|", 3, 0, 100), + ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:R}(tamper{TCP:chksum:corrupt},))-|", 95, 0, 0), + ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:R}(tamper{IP:ttl:replace:10},))-|", 87, 0, 0), + ("[TCP:flags:A]-duplicate(,tamper{TCP:options-md5header:corrupt}(tamper{TCP:flags:replace:R},))-|", 86, 0, 0), + ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:RA}(tamper{TCP:chksum:corrupt},))-|", 80, 0, 0), + ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:RA}(tamper{IP:ttl:replace:10},))-|", 94, 0, 0), + ("[TCP:flags:A]-duplicate(,tamper{TCP:options-md5header:corrupt}(tamper{TCP:flags:replace:R},))-|", 94, 0, 0), + ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:FRAPUEN}(tamper{TCP:chksum:corrupt},))-|", 89, 0, 0), + ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:FREACN}(tamper{IP:ttl:replace:10},))-|", 96, 0, 0), + ("[TCP:flags:A]-duplicate(,tamper{TCP:flags:replace:FRAPUN}(tamper{TCP:options-md5header:corrupt},))-|", 94, 0, 0), + ("[TCP:flags:PA]-fragment{tcp:8:False}-| [TCP:flags:A]-tamper{TCP:seq:corrupt}-|", 94, 100, 100), + ("[TCP:flags:PA]-fragment{tcp:8:True}(,fragment{tcp:4:True})-|", 98, 100, 100), + ("[TCP:flags:PA]-fragment{tcp:-1:True}-|", 3, 100, 100), + ("[TCP:flags:PA]-duplicate(tamper{TCP:flags:replace:F}(tamper{IP:len:replace:78},),)-|", 53, 0, 100), + ("[TCP:flags:S]-duplicate(tamper{TCP:flags:replace:SA},)-|", 3, 100, 0), + ("[TCP:flags:PA]-tamper{TCP:options-uto:corrupt}-|", 3, 0, 100) ] \ No newline at end of file From 7b983670651d961fbdfcd7451a12ea132fe7c35d Mon Sep 17 00:00:00 2001 From: George Hughey Date: Thu, 12 Dec 2019 21:57:09 -0800 Subject: [PATCH 09/15] Fixed whitespace --- engine.py | 1 - 1 file changed, 1 deletion(-) diff --git a/engine.py b/engine.py index 2fa34af..d3b5879 100644 --- a/engine.py +++ b/engine.py @@ -490,7 +490,6 @@ def get_args(): args = parser.parse_args() return args - def main(args): """ Kicks off the engine with the given arguments. 
From c8250db14338c3a54e054d7a1c5635617e7c76f4 Mon Sep 17 00:00:00 2001 From: George Hughey Date: Sat, 14 Dec 2019 00:37:24 -0800 Subject: [PATCH 10/15] Initial commit of overlapping segments --- actions/fragment.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/actions/fragment.py b/actions/fragment.py index e16a94e..e26cfdf 100644 --- a/actions/fragment.py +++ b/actions/fragment.py @@ -6,7 +6,7 @@ class FragmentAction(Action): - def __init__(self, environment_id=None, correct_order=None, fragsize=-1, segment=True): + def __init__(self, environment_id=None, correct_order=None, fragsize=-1, segment=True, overlap=0): ''' correct_order specifies if the fragmented packets should come in the correct order fragsize specifies how @@ -17,6 +17,7 @@ def __init__(self, environment_id=None, correct_order=None, fragsize=-1, segment self.terminal = False self.fragsize = fragsize self.segment = segment + self.overlap = overlap if correct_order == None: self.correct_order = self.get_rand_order() @@ -87,6 +88,9 @@ def tcp_segment(self, packet, logger): Segments a packet into two, given the size of the first packet (0:fragsize) Always returns two packets, since fragment is a branching action, so if we are unable to segment, it will duplicate the packet. + + If overlap is specified, it will select n bytes from the second packet + and append them to the first, and increment the sequence number accordingly """ if not packet.haslayer("TCP") or not hasattr(packet["TCP"], "load") or not packet["TCP"].load: return packet, packet.copy() # duplicate if no TCP or no payload to segment @@ -101,7 +105,11 @@ def tcp_segment(self, packet, logger): fragsize = int(len(payload)/2) # Craft new packets - pkt1 = IP(packet["IP"])/payload[:fragsize] + + # Make sure we don't go out of bounds by choosing the min + overlapBytes = min(payload[fragsize:], overlap) + # Attach these bytes to the first packet + pkt1 = IP(packet["IP"])/payload[:fragsize + overlapBytes] pkt2 = IP(packet["IP"])/payload[fragsize:] # We cannot rely on scapy's native parsing here - if a previous action has changed the From 61acc0970da8028f22204d1379190efa125c492d Mon Sep 17 00:00:00 2001 From: George Hughey Date: Sun, 15 Dec 2019 17:21:17 -0800 Subject: [PATCH 11/15] Add overlapping segmentation --- actions/fragment.py | 37 ++++++++++++++++----- tests/test_fragment.py | 74 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+), 9 deletions(-) diff --git a/actions/fragment.py b/actions/fragment.py index a0701c8..d47f37b 100644 --- a/actions/fragment.py +++ b/actions/fragment.py @@ -107,7 +107,7 @@ def tcp_segment(self, packet, logger): # Craft new packets # Make sure we don't go out of bounds by choosing the min - overlapBytes = min(payload[fragsize:], overlap) + overlapBytes = min(len(payload[fragsize:]), self.overlap) # Attach these bytes to the first packet pkt1 = IP(packet["IP"])/payload[:fragsize + overlapBytes] pkt2 = IP(packet["IP"])/payload[fragsize:] @@ -155,10 +155,15 @@ def __str__(self): Returns a string representation with the fragsize """ s = Action.__str__(self) + if self.overlap == 0: + ending = "}" + else: + ending = ":" + str(self.overlap) + "}" + if self.segment: - s += "{" + "tcp" + ":" + str(self.fragsize) + ":" + str(self.correct_order) + "}" + s += "{" + "tcp" + ":" + str(self.fragsize) + ":" + str(self.correct_order) + ending else: - s += "{" + "ip" + ":"+ str(self.fragsize) + ":" + str(self.correct_order) + "}" + s += "{" + "ip" + ":"+ str(self.fragsize) + ":" + 
str(self.correct_order) + ending return s def parse(self, string, logger): @@ -177,22 +182,36 @@ def parse(self, string, logger): num_parameters = string.count(":") # If num_parameters is greater than 2, it's not a valid fragment action - if num_parameters != 2: - msg = "Cannot parse fragment action %s" % string - logger.error(msg) - raise Exception(msg) - else: + if num_parameters == 2: params = string.split(":") seg, fragsize, correct_order = params + overlap = 0 + if "tcp" in seg: + self.segment = True + else: + self.segment = False + + elif num_parameters == 3: + params = string.split(":") + seg, fragsize, correct_order, overlap = params + if overlap.endswith("}"): + overlap = overlap[:-1] # Chop off trailing } if "tcp" in seg: self.segment = True else: self.segment = False + + else: + msg = "Cannot parse fragment action %s" % string + logger.error(msg) + raise Exception(msg) try: # Try to convert to int self.fragsize = int(fragsize) - except ValueError: + self.overlap = int(overlap) + except ValueError as e: + print(e) msg = "Cannot parse fragment action %s" % string logger.error(msg) raise Exception(msg) diff --git a/tests/test_fragment.py b/tests/test_fragment.py index 3e7bcee..5beaec3 100644 --- a/tests/test_fragment.py +++ b/tests/test_fragment.py @@ -217,4 +217,78 @@ def test_ip_only_fragment(): assert packet1["Raw"].load == b'datadata', "Left packet incorrectly fragmented" assert packet2["Raw"].load == b"11datadata", "Right packet incorrectly fragmented" +def test_overlapping_segment(): + """ + Basic test for overlapping segments. + """ + fragment = actions.fragment.FragmentAction(correct_order=True) + fragment.parse("fragment{tcp:-1:True:4}", logger) + + packet = actions.packet.Packet(IP(src="127.0.0.1", dst="127.0.0.1")/TCP(seq=100)/("datadata11datadata")) + packet1, packet2 = fragment.run(packet, logger) + + assert id(packet1) != id(packet2), "Duplicate aliased packet objects" + + assert packet1["Raw"].load != packet2["Raw"].load, "Packets were not different" + assert packet1["Raw"].load == b'datadata11dat', "Left packet incorrectly segmented" + assert packet2["Raw"].load == b"1datadata", "Right packet incorrectly fragmented" + + assert packet1["TCP"].seq == 100, "First packet sequence number incorrect" + assert packet2["TCP"].seq == 109, "Second packet sequence number incorrect" + +def test_overlapping_segment_no_overlap(): + """ + Basic test for overlapping segments with no overlap. (shouldn't ever actually happen) + """ + fragment = actions.fragment.FragmentAction(correct_order=True) + fragment.parse("fragment{tcp:-1:True:0}", logger) + + packet = actions.packet.Packet(IP(src="127.0.0.1", dst="127.0.0.1")/TCP(seq=100)/("datadata11datadata")) + packet1, packet2 = fragment.run(packet, logger) + + assert id(packet1) != id(packet2), "Duplicate aliased packet objects" + + assert packet1["Raw"].load != packet2["Raw"].load, "Packets were not different" + assert packet1["Raw"].load == b'datadata1', "Left packet incorrectly segmented" + assert packet2["Raw"].load == b"1datadata", "Right packet incorrectly fragmented" + + assert packet1["TCP"].seq == 100, "First packet sequence number incorrect" + assert packet2["TCP"].seq == 109, "Second packet sequence number incorrect" + +def test_overlapping_segment_entire_packet(): + """ + Basic test for overlapping segments overlapping entire packet. 
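    The payload "datadata11datadata" is 18 bytes and fragsize is -1, so
    tcp_segment() falls back to len(payload)//2 == 9; with overlap == 9 the
    first segment carries payload[:9 + 9] (the whole payload) while the second
    still carries payload[9:], and its sequence number is 100 + 9 == 109.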
+ """ + fragment = actions.fragment.FragmentAction(correct_order=True) + fragment.parse("fragment{tcp:-1:True:9}", logger) + + packet = actions.packet.Packet(IP(src="127.0.0.1", dst="127.0.0.1")/TCP(seq=100)/("datadata11datadata")) + packet1, packet2 = fragment.run(packet, logger) + + assert id(packet1) != id(packet2), "Duplicate aliased packet objects" + + assert packet1["Raw"].load != packet2["Raw"].load, "Packets were not different" + assert packet1["Raw"].load == b'datadata11datadata', "Left packet incorrectly segmented" + assert packet2["Raw"].load == b"1datadata", "Right packet incorrectly fragmented" + + assert packet1["TCP"].seq == 100, "First packet sequence number incorrect" + assert packet2["TCP"].seq == 109, "Second packet sequence number incorrect" + +def test_overlapping_segment_out_of_bounds(): + """ + Basic test for overlapping segments overlapping beyond the edge of the packet. + """ + fragment = actions.fragment.FragmentAction(correct_order=True) + fragment.parse("fragment{tcp:-1:True:20}", logger) + + packet = actions.packet.Packet(IP(src="127.0.0.1", dst="127.0.0.1")/TCP(seq=100)/("datadata11datadata")) + packet1, packet2 = fragment.run(packet, logger) + + assert id(packet1) != id(packet2), "Duplicate aliased packet objects" + + assert packet1["Raw"].load != packet2["Raw"].load, "Packets were not different" + assert packet1["Raw"].load == b'datadata11datadata', "Left packet incorrectly segmented" + assert packet2["Raw"].load == b"1datadata", "Right packet incorrectly fragmented" + assert packet1["TCP"].seq == 100, "First packet sequence number incorrect" + assert packet2["TCP"].seq == 109, "Second packet sequence number incorrect" \ No newline at end of file From 25014462f01e541f898646b58b9039e274fbb21d Mon Sep 17 00:00:00 2001 From: George Hughey Date: Sun, 15 Dec 2019 17:37:43 -0800 Subject: [PATCH 12/15] Add parsing test for overlapping segment --- tests/test_fragment.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tests/test_fragment.py b/tests/test_fragment.py index 5beaec3..03a1c64 100644 --- a/tests/test_fragment.py +++ b/tests/test_fragment.py @@ -291,4 +291,12 @@ def test_overlapping_segment_out_of_bounds(): assert packet2["Raw"].load == b"1datadata", "Right packet incorrectly fragmented" assert packet1["TCP"].seq == 100, "First packet sequence number incorrect" - assert packet2["TCP"].seq == 109, "Second packet sequence number incorrect" \ No newline at end of file + assert packet2["TCP"].seq == 109, "Second packet sequence number incorrect" + +def test_overlapping_segmentation_parse(): + """ + Basic test for parsing overlapping segments. 
+ """ + + fragment = actions.fragment.FragmentAction(correct_order=False, fragsize=2, segment=True, overlap=3) + assert str(fragment) == "fragment{tcp:2:False:3}", "Fragment returned incorrect string representation: %s" % str(fragment) From 337f916920637eb392220ab58a543894c08a9de7 Mon Sep 17 00:00:00 2001 From: George Hughey Date: Sun, 15 Dec 2019 17:40:42 -0800 Subject: [PATCH 13/15] Let travis test for Linux --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 9e296ac..92f4803 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,7 +32,7 @@ install: # Copy in the sudoers file - sudo cp /tmp/sudoers.tmp /etc/sudoers # Now that sudo is good to go, finish installing dependencies - - sudo python3 -m pip install -r requirements.txt + - sudo python3 -m pip install -r requirements_linux.txt - sudo python3 -m pip install slackclient pytest-cov script: From 586f6c8174fe33d8b00831d8b63b4391b6eaa88e Mon Sep 17 00:00:00 2001 From: George Hughey Date: Fri, 7 Feb 2020 06:42:31 -0800 Subject: [PATCH 14/15] test --- actions/http.py | 602 ++++++++++++++++++++++++++++++++++++++++++++ actions/layer.py | 77 ++++++ actions/packet.py | 23 +- actions/strategy.py | 1 + actions/tamper.py | 197 +++++++++++++-- actions/utils.py | 3 + engine.py | 8 +- 7 files changed, 880 insertions(+), 31 deletions(-) create mode 100644 actions/http.py diff --git a/actions/http.py b/actions/http.py new file mode 100644 index 0000000..7a4da28 --- /dev/null +++ b/actions/http.py @@ -0,0 +1,602 @@ + +# This file gutted from scapy as the module there doesn't quite work there. + +# This file is part of Scapy +# See http://www.secdev.org/projects/scapy for more information +# Copyright (C) 2019 Gabriel Potter +# Copyright (C) 2012 Luca Invernizzi +# Copyright (C) 2012 Steeve Barbeau +# This file is a modified version of the former scapy_http plugin. +# It was reimplemented for scapy 2.4.3+ using sessions, stream handling. 
+# Original Authors : Steeve Barbeau, Luca Invernizzi +# Originally published under a GPLv2 license + +import os +import re +import struct +import subprocess + +from scapy.compat import plain_str, bytes_encode, \ + gzip_compress, gzip_decompress +from scapy.config import conf +from scapy.consts import WINDOWS +from scapy.error import warning +from scapy.fields import StrField +from scapy.packet import Packet, bind_layers, bind_bottom_up, Raw +from scapy.utils import get_temp_file, ContextManagerSubprocess + +from scapy.layers.inet import TCP, TCP_client + +from scapy.modules import six + +if "http" not in conf.contribs: + conf.contribs["http"] = {} + conf.contribs["http"]["auto_compression"] = True + +# https://en.wikipedia.org/wiki/List_of_HTTP_header_fields + +GENERAL_HEADERS = [ + "Cache-Control", + "Connection", + "Permanent", + "Content-Length", + "Content-MD5", + "Content-Type", + "Date", + "Keep-Alive", + "Pragma", + "Upgrade", + "Via", + "Warning" +] + +COMMON_UNSTANDARD_GENERAL_HEADERS = [ + "X-Request-ID", + "X-Correlation-ID" +] + +REQUEST_HEADERS = [ + "A-IM", + "Accept", + "Accept-Charset", + "Accept-Encoding", + "Accept-Language", + "Accept-Datetime", + "Access-Control-Request-Method", + "Access-Control-Request-Headers", + "Authorization", + "Cookie", + "Expect", + "Forwarded", + "From", + "Host", + "HTTP2-Settings", + "If-Match", + "If-Modified-Since", + "If-None-Match", + "If-Range", + "If-Unmodified-Since", + "Max-Forwards", + "Origin", + "Proxy-Authorization", + "Range", + "Referer", + "TE", + "User-Agent" +] + +COMMON_UNSTANDARD_REQUEST_HEADERS = [ + "Upgrade-Insecure-Requests", + "Upgrade-Insecure-Requests", + "X-Requested-With", + "DNT", + "X-Forwarded-For", + "X-Forwarded-Host", + "X-Forwarded-Proto", + "Front-End-Https", + "X-Http-Method-Override", + "X-ATT-DeviceId", + "X-Wap-Profile", + "Proxy-Connection", + "X-UIDH", + "X-Csrf-Token", + "Save-Data", +] + +RESPONSE_HEADERS = [ + "Access-Control-Allow-Origin", + "Access-Control-Allow-Credentials", + "Access-Control-Expose-Headers", + "Access-Control-Max-Age", + "Access-Control-Allow-Methods", + "Access-Control-Allow-Headers", + "Accept-Patch", + "Accept-Ranges", + "Age", + "Allow", + "Alt-Svc", + "Content-Disposition", + "Content-Encoding", + "Content-Language", + "Content-Location", + "Content-Range", + "Delta-Base", + "ETag", + "Expires", + "IM", + "Last-Modified", + "Link", + "Location", + "Permanent", + "P3P", + "Proxy-Authenticate", + "Public-Key-Pins", + "Retry-After", + "Server", + "Set-Cookie", + "Strict-Transport-Security", + "Trailer", + "Transfer-Encoding", + "Tk", + "Vary", + "WWW-Authenticate", + "X-Frame-Options", +] + +COMMON_UNSTANDARD_RESPONSE_HEADERS = [ + "Content-Security-Policy", + "X-Content-Security-Policy", + "X-WebKit-CSP", + "Refresh", + "Status", + "Timing-Allow-Origin", + "X-Content-Duration", + "X-Content-Type-Options", + "X-Powered-By", + "X-UA-Compatible", + "X-XSS-Protection", +] + +def _strip_header_name(name): + """Takes a header key (i.e., "Host" in "Host: www.google.com", + and returns a stripped representation of it + """ + return plain_str(name.strip()).replace("-", "_") + +def _header_line(name, val): + """Creates a HTTP header line""" + # Python 3.4 doesn't support % on bytes + return bytes_encode(name) + b": " + bytes_encode(val) + +def _parse_headers(s): + headers = s.split(b"\r\n") + headers_found = {} + for header_line in headers: + try: + key, value = header_line.split(b':', 1) + except ValueError: + continue + header_key = _strip_header_name(key).lower() + # 
headers_found[header_key] = (key, value.strip()) # The first big change occurs here, using the header_line instead of value.strip() + headers_found[header_key] = (key, header_line .strip()) + return headers_found + +def _parse_headers_and_body(s): + ''' Takes a HTTP packet, and returns a tuple containing: + _ the first line (e.g., "GET ...") + _ the headers in a dictionary + _ the body + ''' + crlfcrlf = b"\r\n\r\n" + crlfcrlfIndex = s.find(crlfcrlf) + if crlfcrlfIndex != -1: + headers = s[:crlfcrlfIndex + len(crlfcrlf)] + body = s[crlfcrlfIndex + len(crlfcrlf):] + else: + headers = s + body = b'' + first_line, headers = headers.split(b"\r\n", 1) + return first_line.strip(), _parse_headers(headers), body + +def _dissect_headers(obj, s): + """Takes a HTTP packet as the string s, and populates the scapy layer obj + (either HTTPResponse or HTTPRequest). Returns the first line of the + HTTP packet, and the body + """ + first_line, headers, body = _parse_headers_and_body(s) + for f in obj.fields_desc: + # We want to still parse wrongly capitalized fields + stripped_name = _strip_header_name(f.name).lower() + try: + _, value = headers.pop(stripped_name) + except KeyError: + continue + obj.setfieldval(f.name, value) + if headers: + headers = {key: value for key, value in six.itervalues(headers)} + obj.setfieldval('Unknown_Headers', headers) + return first_line, body + +class _HTTPContent(Packet): + # https://developer.mozilla.org/fr/docs/Web/HTTP/Headers/Transfer-Encoding + def _get_encodings(self): + encodings = [] + if isinstance(self, HTTPResponse): + if self.Transfer_Encoding: + encodings += [plain_str(x).strip().lower() for x in + plain_str(self.Transfer_Encoding).split(",")] + if self.Content_Encoding: + encodings += [plain_str(x).strip().lower() for x in + plain_str(self.Content_Encoding).split(",")] + return encodings + + def hashret(self): + # The only field both Answers and Responses have in common + return self.Http_Version + + def post_dissect(self, s): + if not conf.contribs["http"]["auto_compression"]: + return s + encodings = self._get_encodings() + # Un-chunkify + if "chunked" in encodings: + data = b"" + while s: + length, _, body = s.partition(b"\r\n") + try: + length = int(length, 16) + except ValueError: + # Not a valid chunk. Ignore + break + else: + load = body[:length] + if body[length:length + 2] != b"\r\n": + # Invalid chunk. 
Ignore + break + s = body[length + 2:] + data += load + if not s: + s = data + # Decompress + try: + if "deflate" in encodings: + import zlib + s = zlib.decompress(s) + elif "gzip" in encodings: + s = gzip_decompress(s) + elif "compress" in encodings: + import lzw + s = lzw.decompress(s) + except Exception: + # Cannot decompress - probably incomplete data + pass + return s + + def post_build(self, pkt, pay): + if not conf.contribs["http"]["auto_compression"]: + return pkt + pay + encodings = self._get_encodings() + # Compress + if "deflate" in encodings: + import zlib + pay = zlib.compress(pay) + elif "gzip" in encodings: + pay = gzip_compress(pay) + elif "compress" in encodings: + import lzw + pay = lzw.compress(pay) + return pkt + pay + + def self_build(self, field_pos_list=None): + ''' Takes an HTTPRequest or HTTPResponse object, and creates its + string representation.''' + if not isinstance(self.underlayer, HTTP): + warning( + "An HTTPResponse/HTTPRequest should always be below an HTTP" + ) + p = b"" + # Walk all the fields, in order + for f in self.fields_desc: + if f.name == "Unknown_Headers": + continue + # Get the field value + val = self.getfieldval(f.name) + if not val: + # Not specified. Skip + continue + # Fields used in the first line have a space as a separator, + # whereas headers are terminated by a new line + if isinstance(self, HTTPRequest): + if f.name in ['Method', 'Path']: + separator = b' ' + else: + separator = b'\r\n' + elif isinstance(self, HTTPResponse): + if f.name in ['Http_Version', 'Status_Code']: + separator = b' ' + else: + separator = b'\r\n' + # Add the field into the packet + p = f.addfield(self, p, val + separator) + # Handle Unknown_Headers + if self.Unknown_Headers: + headers_text = b"" + for name, value in six.iteritems(self.Unknown_Headers): + headers_text += _header_line(name, value) + b"\r\n" + p = self.get_field("Unknown_Headers").addfield( + self, p, headers_text + ) + # The packet might be empty, and in that case it should stay empty. 
+ if p: + # Add an additional line after the last header + p = f.addfield(self, p, b'\r\n') + return p + + def guess_payload_class(self, payload): + """Detect potential payloads + """ + if self.Connection and b"Upgrade" in self.Connection: + from scapy.contrib.http2 import H2Frame + return H2Frame + return super(_HTTPContent, self).guess_payload_class(payload) + +class _HTTPHeaderField(StrField): + """Modified StrField to handle HTTP Header names""" + __slots__ = ["real_name"] + + def __init__(self, name, default): + self.real_name = name + name = _strip_header_name(name) + StrField.__init__(self, name, default, fmt="H") + +def _generate_headers(*args): + """Generate the header fields based on their name""" + # Order headers + all_headers = [] + for headers in args: + all_headers += headers + # Generate header fields + results = [] + for h in sorted(all_headers): + results.append(_HTTPHeaderField(h, None)) + return results + +# Create Request and Response packets +class HTTPRequest(_HTTPContent): + name = "HTTPRequest" + fields_desc = [ + # First line + _HTTPHeaderField("Method", "GET"), + _HTTPHeaderField("Path", "/"), + _HTTPHeaderField("Http-Version", "HTTP/1.1"), + # Headers + ] + ( + _generate_headers( + GENERAL_HEADERS, + REQUEST_HEADERS, + COMMON_UNSTANDARD_GENERAL_HEADERS, + COMMON_UNSTANDARD_REQUEST_HEADERS + ) + ) + [ + _HTTPHeaderField("Unknown-Headers", None), + ] + + def do_dissect(self, s): + """From the HTTP packet string, populate the scapy object""" + first_line, body = _dissect_headers(self, s) + try: + Method, Path, HTTPVersion = re.split(br"\s+", first_line, 2) + self.setfieldval('Method', Method) + self.setfieldval('Path', Path) + self.setfieldval('Http_Version', HTTPVersion) + except ValueError: + pass + if body: + self.raw_packet_cache = s[:-len(body)] + else: + self.raw_packet_cache = s + return body + + def mysummary(self): + return self.sprintf( + "%HTTPRequest.Method% %HTTPRequest.Path% " + "%HTTPRequest.Http_Version%" + ) + +# TODO: decide to keep or not +class HTTPResponse(_HTTPContent): + name = "HTTP Response" + fields_desc = [ + # First line + _HTTPHeaderField("Http-Version", "HTTP/1.1"), + _HTTPHeaderField("Status-Code", "200"), + _HTTPHeaderField("Reason-Phrase", "OK"), + # Headers + ] + ( + _generate_headers( + GENERAL_HEADERS, + RESPONSE_HEADERS, + COMMON_UNSTANDARD_GENERAL_HEADERS, + COMMON_UNSTANDARD_RESPONSE_HEADERS + ) + ) + [ + _HTTPHeaderField("Unknown-Headers", None), + ] + + def answers(self, other): + return HTTPRequest in other + + def do_dissect(self, s): + ''' From the HTTP packet string, populate the scapy object ''' + first_line, body = _dissect_headers(self, s) + try: + HTTPVersion, Status, Reason = re.split(br"\s+", first_line, 2) + self.setfieldval('Http_Version', HTTPVersion) + self.setfieldval('Status_Code', Status) + self.setfieldval('Reason_Phrase', Reason) + except ValueError: + pass + if body: + self.raw_packet_cache = s[:-len(body)] + else: + self.raw_packet_cache = s + return body + + def mysummary(self): + return self.sprintf( + "%HTTPResponse.Http_Version% %HTTPResponse.Status_Code% " + "%HTTPResponse.Reason_Phrase%" + ) + +class HTTP(Packet): + name = "HTTP 1" + fields_desc = [] + show_indent = 0 + + @classmethod + def dispatch_hook(cls, _pkt=None, *args, **kargs): + if _pkt and len(_pkt) >= 9: + from scapy.contrib.http2 import _HTTP2_types, H2Frame + # To detect a valid HTTP2, we check that the type is correct + # that the Reserved bit is set and length makes sense. 
+ while _pkt: + if len(_pkt) < 9: + # Invalid total length + return cls + if ord(_pkt[3:4]) not in _HTTP2_types: + # Invalid type + return cls + length = struct.unpack("!I", b"\0" + _pkt[:3])[0] + 9 + if length > len(_pkt): + # Invalid length + return cls + sid = struct.unpack("!I", _pkt[5:9])[0] + if sid >> 31 != 0: + # Invalid Reserved bit + return cls + _pkt = _pkt[length:] + return H2Frame + return cls + + # tcp_reassemble is used by TCPSession in session.py + @classmethod + def tcp_reassemble(cls, data, metadata): + detect_end = metadata.get("detect_end", None) + is_unknown = metadata.get("detect_unknown", True) + if not detect_end or is_unknown: + metadata["detect_unknown"] = False + http_packet = HTTP(data) + # Detect packing method + if not isinstance(http_packet.payload, _HTTPContent): + return http_packet + length = http_packet.Content_Length + if length is not None: + # The packet provides a Content-Length attribute: let's + # use it. When the total size of the frags is high enough, + # we have the packet + length = int(length) + # Subtract the length of the "HTTP*" layer + if http_packet.payload.payload or length == 0: + http_length = len(data) - len(http_packet.payload.payload) + detect_end = lambda dat: len(dat) - http_length >= length + else: + # The HTTP layer isn't fully received. + detect_end = lambda dat: False + metadata["detect_unknown"] = True + else: + # It's not Content-Length based. It could be chunked + encodings = http_packet[HTTP].payload._get_encodings() + chunked = ("chunked" in encodings) + if chunked: + detect_end = lambda dat: dat.endswith(b"\r\n\r\n") + else: + # If neither Content-Length nor chunked is specified, + # it means it's the TCP packet that contains the data, + # or that the information hasn't been given yet. + detect_end = lambda dat: metadata.get("tcp_end", False) + metadata["detect_unknown"] = True + metadata["detect_end"] = detect_end + if detect_end(data): + return http_packet + else: + if detect_end(data): + http_packet = HTTP(data) + return http_packet + + def guess_payload_class(self, payload): + """Decides if the payload is an HTTP Request or Response, or + something else. + """ + try: + prog = re.compile( + br"^(?:OPTIONS|GET|HEAD|POST|PUT|DELETE|TRACE|CONNECT) " + br"(?:.+?) " + br"HTTP/\d\.\d$" + ) + crlfIndex = payload.index(b"\r\n") + req = payload[:crlfIndex] + result = prog.match(req) + if result: + return HTTPRequest + else: + prog = re.compile(br"^HTTP/\d\.\d \d\d\d .*$") + result = prog.match(req) + if result: + return HTTPResponse + except ValueError: + # Anything that isn't HTTP but on port 80 + pass + return Raw + + +class GenevaHTTPRequest(): + """ + Defines a Geneva HTTP request, where we can replace, set, delete, or insert to existing fields. 
+ """ + def __init__(self, content): + """ + content: Raw string of the request (Bytes) + Creates a Geneva HTTP request + """ + self.original_content = content # Save off the original just in case + self.parsed_content = HTTPRequest(content) + + def replace(self, header, index, content): + # replace contents at index + if header not in self.parsed_content["HTTPRequest"].fields: + # TODO: throw some sort of error, this header doesn't exist + return None + + if index+len(content) > len(self.parsed_content["HTTPRequest"].fields[header]): + # TODO: throw some sort of error, this index is too large + return None + + self.parsed_content["HTTPRequest"].fields[header] = self.parsed_content["HTTPRequest"].fields[header][0:index] \ + + content + \ + self.parsed_content["HTTPRequest"].fields[header][index+len(content):] + return str(self) + + def delete(self, header, index, num): + # delete num characters from header beginning at index + if header not in self.parsed_content["HTTPRequest"].fields: + # TODO: throw some sort of error, this header doesn't exist + return None + + if index+num > len(self.parsed_content["HTTPRequest"].fields[header]): + # TODO: throw some sort of error, this index is too large + return None + + self.parsed_content["HTTPRequest"].fields[header] = self.parsed_content["HTTPRequest"].fields[header][0:index] \ + + self.parsed_content["HTTPRequest"].fields[header][index+num:] + return str(self) + + def set_header(self, header, content): + # Completely replaces a header with content. We don't care if the + # field already exists. + + self.parsed_content["HTTPRequest"].fields[header] = content + + return str(self) + + def __str__(self): + return str(self.parsed_content) diff --git a/actions/layer.py b/actions/layer.py index 1f16991..be8e99a 100644 --- a/actions/layer.py +++ b/actions/layer.py @@ -6,6 +6,8 @@ from scapy.all import IP, RandIP, UDP, DNS, DNSQR, Raw, TCP, fuzz +from actions.http import HTTPRequest + class Layer(): """ @@ -601,6 +603,81 @@ def __init__(self, layer): } +class HTTPRequestLayer(Layer): + """ + Defines an interface to access parsed HTTP fields + """ + + name = "HTTPRequest" + protocol = HTTPRequest + + _fields = [ + "Method", + "Path", + "Http_Version", + "Host" + ] + fields = _fields + + def __init__(self, layer): + """ + Initializes the HTTP layer. + """ + Layer.__init__(self, layer) + self.request = HTTPRequest(bytes(layer)) # TODO: I dont like this + self.getters = { + 'load' : self.get_load, + } + self.setters = { + 'load' : self.set_load, + } + self.generators = { + 'load' : self.gen_load, + } + + def get(self, field): + """ + Override get, since the HTTPRequest doesn't immediately make its fields known + """ + assert field in self.fields + if field in self.getters: + return self.getters[field](field) + return getattr(self.request, field) + + def set(self, packet, field, value): + """ + Override get, since the HTTPRequest doesn't immediately make its fields known + """ + print('entering set, but I dont like this') + print(packet) + print(field) + print(value) + assert field in self.fields + base = field.split("-")[0] + if field in self.setters: + self.setters[field](packet, field, value) + + # Dual field accessors are fields that require two pieces of information + # to retrieve them (for example, "options-eol"). These are delimited by + # a dash "-". 
+ elif "-" in field and base in self.setters: + self.setters[base](packet, field, value) + else: + setattr(self.layer, field, value) + + # Request the packet be reparsed to confirm the value is stable + # XXX Temporarily disabling the reconstitution check due to scapy bug (#2034) + #assert bytes(self.protocol(bytes(self.layer))) == bytes(self.layer) + + + @classmethod + def name_matches(cls, name): + """ + Override the name parsing to check for HTTP REQUEST here. + """ + return name.upper() in ["HTTPREQUEST"] + + class DNSLayer(Layer): """ Defines an interface to access DNS header fields. diff --git a/actions/packet.py b/actions/packet.py index f40b6d1..5b8bc90 100644 --- a/actions/packet.py +++ b/actions/packet.py @@ -3,13 +3,18 @@ import actions.layer +from scapy.layers.http import HTTP as ScapyHTTP, HTTPRequest as ScapyHTTPRequest +from scapy.all import IP, TCP +from actions.http import HTTPRequest as GenevaHTTPRequest _SUPPORTED_LAYERS = [ actions.layer.IPLayer, actions.layer.TCPLayer, actions.layer.UDPLayer, actions.layer.DNSLayer, - actions.layer.DNSQRLayer + actions.layer.DNSQRLayer, + #actions.layer.HTTPLayer, + actions.layer.HTTPRequestLayer ] SUPPORTED_LAYERS = _SUPPORTED_LAYERS @@ -23,7 +28,13 @@ def __init__(self, packet=None): """ Initializes the packet object. """ - self.packet = packet + if(ScapyHTTPRequest in packet.layers()): + # We have a ScapyHTTPRequest. Instead, let's convert to our HttpRequest. + # Overwrite the current ScapyHTTPRequest with a GenevaHTTPRequest + self.packet = packet + self.packet[ScapyHTTPRequest] = GenevaHTTPRequest(bytes(packet[ScapyHTTPRequest])) + else: + self.packet = packet self.layers = self.setup_layers() self.sleep = 0 @@ -131,7 +142,7 @@ def setup_layers(self): """ layers = {} for layer in self.read_layers(): - layers[layer.name.upper()] = layer + layers[layer.name] = layer return layers def copy(self): @@ -179,7 +190,11 @@ def set(self, str_protocol, field, value): if self.haslayer("TCP"): del self.packet["TCP"].chksum - return self.layers[str_protocol].set(self.packet, field, value) + lay = self.layers[str_protocol].set(self.packet, field, value) + + print(lay) + + return lay def get(self, str_protocol, field): """ diff --git a/actions/strategy.py b/actions/strategy.py index 613f029..c7330d7 100644 --- a/actions/strategy.py +++ b/actions/strategy.py @@ -85,4 +85,5 @@ def run_on_packet(self, packet, logger, direction): # If no action tree was applicable, send the packet unimpeded if not ran: packets_to_send = [packet] + return packets_to_send diff --git a/actions/tamper.py b/actions/tamper.py index 480cb33..696e901 100644 --- a/actions/tamper.py +++ b/actions/tamper.py @@ -15,44 +15,82 @@ from actions.layer import DNSLayer import random +from actions.http import HTTPRequest as HTTPRequest +import urllib.parse +import string # All supported tamper primitives -SUPPORTED_PRIMITIVES = ["corrupt", "replace", "add", "compress"] +SUPPORTED_PRIMITIVES = ["corrupt", "replace", "add", "compress", "insert", "delete"] class TamperAction(Action): """ Defines the TamperAction for Geneva. 
""" - def __init__(self, environment_id=None, field=None, tamper_type=None, tamper_value=None, tamper_proto="TCP"): + def __init__(self, environment_id=None, field=None, tamper_type=None, tamper_value=None, tamper_proto="TCP", start_index=None, end_index=None, encoded_payload=None): Action.__init__(self, "tamper", "both") self.field = field self.tamper_value = tamper_value self.tamper_proto = actions.utils.string_to_protocol(tamper_proto) self.tamper_proto_str = tamper_proto self.tamper_type = tamper_type + self.start_index = start_index + self.end_index = end_index + self.encoded_payload = encoded_payload + if encoded_payload: + self.decoded_payload = bytes(urllib.parse.unquote(encoded_payload), "UTF-8") def tamper(self, packet, logger): """ Edits a given packet according to the action settings. """ + # Return packet untouched if not applicable if not packet.haslayer(self.tamper_proto_str): return packet - + # Retrieve the old value of the field for logging purposes old_value = packet.get(self.tamper_proto_str, self.field) - + new_value = self.tamper_value # If corrupting the packet field, generate a value for it try: if self.tamper_type == "corrupt": - new_value = packet.gen(self.tamper_proto_str, self.field) + if self.tamper_proto == HTTPRequest: + packet = corrupt(packet, self.field, self.start_index, self.end_index) + del packet["IP"].chksum + del packet["IP"].len + del packet["TCP"].chksum + del packet["TCP"].dataofs + return packet + else: + new_value = packet.gen(self.tamper_proto_str, self.field) elif self.tamper_type == "add": new_value = int(self.tamper_value) + int(old_value) elif self.tamper_type == "compress": return packet.dns_decompress(logger) + elif self.tamper_type == "insert": + packet = insert(packet, self.field, self.start_index, self.decoded_payload) + + del packet["IP"].chksum + del packet["IP"].len + del packet["TCP"].chksum + del packet["TCP"].dataofs + return packet + elif self.tamper_type == "replace": + packet = replace(packet, self.field, self.start_index, self.decoded_payload) + + del packet["IP"].chksum + del packet["IP"].len + del packet["TCP"].chksum + del packet["TCP"].dataofs + + return packet + elif self.tamper_type == "delete": + packet = delete(packet, self.field, self.start_index, self.end_index) + return packet + except NotImplementedError: # If a primitive does not support the type of packet given return packet @@ -62,7 +100,7 @@ def tamper(self, packet, logger): logger.debug(" - Tampering %s field `%s` (%s) by %s (to %s)" % (self.tamper_proto_str, self.field, str(old_value), self.tamper_type, str(new_value))) - + print("about to call set") packet.set(self.tamper_proto_str, self.field, new_value) return packet @@ -82,11 +120,19 @@ def __str__(self): """ s = Action.__str__(self) if self.tamper_type == "corrupt": - s += "{%s:%s:%s}" % (self.tamper_proto_str, self.field, self.tamper_type) - elif self.tamper_type in ["replace", "add"]: - s += "{%s:%s:%s:%s}" % (self.tamper_proto_str, self.field, self.tamper_type, self.tamper_value) + if self.tamper_proto == HTTPRequest: + s += "{%s:%s:%s:%s}" % (self.tamper_proto_str, self.field, self.tamper_type, str(self.start_index) + "-" + str(self.end_index)) + else: + s += "{%s:%s:%s}" % (self.tamper_proto_str, self.field, self.tamper_type) + elif self.tamper_type in ["replace", "add", "insert"]: + if self.tamper_proto == HTTPRequest: + s += "{%s:%s:%s:%s:%s}" % (self.tamper_proto_str, self.field, self.tamper_type, str(self.start_index), self.encoded_payload) + else: + s += "{%s:%s:%s:%s}" % 
(self.tamper_proto_str, self.field, self.tamper_type, self.tamper_value) elif self.tamper_type == "compress": s += "{%s:%s:compress}" % ("DNS", "qd", ) + elif self.tamper_type == "delete": + s += "{%s:%s:%s:%s}" % (self.tamper_proto_str, self.field, self.tamper_type, str(self.start_index) + "-" + str(self.end_index)) return s @@ -105,28 +151,129 @@ def parse(self, string, logger): # Count the number of params in this given string num_parameters = string.count(":") - # If num_parameters is greater than 3, it's not a valid tamper action - if num_parameters > 3 or num_parameters < 2: + # If num_parameters is greater than 4, it's not a valid tamper action + if num_parameters > 4 or num_parameters < 2: msg = "Cannot parse tamper action %s" % string logger.error(msg) raise Exception(msg) params = string.split(":") - if num_parameters == 3: - self.tamper_proto_str, self.field, self.tamper_type, self.tamper_value = params + if num_parameters == 4: + # HTTP replace or insert + self.tamper_proto_str, self.field, self.tamper_type, self.start_index, self.encoded_payload = params + self.start_index = int(self.start_index) + self.tamper_proto = actions.utils.string_to_protocol(self.tamper_proto_str) + self.decoded_payload = bytes(urllib.parse.unquote(self.encoded_payload), "UTF-8") + + elif num_parameters == 3: + # HTTP corrupt or delete could be here, check for those first + self.tamper_proto_str = params[0] self.tamper_proto = actions.utils.string_to_protocol(self.tamper_proto_str) - if "options" in self.field: - if not self.tamper_value: - self.tamper_value = '' # An empty string instead of an empty byte literal - - # tamper_value might be parsed as a string despite being an integer in most cases. - # Try to parse it out here - try: - if "load" not in self.field: - self.tamper_value = int(self.tamper_value) - except: - pass - else: + if self.tamper_proto_str == "HTTPRequest": + self.field = params[1] + self.tamper_type = params[2] + indices = params[3].split('-') + self.start_index = int(indices[0]) + self.end_index = int(indices[1]) + else: + self.tamper_proto_str, self.field, self.tamper_type, self.tamper_value = params + if "options" in self.field: + if not self.tamper_value: + self.tamper_value = '' # An empty string instead of an empty byte literal + # tamper_value might be parsed as a string despite being an integer in most cases. 
+ # Try to parse it out here
+ try:
+ if "load" not in self.field:
+ self.tamper_value = int(self.tamper_value)
+ except:
+ pass
+
+ elif num_parameters == 2:
 self.tamper_proto_str, self.field, self.tamper_type = params
 self.tamper_proto = actions.utils.string_to_protocol(self.tamper_proto_str)
 return True
+
+
+def insert(packet, header, index, content):
+ """
+ Helper method to insert content into packet[header][index]
+ """
+ if header not in packet["HTTPRequest"].fields:
+ # TODO: throw some sort of error, this header doesn't exist
+ return None
+
+ if index > len(packet["HTTPRequest"].fields[header]):
+ # TODO: throw some sort of error, this index is too large
+ return None
+ packet["HTTPRequest"].fields[header] = packet["HTTPRequest"].fields[header][0:index] \
+ + content + \
+ packet["HTTPRequest"].fields[header][index:]
+
+ return packet
+
+def replace(packet, header, index, content):
+ """
+ Helper method to replace packet[header][index] with content
+ """
+ if header not in packet["HTTPRequest"].fields:
+ # TODO: throw some sort of error, this header doesn't exist
+ return None
+
+ if index+len(content) > len(packet["HTTPRequest"].fields[header]):
+ # TODO: throw some sort of error, this index is too large
+ return None
+
+ packet["HTTPRequest"].fields[header] = packet["HTTPRequest"].fields[header][0:index] \
+ + content + \
+ packet["HTTPRequest"].fields[header][index+len(content):]
+ return packet
+
+
+def delete(packet, header, start_index, end_index):
+ """
+ Helper method to remove the characters at header[start_index] to header[end_index]
+ """
+ if header not in packet["HTTPRequest"].fields:
+ # TODO: throw some sort of error, this header doesn't exist
+ return None
+
+ if end_index+1 > len(packet["HTTPRequest"].fields[header]):
+ # TODO: throw some sort of error, this index is too large
+ return None
+
+ packet["HTTPRequest"].fields[header] = packet["HTTPRequest"].fields[header][0:start_index] \
+ + packet["HTTPRequest"].fields[header][end_index+1:]
+ return packet
+
+def corrupt(packet, header, start_index, end_index):
+ """
+ Helper method to corrupt the characters at header[start_index] through header[end_index]
+ """
+ if header not in packet["HTTPRequest"].fields:
+ # TODO: throw some sort of error, this header doesn't exist
+ return None
+
+ if end_index+1 > len(packet["HTTPRequest"].fields[header]):
+ # TODO: throw some sort of error, this index is too large
+ return None
+
+ old_field = packet["HTTPRequest"].fields[header]
+ tampered_field = old_field[0:start_index]
+
+ for i in range(start_index, end_index + 1):
+ # Ensure we're getting a new character; slice so we compare bytes to bytes
+ new_character = bytes(random.choice(string.printable), "UTF-8")
+ while new_character == old_field[i:i+1]:
+ new_character = bytes(random.choice(string.printable), "UTF-8")
+ tampered_field = tampered_field + new_character
+
+ # Reattach the untouched tail of the original field
+ tampered_field = tampered_field + old_field[end_index+1:]
+ packet["HTTPRequest"].fields[header] = tampered_field
+
+ return packet
diff --git a/actions/utils.py b/actions/utils.py
index a795442..b39834c 100644
--- a/actions/utils.py
+++ b/actions/utils.py
@@ -14,6 +14,7 @@
 import actions.packet
 from scapy.all import TCP, IP, UDP, rdpcap
+from actions.http import HTTPRequest
 import netifaces
@@ -153,6 +154,8 @@ def string_to_protocol(protocol):
 return IP
 elif protocol.upper() == "UDP":
return UDP
+ elif protocol.upper() == "HTTPREQUEST":
+ return HTTPRequest


def get_id():
diff --git a/engine.py b/engine.py
index 0f6cf08..fbecd03 100644
--- a/engine.py
+++ b/engine.py
@@ -177,10 +177,14 @@ def mysend(self, packet, dir):
 try:
 self.logger.debug("Sending packet %s", str(packet))
 # Convert the packet to a bytearray so memoryview can edit the underlying memory
+ self.logger.debug("About to convert packet to a bytearray")
 pack = bytearray(bytes(packet.packet))
+ self.logger.debug("About to send")
+ self.logger.debug("%s", pack)
 # Don't recalculate checksum since sometimes we will have already changed it
 self.divert.send(pydivert.Packet(memoryview(pack), self.interface, dir), recalculate_checksum=False)
- except Exception:
+ except Exception as e:
+ self.logger.debug("Divert send error: %s", e)
 self.logger.exception("Error in engine mysend.")

 def handle_outbound_packet(self, divert_packet):
@@ -197,7 +201,7 @@ def handle_outbound_packet(self, divert_packet):

 # Send all of the packets we've collected to send
 for out_packet in packets_to_send:
- self.mysend(out_packet, Direction.OUTBOUND)
+ self.mysend(out_packet, Direction.OUTBOUND)

 def handle_inbound_packet(self, divert_packet):
 """

From 72e1b8c185aef7113f191550e28d702f8ce95dd2 Mon Sep 17 00:00:00 2001
From: George Hughey
Date: Mon, 17 Feb 2020 21:26:42 -0800
Subject: [PATCH 15/15] Add UDP support

---
 engine.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/engine.py b/engine.py
index fbecd03..d8dee65 100644
--- a/engine.py
+++ b/engine.py
@@ -128,7 +128,8 @@ def initialize(self):

 self.logger.debug("Initializing Divert")

- self.divert = pydivert.WinDivert("tcp.DstPort == %d || tcp.SrcPort == %d" % (int(self.server_port), int(self.server_port)))
+ self.divert = pydivert.WinDivert("tcp.DstPort == %d || tcp.SrcPort == %d || udp.DstPort == %d || udp.SrcPort == %d" \
+ % (int(self.server_port), int(self.server_port), int(self.server_port), int(self.server_port)))
 self.divert.open()
 self.divert_thread = threading.Thread(target=self.run_divert)
 self.divert_thread.start()
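
For anyone trying out the overlap primitive introduced in PATCH 11/15 and 12/15, it can be driven outside the test suite with a few lines of Python. The following is a minimal sketch, not part of any patch in this series: it assumes the repository root is on the Python path and that scapy is installed, and the names logger, left, and right are arbitrary. The expected payloads and sequence numbers are copied from test_overlapping_segment above.

    import logging

    from scapy.all import IP, TCP

    import actions.fragment
    import actions.packet

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("overlap-demo")

    # Parse an overlapping-segment action: TCP segmentation, fragsize -1,
    # in-order delivery, with 4 bytes of overlap carried by the left segment.
    fragment = actions.fragment.FragmentAction(correct_order=True)
    fragment.parse("fragment{tcp:-1:True:4}", logger)

    # Build the same 18-byte payload the unit tests use and run the action on it.
    packet = actions.packet.Packet(IP(src="127.0.0.1", dst="127.0.0.1")/TCP(seq=100)/("datadata11datadata"))
    left, right = fragment.run(packet, logger)

    # These match the assertions in test_overlapping_segment.
    assert left["Raw"].load == b"datadata11dat"
    assert right["Raw"].load == b"1datadata"
    assert left["TCP"].seq == 100 and right["TCP"].seq == 109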