diff --git a/darwin_api.py b/darwin_api.py index 3652de05..5bbb7dd5 100644 --- a/darwin_api.py +++ b/darwin_api.py @@ -151,7 +151,7 @@ def _get_proc_info_by_pid(pid): if status == 0: # This means to data was written, this is an error - raise Exception,"Errno:"+str(get_ctypes_errno())+", Error: "+get_ctypes_error_str() + raise Exception("Errno:"+str(get_ctypes_errno())+", Error: "+get_ctypes_error_str()) def get_process_cpu_time(pid): diff --git a/emulcomm.py b/emulcomm.py index 6f02ef6d..8b069292 100755 --- a/emulcomm.py +++ b/emulcomm.py @@ -230,7 +230,7 @@ def _is_already_connected_exception(exceptionobj): # Convert the errno to and error string name try: errname = errno.errorcode[errnum] - except Exception,e: + except Exception as e: # The error is unknown for some reason... errname = None @@ -267,7 +267,7 @@ def _is_addr_in_use_exception(exceptionobj): # Convert the errno to and error string name try: errname = errno.errorcode[errnum] - except Exception,e: + except Exception as e: # The error is unknown for some reason... errname = None @@ -304,7 +304,7 @@ def _is_addr_unavailable_exception(exceptionobj): # Convert the errno to and error string name try: errname = errno.errorcode[errnum] - except Exception,e: + except Exception as e: # The error is unknown for some reason... errname = None @@ -341,7 +341,7 @@ def _is_conn_refused_exception(exceptionobj): # Convert the errno to and error string name try: errname = errno.errorcode[errnum] - except Exception,e: + except Exception as e: # The error is unknown for some reason... errname = None @@ -379,7 +379,7 @@ def _is_conn_aborted_exception(exceptionobj): # Convert the errno to and error string name try: errname = errno.errorcode[errnum] - except Exception,e: + except Exception as e: # The error is unknown for some reason... 
errname = None @@ -419,7 +419,7 @@ def _is_network_down_exception(exceptionobj): # Convert the errno to and error string name try: errname = errno.errorcode[errnum] - except Exception,e: + except Exception as e: # The error is unknown for some reason... errname = None @@ -460,7 +460,7 @@ def _is_recoverable_network_exception(exceptionobj): # Convert the errno to and error string name try: errname = errno.errorcode[errnum] - except Exception,e: + except Exception as e: # The error is unknown for some reason... errname = None @@ -960,7 +960,7 @@ def sendmessage(destip, destport, message, localip, localport): return bytessent - except Exception, e: + except Exception as e: try: # If we're borrowing the socket, closing is not appropriate. @@ -1064,7 +1064,7 @@ def listenformessage(localip, localport): # preserve send functionality on this port. _BOUND_SOCKETS[("UDP", localip, localport)] = sock - except Exception, e: + except Exception as e: # Check if this an already in use error if _is_addr_in_use_exception(e): @@ -1218,7 +1218,7 @@ def _timed_conn_initialize(localip,localport,destip,destport, timeout): sock.connect((destip, destport)) connected = True break - except Exception, e: + except Exception as e: # Check if we are already connected if _is_already_connected_exception(e): connected = True @@ -1385,7 +1385,7 @@ def openconnection(destip, destport,localip, localport, timeout): # Register this socket as an outsocket nanny.tattle_add_item('outsockets',id(sock)) - except Exception, e: + except Exception as e: # Check if this an already in use error if _is_addr_in_use_exception(e): @@ -1498,7 +1498,7 @@ def listenforconnection(localip, localport): else: sock.listen(5) - except Exception, e: + except Exception as e: # Check if this an already in use error if _is_addr_in_use_exception(e): @@ -1593,7 +1593,7 @@ def _check_socket_state(realsock, waitfor="rw", timeout=0.0): """ # Check that waitfor is valid if waitfor not in ["rw","r","w"]: - raise Exception, "Illegal 
waitfor argument!" + raise Exception("Illegal waitfor argument!") # Array to hold the socket sock_array = [realsock] @@ -1755,7 +1755,7 @@ def close(self): - def recv(self,bytes): + def recv(self,some_bytes): """ Receives data from a socket. It may receive fewer bytes than @@ -1801,7 +1801,7 @@ def recv(self,bytes): raise KeyError # Socket is closed locally # Try to recieve the data - data_recieved = sock.recv(bytes) + data_recieved = sock.recv(some_bytes) # Calculate the length of the data data_length = len(data_recieved) @@ -1825,7 +1825,7 @@ def recv(self,bytes): except RepyException: raise # Pass up from inner block - except Exception, e: + except Exception as e: # Check if this a recoverable error if _is_recoverable_network_exception(e): # Operation would block @@ -1924,7 +1924,7 @@ def send(self,message): raise SocketClosedLocal("The socket is closed!") except RepyException: raise # pass up from inner block - except Exception, e: + except Exception as e: # Check if this a recoverable error if _is_recoverable_network_exception(e): # Operation would block @@ -2075,7 +2075,7 @@ def getmessage(self): # Let these through from the inner block raise - except Exception, e: + except Exception as e: # Check if this is a would-block error if _is_recoverable_network_exception(e): raise SocketWouldBlockError("No messages currently available!") @@ -2267,7 +2267,7 @@ def getconnection(self): # Let these through from the inner block raise - except Exception, e: + except Exception as e: # Check if this is a would-block error if _is_recoverable_network_exception(e): raise SocketWouldBlockError("No connections currently available!") diff --git a/emulfile.py b/emulfile.py index b7b3cb63..da65caf6 100755 --- a/emulfile.py +++ b/emulfile.py @@ -32,11 +32,11 @@ # Fix for SeattleTestbed/attic#983. # By retaining a reference to unicode, we prevent os.path.abspath from # failing in some versions of python when the unicode builtin is overwritten. 
-os.path.unicode = unicode +#os.path.unicode = unicode # Store a reference to open, so that we retain access # after the builtin's are disabled -safe_open = open +#safe_open = open ##### Constants @@ -81,7 +81,7 @@ def listfiles(): A list of strings (file names) """ # We will consume 4K of fileread - nanny.tattle_quantity('fileread', 4096) + #nanny.tattle_quantity('fileread', 4096) # Get the list of files from the current directory files = os.listdir(repy_constants.REPY_CURRENT_DIR) @@ -129,12 +129,12 @@ def removefile(filename): absolute_filename = os.path.abspath(os.path.join(repy_constants.REPY_CURRENT_DIR, filename)) # Check if the file exists - nanny.tattle_quantity('fileread', 4096) + #nanny.tattle_quantity('fileread', 4096) if not os.path.isfile(absolute_filename): raise FileNotFoundError('Cannot remove non-existent file "'+filename+'".') # Consume the filewrite resources - nanny.tattle_quantity('filewrite',4096) + #nanny.tattle_quantity('filewrite',4096) # Remove the file (failure is an internal error) os.remove(absolute_filename) @@ -272,16 +272,16 @@ def __init__(self, filename, create): # Get the absolute file name self.abs_filename = os.path.abspath(os.path.join(repy_constants.REPY_CURRENT_DIR, filename)) - + # Here is where we try to allocate a "file" resource from the # nanny system. We will restore this below if there is an exception # This may raise a ResourceExhautedError - nanny.tattle_add_item('filesopened', self.abs_filename) + #nanny.tattle_add_item('filesopened', self.abs_filename) # charge for checking if the file exists. - nanny.tattle_quantity('fileread', 4096) + #nanny.tattle_quantity('fileread', 4096) exists = os.path.isfile(self.abs_filename) # if there isn't a file already... @@ -291,13 +291,13 @@ def __init__(self, filename, create): raise FileNotFoundError('Cannot openfile non-existent file "'+filename+'" without creating it!') # okay, we should create it... 
- nanny.tattle_quantity('filewrite', 4096) - safe_open(self.abs_filename, "w").close() # Forces file creation + #nanny.tattle_quantity('filewrite', 4096) + open(self.abs_filename, "w").close() # Forces file creation # Store a file handle # Always open in mode r+b, this avoids Windows text-mode # quirks, and allows reading and writing - self.fobj = safe_open(self.abs_filename, "r+b") + self.fobj = open(self.abs_filename, "r+") # Add the filename to the open files OPEN_FILES.add(filename) @@ -307,7 +307,7 @@ def __init__(self, filename, create): except RepyException: # Restore the file handle we tattled - nanny.tattle_remove_item('filesopened', self.abs_filename) + #nanny.tattle_remove_item('filesopened', self.abs_filename) raise finally: @@ -336,7 +336,7 @@ def close(self): OPEN_FILES_LOCK.acquire() # Tell nanny we're gone. - nanny.tattle_remove_item('filesopened', self.abs_filename) + #nanny.tattle_remove_item('filesopened', self.abs_filename) # Acquire the seek lock self.seek_lock.acquire() @@ -386,7 +386,7 @@ def readat(self,sizelimit,offset): end of the file, or if the sizelimit was 0. 
""" # Check the arguments - if sizelimit < 0 and sizelimit != None: + if sizelimit != None and sizelimit < 0: raise RepyArgumentError("Negative sizelimit specified!") if offset < 0: raise RepyArgumentError("Negative read offset speficied!") @@ -408,7 +408,7 @@ def readat(self,sizelimit,offset): fobj.seek(offset) # Wait for available file read resources - nanny.tattle_quantity('fileread',0) + #nanny.tattle_quantity('fileread',0) if sizelimit != None: # Read the data @@ -428,7 +428,7 @@ def readat(self,sizelimit,offset): disk_blocks_read += 1 # Charge 4K per block - nanny.tattle_quantity('fileread', disk_blocks_read*4096) + #nanny.tattle_quantity('fileread', disk_blocks_read*4096) # Return the data return data @@ -464,6 +464,7 @@ def writeat(self,data,offset): raise RepyArgumentError("Negative read offset speficied!") if type(data) is not str: raise RepyArgumentError("Data must be specified as a string!") + #data = data.encode('utf-8') # Get the seek lock self.seek_lock.acquire() @@ -482,7 +483,7 @@ def writeat(self,data,offset): fobj.seek(offset) # Wait for available file write resources - nanny.tattle_quantity('filewrite',0) + #nanny.tattle_quantity('filewrite',0) # Write the data and flush to disk fobj.write(data) @@ -503,7 +504,7 @@ def writeat(self,data,offset): disk_blocks_written += 1 # Charge 4K per block - nanny.tattle_quantity('filewrite', disk_blocks_written*4096) + #nanny.tattle_quantity('filewrite', disk_blocks_written*4096) def __del__(self): diff --git a/emulmisc.py b/emulmisc.py index 5b7ba228..57f52123 100755 --- a/emulmisc.py +++ b/emulmisc.py @@ -26,7 +26,7 @@ import nonportable # for getruntime import harshexit # for harshexit() import threading # for Lock() -import thread # to catch thread.error +import _thread as thread # to catch thread.error from exception_hierarchy import * ##### Public Functions @@ -63,7 +63,7 @@ def randombytes(): # unique from all other exit calls in repy. 
try: randomdata = os.urandom(1024) - except NotImplementedError, e: + except NotImplementedError as e: tracebackrepy.handle_internalerror("os.urandom is not implemented " + \ "(Exception was: %s)" % e.message, 217) @@ -198,8 +198,7 @@ def log(*args): Nothing """ for arg in args: - print arg, - + print(arg) ##### Class Declarations diff --git a/emultimer.py b/emultimer.py index a84ff533..1e3091a7 100755 --- a/emultimer.py +++ b/emultimer.py @@ -10,7 +10,7 @@ """ import threading -import thread # Armon: this is to catch thread.error +import _thread as thread # Armon: this is to catch thread.error import nanny import idhelper @@ -61,7 +61,7 @@ def sleep(seconds): """ # Check seconds to ensure it is a valid type. - if type(seconds) not in [long, float, int]: + if type(seconds) not in [float, int]: raise RepyArgumentError("Invalid type " + str(type(seconds))) # Using getruntime() in lieu of time.time() because we want elapsed time @@ -107,7 +107,7 @@ def createthread(function): # Generate a unique handle and see if there are resources available eventhandle = EVENT_PREFIX + idhelper.getuniqueid() - nanny.tattle_add_item('events', eventhandle) + #nanny.tattle_add_item('events', eventhandle) # Wrap the provided function def wrapped_func(): @@ -117,9 +117,10 @@ def wrapped_func(): # Exit if they throw an uncaught exception tracebackrepy.handle_exception() harshexit.harshexit(30) - finally: + #finally: # Remove the event before I exit - nanny.tattle_remove_item('events',eventhandle) + #nanny.tattle_remove_item('events',eventhandle) + # Create a thread object tobj = threading.Thread(target=wrapped_func, name=idhelper.get_new_thread_name(EVENT_PREFIX)) diff --git a/exception_hierarchy.py b/exception_hierarchy.py index ee02bb1d..f7ec7113 100644 --- a/exception_hierarchy.py +++ b/exception_hierarchy.py @@ -15,26 +15,11 @@ "ResourceUsageError", "ResourceExhaustedError", "ResourceForbiddenError", + "LockDoubleReleaseError", + "TimeoutError", "FileError", "FileNotFoundError", 
"FileInUseError", - "SeekPastEndOfFileError", - "FileClosedError", - "LockDoubleReleaseError", - "NetworkError", - "NetworkAddressError", - "AlreadyListeningError", - "DuplicateTupleError", - "CleanupInProgressError", - "InternetConnectivityError", - "AddressBindingError", - "ConnectionRefusedError", - "LocalIPChanged", - "SocketClosedLocal", - "SocketClosedRemote", - "SocketWouldBlockError", - "TCPServerSocketInvalidError", - "TimeoutError", ] @@ -108,42 +93,6 @@ class ResourceForbiddenError (ResourceUsageError): pass -##### File Related Exceptions - -class FileError (RepyException): - """All File-Related Exceptions derive from this exception.""" - pass - -class FileNotFoundError (FileError): - """ - This Exception indicates that a file which does not exist was - used as an argument to a function expecting a real file. - """ - pass - -class FileInUseError (FileError): - """ - This Exception indicates that a file which is in use was - used as an argument to a function expecting the file to - be un-used. - """ - pass - -class SeekPastEndOfFileError (FileError): - """ - This Exception indicates that an attempt was made to - seek past the end of a file. - """ - pass - -class FileClosedError (FileError): - """ - This Exception indicates that the file is closed, - and that the operation is therfor invalid. - """ - pass - - ##### Safety exceptions from safe.py class SafeException(RepyException): @@ -175,88 +124,37 @@ class LockDoubleReleaseError(RepyException): """ pass +##### File Related Exceptions -##### Network exceptions - -class NetworkError (RepyException): - """ - This exception parent-classes all of the networking exceptions. - """ - pass - -class NetworkAddressError (NetworkError): - """ - This exception is raised when a DNS lookup fails. - """ - pass - -class AlreadyListeningError (NetworkError): - """ - This exception indicates that there is an existing - listen on the local IP / Port pair that are specified. 
- """ - pass - -class DuplicateTupleError (NetworkError): - """ - This exception indicates that there is another socket - which has a duplicate tuple (local ip, local port, remote ip, remote port) - """ - pass - -class CleanupInProgressError (NetworkError): - """ - This exception indicates that the socket is still - being cleaned up by the operating system, and that - it is unavailable. - """ - pass - -class InternetConnectivityError (NetworkError): - """ - This exception is raised when there is no route to an IP passed to - sendmessage or openconnection. - """ - pass - -class AddressBindingError (NetworkError): - """ - This exception is raised when binding to an ip and port fails. - """ - pass - -class ConnectionRefusedError (NetworkError): - """ - This exception is raised when a TCP connection request is refused. - """ +class FileError (RepyException): + """All File-Related Exceptions derive from this exception.""" pass -class LocalIPChanged (NetworkError): +class FileNotFoundError (FileError): """ - This exception indicates that the local IP has changed. + This Exception indicates that a file which does not exist was + used as an argument to a function expecting a real file. """ pass -class SocketClosedLocal (NetworkError): +class FileInUseError (FileError): """ - This indicates that the socket was closed locally. + This Exception indicates that a file which is in use was + used as an argument to a function expecting the file to + be un-used. """ pass -class SocketClosedRemote (NetworkError): +class SeekPastEndOfFileError (FileError): """ - This indicates that the socket was closed on the remote end. + This Exception indicates that an attempt was made to + seek past the end of a file. """ pass -class SocketWouldBlockError (NetworkError): +class FileClosedError (FileError): """ - This indicates that the socket operation would have blocked. + This Exception indicates that the file is closed, + and that the operation is therfor invalid. 
""" pass - -class TCPServerSocketInvalidError(NetworkError): - """ - This indicates that the TCP server socket has become invalid, e.g. - because the local IP address changed. - """ diff --git a/harshexit.py b/harshexit.py index e2f95273..30722292 100644 --- a/harshexit.py +++ b/harshexit.py @@ -12,38 +12,20 @@ # needed for signal numbers import signal -# needed for changing polling constants on the Nokia N800 -import repy_constants - -# Needed for kill_process; This will fail on non-windows systems -try: - import windows_api -except: - windows_api = None - -# need for status retrieval -import statusstorage - # This prevents writes to the nanny's status information after we want to stop -statuslock = statusstorage.statuslock - - ostype = None osrealtype = None - # this indicates if we are exiting. Wrapping in a list to prevent needing a # global (the purpose of this is described below) statusexiting = [False] - class UnsupportedSystemException(Exception): pass - def portablekill(pid): global ostype global osrealtype @@ -64,10 +46,10 @@ def portablekill(pid): elif ostype == 'Windows': # Use new api - windows_api.kill_process(pid) + os.kill(pid, signal.SIGKILL) else: - raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")" + raise UnsupportedSystemException("Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")") @@ -88,9 +70,6 @@ def harshexit(val): # do this once (now) statusexiting[0] = True - # prevent concurrent writes to status info (acquire the lock to stop others, - # but do not block... - statuslock.acquire() # we are stopped by the stop file watcher, not terminated through another # mechanism @@ -98,16 +77,6 @@ def harshexit(val): # we were stopped by another thread. 
Let's exit pass - # Special Termination signal to notify the NM of excessive threads - elif val == 56: - statusstorage.write_status("ThreadErr") - - elif val == 44: - statusstorage.write_status("Stopped") - - else: - # generic error, normal exit, or exitall in the user code... - statusstorage.write_status("Terminated") # We intentionally do not release the lock. We don't want anyone else # writing over our status information (we're killing them). @@ -125,7 +94,7 @@ def harshexit(val): sys.stderr.flush() os._exit(val) else: - raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")" + raise UnsupportedSystemException("Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")") @@ -139,11 +108,6 @@ def init_ostype(): # The Nokia N800 (and N900) uses the ARM architecture, # and we change the constants on it to make disk checks happen less often - if platform.machine().startswith('armv'): - if osrealtype == 'Linux' or osrealtype == 'Darwin' or osrealtype == 'FreeBSD': - repy_constants.CPU_POLLING_FREQ_LINUX = repy_constants.CPU_POLLING_FREQ_WINCE; - repy_constants.RESOURCE_POLLING_FREQ_LINUX = repy_constants.RESOURCE_POLLING_FREQ_WINCE; - if osrealtype == 'Linux' or osrealtype == 'Windows' or osrealtype == 'Darwin': ostype = osrealtype return diff --git a/linux_api.py b/linux_api.py index bc4e91dc..122a279d 100644 --- a/linux_api.py +++ b/linux_api.py @@ -136,7 +136,7 @@ def _get_proc_info_by_pid(pid): # Check the state, raise an exception if the process is a zombie if "Z" in last_stat_data[FIELDS["state"]]: - raise Exception, "Queried Process is a zombie (dead)!" + raise Exception("Queried Process is a zombie (dead)!") def get_process_cpu_time(pid): @@ -262,7 +262,7 @@ def get_system_uptime(): return uptime else: - raise Exception, "Could not find /proc/uptime!" 
+ raise Exception("Could not find /proc/uptime!") def get_uptime_granularity(): """ @@ -295,7 +295,7 @@ def get_uptime_granularity(): # Convert granularity to a number return pow(10, 0-granularity) else: - raise Exception, "Could not find /proc/uptime!" + raise Exception("Could not find /proc/uptime!") def get_system_thread_count(): @@ -357,3 +357,4 @@ def get_interface_ip_addresses(interfaceName): # Done, return the interfaces return ipaddressList + diff --git a/loggingrepy.py b/loggingrepy.py index eb5c754a..4f4424a9 100755 --- a/loggingrepy.py +++ b/loggingrepy.py @@ -15,7 +15,7 @@ get_size = loggingrepy_core.get_size -myfile = loggingrepy_core.myfile +#myfile = loggingrepy_core.myfile class flush_logger(loggingrepy_core.flush_logger_core): @@ -27,19 +27,19 @@ class flush_logger(loggingrepy_core.flush_logger_core): def write(self, writeitem): # block if already over - nanny.tattle_quantity('lograte', 0) + #nanny.tattle_quantity('lograte', 0) # do the actual write loggingrepy_core.flush_logger_core.write(self, writeitem) # block if over after log write writeamt = len(str(writeitem)) - nanny.tattle_quantity('lograte', writeamt) + #nanny.tattle_quantity('lograte', writeamt) def writelines(self, writelist): # block if already over - nanny.tattle_quantity('lograte', 0) + #nanny.tattle_quantity('lograte', 0) # do the actual writelines() loggingrepy_core.flush_logger_core.writelines(self, writelist) @@ -48,7 +48,7 @@ def writelines(self, writelist): writeamt = 0 for writeitem in writelist: writeamt = writeamt + len(str(writeitem)) - nanny.tattle_quantity('lograte', writeamt) + #nanny.tattle_quantity('lograte', writeamt) @@ -71,7 +71,7 @@ class circular_logger(loggingrepy_core.circular_logger_core): """ - def __init__(self, fnp, mbs = 16*1024, use_nanny=True): + def __init__(self, fnp, mbs = 16*1024, use_nanny=False): loggingrepy_core.circular_logger_core.__init__(self, fnp, mbs) # Should we be using the nanny to limit the lograte @@ -87,13 +87,14 @@ def write(self, 
writeitem): if self.should_nanny: # Only invoke the nanny if the should_nanny flag is set. # block if already over - nanny.tattle_quantity('lograte',0) - + #nanny.tattle_quantity('lograte',0) + pass writeamt = self.writedata(writeitem) if self.should_nanny: # Only invoke the nanny if the should_nanny flag is set. - nanny.tattle_quantity('lograte',writeamt) + #nanny.tattle_quantity('lograte',writeamt) + pass finally: self.writelock.release() @@ -108,7 +109,8 @@ def writelines(self, writelist): if self.should_nanny: # Only invoke the nanny if the should_nanny flag is set. # block if already over - nanny.tattle_quantity('lograte',0) + #nanny.tattle_quantity('lograte',0) + pass writeamt = 0 for writeitem in writelist: @@ -116,7 +118,8 @@ def writelines(self, writelist): if self.should_nanny: # Only invoke the nanny if the should_nanny flag is set. - nanny.tattle_quantity('lograte',writeamt) + #nanny.tattle_quantity('lograte',writeamt) + pass finally: self.writelock.release() diff --git a/loggingrepy_core.py b/loggingrepy_core.py index b1ee733b..87500d11 100755 --- a/loggingrepy_core.py +++ b/loggingrepy_core.py @@ -15,7 +15,7 @@ import threading # I need to rename file so that the checker doesn't complain... -myfile = file +#file = file # used to make stdout flush as written This is private to my code @@ -61,7 +61,7 @@ def writelines(self,writelist): # helper function def get_size(fn): - fo = myfile(fn,"r") + fo = file(fn,"r") data = fo.read() fo.close() return len(data) @@ -111,7 +111,7 @@ def __init__(self, fnp, mbs = 16 * 1024): # the old file exists too (the common case) self.currentsize = get_size(self.newfn) - self.activefo = myfile(self.newfn,"a") + self.activefo = file(self.newfn,"a") self.first = False # now we have the fileobject and the size set up. We're ready... 
return @@ -121,7 +121,7 @@ def __init__(self, fnp, mbs = 16 * 1024): # copied over os.rename(self.newfn, self.oldfn) self.currentsize = 0 - self.activefo = myfile(self.newfn,"w") + self.activefo = file(self.newfn,"w") self.first = False return @@ -131,7 +131,7 @@ def __init__(self, fnp, mbs = 16 * 1024): # the old file name exists, so we should start from here self.currentsize = get_size(self.oldfn) - self.activefo = myfile(self.oldfn,"a") + self.activefo = file(self.oldfn,"a") self.first = True # now we have the fileobject and the size set up. We're ready... return @@ -139,7 +139,7 @@ def __init__(self, fnp, mbs = 16 * 1024): else: # starting from nothing... self.currentsize = 0 - self.activefo = myfile(self.oldfn,"w") + self.activefo = file(self.oldfn,"w") self.first = True return @@ -193,12 +193,12 @@ def rotate_log(self): os.remove(self.oldfn) os.rename(self.newfn, self.oldfn) - self.activefo = myfile(self.newfn,"w") + self.activefo = file(self.newfn,"w") def write_first_log(self): self.activefo.close() - self.activefo = myfile(self.newfn,"w") + self.activefo = file(self.newfn,"w") @@ -260,14 +260,14 @@ def writedata(self, data): os.remove(self.oldfn) os.remove(self.newfn) - oldfo = myfile(self.oldfn,"w") + oldfo = file(self.oldfn,"w") # write the data counting backwards from the end of the file oldfo.write(data[-(lastchunk+self.maxbuffersize):-lastchunk]) oldfo.close() # next... - self.activefo = myfile(self.newfn,"w") + self.activefo = file(self.newfn,"w") # now write the last bit of data... self.activefo.write(str(data[-lastchunk:])) diff --git a/namespace.py b/namespace.py index 6768ae4a..df288de1 100644 --- a/namespace.py +++ b/namespace.py @@ -76,11 +76,6 @@ functions that are wrapped and inserted into the user context when wrap_and_insert_api_functions() is called. 
- FILE_OBJECT_WRAPPER_INFO - LOCK_OBJECT_WRAPPER_INFO - TCP_SOCKET_OBJECT_WRAPPER_INFO - TCP_SERVER_SOCKET_OBJECT_WRAPPER_INFO - UDP_SERVER_SOCKET_OBJECT_WRAPPER_INFO VIRTUAL_NAMESPACE_OBJECT_WRAPPER_INFO The above four dictionaries define the methods available on the wrapped @@ -108,19 +103,20 @@ import types -# To check if objects are thread.LockType objects. -import thread +import safe # Used to get SafeDict +import tracebackrepy +import virtual_namespace -import emulcomm +from exception_hierarchy import * import emulfile import emulmisc import emultimer import nonportable -import safe # Used to get SafeDict -import tracebackrepy -import virtual_namespace -from exception_hierarchy import * +# KEVIN: Need to add these? +import emulcomm +import _thread as thread + # Save a copy of a few functions not available at runtime. _saved_getattr = getattr @@ -128,84 +124,14 @@ _saved_hash = hash _saved_id = id - -############################################################################## -# Public functions of this module to be called from the outside. -############################################################################## - -def wrap_and_insert_api_functions(usercontext): - """ - This is the main public function in this module at the current time. It will - wrap each function in the usercontext dict in a wrapper with custom - restrictions for that specific function. These custom restrictions are - defined in the dictionary USERCONTEXT_WRAPPER_INFO. - """ - - _init_namespace() - - for function_name in USERCONTEXT_WRAPPER_INFO: - function_info = USERCONTEXT_WRAPPER_INFO[function_name] - wrapperobj = NamespaceAPIFunctionWrapper(function_info) - usercontext[function_name] = wrapperobj.wrapped_function - - - - - -############################################################################## -# Helper functions for the above public function. 
-############################################################################## - -# Whether _init_namespace() has already been called. -initialized = False - -def _init_namespace(): - """ - Performs one-time initialization of the namespace module. - """ - global initialized - if not initialized: - initialized = True - _prepare_wrapped_functions_for_object_wrappers() - - - - - -# These dictionaries will ultimately contain keys whose names are allowed -# methods that can be called on the objects and values which are the wrapped -# versions of the functions which are exposed to users. If a dictionary -# is empty, it means no methods can be called on a wrapped object of that type. -file_object_wrapped_functions_dict = {} -lock_object_wrapped_functions_dict = {} -tcp_socket_object_wrapped_functions_dict = {} -tcp_server_socket_object_wrapped_functions_dict = {} -udp_server_socket_object_wrapped_functions_dict = {} -virtual_namespace_object_wrapped_functions_dict = {} - -def _prepare_wrapped_functions_for_object_wrappers(): - """ - Wraps functions that will be used whenever a wrapped object is created. - After this has been called, the dictionaries such as - file_object_wrapped_functions_dict have been populated and therefore can be - used by functions such as wrap_socket_obj(). 
- """ - objects_tuples = [(FILE_OBJECT_WRAPPER_INFO, file_object_wrapped_functions_dict), - (LOCK_OBJECT_WRAPPER_INFO, lock_object_wrapped_functions_dict), - (TCP_SOCKET_OBJECT_WRAPPER_INFO, tcp_socket_object_wrapped_functions_dict), - (TCP_SERVER_SOCKET_OBJECT_WRAPPER_INFO, tcp_server_socket_object_wrapped_functions_dict), - (UDP_SERVER_SOCKET_OBJECT_WRAPPER_INFO, udp_server_socket_object_wrapped_functions_dict), - (VIRTUAL_NAMESPACE_OBJECT_WRAPPER_INFO, virtual_namespace_object_wrapped_functions_dict)] - - for description_dict, wrapped_func_dict in objects_tuples: - for function_name in description_dict: - function_info = description_dict[function_name] - wrapperobj = NamespaceAPIFunctionWrapper(function_info, is_method=True) - wrapped_func_dict[function_name] = wrapperobj.wrapped_function - - - - +# These are the functions in the user's name space excluding the builtins we +# allow. Each function is a key in the dictionary. Each value is a dictionary +# that defines the functions to be used by the wrapper when a call is +# performed. It is the same dictionary that is passed as a constructor to +# the NamespaceAPIFunctionWrapper class to create the actual wrappers. +# The public function wrap_and_insert_api_functions() uses this dictionary as +# the basis for what is populated in the user context. Anything function +# defined here will be wrapped and made available to untrusted user code. ############################################################################## # Helper functions. @@ -215,6 +141,7 @@ def _handle_internalerror(message, exitcode): """ Terminate the running program. 
This is used rather than tracebackrepy.handle_internalerror directly in order to make testing easier.""" + #print("Message: " + message) tracebackrepy.handle_internalerror(message, exitcode) @@ -234,10 +161,6 @@ def _is_in(obj, sequence): return True return False - - - - ############################################################################## # Constants that define which functions should be wrapped and how. These are # used by the functions wrap_and_insert_api_functions() and @@ -282,10 +205,6 @@ def wrap(self, val): def unwrap(self, val): return val._wrapped__object - - - - class Str(ValueProcessor): """Allows str or unicode.""" @@ -296,7 +215,8 @@ def __init__(self, maxlen=None, minlen=None): def check(self, val): - if not _is_in(type(val), [str, unicode]): + if not _is_in(type(val), [str, bytes]): + #print("We here bro.") raise RepyArgumentError("Invalid type %s" % type(val)) if self.maxlen is not None: @@ -307,10 +227,6 @@ def check(self, val): if len(val) < self.minlen: raise RepyArgumentError("Min string length is %s" % self.minlen) - - - - class Int(ValueProcessor): """Allows int or long.""" @@ -320,7 +236,7 @@ def __init__(self, min=0): def check(self, val): - if not _is_in(type(val), [int, long]): + if not _is_in(type(val), [int]): raise RepyArgumentError("Invalid type %s" % type(val)) if val < self.min: @@ -332,25 +248,17 @@ class NoneOrInt(ValueProcessor): ints.""" def check(self, val): - if val is not None and not _is_in(type(val), [int, long]): + if val is not None and not _is_in(type(val), [int]): raise RepyArgumentError("Invalid type %s" % type(val)) - - - - - class StrOrInt(ValueProcessor): """Allows a string or int. 
This doesn't enforce max/min/length limits on the strings and ints.""" def check(self, val): - if not _is_in(type(val), [int, long, str, unicode]): + if not _is_in(type(val), [int, str]): raise RepyArgumentError("Invalid type %s" % type(val)) - - - class StrOrNone(ValueProcessor): """Allows str, unicode, or None.""" @@ -371,7 +279,7 @@ def __init__(self, allow_neg=False): def check(self, val): - if not _is_in(type(val), [int, long, float]): + if not _is_in(type(val), [int, float]): raise RepyArgumentError("Invalid type %s" % type(val)) if not self.allow_neg: @@ -390,9 +298,6 @@ def check(self, val): raise RepyArgumentError("Invalid type %s" % type(val)) - - - class ListOfStr(ValueProcessor): """Allows lists of strings. This doesn't enforce max/min/length limits on the strings and ints.""" @@ -427,9 +332,6 @@ def check(self, val): raise RepyArgumentError("Invalid type %s" % type(val)) - - - class DictOfStrOrInt(ValueProcessor): """ Allows a tuple that contains dictionaries that only contain string keys @@ -472,9 +374,6 @@ def copy(self, val): return val - - - class File(ObjectProcessor): """Allows File objects.""" @@ -503,58 +402,6 @@ def check(self, val): def wrap(self, val): return NamespaceObjectWrapper("lock", val, lock_object_wrapped_functions_dict) - - - - -class UDPServerSocket(ObjectProcessor): - """Allows UDPServerSocket objects.""" - - def check(self, val): - if not isinstance(val, emulcomm.UDPServerSocket): - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - def wrap(self, val): - return NamespaceObjectWrapper("socket", val, udp_server_socket_object_wrapped_functions_dict) - - - - - -class TCPServerSocket(ObjectProcessor): - """Allows TCPServerSocket objects.""" - - def check(self, val): - if not isinstance(val, emulcomm.TCPServerSocket): - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - def wrap(self, val): - return NamespaceObjectWrapper("socket", val, tcp_server_socket_object_wrapped_functions_dict) - - - - - -class 
TCPSocket(ObjectProcessor): - """Allows TCPSocket objects.""" - - def check(self, val): - if not isinstance(val, emulcomm.EmulatedSocket): - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - def wrap(self, val): - return NamespaceObjectWrapper("socket", val, tcp_socket_object_wrapped_functions_dict) - - - - - class VirtualNamespace(ObjectProcessor): """Allows VirtualNamespace objects.""" @@ -569,9 +416,6 @@ def wrap(self, val): virtual_namespace_object_wrapped_functions_dict) - - - class SafeDict(ValueProcessor): """Allows SafeDict objects.""" @@ -595,44 +439,7 @@ def check(self, val): SafeDict().check(val) - - - -# These are the functions in the user's name space excluding the builtins we -# allow. Each function is a key in the dictionary. Each value is a dictionary -# that defines the functions to be used by the wrapper when a call is -# performed. It is the same dictionary that is passed as a constructor to -# the NamespaceAPIFunctionWrapper class to create the actual wrappers. -# The public function wrap_and_insert_api_functions() uses this dictionary as -# the basis for what is populated in the user context. Anything function -# defined here will be wrapped and made available to untrusted user code. 
USERCONTEXT_WRAPPER_INFO = { - 'gethostbyname' : - {'func' : emulcomm.gethostbyname, - 'args' : [Str()], - 'return' : Str()}, - 'getmyip' : - {'func' : emulcomm.getmyip, - 'args' : [], - 'return' : Str()}, - 'sendmessage' : - {'func' : emulcomm.sendmessage, - 'args' : [Str(), Int(), Str(), Str(), Int()], - 'return' : Int()}, - 'listenformessage' : - {'func' : emulcomm.listenformessage, - 'args' : [Str(), Int()], - 'return' : UDPServerSocket()}, - 'openconnection' : - {'func' : emulcomm.openconnection, - 'args' : [Str(), Int(), Str(), Int(), Float()], -# 'raise' : [AddressBindingError, PortRestrictedError, PortInUseError, -# ConnectionRefusedError, TimeoutError, RepyArgumentError], - 'return' : TCPSocket()}, - 'listenforconnection' : - {'func' : emulcomm.listenforconnection, - 'args' : [Str(), Int()], - 'return' : TCPServerSocket()}, 'openfile' : {'func' : emulfile.emulated_open, 'args' : [Str(maxlen=120), Bool()], @@ -706,44 +513,8 @@ def check(self, val): 'return' : None}, } -TCP_SOCKET_OBJECT_WRAPPER_INFO = { - 'close' : - {'func' : emulcomm.EmulatedSocket.close, - 'args' : [], - 'return' : Bool()}, - 'recv' : - {'func' : emulcomm.EmulatedSocket.recv, - 'args' : [Int(min=1)], - 'return' : Str()}, - 'send' : - {'func' : emulcomm.EmulatedSocket.send, - 'args' : [Str()], - 'return' : Int(min=0)}, -} - # TODO: Figure out which real object should be wrapped. It doesn't appear # to be implemented yet as there is no "getconnection" in the repy_v2 source. 
-TCP_SERVER_SOCKET_OBJECT_WRAPPER_INFO = { - 'close' : - {'func' : emulcomm.TCPServerSocket.close, - 'args' : [], - 'return' : Bool()}, - 'getconnection' : - {'func' : emulcomm.TCPServerSocket.getconnection, - 'args' : [], - 'return' : (Str(), Int(), TCPSocket())}, -} - -UDP_SERVER_SOCKET_OBJECT_WRAPPER_INFO = { - 'close' : - {'func' : emulcomm.UDPServerSocket.close, - 'args' : [], - 'return' : Bool()}, - 'getmessage' : - {'func' : emulcomm.UDPServerSocket.getmessage, - 'args' : [], - 'return' : (Str(), Int(), Str())}, -} LOCK_OBJECT_WRAPPER_INFO = { 'acquire' : @@ -770,6 +541,76 @@ def check(self, val): 'return' : SafeDict()}, } +############################################################################## +# Public functions of this module to be called from the outside. +############################################################################## + +def wrap_and_insert_api_functions(usercontext): + """ + This is the main public function in this module at the current time. It will + wrap each function in the usercontext dict in a wrapper with custom + restrictions for that specific function. These custom restrictions are + defined in the dictionary USERCONTEXT_WRAPPER_INFO. + """ + + _init_namespace() + + for function_name in USERCONTEXT_WRAPPER_INFO: + function_info = USERCONTEXT_WRAPPER_INFO[function_name] + wrapperobj = NamespaceAPIFunctionWrapper(function_info) + usercontext[function_name] = wrapperobj.wrapped_function + + + + + +############################################################################## +# Helper functions for the above public function. +############################################################################## + +# Whether _init_namespace() has already been called. +initialized = False + +def _init_namespace(): + """ + Performs one-time initialization of the namespace module. 
+ """ + global initialized + if not initialized: + initialized = True + _prepare_wrapped_functions_for_object_wrappers() + + + + + +# These dictionaries will ultimately contain keys whose names are allowed +# methods that can be called on the objects and values which are the wrapped +# versions of the functions which are exposed to users. If a dictionary +# is empty, it means no methods can be called on a wrapped object of that type. +file_object_wrapped_functions_dict = {} +virtual_namespace_object_wrapped_functions_dict = {} +lock_object_wrapped_functions_dict = {} + +def _prepare_wrapped_functions_for_object_wrappers(): + """ + Wraps functions that will be used whenever a wrapped object is created. + After this has been called, the dictionaries such as + file_object_wrapped_functions_dict have been populated and therefore can be + used by functions such as wrap_socket_obj(). + """ + #objects_tuples = [ + # (VIRTUAL_NAMESPACE_OBJECT_WRAPPER_INFO, virtual_namespace_object_wrapped_functions_dict)] + objects_tuples = [(FILE_OBJECT_WRAPPER_INFO, file_object_wrapped_functions_dict), + (LOCK_OBJECT_WRAPPER_INFO, lock_object_wrapped_functions_dict), + (VIRTUAL_NAMESPACE_OBJECT_WRAPPER_INFO, virtual_namespace_object_wrapped_functions_dict)] + + for description_dict, wrapped_func_dict in objects_tuples: + for function_name in description_dict: + function_info = description_dict[function_name] + wrapperobj = NamespaceAPIFunctionWrapper(function_info, is_method=True) + wrapped_func_dict[function_name] = wrapperobj.wrapped_function + ############################################################################## # The classes we define from which actual wrappers are instantiated. @@ -811,9 +652,12 @@ def _copy(obj, objectmap=None): # types.InstanceType is included because the user can provide an instance # of a class of their own in the list of callback args to settimer. 
- if _is_in(type(obj), [str, unicode, int, long, float, complex, bool, frozenset, - types.NoneType, types.FunctionType, types.LambdaType, - types.MethodType, types.InstanceType]): + # KEVIN: Had to replace this with object for python3. Not sure if this works. + if _is_in(type(obj), [str, int, float, complex, bool, frozenset, + type(None), types.FunctionType, types.LambdaType, + types.MethodType, object,#]): + bytes, safe.SafeDict]): # KEVIN: had to add bytes? + #types.MethodType, types.InstanceType]): return obj elif type(obj) is list: @@ -892,13 +736,9 @@ def _copy(obj, objectmap=None): else: raise TypeError("_copy is not implemented for objects of type " + str(type(obj))) - except Exception, e: + except Exception as e: raise NamespaceInternalError("_copy failed on " + str(obj) + " with message " + str(e)) - - - - class NamespaceInternalError(Exception): """Something went wrong and we should terminate.""" @@ -910,7 +750,7 @@ class NamespaceObjectWrapper(object): """ Instances of this class are used to wrap handles and objects returned by api functions to the user code. - + The methods that can be called on these instances are mostly limited to what is in the allowed_functions_dict passed to the constructor. The exception is that a simple __repr__() is defined as well as an __iter__() @@ -943,8 +783,6 @@ def __init__(self, wrapped_type_name, wrapped_object, allowed_functions_dict): self._wrapped__object = wrapped_object self._wrapped__allowed_functions_dict = allowed_functions_dict - - def __getattr__(self, name): """ When a method is called on an instance, we look for the method in the @@ -963,7 +801,7 @@ def __do_func_call(*args, **kwargs): else: # This is the standard way of handling "it doesn't exist as far as we # are concerned" in __getattr__() methods. 
- raise AttributeError, name + raise AttributeError(name) @@ -976,8 +814,6 @@ def __iter__(self): """ return self - - def next(self): """ We provide next() as part of the class rather than through __getattr__ @@ -1021,9 +857,6 @@ def __ne__(self, other): """ return _saved_hash(self) != _saved_hash(other) - - - class NamespaceAPIFunctionWrapper(object): """ Instances of this class exist solely to provide function wrapping. This is @@ -1060,6 +893,7 @@ def __init__(self, func_dict, is_method=False): self.__args = func_dict["args"] self.__return = func_dict["return"] self.__is_method = is_method + #print(str(self.__func) + " " + str(self.__return)) # Make sure that the __target_func really is a function or a string # indicating a function by that name on the underlying object should @@ -1102,8 +936,6 @@ def _process_args(self, args): return args_to_return - - def _process_retval_helper(self, processor, retval): try: if isinstance(processor, ValueProcessor): @@ -1120,7 +952,7 @@ def _process_retval_helper(self, processor, retval): raise InternalRepyError("Unknown retval expectation.") return tempretval - except RepyArgumentError, err: + except RepyArgumentError as err: raise InternalRepyError("Invalid retval type: %s" % err) @@ -1140,7 +972,7 @@ def _process_retval(self, retval): else: tempretval = self._process_retval_helper(self.__return, retval) - except Exception, e: + except Exception as e: raise InternalRepyError( "Function '" + self.__func_name + "' returned with unallowed return type " + str(type(retval)) + " : " + str(e)) @@ -1148,8 +980,6 @@ def _process_retval(self, retval): return tempretval - - def wrapped_function(self, *args, **kwargs): """ @@ -1170,6 +1000,7 @@ def wrapped_function(self, *args, **kwargs): Anything that the underlying function may return. """ + #print("In wrapped functions") try: # We don't allow keyword args. 
if kwargs: @@ -1177,18 +1008,22 @@ def wrapped_function(self, *args, **kwargs): self.__func_name) if self.__is_method: + #print("Is a method") # This is a method of an object instance rather than a standalone function. # The "self" argument will be passed implicitly by python in some cases, so # we remove it from the args we check. For the others, we'll add it back in # after the check. args_to_check = args[1:] else: + #print("Not a method") args_to_check = args if len(args_to_check) != len(self.__args): + #print("Length mixmatch") if not self.__args or not isinstance(self.__args[-1:][0], NonCopiedVarArgs): - raise RepyArgumentError("Function '" + self.__func_name + - "' takes " + str(len(self.__args)) + " arguments, not " + + #print("IsInstance") + raise RepyArgumentError("Function '" + self.__func_name + + "' takes " + str(len(self.__args)) + " arguments, not " + str(len(args_to_check)) + " as you provided.") args_copy = self._process_args(args_to_check) @@ -1200,24 +1035,31 @@ def wrapped_function(self, *args, **kwargs): # object. We use this if the function to wrap isn't available without # having the object around, such as with real lock objects. if type(self.__func) is str: + #print("Type of function is str") func_to_call = _saved_getattr(args[0], self.__func) args_to_use = args_copy else: + #print("Type of function not str") func_to_call = self.__func if self.__is_method: + #print("self.__is_method is True") # Sanity check the object we're adding back in as the "self" argument. if not isinstance(args[0], (NamespaceObjectWrapper, emulfile.emulated_file, emulcomm.EmulatedSocket, emulcomm.TCPServerSocket, emulcomm.UDPServerSocket, thread.LockType, virtual_namespace.VirtualNamespace)): + #print("Not an instance, raise error") raise NamespaceInternalError("Wrong type for 'self' argument.") # If it's a method but the function was not provided as a string, we # actually do have to add the first argument back in. Yes, this whole # area of code is ugly. 
args_to_use = [args[0]] + args_copy else: + #print("self.__is_method is False") args_to_use = args_copy - + + #print("Here at " + str(func_to_call)) + retval = func_to_call(*args_to_use) return self._process_retval(retval) @@ -1226,6 +1068,7 @@ def wrapped_function(self, *args, **kwargs): # TODO: this should be changed to RepyError along with all references to # RepyException in the rest of the repy code. # We allow any RepyError to continue up to the client code. + #print("Got RepyException") raise except: @@ -1236,10 +1079,12 @@ def wrapped_function(self, *args, **kwargs): # crash the sandbox despite being wrapped in `try`/`except`, # see SeattleTestbed/repy_v2#132.) if type(args[0]) == virtual_namespace.VirtualNamespace: + #print("Got a VirtrualNamespace arg error thing") raise # Non-`RepyException`s outside of `VirtualNamespace` methods # are unexpected and indicative of a programming error on # our side, so we terminate. + #print("Calling _handle_internalerror") _handle_internalerror("Unexpected exception from within Repy API", 843) diff --git a/nanny.py b/nanny.py index 027e8b06..f1575680 100755 --- a/nanny.py +++ b/nanny.py @@ -89,7 +89,7 @@ def _sleep_until_resource_drains(resource, resourcesalloweddict, resourcesuseddi # It'll never drain! if resourcesalloweddict[resource] == 0: - raise InternalRepyError, "Resource '"+resource+"' limit set to 0, won't drain!" + raise InternalRepyError("Resource '"+resource+"' limit set to 0, won't drain!") # We may need to go through this multiple times because other threads may @@ -263,7 +263,7 @@ def _tattle_add_item(resource, item, resourcesalloweddict, resourcesuseddict): return if len(resourcesuseddict[resource]) > resourcesalloweddict[resource]: - raise InternalRepyError, "Should not be able to exceed resource count" + raise InternalRepyError("Should not be able to exceed resource count") if len(resourcesuseddict[resource]) == resourcesalloweddict[resource]: # it's clobberin time! 
@@ -530,3 +530,4 @@ def get_resource_information(): return (resource_limit_dict, resource_use_dict) + diff --git a/nix_common_api.py b/nix_common_api.py index a856720c..2d499488 100644 --- a/nix_common_api.py +++ b/nix_common_api.py @@ -14,6 +14,7 @@ import portable_popen # Seattlelib text-processing library (not a Python stdlib): +# KEVIN: Replace. import textops # Get the standard library @@ -174,3 +175,4 @@ def get_available_interfaces(): # Done, return the interfaces return interfaces_list + diff --git a/nmstatusinterface.py b/nmstatusinterface.py index c92f106f..29f00218 100644 --- a/nmstatusinterface.py +++ b/nmstatusinterface.py @@ -62,7 +62,7 @@ def init(stopfile=None, statusfile=None, freq=1): # Check for the stopfile if stopfile != None and os.path.exists(stopfile): - raise Exception, "Stop file already exists! File:"+stopfile + raise Exception("Stop file already exists! File:"+stopfile) # Assign the values stopfilename = stopfile @@ -191,15 +191,15 @@ def run(self): # ThreadErr cannot be specified externally, since it has side-affects # such as changing global thread restrictions if exitcode == 56: - raise Exception, "ThreadErr exit code specified. Exit code not allowed." + raise Exception("ThreadErr exit code specified. Exit code not allowed.") # Print the message, then call harshexit with the exitcode if mesg != "": - print mesg + print(mesg) _stopfile_exit(exitcode, self.repy_process_id) else: - raise Exception, "Stopfile has no content." 
+ raise Exception("Stopfile has no content.") except: # On any issue, just do "Stopped" (44) @@ -210,3 +210,4 @@ def run(self): + diff --git a/nonportable.py b/nonportable.py index 64af9def..7e0a27d4 100755 --- a/nonportable.py +++ b/nonportable.py @@ -112,7 +112,7 @@ def preparesocket(socketobject): pass else: - raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")" + raise UnsupportedSystemException("Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")") # Armon: Also launches the nmstatusinterface thread. @@ -134,7 +134,7 @@ def monitor_cpu_disk_and_mem(): # process, so pass None instead of a process id. nmstatusinterface.launch(None) else: - raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")" + raise UnsupportedSystemException("Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")") @@ -193,7 +193,7 @@ def getruntime(): # If the difference is less than 1 second, that is okay, since # The boot time is only precise to 1 second if (last_uptime - uptime) > 1: - raise EnvironmentError, "Uptime is going backwards!" + raise EnvironmentError("Uptime is going backwards!") else: # Use the last uptime uptime = last_uptime @@ -213,11 +213,11 @@ def getruntime(): runtimelock.release() # Time.clock returns elapsedtime since the first call to it, so this works for us - return time.clock() + return time.perf_counter() # Who knows... else: - raise EnvironmentError, "Unsupported Platform!" + raise EnvironmentError("Unsupported Platform!") # Current uptime minus start time runtime = uptime - starttime @@ -257,7 +257,7 @@ def getruntime(): get_resources_lock = threading.Lock() # Cache the disk used from the external process -cached_disk_used = 0L +cached_disk_used = 0 # This array holds the times that repy was stopped.
# It is an array of tuples, of the form (time, amount) @@ -382,14 +382,14 @@ def run(self): if memused > nanny.get_resource_limit("memory"): # We will be killed by the other thread... - raise Exception, "Memory use '"+str(memused)+"' over limit '"+str(nanny.get_resource_limit("memory"))+"'" + raise Exception("Memory use '"+str(memused)+"' over limit '"+str(nanny.get_resource_limit("memory"))+"'") # Check if we should check the disk if (counter % disk_to_memory_ratio) == 0: # Check diskused diskused = compute_disk_use(repy_constants.REPY_CURRENT_DIR) if diskused > nanny.get_resource_limit("diskused"): - raise Exception, "Disk use '"+str(diskused)+"' over limit '"+str(nanny.get_resource_limit("diskused"))+"'" + raise Exception("Disk use '"+str(diskused)+"' over limit '"+str(nanny.get_resource_limit("diskused"))+"'") # Sleep until the next iteration of checking the memory time.sleep(memory_check_interval) @@ -401,7 +401,7 @@ def run(self): except: tracebackrepy.handle_exception() - print >> sys.stderr, "Nanny died! Trying to kill everything else" + print("Nanny died! Trying to kill everything else", file=sys.stderr) harshexit.harshexit(20) @@ -506,7 +506,7 @@ def run(self): except: tracebackrepy.handle_exception() - print >> sys.stderr, "CPU Nanny died! Trying to kill everything else" + print("CPU Nanny died! Trying to kill everything else", file=sys.stderr) harshexit.harshexit(25) @@ -520,8 +520,8 @@ def run(self): # This method handles messages on the "diskused" channel from # the external process. When the external process measures disk used, # it is piped in and cached for calls to getresources. 
-def IPC_handle_diskused(bytes): - cached_disk_used = bytes +def IPC_handle_diskused(some_bytes): + cached_disk_used = some_bytes # This method handles messages on the "repystopped" channel from @@ -577,10 +577,10 @@ def write_message_to_pipe(writehandle, channel, data): # Send this index = 0 while index < len(mesg): - bytes = os.write(writehandle, mesg[index:]) - if bytes == 0: - raise EnvironmentError, "Write send 0 bytes! Pipe broken!" - index += bytes + some_bytes = os.write(writehandle, mesg[index:]) + if some_bytes == 0: + raise EnvironmentError("Write send 0 bytes! Pipe broken!") + index += some_bytes # Armon: Method to read a message from the pipe, used for IPC. @@ -614,7 +614,7 @@ def read_message_from_pipe(readhandle): # Read 8 bytes at a time mesg = os.read(readhandle,8) if len(mesg) == 0: - raise EnvironmentError, "Read returned empty string! Pipe broken!" + raise EnvironmentError("Read returned empty string! Pipe broken!") data += mesg # Increment the index while there is data and we have not found a colon @@ -633,7 +633,7 @@ def read_message_from_pipe(readhandle): while more_data > 0: mesg = os.read(readhandle, more_data) if len(mesg) == 0: - raise EnvironmentError, "Read returned empty string! Pipe broken!" + raise EnvironmentError("Read returned empty string! Pipe broken!") data += mesg more_data -= len(mesg) @@ -676,7 +676,7 @@ def run(self): # Read a message try: mesg = read_message_from_pipe(self.readhandle) - except Exception, e: + except Exception as e: break # Check for a handler function @@ -687,14 +687,14 @@ def run(self): # Print a message if there is a message on an unknown channel else: - print "[WARN] Message on unknown channel from parent process:", mesg[0] + print("[WARN] Message on unknown channel from parent process:" + mesg[0]) ### We only leave the loop on a fatal error, so we need to exit now # Write out status information, our parent would do this, but its dead. 
statusstorage.write_status("Terminated") - print >> sys.stderr, "Monitor process died! Terminating!" + print("Monitor process died! Terminating!", file=sys.stderr) harshexit.harshexit(70) @@ -735,7 +735,7 @@ def do_forked_resource_monitor(): # Small internal error handler function def _internal_error(message): try: - print >> sys.stderr, message + print(message, file=sys.stderr) sys.stderr.flush() except: pass @@ -760,12 +760,12 @@ def _internal_error(message): # Launch the resource monitor, if it fails determine why and restart if necessary resource_monitor(childpid, writehandle) - except ResourceException, exp: + except ResourceException as exp: # Repy exceeded its resource limit, kill it _internal_error(str(exp)+" Impolitely killing child!") harshexit.harshexit(98) - except Exception, exp: + except Exception as exp: # There is some general error... try: (pid, status) = os.waitpid(childpid,os.WNOHANG) @@ -866,7 +866,7 @@ def resource_monitor(childpid, pipe_handle): # Check if it is using too much memory if memused > nanny.get_resource_limit("memory"): - raise ResourceException, "Memory use '"+str(memused)+"' over limit '"+str(nanny.get_resource_limit("memory"))+"'." + raise ResourceException("Memory use '"+str(memused)+"' over limit '"+str(nanny.get_resource_limit("memory"))+"'.") ########### End Check Memory ########### # @@ -884,7 +884,7 @@ def resource_monitor(childpid, pipe_handle): # Raise exception if we are over limit if diskused > nanny.get_resource_limit("diskused"): - raise ResourceException, "Disk use '"+str(diskused)+"' over limit '"+str(nanny.get_resource_limit("diskused"))+"'." 
+ raise ResourceException("Disk use '"+str(diskused)+"' over limit '"+str(nanny.get_resource_limit("diskused"))+"'.") # Send the disk usage information, raw bytes used write_message_to_pipe(pipe_handle, "diskused", diskused) @@ -949,14 +949,14 @@ def calculate_granularity(): import windows_api as os_api else: # This is a non-supported OS - raise UnsupportedSystemException, "The current Operating System is not supported! Fatal Error." + raise UnsupportedSystemException("The current Operating System is not supported! Fatal Error.") # Set granularity calculate_granularity() # For Windows, we need to initialize time.clock() if ostype in ["Windows"]: - time.clock() + time.perf_counter() # Initialize getruntime for other platforms else: @@ -968,3 +968,4 @@ def calculate_granularity(): elapsedtime = 0 + diff --git a/portable_popen.py b/portable_popen.py index 15a944a9..3c53a777 100644 --- a/portable_popen.py +++ b/portable_popen.py @@ -29,3 +29,4 @@ def Popen(args): # Everything else return subprocess.Popen(args, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + diff --git a/repy.py b/repy.py index ef2c9cc9..0e6a5f69 100755 --- a/repy.py +++ b/repy.py @@ -1,4 +1,5 @@ """ + irint(str(newcontext)) Justin Cappos Ivan Beschastnikh (12/24/08) -- added usage @@ -40,8 +41,6 @@ --servicelog : Enable usage of the servicelogger for internal errors """ - - import os import sys import time @@ -51,12 +50,12 @@ # Relative imports # First make sure the version of python is supported -import checkpythonversion -checkpythonversion.ensure_python_version_is_supported() +#import checkpythonversion +#checkpythonversion.ensure_python_version_is_supported() import safe import nanny -import emulcomm +#import emulcomm import idhelper import harshexit import namespace @@ -126,7 +125,7 @@ def execute_namespace_until_completion(thisnamespace, thiscontext): event_id = idhelper.getuniqueid() try: nanny.tattle_add_item('events', event_id) - except Exception, e: + except Exception as e: 
tracebackrepy.handle_internalerror("Failed to acquire event for '" + \ "initialize' event.\n(Exception was: %s)" % e.message, 140) @@ -183,22 +182,22 @@ def add_repy_options(parser): """Adds the Repy command-line options to the specified optparser """ - parser.add_option('--ip', - action="append", type="string", dest="ip" , - help="Explicitly allow Repy to bind to the specified IP. This option can be used multiple times." - ) + #parser.add_option('--ip', + # action="append", type="string", dest="ip" , + # help="Explicitly allow Repy to bind to the specified IP. This option can be used multiple times." + # ) parser.add_option('--execinfo', action="store_true", dest="execinfo", default=False, help="Display information regarding the current execution state." ) - parser.add_option('--iface', - action="append", type="string", dest="interface", - help="Explicitly allow Repy to bind to the specified interface. This option can be used multiple times." - ) - parser.add_option('--nootherips', - action="store_true", dest="nootherips",default=False, - help="Do not allow IPs or interfaces that are not explicitly specified" - ) + #parser.add_option('--iface', + # action="append", type="string", dest="interface", + # help="Explicitly allow Repy to bind to the specified interface. This option can be used multiple times." 
+ # ) + #parser.add_option('--nootherips', + # action="store_true", dest="nootherips",default=False, + # help="Do not allow IPs or interfaces that are not explicitly specified" + # ) parser.add_option('--logfile', action="store", type="string", dest="logfile", help="Set up a circular log buffer and output to logfile" @@ -224,28 +223,28 @@ def parse_options(options): """ Parse the specified options and initialize all required structures Note: This modifies global state, specifically, the emulcomm module """ - if options.ip: - emulcomm.user_ip_interface_preferences = True + #if options.ip: + # emulcomm.user_ip_interface_preferences = True # Append this ip to the list of available ones if it is new - for ip in options.ip: - if (True, ip) not in emulcomm.user_specified_ip_interface_list: - emulcomm.user_specified_ip_interface_list.append((True, ip)) + # for ip in options.ip: + # if (True, ip) not in emulcomm.user_specified_ip_interface_list: + # emulcomm.user_specified_ip_interface_list.append((True, ip)) - if options.interface: - emulcomm.user_ip_interface_preferences = True + #if options.interface: + # emulcomm.user_ip_interface_preferences = True # Append this interface to the list of available ones if it is new - for interface in options.interface: - if (False, interface) not in emulcomm.user_specified_ip_interface_list: - emulcomm.user_specified_ip_interface_list.append((False, interface)) + # for interface in options.interface: + # if (False, interface) not in emulcomm.user_specified_ip_interface_list: + # emulcomm.user_specified_ip_interface_list.append((False, interface)) # Check if they have told us to only use explicitly allowed IP's and interfaces - if options.nootherips: + #if options.nootherips: # Set user preference to True - emulcomm.user_ip_interface_preferences = True + # emulcomm.user_ip_interface_preferences = True # Disable nonspecified IP's - emulcomm.allow_nonspecified_ips = False + # emulcomm.allow_nonspecified_ips = False # set up the circular 
log buffer... # Armon: Initialize the circular logger before starting the nanny @@ -289,98 +288,162 @@ def initialize_nanny(resourcefn): nonportable.monitor_cpu_disk_and_mem() # JAC: I believe this is needed for interface / ip-based restrictions - emulcomm.update_ip_cache() - +# emulcomm.update_ip_cache() + + + +#def main(): +# # JAC: This function should be kept as stable if possible. Others who +# # extend Repy may be doing essentially the same thing in their main and +# # your changes may not be reflected there! +# +# +# # Armon: The CMD line path to repy is the first argument +# repy_location = sys.argv[0] +# +# # Get the directory repy is in +# repy_directory = os.path.dirname(repy_location) +# +# init_repy_location(repy_directory) +# +# +# ### PARSE OPTIONS. These are command line in our case, but could be from +# ### anywhere if this is repurposed... +# usage = "USAGE: repy.py [options] resource_file program_to_run.r2py [program args]" +# parser = optparse.OptionParser(usage=usage) +# +# # Set optparse to stop parsing arguments on the first non-option arg. We +# # need this so that command-line args to the sandboxed Repy program don't +# # clash or get confused with args to the sandbox (repy.py) itself. +# # See also SeattleTestbed/repy_v2#101 . +# # (Per the USAGE string above, the user program name is the first +# # non-option argument which causes parsing to stop.) +# parser.disable_interspersed_args() +# +# add_repy_options(parser) +# options, args = parser.parse_args() +# +# if len(args) < 2: +# print("Repy requires a resource file and the program to run!") +# parser.print_help() +# sys.exit(1) +# +# resourcefn = args[0] +# progname = args[1] +# progargs = args[2:] +# +# # Do a huge amount of initialization. +# parse_options(options) +# +# ### start resource restrictions, etc. 
for the nanny +# #initialize_nanny(resourcefn) +# +# # Read the user code from the file +# try: +# filehandle = open(progname) +# usercode = filehandle.read() +# filehandle.close() +# except: +# print("FATAL ERROR: Unable to read the specified program file: '%s'" % (progname)) +# sys.exit(1) +# +# # create the namespace... +# try: +# newnamespace = virtual_namespace.VirtualNamespace(usercode, progname) +# except CodeUnsafeError as e: +# print("Specified repy program is unsafe!") +# print("Static-code analysis failed with error: "+str(e)) +# harshexit.harshexit(5) +# +# # allow the (potentially large) code string to be garbage collected +# del usercode +# +# +# +# # Insert program log separator and execution information +# if options.execinfo: +# print('=' * 40) +# print("Running program: " + progname) +# print("Arguments: " +progargs) +# print('=' * 40) +# +# +# +# # get a new namespace +# newcontext = get_safe_context(progargs) +# +# # one could insert a new function for repy code here by changing newcontext +# # to contain an additional function. +# +# # run the code to completion... +# execute_namespace_until_completion(newnamespace, newcontext) +# +# # No more pending events for the user thread, we exit +# harshexit.harshexit(0) def main(): - # JAC: This function should be kept as stable if possible. Others who - # extend Repy may be doing essentially the same thing in their main and - # your changes may not be reflected there! + repy_location = sys.argv[0] + repy_directory = os.path.dirname(repy_location) + init_repy_location(repy_directory) - # Armon: The CMD line path to repy is the first argument - repy_location = sys.argv[0] + usage = "USAGE: repy.py [options] program_to_run.r2py [program args]" + parser = optparse.OptionParser(usage=usage) - # Get the directory repy is in - repy_directory = os.path.dirname(repy_location) - - init_repy_location(repy_directory) - - - ### PARSE OPTIONS. 
These are command line in our case, but could be from - ### anywhere if this is repurposed... - usage = "USAGE: repy.py [options] resource_file program_to_run.r2py [program args]" - parser = optparse.OptionParser(usage=usage) - - # Set optparse to stop parsing arguments on the first non-option arg. We - # need this so that command-line args to the sandboxed Repy program don't - # clash or get confused with args to the sandbox (repy.py) itself. - # See also SeattleTestbed/repy_v2#101 . - # (Per the USAGE string above, the user program name is the first - # non-option argument which causes parsing to stop.) - parser.disable_interspersed_args() - - add_repy_options(parser) - options, args = parser.parse_args() - - if len(args) < 2: - print "Repy requires a resource file and the program to run!" - parser.print_help() - sys.exit(1) - - resourcefn = args[0] - progname = args[1] - progargs = args[2:] - - # Do a huge amount of initialization. - parse_options(options) - - ### start resource restrictions, etc. for the nanny - initialize_nanny(resourcefn) + parser.disable_interspersed_args() - # Read the user code from the file - try: - filehandle = open(progname) - usercode = filehandle.read() - filehandle.close() - except: - print "FATAL ERROR: Unable to read the specified program file: '%s'" % (progname) - sys.exit(1) + add_repy_options(parser) + options, args = parser.parse_args() - # create the namespace... - try: - newnamespace = virtual_namespace.VirtualNamespace(usercode, progname) - except CodeUnsafeError, e: - print "Specified repy program is unsafe!" 
- print "Static-code analysis failed with error: "+str(e) - harshexit.harshexit(5) + if len(args) < 1: + print("Repy requires a program to run!") + parser.print_help() + sys.exit(1) - # allow the (potentially large) code string to be garbage collected - del usercode + #resourcefn = args[0] + progname = args[0] + progargs = args[1:] + parse_options(options) + + nmstatusinterface.launch(None) + try: + filehandle = open(progname) + usercode = filehandle.read() + filehandle.close() + except: + print(f"FATAL ERROR: Unable to read the specified program file: {progname}") + sys.exit(1) - # Insert program log separator and execution information - if options.execinfo: - print '=' * 40 - print "Running program:", progname - print "Arguments:", progargs - print '=' * 40 + try: + newnamespace = virtual_namespace.VirtualNamespace(usercode, progname) + except CodeUnsafeError as e: + print("Specified repy program is unsafe!") + print("Static-code analysis failed with error: " + str(e)) + harshexit.harshexit(5) + del usercode + if options.execinfo: + print("="*40) + print("Running program: " + progname) + print("Arguments: " + progargs) + print("="*40) - # get a new namespace - newcontext = get_safe_context(progargs) + newcontext = get_safe_context(progargs) - # one could insert a new function for repy code here by changing newcontext - # to contain an additional function. + try: + newnamespace.evaluate(newcontext) + except SystemExit: + raise + except: - # run the code to completion... 
- execute_namespace_until_completion(newnamespace, newcontext) + tracebackrepy.handle_exception() + harshexit.harshexit(6) + harshexit.harshexit(0) - # No more pending events for the user thread, we exit - harshexit.harshexit(0) @@ -392,3 +455,4 @@ def main(): except: tracebackrepy.handle_exception() harshexit.harshexit(3) + diff --git a/repy_constants.py b/repy_constants.py index 01aa3251..25db81ee 100644 --- a/repy_constants.py +++ b/repy_constants.py @@ -48,3 +48,4 @@ "169.229.131.81", # Berkley "140.142.12.202"] # Univ. of Washington + diff --git a/resourcemanipulation.py b/resourcemanipulation.py index dcdf5368..1862c426 100644 --- a/resourcemanipulation.py +++ b/resourcemanipulation.py @@ -277,12 +277,12 @@ def write_resourcedict_to_file(resourcedict, filename, call_list=None): for resource in resourcedict: if type(resourcedict[resource]) == set: for item in resourcedict[resource]: - print >> outfo, "resource "+resource+" "+str(item) + print("resource "+resource+" "+str(item), file=outfo) else: - print >> outfo, "resource "+resource+" "+str(resourcedict[resource]) + print("resource "+resource+" "+str(resourcedict[resource]), file=outfo) if call_list: - print >> outfo, '\n' + str(call_list) + print('\n' + str(call_list), file=outfo) outfo.close() diff --git a/safe.py b/safe.py index ea258de6..7cce783b 100755 --- a/safe.py +++ b/safe.py @@ -82,31 +82,32 @@ import sys # This is to get sys.executable to launch the external process import time # This is to sleep -# Currently required to filter out Android-specific debug messages, -# see SeattleTestbed/attic#1080 and safe_check() below. 
try: - import android - IS_ANDROID = True + import compiler # Required for the code safety check except ImportError: - IS_ANDROID = False + import ast as compiler -# Hide the DeprecationWarning for compiler -import warnings -warnings.simplefilter('ignore') -import compiler # Required for the code safety check -warnings.resetwarnings() -import UserDict # This is to get DictMixin +#import UserDict +#class MyMixin(UserDict.DictMixin): +# pass +#except ImportError: +from collections import MutableMapping as DictMixin +from collections import UserDict +# class MyMixin(UserDict, DictMixin): +# pass import platform # This is for detecting Nokia tablets import threading # This is to get a lock -import harshexit # This is to kill the external process on timeout import subprocess # This is to start the external process -import __builtin__ -import nonportable # This is to get the current runtime -import repy_constants # This is to get our start-up directory -import exception_hierarchy # For exception classes -import encoding_header # Subtract len(ENCODING_HEADER) from error line numbers. +try: + import __builtin__ as builtins +except ImportError: + import builtins + +import six +import harshexit # This is to kill the external process on timeout +import exception_hierarchy # For exception classes # Fix to make repy compatible with Python 2.7.2 on Ubuntu 11.10, # see SeattleTestbed/repy_v2#24. 
@@ -189,7 +190,7 @@ def _is_string_safe(token): """ # If it's not a string, return True - if type(token) is not str and type(token) is not unicode: + if type(token) is not str and type(token) is not str: return True # If the string is explicitly allowed, return True @@ -219,48 +220,56 @@ def _is_string_safe(token): 'LeftShift', 'List', 'ListComp', 'ListCompFor', 'ListCompIf', 'Mod', 'Module', 'Mul', 'Name', 'Node', 'Not', 'Or', 'Pass', 'Power', 'Return', 'RightShift', 'Slice', 'Sliceobj', - 'Stmt', 'Sub', 'Subscript', 'Tuple', 'UnaryAdd', 'UnarySub', 'While', + 'Stmt', 'Sub', 'Subscript', 'Tuple', 'UnaryAdd', 'UnarySub', 'While', # New additions 'TryExcept', 'TryFinally', 'Raise', 'ExcepthandlerType', 'Invert', - ] -_NODE_ATTR_OK = ['value'] + # Python 3 additions + 'Expr', 'Call', 'Load', 'Index', 'Str', 'Store', 'Attribute', "Num", "FunctionDef", + "arguments", "arg", "Try", "NameConstant", "ExceptHandler", "BinOp", "Eq", "BoolOp", + "In", "Is", "IsNot", "UnaryOp", "iter", "Iter", "ClassDef", "sorted", "Delete", + "Del", "NotIn", "Starred", "USub", "Gt", "GtE", "NotEq", "Lt", "LtE", "Mult", "Constant", + + # Debugging + "Import", "iter", "input", +] + +if os.environ.get("REPY_ALLOW_UNSAFE_PRINT"): +# XXX: print statements in python 3 are not any of these nodes, so be careufl + _NODE_CLASS_OK.extend(['Printnl', 'Print']) + + +_NODE_ATTR_OK = ['value', "__func"] def _check_node(node): """ - Examines a node, its attributes, and all of its children (recursively) for - safety. A node is safe if it is in _NODE_CLASS_OK and an attribute is safe - if it is not a unicode string and either in _NODE_ATTR_OK or is safe as is - defined by _is_string_safe() + Examines a node, its attributes, and all of its children (recursively) for + safety. 
A node is safe if it is in _NODE_CLASS_OK and an attribute is safe + if it is not a unicode string and either in _NODE_ATTR_OK or is safe as is + defined by _is_string_safe() - node: A node in an AST - + node: A node in an AST + - CheckNodeException if an unsafe node is used - CheckStrException if an attribute has an unsafe string + CheckNodeException if an unsafe node is used + CheckStrException if an attribute has an unsafe string - None + None """ - # Subtract length of encoding header from traceback line numbers, - # see SeattleTestbed/repy_v2#95. - HEADERSIZE = len(encoding_header.ENCODING_DECLARATION.splitlines()) - - # Proceed with the node check. - if node.__class__.__name__ not in _NODE_CLASS_OK: raise exception_hierarchy.CheckNodeException("Unsafe call '" + - str(node.__class__.__name__) + "' in line " + str(node.lineno - HEADERSIZE)) - - for attribute, value in node.__dict__.iteritems(): - # Don't allow the construction of unicode literals - if type(value) == unicode: - raise exception_hierarchy.CheckStrException("Unsafe string '" + - str(value) + "' in line " + str(node.lineno - HEADERSIZE) + - ", node attribute '" + str(attribute) + "'") + str(node.__class__.__name__) + "' in line " + str(node.lineno)) + + for attribute, value in node.__dict__.items(): + # Don't allow the construction of unicode literals + # if type(value) == unicode: + # raise exception_hierarchy.CheckStrException("Unsafe string '" + + # str(value) + "' in line " + str(node.lineno) + + # ", node attribute '" + str(attribute) + "'") if attribute in _NODE_ATTR_OK: continue @@ -274,30 +283,34 @@ def _check_node(node): # Check the safety of any strings if not _is_string_safe(value): raise exception_hierarchy.CheckStrException("Unsafe string '" + - str(value) + "' in line " + str(node.lineno - HEADERSIZE) + - ", node attribute '" + str(attribute) + "'") + str(value) + "' in line " + str(node.lineno) + + ", node attribute '" + str(attribute) + "'") - for child in node.getChildNodes(): - 
_check_node(child) + if 'getChildNodes' in dir(node): + for child in node.getChildNodes(): + _check_node(child) + else: + for child in compiler.iter_child_nodes(node): + _check_node(child) def safe_check(code): """ - Takes the code as input, and parses it into an AST. - It then calls _check_node, which does a recursive safety check for every - node. + Takes the code as input, and parses it into an AST. + It then calls _check_node, which does a recursive safety check for every + node. - code: A string representation of python code - + code: A string representation of python code + - CheckNodeException if an unsafe node is used - CheckStrException if an attribute has an unsafe string + CheckNodeException if an unsafe node is used + CheckStrException if an attribute has an unsafe string - None + None """ parsed_ast = compiler.parse(code) _check_node(parsed_ast) @@ -310,105 +323,55 @@ def safe_check(code): def safe_check_subprocess(code): """ - Runs safe_check() in a subprocess. This is done because the AST - safe_check() uses a large amount of RAM. By running safe_check() in a - subprocess we can guarantee that the memory will be reclaimed when the - process ends. + Runs safe_check() in a subprocess. This is done because the AST + safe_check() uses a large amount of RAM. By running safe_check() in a + subprocess we can guarantee that the memory will be reclaimed when the + process ends. - code: See safe_check. - + code: See safe_check. + - As with safe_check. + As with safe_check. - See safe_check. + See safe_check. 
""" - + # Get the path to safe_check.py by using the original start directory of python - path_to_safe_check = os.path.join(repy_constants.REPY_START_DIR, "safe_check.py") - + path_to_safe_check = os.path.join(os.path.dirname(__file__), "safe_check.py") + # Start a safety check process, reading from the user code and outputing to a pipe we can read - proc = subprocess.Popen([sys.executable, path_to_safe_check], - stdin=subprocess.PIPE, stdout=subprocess.PIPE) - - # Write out the user code, close so the other end gets an EOF - proc.stdin.write(code) - proc.stdin.close() - - # Wait for the process to terminate - starttime = nonportable.getruntime() - - # Only wait up to EVALUTATION_TIMEOUT seconds before terminating - while nonportable.getruntime() - starttime < EVALUTATION_TIMEOUT: - # Did the process finish running? - if proc.poll() != None: - break; - time.sleep(0.02) - else: - # Kill the timed-out process - try: - harshexit.portablekill(proc.pid) - except: - pass - raise Exception, "Evaluation of code safety exceeded timeout threshold \ - ("+str(nonportable.getruntime() - starttime)+" seconds)" - - # Read the output and close the pipe - rawoutput = proc.stdout.read() - proc.stdout.close() - - - # Interim fix for SeattleTestbed/attic#1080: - # Get rid of stray debugging output on Android of the form - # `dlopen libpython2.6.so` and `dlopen /system/lib/libc.so`, - # yet preserve all of the other output (including empty lines). - - if IS_ANDROID: - output = "" - for line in rawoutput.split("\n"): - # Preserve empty lines - if line == "": - output += "\n" - continue - # Suppress debug messages we know can turn up - wordlist = line.split() - if wordlist[0]=="dlopen": - if wordlist[-1]=="/system/lib/libc.so": - continue - if wordlist[-1].startswith("libpython") and \ - wordlist[-1].endswith(".so"): - # We expect "libpython" + version number + ".so". - # The version number should be a string convertible to float. - # If it's not, raise an exception. 
- try: - versionstring = (wordlist[-1].replace("libpython", - "")).replace(".so", "") - junk = float(versionstring) - except TypeError, ValueError: - raise Exception("Unexpected debug output '" + line + - "' while evaluating code safety!") - else: - output += line + "\n" + try: +#print("Let's start") + proc = subprocess.Popen([sys.executable, path_to_safe_check], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + universal_newlines=True) - # Strip off the last newline character we added - output = output[0:-1] + # Write out the user code, close so the other end gets an EOF + #(rawoutput, _) = proc.communicate(bytes(code, 'utf-8')) + #print("Command finished") + (output, _) = proc.communicate(code) + #print("Got code.") + except Exception as e: + raise - else: # We are *not* running on Android, proceed with unfiltered output - output = rawoutput + # Wait for the process to terminate + starttime = 0 + #output = rawoutput.decode("utf-8") # Check the output, None is success, else it is a failure if output == "None": return True - + # If there is no output, this is a fatal error condition elif output == "": - raise Exception, "Fatal error while evaluating code safety!" - + raise Exception("Fatal error while evaluating code safety!") + else: # Raise the error from the output - raise exception_hierarchy.SafeException, output + raise exception_hierarchy.SafeException(output) # Get a lock for serial_safe_check SAFE_CHECK_LOCK = threading.Lock() @@ -417,18 +380,18 @@ def safe_check_subprocess(code): def serial_safe_check(code): """ - Serializes calls to safe_check_subprocess(). This is because safe_check_subprocess() - creates a new process which may take many seconds to return. This prevents us from - creating many new python processes. + Serializes calls to safe_check_subprocess(). This is because safe_check_subprocess() + creates a new process which may take many seconds to return. This prevents us from + creating many new python processes. - code: See safe_check. 
- + code: See safe_check. + - As with safe_check. + As with safe_check. - See safe_check. + See safe_check. """ SAFE_CHECK_LOCK.acquire() @@ -456,14 +419,14 @@ def safe_type(*args, **kwargs): raise exception_hierarchy.RunBuiltinException( 'type() may only take exactly one non-keyword argument.') - # Fix for SeattleTestbed/repy_v1#128, block access to Python's `type`. +# Fix for SeattleTestbed/repy_v1#128, block access to Python's `type`. # if _type(args[0]) is _type or _type(args[0]) is _compile_type: # raise exception_hierarchy.RunBuiltinException( # 'unsafe type() call.') - # JAC: The above would be reasonable, but it is harsh. The wrapper code for - # the encasement library needs to have a way to check the type of things and - # these might be inadvertantly be types. It is hard to know if something - # is a type +# JAC: The above would be reasonable, but it is harsh. The wrapper code for +# the encasement library needs to have a way to check the type of things and +# these might be inadvertantly be types. 
It is hard to know if something +# is a type if args[0] == safe_type or args[0] == _type or _type(args[0]) is _type: return safe_type @@ -475,14 +438,14 @@ def safe_type(*args, **kwargs): # This dict maps built-in functions to their replacement functions _BUILTIN_REPLACE = { - 'type': safe_type +'type': safe_type } # The list of built-in exceptions can be generated by running the following: # r = [v for v in dir(__builtin__) if v[0] != '_' and v[0] == v[0].upper()] ; r.sort() ; print r _BUILTIN_OK = [ '__debug__', - + 'ArithmeticError', 'AssertionError', 'AttributeError', 'DeprecationWarning', 'EOFError', 'Ellipsis', 'EnvironmentError', 'Exception', 'False', 'FloatingPointError', 'FutureWarning', 'IOError', 'ImportError', @@ -494,16 +457,25 @@ def safe_type(*args, **kwargs): 'SystemExit', 'TabError', 'True', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', 'UserWarning', 'ValueError', 'Warning', 'ZeroDivisionError', - + 'abs', 'bool', 'cmp', 'complex', 'dict', 'divmod', 'filter', 'float', 'frozenset', 'hex', 'id', 'int', 'len', 'list', 'long', 'map', 'max', 'min', 'object', 'oct', 'pow', 'range', 'reduce', 'repr', 'round', 'set', 'slice', - 'str', 'sum', 'tuple', 'xrange', 'zip','id', - + 'str', 'sum', 'tuple', 'zip','id', + #Added for repyv2 'isinstance', 'BaseException', 'WindowsError', 'type', 'issubclass', - 'ord', 'chr' - ] + 'ord', 'chr', + + # added for py3 support + 'print', 'exec', 'Attribute', 'split', + + # debugging goodies :) + '__import__', 'hasattr', 'getattr', 'any', '__build_class__', "Import", + + # Kevin: + 'iter', 'bytes', 'open', "sorted", "bytearray", "memoryview", "super", "USub", +] _BUILTIN_STR = ['copyright','credits','license','__name__','__doc__',] @@ -512,8 +484,8 @@ def safe_type(*args, **kwargs): def _replace_unsafe_builtin(unsafe_call): # This function will replace any unsafe built-in function def exceptionraiser(*vargs,**kargs): - raise 
exception_hierarchy.RunBuiltinException("Unsafe call '" + - str(unsafe_call) + "' with args '" + str(vargs) + "', kwargs '" + + raise exception_hierarchy.RunBuiltinException("Unsafe call '{}'".format(unsafe_call) + + "' with args '" + str(vargs) + "', kwargs '" + str(kargs) + "'") return exceptionraiser @@ -537,17 +509,17 @@ def _builtin_init(): # Create a backup of the built-in functions #TODO: Perhaps pull this out of the function - Is there a reason to do this more then once? - _builtin_globals_backup = __builtin__.__dict__.copy() + _builtin_globals_backup = builtins.__dict__.copy() _builtin_globals = {} - for builtin in __builtin__.__dict__.iterkeys(): + for builtin in builtins.__dict__.keys(): # It's important to check _BUILTIN_REPLACE before _BUILTIN_OK because # even if the name is defined in both, there must be a security reason # why it was supposed to be replaced, and not just allowed. if builtin in _BUILTIN_REPLACE: replacewith = _BUILTIN_REPLACE[builtin] elif builtin in _BUILTIN_OK: - replacewith = __builtin__.__dict__[builtin] + replacewith = builtins.__dict__[builtin] elif builtin in _BUILTIN_STR: replacewith = '' else: @@ -567,13 +539,13 @@ def _builtin_init(): # Replace every function in __builtin__ with the one from _builtin_globals. def _builtin_destroy(): _builtin_init() - for builtin_name, builtin in _builtin_globals.iteritems(): - __builtin__.__dict__[builtin_name] = builtin + for builtin_name, builtin in _builtin_globals.items(): + builtins.__dict__[builtin_name] = builtin # Restore every function in __builtin__ with the backup from _builtin_globals_backup. def _builtin_restore(): - for builtin_name, builtin in _builtin_globals_backup.iteritems(): - __builtin__.__dict__[builtin_name] = builtin + for builtin_name, builtin in _builtin_globals_backup.items(): + builtins.__dict__[builtin_name] = builtin # Have the builtins already been destroyed? 
BUILTINS_DESTROYED = False @@ -596,9 +568,8 @@ def safe_run(code,context=None): None """ - global BUILTINS_DESTROYED - + if context == None: context = {} @@ -606,13 +577,12 @@ def safe_run(code,context=None): if not BUILTINS_DESTROYED: BUILTINS_DESTROYED = True _builtin_destroy() - + try: context['__builtins__'] = _builtin_globals - exec code in context + exec(code, context) finally: - #_builtin_restore() - pass + _builtin_restore() # Convenience functions @@ -636,22 +606,22 @@ def safe_exec(code, context = None): None """ - serial_safe_check(code) safe_run(code, context) - # This portion of the code defines a SafeDict # A SafeDict prevents keys which are 'unsafe' strings from being added. -# Functional constructor for SafeDict to allow us to safely map it into the repy context. +# Functional constructor for SafeDict to allow us to safely map it into the +# repy context. def get_SafeDict(*args,**kwargs): return SafeDict(*args,**kwargs) -class SafeDict(UserDict.DictMixin): +class SafeDict(DictMixin): +#class SafeDict(MyMixin): """ A dictionary implementation which prohibits "unsafe" keys from being set or @@ -675,14 +645,14 @@ def __init__(self,from_dict=None): return # If we are given a dict, try to copy its keys - for key,value in from_dict.iteritems(): + for key,value in from_dict.items(): # Skip __builtins__ and __doc__ since safe_run/python inserts that if key in ["__builtins__","__doc__"]: continue # Check the key type - if type(key) is not str and type(key) is not unicode: - raise TypeError, "'SafeDict' keys must be of string type!" 
+ if type(key) is not str and type(key) is not str: + raise TypeError("'SafeDict' keys must be of string type!") # Check if the key is safe if _is_string_safe(key): @@ -690,41 +660,41 @@ def __init__(self,from_dict=None): # Throw an exception if the key is unsafe else: - raise ValueError, "Unsafe key: '"+key+"'" + raise ValueError("Unsafe key: '"+key+"'") # Allow getting items def __getitem__(self,key): - if type(key) is not str and type(key) is not unicode: - raise TypeError, "'SafeDict' keys must be of string type!" + if type(key) is not str and type(key) is not str: + raise TypeError("'SafeDict' keys must be of string type!") if not _is_string_safe(key): - raise ValueError, "Unsafe key: '"+key+"'" + raise ValueError("Unsafe key: '"+key+"'") return self.__under__.__getitem__(key) # Allow setting items def __setitem__(self,key,value): - if type(key) is not str and type(key) is not unicode: - raise TypeError, "'SafeDict' keys must be of string type!" + if type(key) is not str and type(key) is not str: + raise TypeError("'SafeDict' keys must be of string type!") if not _is_string_safe(key): - raise ValueError, "Unsafe key: '"+key+"'" + raise ValueError("Unsafe key: '"+key+"'") return self.__under__.__setitem__(key,value) # Allow deleting items def __delitem__(self,key): - if type(key) is not str and type(key) is not unicode: - raise TypeError, "'SafeDict' keys must be of string type!" + if type(key) is not str and type(key) is not str: + raise TypeError("'SafeDict' keys must be of string type!") if not _is_string_safe(key): - raise ValueError, "Unsafe key: '"+key+"'" + raise ValueError("Unsafe key: '"+key+"'") return self.__under__.__delitem__(key) # Allow checking if a key is set def __contains__(self,key): - if type(key) is not str and type(key) is not unicode: - raise TypeError, "'SafeDict' keys must be of string type!" 
+ if type(key) is not str and type(key) is not str: + raise TypeError("'SafeDict' keys must be of string type!") if not _is_string_safe(key): - raise ValueError, "Unsafe key: '"+key+"'" + raise ValueError("Unsafe key: '"+key+"'") return key in self.__under__ @@ -734,7 +704,7 @@ def keys(self): # Filter out the unsafe keys from the underlying dict safe_keys = [] - for key in self.__under__.iterkeys(): + for key in self.__under__.keys(): if _is_string_safe(key): safe_keys.append(key) @@ -747,14 +717,22 @@ def keys(self): # It seems unlikely this is adequate for more complex cases (like safedicts # that refer to each other) def __repr__(self): + #return "ayylmao" newdict = {} - for safekey in self.keys(): + for safekey in list(self.keys()): if self.__under__[safekey] == self: newdict[safekey] = newdict else: newdict[safekey] = self.__under__[safekey] return newdict.__repr__() + def __len__(self): + return len(self.__under__) + + def __iter__(self): + for i in self.__under__: + yield i + # Allow a copy of us def copy(self): @@ -766,7 +744,7 @@ def copy(self): # https://github.com/SeattleTestbed/repy_v2/issues/97 # Caveat: dict.copy is expected to return a shallow copy, this fix # introduces a partial deep copy for the contained self reference - for key, value in self.__under__.iteritems(): + for key, value in self.__under__.items(): if value is self: copy_inst[key] = copy_inst @@ -782,8 +760,10 @@ def __setattr__(self,name,value): if name == "__under__" and name not in self.__dict__: self.__dict__[name] = value return - raise TypeError,"'SafeDict' attributes are read-only!" + raise TypeError("'SafeDict' attributes are read-only! ({}, {})".format(name, value)) def __delattr__(self,name): - raise TypeError,"'SafeDict' attributes are read-only!" 
- + raise TypeError("'SafeDict' attributes are read-only!") + + def __copy(self): + return self.copy() diff --git a/safe_check.py b/safe_check.py index 80535bf4..89d860e3 100644 --- a/safe_check.py +++ b/safe_check.py @@ -8,17 +8,14 @@ memory used by the safe.safe_check() will be reclaimed when this process quits. """ - import safe import sys -import encoding_header - if __name__ == "__main__": # Get the user "code" usercode = sys.stdin.read() - + # Output buffer output = "" @@ -26,16 +23,9 @@ try: value = safe.safe_check(usercode) output += str(value) - except Exception, e: - # Adjust traceback line numbers, see SeattleTestbed/repy_v2#95. - try: - e.lineno = e.lineno - \ - len(encoding_header.ENCODING_DECLARATION.splitlines()) - except (TypeError, AttributeError): - # Ignore exceptions with line numbers that are non-numeric (i.e. - # `None`), or have no `lineno` attribute altogether. - pass + except Exception as e: output += str(type(e)) + " " + str(e) + # Write out sys.stdout.write(output) diff --git a/scripts/config_initialize.txt b/scripts/config_initialize.txt index c9963906..83d6632a 100644 --- a/scripts/config_initialize.txt +++ b/scripts/config_initialize.txt @@ -1,6 +1,6 @@ -https://github.com/SeattleTestbed/seattlelib_v2 ../DEPENDENCIES/seattlelib_v2 -https://github.com/SeattleTestbed/portability ../DEPENDENCIES/portability -https://github.com/SeattleTestbed/seash ../DEPENDENCIES/seash -https://github.com/SeattleTestbed/affix ../DEPENDENCIES/affix -https://github.com/SeattleTestbed/common ../DEPENDENCIES/common -https://github.com/SeattleTestbed/utf ../DEPENDENCIES/utf +https://github.com/kcg295/seattlelib_v2 ../DEPENDENCIES/seattlelib_v2 +https://github.com/kcg295/portability ../DEPENDENCIES/portability +https://github.com/kcg295/seash ../DEPENDENCIES/seash +https://github.com/kcg295/affix ../DEPENDENCIES/affix +https://github.com/kcg295/common ../DEPENDENCIES/common +https://github.com/kcg295/utf ../DEPENDENCIES/utf diff --git a/scripts/initialize.py 
b/scripts/initialize.py index 4c6241bd..832bb045 100644 --- a/scripts/initialize.py +++ b/scripts/initialize.py @@ -74,7 +74,7 @@ continue # If we end up here, the line contains a Git URL (+options?) for us to clone - print "Checking out repo from", line.split()[0], "..." + print("Checking out repo from" + line.split()[0] + "...") git_process = subprocess.Popen("git clone " + line, cwd = os.getcwd(), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE ) (stdout_data, stderr_data) = git_process.communicate() @@ -83,33 +83,33 @@ # to see if it performed correctly, and halt the program (giving debug # output) if not. if git_process.returncode == 0: - print "Done!" + print("Done!") else: - print "*** Error checking out repo. Git returned status code", git_process.returncode - print "*** Git messages on stdout: '" + stdout_data + "'." - print "*** Git messages on stderr: '" + stderr_data + "'." - print + print("*** Error checking out repo. Git returned status code" + str( git_process.returncode)) + print("*** Git messages on stdout: '" + stdout_data + "'.") + print("*** Git messages on stderr: '" + stderr_data + "'.") + print() if not ignore_git_errors: - print """Since the skip-mode is off, these errors need to be fixed before the build process can proceed. In + print("""Since the skip-mode is off, these errors need to be fixed before the build process can proceed. In doubt, please contact the Seattle development team at seattle-devel@googlegroups.com and supply all of the above information. Thank you! -""" - print +""") + print() sys.exit(1) else: - print "Continuing with the cloning of directories as skip-mode is active" - print + print("Continuing with the cloning of directories as skip-mode is active") + print() continue # If there is a readme file, show it to the user. 
try: readme_file = open('README.txt', 'r') for line in readme_file.readlines(): - print line + print(line) readme_file.close() except IOError: # There is no readme file, or we can't access it. diff --git a/statusstorage.py b/statusstorage.py index 55379a13..010f67e5 100755 --- a/statusstorage.py +++ b/statusstorage.py @@ -26,7 +26,7 @@ # To allow access to a real fileobject # call type... -myfile = file +#file = file statusfilenameprefix = None @@ -62,14 +62,14 @@ def write_status(status, mystatusfilenameprefix=None): timestamp = time.time() # write the file - myfile(mystatusfilenameprefix+"-"+status+"-"+str(timestamp),"w").close() + file(mystatusfilenameprefix+"-"+status+"-"+str(timestamp),"w").close() # remove the old files... for filename in existingfiles: if len(filename.split('-')) == 3 and filename.split('-')[0] == os.path.basename(mystatusfilenameprefix): try: os.remove(mystatusdir+filename) - except OSError, e: + except OSError as e: if e[0] == 2: # file not found, let's assume another instance removed it... continue @@ -109,3 +109,4 @@ def read_status(mystatusfilenameprefix=None): + diff --git a/tracebackrepy.py b/tracebackrepy.py index 2035941b..82cc3645 100755 --- a/tracebackrepy.py +++ b/tracebackrepy.py @@ -10,46 +10,38 @@ """ -# we'll print our own exceptions -import traceback -# This needs hasattr. I'll allow it... -traceback.hasattr = hasattr - -# and don't want traceback to use linecache because linecache uses open -import fakelinecache -traceback.linecache = fakelinecache - -# Need to be able to reference the last traceback... +# and don't want traceback to use linecache because linecache uses open +# Need to be able to reference the last traceback... import sys +import traceback -# We need the service logger to log internal errors -Brent +# We need the service logger to log internal errors -Brent # import servicelogger -# Used to determine whether or not we use the service logger to log internal -# errors. Defaults to false. 
-Brent -# WARNING: Changing this to True manually may break tracebackrepy + #Used to determine whether or not we use the service logger to log internal + #errors. Defaults to false. -Brent + #WARNING: Changing this to True manually may break tracebackrepy servicelog = False -# this is the directory where the node manager resides. We will use this -# when deciding where to write our service log. + #this is the directory where the node manager resides. We will use this + #when deciding where to write our service log. logdirectory = None -import harshexit # We need to be able to do a harshexit on internal errors. +import harshexit import exception_hierarchy -import os # needed to get the PID -import encoding_header # Subtract len(ENCODING_HEADER) from error line numbers. +import os -# This list contains all the modules which are black-listed from the -# traceback, so that if there is an exception, they will not appear in the -# "user" (filtered) traceback. + #This list contains all the modules which are black-listed from the + #traceback, so that if there is an exception, they will not appear in the + #"user" (filtered) traceback. TB_SKIP_MODULES = ["repy.py", "safe.py", "virtual_namespace.py", "namespace.py", "emulcomm.py", "emultimer.py", "emulmisc.py", "emulfile.py", "nonportable.py", "socket.py"] -# sets the user's file name. -# also sets whether or not the servicelogger is used. -Brent + #sets the user's file name. + #also sets whether or not the servicelogger is used. -Brent def initialize(useservlog=False, logdir = '.'): global servicelog global logdirectory @@ -138,9 +130,8 @@ def format_exception(): # Construct a frame of output. # Adjust traceback line numbers, see SeattleTestbed/repy_v2#95. 
- stack_frame = ' "' + filename + '", line ' + \ - str(lineno - len(encoding_header.ENCODING_DECLARATION.splitlines())) + \ - ", in " + modulename + "\n" + stack_frame = ' "' + filename + '", line ' + str(lineno) + ", in " +\ + modulename + "\n" # Always add to the full traceback full_tb += stack_frame @@ -174,19 +165,22 @@ def format_exception(): debug_str += "\n---" # Clear the exception being handled - sys.exc_clear() + try: + sys.exc_clear() + except: + pass # Return the debug string return debug_str -# This function is called when there is an uncaught exception prior to exiting + #This function is called when there is an uncaught exception prior to exiting def handle_exception(): # Get the debug string debug_str = format_exception() # Print "Uncaught exception!", followed by the debug string - print >> sys.stderr, "---\nUncaught exception!\n",debug_str + print("---\nUncaught exception!\n",debug_str) @@ -210,11 +204,12 @@ def handle_internalerror(error_string, exitcode): Shouldn't return because harshexit will always be called. """ + #pass if servicelog: import servicelogger try: - print >> sys.stderr, "Internal Error" + print("Internal Error") handle_exception() if not servicelog: # If the service log is disabled, lets just exit. @@ -247,18 +242,18 @@ def handle_internalerror(error_string, exitcode): # Again we want to ensure that even if we fail to log, we still exit. 
try: servicelogger.multi_process_log(exceptionstring, identifier, logdirectory) - except Exception, e: + except Exception as e: # if an exception occurs, log it (unfortunately, to the user's log) - print 'Inner abort of servicelogger' - print e,type(e) + print('Inner abort of servicelogger') + print(e,type(e)) traceback.print_exc() finally: harshexit.harshexit(exitcode) - except Exception, e: + except Exception as e: # if an exception occurs, log it (unfortunately, to the user's log) - print 'Outer abort of servicelogger' - print e,type(e) + print('Outer abort of servicelogger') + print(e,type(e)) traceback.print_exc() finally: harshexit.harshexit(842) diff --git a/virtual_namespace.py b/virtual_namespace.py index 7412e209..8acabb24 100644 --- a/virtual_namespace.py +++ b/virtual_namespace.py @@ -11,8 +11,7 @@ specified global context. """ -import encoding_header # Subtract len(ENCODING_HEADER) from error line numbers. -import safe # Used for safety checking +import safe as safe from exception_hierarchy import * # This is to work around safe... @@ -54,27 +53,22 @@ def __init__(self, code, name): """ # Check for the code # Do a type check + if type(code) == bytes: + code = code.decode() if type(code) is not str: - raise RepyArgumentError, "Code must be a string!" + raise RepyArgumentError("Code must be a string!") if type(name) is not str: - raise RepyArgumentError, "Name must be a string!" + raise RepyArgumentError("Name must be a string!" + str(type(name))) # Remove any windows carriage returns code = code.replace('\r\n','\n') - # Prepend an encoding string to protect against bugs in that code, - # see SeattleTestbed/repy_v1#120. - # This causes tracebacks to have an inaccurate line number, so we adjust - # them in multiple modules. See SeattleTestbed/repy_v2#95. - code = encoding_header.ENCODING_DECLARATION + code - - # Do a safety check try: safe.serial_safe_check(code) - except Exception, e: - raise CodeUnsafeError, "Code failed safety check! 
Error: "+str(e) + except Exception as e: + raise CodeUnsafeError("Code failed safety check! Error: "+str(e)) # All good, store the compiled byte code self.code = safe_compile(code,name,"exec") @@ -107,12 +101,12 @@ def evaluate(self,context): if type(context) is dict: try: context = safe.SafeDict(context) - except Exception, e: - raise ContextUnsafeError, "Provided context is not safe! Exception: "+str(e) + except Exception as e: + raise ContextUnsafeError("Provided context is not safe! Exception: "+str(e)) # Type check if not isinstance(context, safe.SafeDict): - raise RepyArgumentError, "Provided context is not a safe dictionary!" + raise RepyArgumentError("Provided context is not a safe dictionary!") # Call safe_run with the underlying dictionary safe.safe_run(self.code, context.__under__) diff --git a/windows_api.py b/windows_api.py index 8f18c893..48468705 100644 --- a/windows_api.py +++ b/windows_api.py @@ -52,13 +52,13 @@ THREAD_HANDLE_RIGHTS = THREAD_SET_INFORMATION | THREAD_SUSPEND_RESUME | THREAD_QUERY_INFORMATION PROCESS_TERMINATE = 0x0001 PROCESS_QUERY_INFORMATION = 0x0400 -SYNCHRONIZE = 0x00100000L +SYNCHRONIZE = 0x00100000 PROCESS_SET_INFORMATION = 0x0200 PROCESS_SET_QUERY_AND_TERMINATE = PROCESS_SET_INFORMATION | PROCESS_TERMINATE | PROCESS_QUERY_INFORMATION | SYNCHRONIZE ERROR_ALREADY_EXISTS = 183 WAIT_FAILED = 0xFFFFFFFF -WAIT_OBJECT_0 = 0x00000000L -WAIT_ABANDONED = 0x00000080L +WAIT_OBJECT_0 = 0x00000000 +WAIT_ABANDONED = 0x00000080 CE_FULL_PERMISSIONS = ctypes.c_ulong(0xFFFFFFFF) NORMAL_PRIORITY_CLASS = ctypes.c_ulong(0x00000020) HIGH_PRIORITY_CLASS = ctypes.c_ulong(0x00000080) @@ -394,7 +394,7 @@ def get_thread_handle(thread_id): if handle: return handle else: # Raise exception on failure - raise DeadThread, "Error opening thread handle! thread_id: " + str(thread_id) + " Error Str: " + str(ctypes.WinError()) + raise DeadThread("Error opening thread handle! 
thread_id: " + str(thread_id) + " Error Str: " + str(ctypes.WinError())) # Closes a thread handle @@ -602,7 +602,7 @@ def timeout_process(pid, stime): else: return False except DeadThread: # Escalate DeadThread to DeadProcess, because that is the underlying cause - raise DeadProcess, "Failed to sleep or resume a thread!" + raise DeadProcess("Failed to sleep or resume a thread!") # Sets the current threads priority level @@ -675,7 +675,7 @@ def get_process_handle(pid): if handle: return handle else: # Raise exception on failure - raise DeadProcess, "Error opening process handle! Process ID: " + str(pid) + " Error Str: " + str(ctypes.WinError()) + raise DeadProcess("Error opening process handle! Process ID: " + str(pid) + " Error Str: " + str(ctypes.WinError())) # Launches a new process @@ -839,7 +839,7 @@ def kill_process(pid): # Keep hackin' away at it while not dead: if (attempt > ATTEMPT_MAX): - raise DeadProcess, "Failed to kill process! Process ID: " + str(pid) + " Error Str: " + str(ctypes.WinError()) + raise DeadProcess("Failed to kill process! Process ID: " + str(pid) + " Error Str: " + str(ctypes.WinError())) # Increment attempt count attempt = attempt + 1 @@ -961,7 +961,7 @@ def get_current_thread_cpu_time(): # Check the result, error if result is 0 if res == 0: - raise Exception,(res, _get_last_error(), "Error getting thread CPU time! Error Str: " + str(ctypes.WinError())) + raise Exception((res, _get_last_error(), "Error getting thread CPU time! Error Str: " + str(ctypes.WinError()))) # Return the time return time_sum @@ -987,7 +987,7 @@ def wait_for_process(pid): # Pass in code as a pointer to store the output status = _wait_for_single_object(handle, INFINITE) if status != WAIT_OBJECT_0: - raise EnvironmentError, "Failed to wait for Process!" 
+ raise EnvironmentError("Failed to wait for Process!") # Close the Process Handle _close_handle(handle) @@ -1116,7 +1116,7 @@ def create_mutex(name): _mutex_lock_count[handle] = 0 return handle else: # Raise exception on failure - raise FailedMutex, (_get_last_error(), "Error creating mutex! Mutex name: " + str(name) + " Error Str: " + str(ctypes.WinError())) + raise FailedMutex((_get_last_error(), "Error creating mutex! Mutex name: " + str(name) + " Error Str: " + str(ctypes.WinError()))) @@ -1185,8 +1185,8 @@ def release_mutex(handle): # 0 return value means failure if release == 0: - raise FailedMutex, (_get_last_error(), "Error releasing mutex! Mutex id: " + str(handle) + " Error Str: " + str(ctypes.WinError())) - except FailedMutex, e: + raise FailedMutex((_get_last_error(), "Error releasing mutex! Mutex id: " + str(handle) + " Error Str: " + str(ctypes.WinError()))) + except FailedMutex as e: if (e[0] == 288): # 288 is for non-owned mutex, which is ok pass else: