diff --git a/authome/admin/admin.py b/authome/admin/admin.py index e20293f..26b41ce 100644 --- a/authome/admin/admin.py +++ b/authome/admin/admin.py @@ -236,6 +236,16 @@ def _usergroups(self,obj): return mark_safe("{} ({})".format(result,usergroupnames)) _usergroups.short_description = "User Groups" + def _session_timeout(self,obj): + if not obj : + return "-" + else: + usergroups = models.UserGroup.find_groups(obj.email,cacheable=False)[0] + return models.UserGroup.get_session_timeout(usergroups) or "-" + _session_timeout.short_description = "Session Timeout" + + + def _usergroupnames(self,obj): if not obj : return "" @@ -283,12 +293,12 @@ def delete_queryset(self, request, queryset): self.delete_model(request,o) class UserAdmin(UserAuthorizationCheckMixin,UserGroupsMixin,DatetimeMixin,CatchModelExceptionMixin,auth.admin.UserAdmin): - list_display = ('username', 'email', 'first_name', 'last_name','is_active', 'is_staff','last_idp','_last_login') + list_display = ('username', 'email', 'first_name', 'last_name','is_active', 'is_staff','_session_timeout','last_idp','_last_login') list_filter = ( 'is_superuser',) - readonly_fields = ("_last_login","_date_joined","username","first_name","last_name","is_staff","is_superuser","_email","_usergroups","last_idp","_modified") + readonly_fields = ("_last_login","_date_joined","username","first_name","last_name","is_staff","is_superuser","_email","_usergroups","_session_timeout","last_idp","_modified") fieldsets = ( (None, {'fields': ('_email', )}), - ('Personal info', {'fields': ('username','first_name', 'last_name')}), + ('Personal info', {'fields': ('username','first_name', 'last_name',"_session_timeout")}), ('Permissions', { 'fields': ('is_active', 'is_staff', 'is_superuser',"_usergroups" ), }), diff --git a/authome/admin/monitoradmin.py b/authome/admin/monitoradmin.py index d48b2bf..a37c2d2 100644 --- a/authome/admin/monitoradmin.py +++ b/authome/admin/monitoradmin.py @@ -9,6 +9,7 @@ from . import admin from .. import models +from ..cache import cache logger = logging.getLogger(__name__) @@ -48,6 +49,20 @@ def _avg_time(self,obj): return round(obj.avg_time,2) _avg_time.short_description = "Avg Time" + def _redis_avg_time(self,obj): + if not obj or not obj.redis_avg_time: + return "" + else: + return round(obj.redis_avg_time,2) + _redis_avg_time.short_description = "Redis Avg Time" + + def _db_avg_time(self,obj): + if not obj or not obj.db_avg_time: + return "" + else: + return round(obj.db_avg_time,2) + _db_avg_time.short_description = "DB Avg Time" + def _domains(self,obj): if not obj or not obj.domains: return "" @@ -85,14 +100,62 @@ class SSOMethodTrafficDataInline(TrafficDataPropertyMixin,djangoadmin.TabularInl readonly_fields = ("sso_method","requests","_total_time","_min_time","_max_time","_avg_time","_status","_domains") fields = readonly_fields + def _domains(self,obj): + if not obj or not obj.domains: + return "" + else: + datas = [(k,v) for k,v in obj.domains.items()] + datas.sort(key=lambda o:((o[1].get("requests") or 0) * -1,o[0]) if isinstance(o[1],dict) else (o[1] * -1,o[0])) + return mark_safe("
<pre>{}</pre>
".format("\r\n".join(" {} : {}".format(o[0],json.dumps(o[1],sort_keys=True,indent=4) if isinstance(o[1],dict) else o[1]) for o in datas))) + _domains.short_description = "Groups" + +if settings.REDIS_TRAFFIC_MONITOR_LEVEL > 0 and settings.DB_TRAFFIC_MONITOR_LEVEL > 0: + list_display_4_cluster = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions") + list_display = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time") + fields_4_cluster = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains","_batchid") + fields = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time","_status","_domains","_batchid") +elif settings.REDIS_TRAFFIC_MONITOR_LEVEL > 0: + list_display_4_cluster = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","get_remote_sessions","delete_remote_sessions") + list_display = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time") + fields_4_cluster = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains","_batchid") + fields = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","_status","_domains","_batchid") +elif settings.DB_TRAFFIC_MONITOR_LEVEL > 0: + list_display_4_cluster = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions") + list_display = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time") + fields_4_cluster = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains","_batchid") + fields = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time","_status","_domains","_batchid") +else: + list_display_4_cluster = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions") + list_display = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time") + fields_4_cluster = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains","_batchid") + fields = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","_status","_domains","_batchid") + class TrafficDataAdmin(TrafficDataPropertyMixin,admin.DatetimeMixin,djangoadmin.ModelAdmin): - list_display = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions") - 
readonly_fields = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains","_batchid") - fields = readonly_fields ordering = ("-start_time","clusterid") list_filter = ['clusterid'] inlines = [SSOMethodTrafficDataInline] + @property + def list_display(self): + if settings.AUTH2_CLUSTER_ENABLED and cache.auth2_clusters: + return list_display_4_cluster + else: + return list_display + + @property + def readonly_fields(self): + if settings.AUTH2_CLUSTER_ENABLED and cache.auth2_clusters: + return fields_4_cluster + else: + return fields + + @property + def fields(self): + if settings.AUTH2_CLUSTER_ENABLED and cache.auth2_clusters: + return fields_4_cluster + else: + return fields + def _subreports(self,obj): if not obj: return "" @@ -143,10 +206,41 @@ class SSOMethodTrafficReportInline(TrafficDataPropertyMixin,djangoadmin.TabularI readonly_fields = ("sso_method","requests","_total_time","_min_time","_max_time","_avg_time","_status","_domains") fields = readonly_fields + def _domains(self,obj): + if not obj or not obj.domains: + return "" + else: + datas = [(k,v) for k,v in obj.domains.items()] + datas.sort(key=lambda o:((o[1].get("requests") or 0) * -1,o[0]) if isinstance(o[1],dict) else (o[1] * -1,o[0])) + return mark_safe("
<pre>{}</pre>
".format("\r\n".join(" {} : {}".format(o[0],json.dumps(o[1],sort_keys=True,indent=4) if isinstance(o[1],dict) else o[1]) for o in datas))) + _domains.short_description = "Groups" + +if settings.REDIS_TRAFFIC_MONITOR_LEVEL > 0 and settings.DB_TRAFFIC_MONITOR_LEVEL > 0: + report_list_display_4_cluster = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions","_subreports") + report_list_display = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time","_subreports") + + report_fields_4_cluster = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains") + report_fields = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time","_status","_domains") +elif settings.REDIS_TRAFFIC_MONITOR_LEVEL > 0: + report_list_display_4_cluster = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","get_remote_sessions","delete_remote_sessions","_subreports") + report_list_display = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","_subreports") + + report_fields_4_cluster = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains") + report_fields = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","_status","_domains") +elif settings.DB_TRAFFIC_MONITOR_LEVEL > 0: + report_list_display_4_cluster = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions","_subreports") + report_list_display = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time","_subreports") + + report_fields_4_cluster = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains") + report_fields = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time","_status","_domains") +else: + report_list_display_4_cluster = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions","_subreports") + report_list_display = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","_subreports") + + report_fields_4_cluster = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains") + report_fields = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","_status","_domains") + class 
TrafficReportAdmin(TrafficDataPropertyMixin,admin.DatetimeMixin,djangoadmin.ModelAdmin): - list_display = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions","_subreports") - readonly_fields = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains") - fields = readonly_fields ordering = ("report_type","-start_time",'clusterid') list_filter = ['clusterid',"report_type"] inlines = [SSOMethodTrafficReportInline] @@ -154,6 +248,27 @@ class TrafficReportAdmin(TrafficDataPropertyMixin,admin.DatetimeMixin,djangoadmi traffic_data_list_url_name = 'admin:{}_{}_changelist'.format(models.TrafficData._meta.app_label,models.TrafficData._meta.model_name) traffic_report_list_url_name = 'admin:{}_{}_changelist'.format(models.TrafficReport._meta.app_label,models.TrafficReport._meta.model_name) + @property + def list_display(self): + if settings.AUTH2_CLUSTER_ENABLED and cache.auth2_clusters: + return report_list_display_4_cluster + else: + return report_list_display + + @property + def readonly_fields(self): + if settings.AUTH2_CLUSTER_ENABLED and cache.auth2_clusters: + return report_fields_4_cluster + else: + return report_fields + + @property + def fields(self): + if settings.AUTH2_CLUSTER_ENABLED and cache.auth2_clusters: + return report_fields_4_cluster + else: + return report_fields + def _subreports(self,obj): if not obj: return "" diff --git a/authome/cache/cache.py b/authome/cache/cache.py index 74a2519..004be11 100644 --- a/authome/cache/cache.py +++ b/authome/cache/cache.py @@ -26,7 +26,6 @@ else: get_defaultcache = lambda :None - defaultcache = get_defaultcache() class TaskRunable(object): @@ -211,7 +210,6 @@ def __init__(self): @property def usergrouptree(self): if not self._usergrouptree: - logger.error("The usergrouptree cache is Empty, Try to refresh the data to bring the cache back to normal state") self.refresh_usergroups() return self._usergrouptree @@ -219,7 +217,6 @@ def usergrouptree(self): @property def usergroups(self): if not self._usergroups: - logger.error("The usergroups cache is Empty, Try to refresh the data to bring the cache back to normal state") self.refresh_usergroups() return self._usergroups @@ -244,7 +241,6 @@ def usergrouptree(self,value): @property def usergroupauthorization(self): if not self._usergroupauthorization: - logger.error("The usergroupauthorization cache is Empty, Try to refresh the data to bring the cache back to normal state") self.refresh_usergroupauthorization() return self._usergroupauthorization @@ -700,7 +696,14 @@ def healthy(self): return (False,msgs) if msgs else (True,["ok"]) if settings.TRAFFIC_MONITOR_LEVEL > 0: - class _BaseMemoryCacheWithTrafficMonitor(_BaseMemoryCache): + def _clean_traffic_data(data): + for key in data.keys(): + if isinstance(data[key],dict): + _clean_traffic_data(data[key]) + else: + data[key] = 0 + + class _MemoryCacheWithTrafficMonitor(_BaseMemoryCache): def __init__(self): super().__init__() self._traffic_data = None @@ -731,102 +734,90 @@ def _save_traffic_data(self,start): if self._traffic_data : self._traffic_data["starttime"] = data_starttime self._traffic_data["endtime"] = data_endtime + traffic_data = json.dumps(self._traffic_data) + + for data in self._traffic_data.values(): + if not isinstance(data,dict): + continue + _clean_traffic_data(data) + try: - length = 
self._client.rpush(self.traffic_data_key,json.dumps(self._traffic_data)) + length = self._client.rpush(self.traffic_data_key,traffic_data) except: from authome.models import DebugLog DebugLog.warning(DebugLog.ERROR,None,None,None,None,"Failed to save the traffic data to cache.{}".format(traceback.format_exc())) pass - if settings.TRAFFIC_MONITOR_LEVEL == 1: - class _MemoryCacheWithTrafficMonitor(_BaseMemoryCacheWithTrafficMonitor): - def __init__(self): - super().__init__() - logger.debug("Traffic monitor level 1 is enabled") - - def log_request(self,name,host,start,status_code): - if start >= self._traffic_data_next_ts: - self._save_traffic_data(start) - if self._traffic_data: - for data in self._traffic_data.values(): - if not isinstance(data,dict): - continue - for key in data.keys(): - if key == "status": - for domain in data[key].keys(): - data[key][domain] = 0 - else: - data[key] = 0 - - ptime = round((timezone.localtime() - start).total_seconds() * 1000,2) - try: - data = self._traffic_data[name] - except KeyError as ex: - # name not in _traffic_data - self._traffic_data[name] = { + def _log_request_1(self,name,group,start,status_code,groupname="domains"): + if start >= self._traffic_data_next_ts: + self._save_traffic_data(start) + + ptime = round((timezone.localtime() - start).total_seconds() * 1000,2) + try: + data = self._traffic_data[name] + except KeyError as ex: + # name not in _traffic_data + self._traffic_data[name] = { + "requests":1, + "totaltime":ptime, + "mintime":ptime, + "maxtime":ptime, + "status":{ + status_code:1 + } + } + return ptime + except: + #_traffic_data is None + self._traffic_data= { + "serverid":utils.get_processid(), + name: { "requests":1, "totaltime":ptime, "mintime":ptime, "maxtime":ptime, "status":{ - status_code:1 + status_code:1 } } - return ptime - except: - #_traffic_data is None - self._traffic_data= { - "serverid":utils.get_processid(), - name: { - "requests":1, - "totaltime":ptime, - "mintime":ptime, - "maxtime":ptime, - "status":{ - status_code:1 - } - } + } + return ptime + data["requests"] += 1 + data["totaltime"] += ptime + if not data["mintime"] or data["mintime"] > ptime: + data["mintime"] = ptime + if data["maxtime"] < ptime: + data["maxtime"] = ptime + data["status"][status_code] = data["status"].get(status_code,0) + 1 + return ptime + + def _log_request_2(self,name,group,start,status_code,groupname="domains"): + if start >= self._traffic_data_next_ts: + self._save_traffic_data(start) + + ptime = round((timezone.localtime() - start).total_seconds() * 1000,2) + try: + data = self._traffic_data[name] + except KeyError as ex: + # name not in _traffic_data + self._traffic_data[name] = { + "requests":1, + "totaltime":ptime, + "mintime":ptime, + "maxtime":ptime, + "status":{ + status_code:1 + }, + groupname: { + group : 1 } - return ptime - data["requests"] += 1 - data["totaltime"] += ptime - if not data["mintime"] or data["mintime"] > ptime: - data["mintime"] = ptime - if data["maxtime"] < ptime: - data["maxtime"] = ptime - data["status"][status_code] = data["status"].get(status_code,0) + 1 + } return ptime - elif settings.TRAFFIC_MONITOR_LEVEL == 2: - class _MemoryCacheWithTrafficMonitor(_BaseMemoryCacheWithTrafficMonitor): - def __init__(self): - super().__init__() - logger.debug("Traffic monitor level 2 is enabled") - - def log_request(self,name,host,start,status_code): - if start >= self._traffic_data_next_ts: - self._save_traffic_data(start) - if self._traffic_data: - for data in self._traffic_data.values(): - if not 
isinstance(data,dict): - continue - for key in data.keys(): - if key == "domains": - for domain in [d for d,v in data[key].items() if v == 0]: - del data[key][domain] - for domain in data[key].keys(): - data[key][domain] = 0 - elif key in ("status",): - for domain in data[key].keys(): - data[key][domain] = 0 - else: - data[key] = 0 - - ptime = round((timezone.localtime() - start).total_seconds() * 1000,2) - try: - data = self._traffic_data[name] - except KeyError as ex: - # name not in _traffic_data - self._traffic_data[name] = { + except: + #_traffic_data is None + self._traffic_data = { + "serverid":utils.get_processid(), + name: { "requests":1, "totaltime":ptime, "mintime":ptime, @@ -834,75 +825,57 @@ def log_request(self,name,host,start,status_code): "status":{ status_code:1 }, - "domains": { - host : 1 + groupname: { + group : 1 } } - return ptime - except: - #_traffic_data is None - self._traffic_data = { - "serverid":utils.get_processid(), - name: { + } + return ptime + data["requests"] += 1 + data["totaltime"] += ptime + if not data["mintime"] or data["mintime"] > ptime: + data["mintime"] = ptime + if data["maxtime"] < ptime: + data["maxtime"] = ptime + data["status"][status_code] = data["status"].get(status_code,0) + 1 + data[groupname][group] = data[groupname].get(group,0) + 1 + return ptime + + def _log_request_3(self,name,group,start,status_code,groupname="domains"): + if start >= self._traffic_data_next_ts: + self._save_traffic_data(start) + + ptime = round((timezone.localtime() - start).total_seconds() * 1000,2) + try: + data = self._traffic_data[name] + except KeyError as ex: + # name not in _traffic_data + self._traffic_data[name] = { + "requests":1, + "totaltime":ptime, + "mintime":ptime, + "maxtime":ptime, + "status":{ + status_code:1 + }, + groupname: { + group : { "requests":1, "totaltime":ptime, "mintime":ptime, "maxtime":ptime, "status":{ status_code:1 - }, - "domains": { - host : 1 } } } - return ptime - data["requests"] += 1 - data["totaltime"] += ptime - if not data["mintime"] or data["mintime"] > ptime: - data["mintime"] = ptime - if data["maxtime"] < ptime: - data["maxtime"] = ptime - data["status"][status_code] = data["status"].get(status_code,0) + 1 - data["domains"][host] = data["domains"].get(host,0) + 1 + } return ptime - else: - class _MemoryCacheWithTrafficMonitor(_BaseMemoryCacheWithTrafficMonitor): - def __init__(self): - super().__init__() - logger.debug("Traffic monitor level 3 is enabled") - - def log_request(self,name,host,start,status_code): - if start >= self._traffic_data_next_ts: - self._save_traffic_data(start) - if self._traffic_data: - for data in self._traffic_data.values(): - if not isinstance(data,dict): - continue - for key in data.keys(): - if key == "status": - for k in data[key].keys(): - data[key][k] = 0 - elif key == "domains": - for domain in [d for d,v in data[key].items() if v["requests"] == 0]: - del data[key][domain] - for domain_data in data[key].values(): - for k in domain_data.keys(): - if k == "status": - for k1 in domain_data[k].keys(): - domain_data[k][k1] = 0 - else: - domain_data[k] = 0 - else: - data[key] = 0 - - - ptime = round((timezone.localtime() - start).total_seconds() * 1000,2) - try: - data = self._traffic_data[name] - except KeyError as ex: - # name not in _traffic_data - self._traffic_data[name] = { + except: + # _traffic_data is None + self._traffic_data = { + "serverid":utils.get_processid(), + name: { "requests":1, "totaltime":ptime, "mintime":ptime, @@ -910,8 +883,8 @@ def 
log_request(self,name,host,start,status_code): "status":{ status_code:1 }, - "domains": { - host : { + groupname: { + group : { "requests":1, "totaltime":ptime, "mintime":ptime, @@ -922,64 +895,68 @@ def log_request(self,name,host,start,status_code): } } } - return ptime - except: - # _traffic_data is None - self._traffic_data = { - "serverid":utils.get_processid(), - name: { - "requests":1, - "totaltime":ptime, - "mintime":ptime, - "maxtime":ptime, - "status":{ - status_code:1 - }, - "domains": { - host : { - "requests":1, - "totaltime":ptime, - "mintime":ptime, - "maxtime":ptime, - "status":{ - status_code:1 - } - } - } - } - } - return ptime - - data["requests"] += 1 - data["totaltime"] += ptime - if not data["mintime"] or data["mintime"] > ptime: - data["mintime"] = ptime - if data["maxtime"] < ptime: - data["maxtime"] = ptime - data["status"][status_code] = data["status"].get(status_code,0) + 1 - - try: - domain_data = data["domains"][host] - domain_data["requests"] += 1 - domain_data["totaltime"] += ptime - if not domain_data["mintime"] or domain_data["mintime"] > ptime: - domain_data["mintime"] = ptime - if domain_data["maxtime"] < ptime: - domain_data["maxtime"] = ptime - domain_data["status"][status_code] = domain_data["status"].get(status_code,0) + 1 - except: - domain_data = { - "requests":1, - "totaltime":ptime, - "mintime":ptime, - "maxtime":ptime, - "status":{ - status_code:1 - } - } - data["domains"][host] = domain_data - + } return ptime + + data["requests"] += 1 + data["totaltime"] += ptime + if not data["mintime"] or data["mintime"] > ptime: + data["mintime"] = ptime + if data["maxtime"] < ptime: + data["maxtime"] = ptime + data["status"][status_code] = data["status"].get(status_code,0) + 1 + try: + group_data = data[groupname][group] + group_data["requests"] += 1 + group_data["totaltime"] += ptime + if not group_data["mintime"] or group_data["mintime"] > ptime: + group_data["mintime"] = ptime + if group_data["maxtime"] < ptime: + group_data["maxtime"] = ptime + group_data["status"][status_code] = group_data["status"].get(status_code,0) + 1 + except: + group_data = { + "requests":1, + "totaltime":ptime, + "mintime":ptime, + "maxtime":ptime, + "status":{ + status_code:1 + } + } + data[groupname][group] = group_data + + return ptime + + if settings.TRAFFIC_MONITOR_LEVEL == 1: + logger.debug("Traffic monitor level 1 is enabled") + _MemoryCacheWithTrafficMonitor.log_request = _MemoryCacheWithTrafficMonitor._log_request_1 + elif settings.TRAFFIC_MONITOR_LEVEL == 2: + logger.debug("Traffic monitor level 2 is enabled") + _MemoryCacheWithTrafficMonitor.log_request = _MemoryCacheWithTrafficMonitor._log_request_2 + else: + logger.debug("Traffic monitor level 3 is enabled") + _MemoryCacheWithTrafficMonitor.log_request = _MemoryCacheWithTrafficMonitor._log_request_3 + + if settings.REDIS_TRAFFIC_MONITOR_LEVEL == 1: + logger.debug("Reids traffic monitor level 1 is enabled") + _MemoryCacheWithTrafficMonitor.log_redisrequest = _MemoryCacheWithTrafficMonitor._log_request_1 + elif settings.REDIS_TRAFFIC_MONITOR_LEVEL == 2: + logger.debug("Reids traffic monitor level 2 is enabled") + _MemoryCacheWithTrafficMonitor.log_redisrequest = _MemoryCacheWithTrafficMonitor._log_request_2 + elif settings.REDIS_TRAFFIC_MONITOR_LEVEL > 0: + logger.debug("Reids traffic monitor level 3 is enabled") + _MemoryCacheWithTrafficMonitor.log_redisrequest = _MemoryCacheWithTrafficMonitor._log_request_3 + + if settings.DB_TRAFFIC_MONITOR_LEVEL == 1: + logger.debug("DB traffic monitor level 1 is enabled") + 
_MemoryCacheWithTrafficMonitor.log_dbrequest = _MemoryCacheWithTrafficMonitor._log_request_1 + elif settings.DB_TRAFFIC_MONITOR_LEVEL == 2: + logger.debug("DB traffic monitor level 2 is enabled") + _MemoryCacheWithTrafficMonitor.log_dbrequest = _MemoryCacheWithTrafficMonitor._log_request_2 + elif settings.DB_TRAFFIC_MONITOR_LEVEL > 0: + logger.debug("DB traffic monitor level 3 is enabled") + _MemoryCacheWithTrafficMonitor.log_dbrequest = _MemoryCacheWithTrafficMonitor._log_request_3 if True or settings.SYNC_MODE: class MemoryCache(_MemoryCacheWithTrafficMonitor): @@ -1001,3 +978,4 @@ def log_request(self,name,host,start,status_code): else: class MemoryCache(_BaseMemoryCache): pass + diff --git a/authome/cache/clustercache.py b/authome/cache/clustercache.py index 22daabc..cf26734 100644 --- a/authome/cache/clustercache.py +++ b/authome/cache/clustercache.py @@ -68,24 +68,24 @@ def default_auth2_cluster(self): """ In cluster environment, default cluster can be null, but current auth2 cluster can't be null """ - if not self._current_auth2_cluster: + if self._current_auth2_cluster is None: self.refresh_auth2_clusters(True) return self._default_auth2_cluster @property def current_auth2_cluster(self): - if not self._current_auth2_cluster: + if self._current_auth2_cluster is None: self.refresh_auth2_clusters(True) return self._current_auth2_cluster @property def auth2_clusters(self): - if not self._auth2_clusters: + if self._current_auth2_cluster is None: self.refresh_auth2_clusters(True) return self._auth2_clusters def refresh_auth2_clusters(self,force=False): - if self._auth2_clusters_check_time.can_run() or force or not self._auth2_clusters: + if self._auth2_clusters_check_time.can_run() or force or self._current_auth2_cluster is None: from ..models import Auth2Cluster refreshtime = timezone.localtime() l1 = len(self._auth2_clusters) @@ -139,7 +139,7 @@ def _send_request_to_other_clusters(self,request,f_send_req,force_refresh=False) DebugLog.warning(DebugLog.INTERCONNECTION_TIMEOUT,None,o.clusterid,None,None,message="Accessing auth2 cluster({1}) times out({0} seconds),{2}".format(settings.AUTH2_INTERCONNECTION_TIMEOUT,o.clusterid,str(ex)),request=request) failed_clusters = utils.add_to_list(failed_clusters,(o,ex)) - if not force_refresh and (retry_clusters or not self._auth2_clusters): + if not force_refresh and (retry_clusters or self._current_auth2_cluster is None): #some clusters failed self.refresh_auth2_clusters(True) for o,ex in retry_clusters: @@ -500,7 +500,7 @@ def status(self): @property def healthy(self): health,msgs = super().healthy - if not self._auth2_clusters and not self._default_auth2_cluster: + if not self._auth2_clusters and not self._current_auth2_cluster: if health: health = False msgs = ["Auth2 cluster cache is empty"] diff --git a/authome/migrations/0047_alter_auth2cluster_options_and_more.py b/authome/migrations/0047_alter_auth2cluster_options_and_more.py new file mode 100644 index 0000000..ba47048 --- /dev/null +++ b/authome/migrations/0047_alter_auth2cluster_options_and_more.py @@ -0,0 +1,117 @@ +# Generated by Django 4.2.16 on 2024-10-28 07:00 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('authome', '0046_alter_user_comments'), + ] + + operations = [ + migrations.AlterModelOptions( + name='auth2cluster', + options={'verbose_name_plural': ' Auth2 Clusters'}, + ), + migrations.AlterModelOptions( + name='customizableuserflow', + options={'verbose_name_plural': ' Customizable Userflows'}, + ), + 
migrations.AlterModelOptions( + name='debuglog', + options={'verbose_name_plural': ' Auth2 Logs'}, + ), + migrations.AlterModelOptions( + name='identityprovider', + options={'verbose_name_plural': ' Identity Providers'}, + ), + migrations.AlterModelOptions( + name='normaluser', + options={'verbose_name': 'User', 'verbose_name_plural': ' Users'}, + ), + migrations.AlterModelOptions( + name='normalusertoken', + options={'verbose_name': 'System User', 'verbose_name_plural': ' User Tokens'}, + ), + migrations.AlterModelOptions( + name='systemuser', + options={'verbose_name': 'System User', 'verbose_name_plural': ' System Users'}, + ), + migrations.AlterModelOptions( + name='systemusertoken', + options={'verbose_name': 'System User', 'verbose_name_plural': ' System User Tokens'}, + ), + migrations.AlterModelOptions( + name='trafficdata', + options={'verbose_name_plural': ' Traffic Data'}, + ), + migrations.AlterModelOptions( + name='trafficreport', + options={'verbose_name_plural': ' Traffic Report'}, + ), + migrations.AlterModelOptions( + name='user', + options={'verbose_name': 'user', 'verbose_name_plural': ' Users'}, + ), + migrations.AlterModelOptions( + name='userauthorization', + options={'verbose_name_plural': ' User Authorizations'}, + ), + migrations.AlterModelOptions( + name='usergroup', + options={'verbose_name_plural': ' User Groups'}, + ), + migrations.AlterModelOptions( + name='usergroupauthorization', + options={'verbose_name_plural': ' User Group Authorizations'}, + ), + migrations.AlterModelOptions( + name='usertoken', + options={'verbose_name_plural': ' Access Tokens'}, + ), + migrations.AlterModelOptions( + name='usertotp', + options={'verbose_name_plural': ' User TOTPs'}, + ), + migrations.AddField( + model_name='trafficdata', + name='db_avg_time', + field=models.FloatField(editable=False, null=True), + ), + migrations.AddField( + model_name='trafficdata', + name='db_requests', + field=models.PositiveIntegerField(default=0, editable=False), + ), + migrations.AddField( + model_name='trafficdata', + name='redis_avg_time', + field=models.FloatField(editable=False, null=True), + ), + migrations.AddField( + model_name='trafficdata', + name='redis_requests', + field=models.PositiveIntegerField(default=0, editable=False), + ), + migrations.AddField( + model_name='trafficreport', + name='db_avg_time', + field=models.FloatField(editable=False, null=True), + ), + migrations.AddField( + model_name='trafficreport', + name='db_requests', + field=models.PositiveIntegerField(default=0, editable=False), + ), + migrations.AddField( + model_name='trafficreport', + name='redis_avg_time', + field=models.FloatField(editable=False, null=True), + ), + migrations.AddField( + model_name='trafficreport', + name='redis_requests', + field=models.PositiveIntegerField(default=0, editable=False), + ), + ] diff --git a/authome/models/models.py b/authome/models/models.py index 27502e1..406afe4 100644 --- a/authome/models/models.py +++ b/authome/models/models.py @@ -1665,6 +1665,11 @@ def refresh_cache(cls): refreshtime = timezone.localtime() for authorization in UserGroupAuthorization.objects.all().order_by("usergroup","sortkey"): size += 1 + #try to get the data from cache to avoid a extra db access + try: + authorization.usergroup = cache.usergroups[authorization.usergroup_id] + except: + pass if not previous_usergroup: usergroupauthorization[authorization.usergroup] = [authorization] diff --git a/authome/models/trafficmodels.py b/authome/models/trafficmodels.py index f7de90b..1c4f141 100644 --- 
a/authome/models/trafficmodels.py +++ b/authome/models/trafficmodels.py @@ -30,6 +30,10 @@ class TrafficData(models.Model): min_time = models.FloatField(null=True,editable=False) max_time = models.FloatField(null=True,editable=False) avg_time = models.FloatField(null=True,editable=False) + redis_requests = models.PositiveIntegerField(default=0,editable=False) + redis_avg_time = models.FloatField(null=True,editable=False) + db_requests = models.PositiveIntegerField(default=0,editable=False) + db_avg_time = models.FloatField(null=True,editable=False) get_remote_sessions = models.PositiveIntegerField(default=0,editable=False) delete_remote_sessions = models.PositiveIntegerField(default=0,editable=False) status = models.JSONField(null=True,editable=False) @@ -116,6 +120,10 @@ class TrafficReport(models.Model): max_time = models.FloatField(null=True,editable=False) avg_time = models.FloatField(null=True,editable=False) status = models.JSONField(null=True,editable=False) + redis_requests = models.PositiveIntegerField(default=0,editable=False) + redis_avg_time = models.FloatField(null=True,editable=False) + db_requests = models.PositiveIntegerField(default=0,editable=False) + db_avg_time = models.FloatField(null=True,editable=False) get_remote_sessions = models.PositiveIntegerField(default=0,editable=False) delete_remote_sessions = models.PositiveIntegerField(default=0,editable=False) domains = models.JSONField(null=True,editable=False) diff --git a/authome/monitordb.py b/authome/monitordb.py new file mode 100644 index 0000000..d51dcf6 --- /dev/null +++ b/authome/monitordb.py @@ -0,0 +1,80 @@ +import re +import logging + +from django.db import connections +from django.utils import timezone +from django.conf import settings +from django.db.backends.signals import connection_created + +from . import utils + +logger = logging.getLogger(__name__) + +if settings.DB_TRAFFIC_MONITOR_LEVEL > 0: + _cache = None + select_re = re.compile('select\\s.+\\sfrom\\s+("?[a-zA-Z0-9\\-_]+"?\\.)?"?(?P[a-zA-Z0-9\\-_]+)"?',re.I|re.M|re.S) + delete_re = re.compile('delete\\s+from\\s+("?[a-zA-Z0-9\\-_]+"?\\.)?"?(?P
[a-zA-Z0-9\\-_]+)"?',re.I|re.M|re.S) + update_re = re.compile('update\\s+("?[a-zA-Z0-9\\-_]+"?\\.)?"?(?P
[a-zA-Z0-9\\-_]+)"?',re.I|re.M|re.S) + insert_re = re.compile('insert\\s+into\\s+("?[a-zA-Z0-9\\-_]+"?\\.)?"?(?P
[a-zA-Z0-9\\-_]+)"?',re.I|re.M|re.S) + savepoint_re = re.compile('\\s*(RELEASE\\s+)?SAVEPOINT\\s+',re.I|re.M|re.S) + + def monitor_db_access(execute, sql, params, many, context): + global _cache + try: + starttime = timezone.localtime() + status = "OK" + return execute(sql, params, many, context) + except Exception as ex: + status = ex.__class__.__name__ + raise + finally: + #cache and ignore the exceptions which are thrown before cache is fully initialized + try: + if settings.DB_TRAFFIC_MONITOR_LEVEL == 1: + _cache.log_dbrequest("DB",None,starttime,status) + else: + m = select_re.search(sql) + if m: + _cache.log_dbrequest("DB","SELECT {}".format(m.group("table").lower()),starttime,status) + elif sql == utils.ping_db_sql: + _cache.log_dbrequest("DB","PING",starttime,status) + else: + m = savepoint_re.search(sql) + if not m: + m = update_re.search(sql) + if m: + _cache.log_dbrequest("DB","UPDATE {}".format(m.group("table").lower()),starttime,status) + else: + m = insert_re.search(sql) + if m: + _cache.log_dbrequest("DB","INSERT {}".format(m.group("table").lower()),starttime,status) + else: + m = delete_re.search(sql) + if m: + _cache.log_dbrequest("DB","DELETE {}".format(m.group("table").lower()),starttime,status) + else: + _cache.log_dbrequest("DB","OTHERS",starttime,status) + except: + try: + from . import cache + _cache = cache.cache + except: + _cache = None + + def install_monitor_db_access(connection, **kwargs): + """ + Install monitor_db_access on the given database connection. + Rather than use the documented API of the `execute_wrapper()` context + manager, directly insert the hook. + """ + + if monitor_db_access in connection.execute_wrappers: + return + + connection.execute_wrappers.insert(0, monitor_db_access) + + + for connection in connections.all(): + install_monitor_db_access(connection=connection) + + connection_created.connect(install_monitor_db_access) diff --git a/authome/redis.py b/authome/redis.py index 34f8908..c38d389 100644 --- a/authome/redis.py +++ b/authome/redis.py @@ -1,4 +1,5 @@ import re +import traceback from datetime import timedelta import time from itertools import chain @@ -18,27 +19,6 @@ logger = logging.getLogger(__name__) -def is_cluster(url): - ex = None - for redisurl in url.split(";"): - redisurl = redisurl.strip() - if not redisurl: - continue - client = None - try: - client = redis.Redis.from_url(redisurl) - data = client.info("cluster") - logger.debug("Redis server({}) is cluster {}".format(redisurl,"enabled" if data["cluster_enabled"] else "disabled")) - return True if data["cluster_enabled"] else False - except Exception as e: - if client: - client.close() - ex = e - if ex: - raise Exception("No available redis server.{}".format(str(ex))) - else: - raise Exception("No available redis server.") - redis_re = re.compile("^\\s*((?P[a-zA-Z]+)://((?P[^:@]+)?(:(?P[^@]+)?)?@)?)?(?P[^:/]+)(:(?P[0-9]+))?(/(?P[0-9]+))?\\s*$") class CacheMixin(object): _parsed_servers = {} @@ -192,7 +172,7 @@ def __init__(self, servers, **options ): if retry_attempts >= 1: options["retry"] = redis.retry.Retry(redis.backoff.NoBackoff(),retry_attempts) super().__init__(servers,**options) - + def ttl(self, key): client = self.get_client(key) return client.ttl(key) diff --git a/authome/settings.py b/authome/settings.py index b4ceeba..d8ab6c8 100644 --- a/authome/settings.py +++ b/authome/settings.py @@ -1,12 +1,11 @@ import os import tomllib -from .utils import env, get_digest_function +from django.utils import timezone +from .utils import env, get_digest_function,is_cluster 
from datetime import timedelta import dj_database_url -from . import redis - DEFAULT_AUTO_FIELD = "django.db.models.AutoField" # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) @@ -399,14 +398,89 @@ def GET_SESSION_COOKIE_DOMAIN(domain): #KEY_PREFIX is None and KEY_VERSION_ENABLED is False STANDALONE_KEY_FUNCTION = lambda key,key_prefix,version : key +CACHE_SERVER = env("CACHE_SERVER") +CACHE_SERVER_OPTIONS = env("CACHE_SERVER_OPTIONS",default={}) + +CACHE_SESSION_SERVER = env("CACHE_SESSION_SERVER") +CACHE_SESSION_SERVER_OPTIONS = env("CACHE_SESSION_SERVER_OPTIONS",default={}) + +PREVIOUS_CACHE_SESSION_SERVER = env("PREVIOUS_CACHE_SESSION_SERVER") +PREVIOUS_CACHE_SESSION_SERVER_OPTIONS = env("PREVIOUS_CACHE_SESSION_SERVER_OPTIONS",default={}) + +CACHE_USER_SERVER = env("CACHE_USER_SERVER") +CACHE_USER_SERVER_OPTIONS = env("CACHE_USER_SERVER_OPTIONS",default={}) + +USER_CACHE_ALIAS = None +PREVIOUS_SESSION_CACHE_ALIAS=None + +GET_DEFAULT_CACHE_KEY = lambda key:key +GET_USER_KEY = lambda userid:str(userid) +GET_USERTOKEN_KEY = lambda userid:"T{}".format(userid) + +SESSION_CACHES = 0 +PREVIOUS_SESSION_CACHES = 0 +SESSION_CACHES = 0 +USER_CACHES = 0 + +TRAFFIC_MONITOR_LEVEL = env('TRAFFIC_MONITOR_LEVEL',default=0) #0: disabled, 1:summary, 2: per domain +if not CACHE_SERVER or not CACHE_SERVER.lower().startswith('redis'): + TRAFFIC_MONITOR_LEVEL = 0 +if TRAFFIC_MONITOR_LEVEL > 0: + try: + REDIS_TRAFFIC_MONITOR_LEVEL = int(env('REDIS_TRAFFIC_MONITOR_LEVEL',default=0)) + except: + REDIS_TRAFFIC_MONITOR_LEVEL = 0 + try: + DB_TRAFFIC_MONITOR_LEVEL = int(env('DB_TRAFFIC_MONITOR_LEVEL',default=0)) + except: + DB_TRAFFIC_MONITOR_LEVEL = 0 +else: + REDIS_TRAFFIC_MONITOR_LEVEL = 0 + DB_TRAFFIC_MONITOR_LEVEL = 0 + +TRAFFIC_MONITOR_INTERVAL=env('TRAFFIC_MONITOR_INTERVAL',default=3600) +if TRAFFIC_MONITOR_INTERVAL and TRAFFIC_MONITOR_INTERVAL > 0: + if 86400 % TRAFFIC_MONITOR_INTERVAL > 0 : + #One day can't be divided by interval, invalid, reset it to one hour + TRAFFIC_MONITOR_INTERVAL = timedelta(seconds=3600) + else: + TRAFFIC_MONITOR_INTERVAL = timedelta(seconds=TRAFFIC_MONITOR_INTERVAL) +else: + TRAFFIC_MONITOR_INTERVAL = timedelta(seconds=3600) + +if REDIS_TRAFFIC_MONITOR_LEVEL > 0: + import redis + class MonitorEnabledConnection(redis.Connection): + _cache = None + def send_command(self, *args, **kwargs): + try: + starttime = timezone.localtime() + status = "OK" + return super().send_command(*args,**kwargs) + except Exception as ex: + status = ex.__class__.__name__ + raise + finally: + #cache and ignore the exceptions which are thrown before cache is fully initialized + try: + MonitorEnabledConnection._cache.log_redisrequest("Redis",args[0],starttime,status) + except: + try: + from . 
import cache + MonitorEnabledConnection._cache = cache.cache + except: + MonitorEnabledConnection._cache = None + def GET_CACHE_CONF(cacheid,server,options={},key_function=KEY_FUNCTION): if server.lower().startswith('redis'): if "max_connections" not in options: - options["max_connections"] = 2 + options["max_connections"] = 10 + if REDIS_TRAFFIC_MONITOR_LEVEL > 0: + options["connection_class"] = MonitorEnabledConnection cluster = options.pop("cluster",None) if cluster is None: - cluster = redis.is_cluster(server) + cluster = is_cluster(server) if cluster: if "require_full_coverage" not in options: @@ -434,29 +508,6 @@ def GET_CACHE_CONF(cacheid,server,options={},key_function=KEY_FUNCTION): "OPTIONS": options } -CACHE_SERVER = env("CACHE_SERVER") -CACHE_SERVER_OPTIONS = env("CACHE_SERVER_OPTIONS",default={}) - -CACHE_SESSION_SERVER = env("CACHE_SESSION_SERVER") -CACHE_SESSION_SERVER_OPTIONS = env("CACHE_SESSION_SERVER_OPTIONS",default={}) - -PREVIOUS_CACHE_SESSION_SERVER = env("PREVIOUS_CACHE_SESSION_SERVER") -PREVIOUS_CACHE_SESSION_SERVER_OPTIONS = env("PREVIOUS_CACHE_SESSION_SERVER_OPTIONS",default={}) - -CACHE_USER_SERVER = env("CACHE_USER_SERVER") -CACHE_USER_SERVER_OPTIONS = env("CACHE_USER_SERVER_OPTIONS",default={}) - -USER_CACHE_ALIAS = None -PREVIOUS_SESSION_CACHE_ALIAS=None - -GET_DEFAULT_CACHE_KEY = lambda key:key -GET_USER_KEY = lambda userid:str(userid) -GET_USERTOKEN_KEY = lambda userid:"T{}".format(userid) - -SESSION_CACHES = 0 -PREVIOUS_SESSION_CACHES = 0 -SESSION_CACHES = 0 -USER_CACHES = 0 if CACHE_SERVER or CACHE_SESSION_SERVER or CACHE_USER_SERVER: CACHES = {} @@ -527,21 +578,6 @@ def GET_CACHE_CONF(cacheid,server,options={},key_function=KEY_FUNCTION): if STAFF_CACHE_TIMEOUT <= 0: STAFF_CACHE_TIMEOUT = None -TRAFFIC_MONITOR_LEVEL = env('TRAFFIC_MONITOR_LEVEL',default=0) #0: disabled, 1:summary, 2: per domain -if not CACHE_SERVER or not CACHE_SERVER.lower().startswith('redis'): - TRAFFIC_MONITOR_LEVEL = 0 - -TRAFFIC_MONITOR_INTERVAL=env('TRAFFIC_MONITOR_INTERVAL',default=3600) -if TRAFFIC_MONITOR_INTERVAL and TRAFFIC_MONITOR_INTERVAL > 0: - if 86400 % TRAFFIC_MONITOR_INTERVAL > 0 : - #One day can't be divided by interval, invalid, reset it to one hour - TRAFFIC_MONITOR_INTERVAL = timedelta(seconds=3600) - else: - TRAFFIC_MONITOR_INTERVAL = timedelta(seconds=TRAFFIC_MONITOR_INTERVAL) -else: - TRAFFIC_MONITOR_INTERVAL = timedelta(seconds=3600) - - TEST_RUNNER=env("TEST_RUNNER","django.test.runner.DiscoverRunner") #enable auth2 cluster feature by setting AUTH2_CLUSTERID diff --git a/authome/testmonitoring.py b/authome/testmonitoring.py index 003858e..e7aab54 100644 --- a/authome/testmonitoring.py +++ b/authome/testmonitoring.py @@ -149,26 +149,26 @@ def merge_trafficdata(self,traffic_data): dresult = result["domains"] for data in traffic_data: - result["requests"] = result.get("requests",0) + data["requests"] - result["total_time"] = result.get("total_time",0) + data["total_time"] - if "min_time" not in result or result["min_time"] > data["min_time"]: - result["min_time"] = data["min_time"] - if "max_time" not in result or result["max_time"] < data["max_time"]: - result["max_time"] = data["max_time"] + result["requests"] = result.get("requests",0) + (data["requests"] or 0) + result["total_time"] = result.get("total_time",0) + (data["total_time"] or 0) + if "min_time" not in result or (data["min_time"] is not None and result["min_time"] > data["min_time"]): + result["min_time"] = data["min_time"] or 0 + if "max_time" not in result or (data["max_time"] and result["max_time"] 
< data["max_time"]): + result["max_time"] = data["max_time"] or 0 result["avg_time"] = result["total_time"] / result["requests"] if result["requests"] else 0 for k,v in (data["domains"] or {}).items(): if k not in dresult: dresult[k] = {} if isinstance(v,dict): - dresult[k]["requests"] = dresult[k].get("requests",0) + v["requests"] - dresult[k]["total_time"] = dresult[k].get("total_time",0) + v["total_time"] - if "min_time" not in dresult[k] or dresult[k]["min_time"] > v["min_time"]: - dresult[k]["min_time"] = v["min_time"] - if "max_time" not in dresult[k] or dresult[k]["max_time"] < v["max_time"]: - dresult[k]["max_time"] = v["max_time"] + dresult[k]["requests"] = dresult[k].get("requests",0) + (v["requests"] or 0) + dresult[k]["total_time"] = dresult[k].get("total_time",0) + (v["total_time"] or 0) + if "min_time" not in dresult[k] or (v["min_time"] and dresult[k]["min_time"] > v["min_time"]): + dresult[k]["min_time"] = v["min_time"] or 0 + if "max_time" not in dresult[k] or (v["max_time"] and dresult[k]["max_time"] < v["max_time"]): + dresult[k]["max_time"] = v["max_time"] or 0 dresult[k]["avg_time"] = dresult[k]["total_time"] / dresult[k]["requests"] if dresult[k]["requests"] else 0 else: - dresult[k]["requests"] = dresult[k].get("requests",0) + v + dresult[k]["requests"] = dresult[k].get("requests",0) + (v or 0) return result def test_monitoring(self): diff --git a/authome/trafficdata.py b/authome/trafficdata.py index f4a1fb5..17b2972 100644 --- a/authome/trafficdata.py +++ b/authome/trafficdata.py @@ -129,7 +129,7 @@ def _populate_reports(): if data.batchid == BATCHID_END: continue - if not data.requests and not data.get_remote_sessions and not data.delete_remote_sessions: + if not data.requests and not data.get_remote_sessions and not data.delete_remote_sessions and not data.db_requests and not data.redis_requests: #no requests continue @@ -228,7 +228,18 @@ def _populate_reports(): _add_avg(method_traffic_report.domains) else: method_traffic_report.domains = method_data.domains + method_traffic_report.changed = True + + if method_data.sso_method == "Redis": + traffic_reports[method_report_key[0]].redis_requests = method_traffic_report.requests + traffic_reports[method_report_key[0]].redis_avg_time = method_traffic_report.avg_time + traffic_reports[method_report_key[0]].changed = True + elif method_data.sso_method == "DB": + traffic_reports[method_report_key[0]].db_requests = method_traffic_report.requests + traffic_reports[method_report_key[0]].db_avg_time = method_traffic_report.avg_time + traffic_reports[method_report_key[0]].changed = True + if not traffic_reports : logger.info("No new traffic data and all traffic reports are latest.") diff --git a/authome/urls/__init__.py b/authome/urls/__init__.py index eed0be7..fbe3696 100644 --- a/authome/urls/__init__.py +++ b/authome/urls/__init__.py @@ -6,6 +6,7 @@ from .urls import urlpatterns,logger from .. import views from . import selfserviceurls +from .. 
import monitordb handler400 = views.handler400 diff --git a/authome/utils.py b/authome/utils.py index e25779d..8b899cf 100644 --- a/authome/utils.py +++ b/authome/utils.py @@ -12,6 +12,7 @@ import traceback import threading from datetime import timedelta,datetime +import redis from django.utils import timezone from django.contrib.auth import REDIRECT_FIELD_NAME @@ -477,14 +478,14 @@ def add_to_map(m,k,v): else: m[k] = v return m - +ping_db_sql = "select 1" def ping_database(dbalias): status = {} healthy = True starttime = timezone.localtime() with connections[dbalias].cursor() as cursor: try: - cursor.execute("select 1") + cursor.execute(ping_db_sql) v = cursor.fetchone()[0] if v != 1: healthy = False @@ -537,3 +538,35 @@ def create_secret_key(length=64): return get_random_string(length, string.digits + string.ascii_letters + "`~!@$%^&*()_+-={}|[]:;,./<>?") +def is_cluster(url): + ex = None + for redisurl in url.split(";"): + redisurl = redisurl.strip() + if not redisurl: + continue + client = None + try: + client = redis.Redis.from_url(redisurl) + data = client.info("cluster") + logger.debug("Redis server({}) is cluster {}".format(redisurl,"enabled" if data["cluster_enabled"] else "disabled")) + return True if data["cluster_enabled"] else False + except Exception as e: + if client: + client.close() + ex = e + if ex: + raise Exception("No available redis server.{}".format(str(ex))) + else: + raise Exception("No available redis server.") + +authome_module_prefix=None +def get_callstack(only_include_authome_module=True): + global authome_module_prefix + if not authome_module_prefix: + from django.conf import settings + authome_module_prefix = 'File "{}'.format(settings.BASE_DIR) + + if only_include_authome_module: + return "\n".join(line for line in traceback.format_stack()[:-1] if line.strip().startswith(authome_module_prefix)) + else: + return "\n".join(line for line in traceback.format_stack()[:-1]) diff --git a/authome/views/monitorviews.py b/authome/views/monitorviews.py index bca9d11..701d7f8 100644 --- a/authome/views/monitorviews.py +++ b/authome/views/monitorviews.py @@ -470,7 +470,7 @@ def _save_trafficdata(batchid): with transaction.atomic(): #save the data to db for data in traffic_datas.values(): - if not data.get("sso_requests",{}).get("requests") and not data.get("get_remote_session",{}).get("requests") and not data.get("delete_remote_session",{}).get("requests"): + if not data.get("sso_requests",{}).get("requests") and not data.get("get_remote_session",{}).get("requests") and not data.get("delete_remote_session",{}).get("requests") and not data.get("Redis",{}).get("requests") and not data.get("DB",{}).get("requests"): #no requests logger.debug("Ignore empty data") continue @@ -488,6 +488,10 @@ def _save_trafficdata(batchid): avg_time=data["sso_requests"].get("avgtime"), status=data["sso_requests"].get("status"), domains=data["sso_requests"].get("domains"), + redis_requests = data.get("Redis",{}).get("requests") or 0, + redis_avg_time = data.get("Redis",{}).get("avgtime") or 0, + db_requests = data.get("DB",{}).get("requests") or 0, + db_avg_time = data.get("DB",{}).get("avgtime") or 0, get_remote_sessions = data.get("get_remote_session",{}).get("requests") or 0, delete_remote_sessions = data.get("delete_remote_session",{}).get("requests") or 0 ) diff --git a/authome/views/views.py b/authome/views/views.py index e6ff2f8..c640739 100644 --- a/authome/views/views.py +++ b/authome/views/views.py @@ -32,7 +32,7 @@ import time from .. import models -from .. 
cache import cache, get_defaultcache +from ..cache import cache, get_defaultcache from .. import utils from .. import emails from ..exceptions import HttpResponseException,UserDoesNotExistException,PolicyNotConfiguredException diff --git a/healthcheck.sh b/healthcheck.sh index 979ec90..fa9b3a5 100755 --- a/healthcheck.sh +++ b/healthcheck.sh @@ -127,7 +127,7 @@ auth2processes=${#auth2pids[@]}; resourceusage="
Processes : ${auth2processes} , Total CPU : $(printf %6.2f%% ${totalcpuusage}) , Virutal Memory : $(printf %8.2fM ${totalvsusage}) , Memory : $(printf %8.2fM ${totalrsusage})<\/div>
    " for (( i=0; i<${auth2processes}; i++ )); do - resourceusage="${resourceusage}
  • CPU : $(printf %6.2f%% ${cpuusages[i]}) , Virtual Memory : $(printf %8.2fM ${vsusages[i]}) , Memory : $(printf %8.2fM ${rsusages[i]}) <\/li>" + resourceusage="${resourceusage}
  • PID : ${auth2pids[i]} , CPU : $(printf %6.2f%% ${cpuusages[i]}) , Virtual Memory : $(printf %8.2fM ${vsusages[i]}) , Memory : $(printf %8.2fM ${rsusages[i]}) <\/li>" done resourceusage="${resourceusage}<\/ul><\/div>" @@ -152,7 +152,7 @@ if [[ $(date '+%s') -ge ${nexttime} ]] ; then message="${message} " diff --git a/poetry.lock b/poetry.lock index 0ad7331..006d24c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -564,13 +564,13 @@ ipython = {version = ">=7.31.1", markers = "python_version >= \"3.11\""} [[package]] name = "ipython" -version = "8.28.0" +version = "8.29.0" description = "IPython: Productive Interactive Computing" optional = false python-versions = ">=3.10" files = [ - {file = "ipython-8.28.0-py3-none-any.whl", hash = "sha256:530ef1e7bb693724d3cdc37287c80b07ad9b25986c007a53aa1857272dac3f35"}, - {file = "ipython-8.28.0.tar.gz", hash = "sha256:0d0d15ca1e01faeb868ef56bc7ee5a0de5bd66885735682e8a322ae289a13d1a"}, + {file = "ipython-8.29.0-py3-none-any.whl", hash = "sha256:0188a1bd83267192123ccea7f4a8ed0a78910535dbaa3f37671dca76ebd429c8"}, + {file = "ipython-8.29.0.tar.gz", hash = "sha256:40b60e15b22591450eef73e40a027cf77bd652e757523eebc5bd7c7c498290eb"}, ] [package.dependencies] @@ -633,19 +633,19 @@ traitlets = "*" [[package]] name = "mock" -version = "4.0.3" +version = "5.1.0" description = "Rolling backport of unittest.mock for all Pythons" optional = false python-versions = ">=3.6" files = [ - {file = "mock-4.0.3-py3-none-any.whl", hash = "sha256:122fcb64ee37cfad5b3f48d7a7d51875d7031aaf3d8be7c42e2bee25044eee62"}, - {file = "mock-4.0.3.tar.gz", hash = "sha256:7d3fbbde18228f4ff2f1f119a45cdffa458b4c0dee32eb4d2bb2f82554bac7bc"}, + {file = "mock-5.1.0-py3-none-any.whl", hash = "sha256:18c694e5ae8a208cdb3d2c20a993ca1a7b0efa258c247a1e565150f477f83744"}, + {file = "mock-5.1.0.tar.gz", hash = "sha256:5e96aad5ccda4718e0a229ed94b2024df75cc2d55575ba5762d31f5767b8767d"}, ] [package.extras] build = ["blurb", "twine", "wheel"] docs = ["sphinx"] -test = ["pytest (<5.4)", "pytest-cov"] +test = ["pytest", "pytest-cov"] [[package]] name = "oauthlib" @@ -805,32 +805,33 @@ wcwidth = "*" [[package]] name = "psutil" -version = "6.0.0" +version = "6.1.0" description = "Cross-platform lib for process and system monitoring in Python." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ - {file = "psutil-6.0.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6"}, - {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0"}, - {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c"}, - {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3"}, - {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c"}, - {file = "psutil-6.0.0-cp27-none-win32.whl", hash = "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35"}, - {file = "psutil-6.0.0-cp27-none-win_amd64.whl", hash = "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1"}, - {file = "psutil-6.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132"}, - {file = "psutil-6.0.0-cp36-cp36m-win32.whl", hash = "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14"}, - {file = "psutil-6.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c"}, - {file = "psutil-6.0.0-cp37-abi3-win32.whl", hash = "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d"}, - {file = "psutil-6.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3"}, - {file = "psutil-6.0.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0"}, - {file = "psutil-6.0.0.tar.gz", hash = "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2"}, + {file = "psutil-6.1.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ff34df86226c0227c52f38b919213157588a678d049688eded74c76c8ba4a5d0"}, + {file = "psutil-6.1.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:c0e0c00aa18ca2d3b2b991643b799a15fc8f0563d2ebb6040f64ce8dc027b942"}, + {file = "psutil-6.1.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:000d1d1ebd634b4efb383f4034437384e44a6d455260aaee2eca1e9c1b55f047"}, + {file = "psutil-6.1.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:5cd2bcdc75b452ba2e10f0e8ecc0b57b827dd5d7aaffbc6821b2a9a242823a76"}, + {file = "psutil-6.1.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:045f00a43c737f960d273a83973b2511430d61f283a44c96bf13a6e829ba8fdc"}, + {file = "psutil-6.1.0-cp27-none-win32.whl", hash = "sha256:9118f27452b70bb1d9ab3198c1f626c2499384935aaf55388211ad982611407e"}, + {file = "psutil-6.1.0-cp27-none-win_amd64.whl", hash = 
"sha256:a8506f6119cff7015678e2bce904a4da21025cc70ad283a53b099e7620061d85"}, + {file = "psutil-6.1.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6e2dcd475ce8b80522e51d923d10c7871e45f20918e027ab682f94f1c6351688"}, + {file = "psutil-6.1.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0895b8414afafc526712c498bd9de2b063deaac4021a3b3c34566283464aff8e"}, + {file = "psutil-6.1.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dcbfce5d89f1d1f2546a2090f4fcf87c7f669d1d90aacb7d7582addece9fb38"}, + {file = "psutil-6.1.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:498c6979f9c6637ebc3a73b3f87f9eb1ec24e1ce53a7c5173b8508981614a90b"}, + {file = "psutil-6.1.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d905186d647b16755a800e7263d43df08b790d709d575105d419f8b6ef65423a"}, + {file = "psutil-6.1.0-cp36-cp36m-win32.whl", hash = "sha256:6d3fbbc8d23fcdcb500d2c9f94e07b1342df8ed71b948a2649b5cb060a7c94ca"}, + {file = "psutil-6.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:1209036fbd0421afde505a4879dee3b2fd7b1e14fee81c0069807adcbbcca747"}, + {file = "psutil-6.1.0-cp37-abi3-win32.whl", hash = "sha256:1ad45a1f5d0b608253b11508f80940985d1d0c8f6111b5cb637533a0e6ddc13e"}, + {file = "psutil-6.1.0-cp37-abi3-win_amd64.whl", hash = "sha256:a8fb3752b491d246034fa4d279ff076501588ce8cbcdbb62c32fd7a377d996be"}, + {file = "psutil-6.1.0.tar.gz", hash = "sha256:353815f59a7f64cdaca1c0307ee13558a0512f6db064e92fe833784f08539c7a"}, ] [package.extras] -test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] +dev = ["black", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest-cov", "requests", "rstcheck", "ruff", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "wheel"] +test = ["pytest", "pytest-xdist", "setuptools"] [[package]] name = "psycopg" @@ -1288,4 +1289,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = "~3.12" -content-hash = "a5a67f8d731dd2ea5da4fcd7d8f5ba8d066403fde7fd8188bd53f90bfe5423ff" +content-hash = "6a9275598f36c327ce63b5df49d0b1e94d1cbbfa28be72bc3b88d756ad814d8c" diff --git a/pyproject.toml b/pyproject.toml index 02a1859..973f954 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,15 +22,15 @@ qrcode = "7.4.2" pyotp = "2.9.0" Pillow = "10.3.0" PyJWT = "2.9.0" -psutil = "6.0.0" +psutil = "6.1.0" redis = "5.0.4" sentry-sdk = {version = "2.16.0", extras = ["django"]} [tool.poetry.dev-dependencies] -ipython = "^8.28.0" +ipython = "^8.29.0" coverage = "^5.3" coveralls = "^2.1.2" -mock = "^4.0.2" +mock = "^5.1.0" ipdb = "^0.13.4" [build-system] diff --git a/start_auth2 b/start_auth2 index 96838a7..92d1f6b 100755 --- a/start_auth2 +++ b/start_auth2 @@ -76,6 +76,7 @@ fi if [[ "$backend" =~ ^true$ ]] then + source .env.monitor if [[ "$envfile" == ".env.standalone" ]] then echo 'Running auth2 server in background' @@ -104,6 +105,7 @@ else else echo 'Running auth2 cluster in foreground' fi + source .env.monitor export SYNC_MODE=False poetry run python manage.py runserver 0.0.0.0:$PORT fi