diff --git a/authome/admin/admin.py b/authome/admin/admin.py index e20293f..26b41ce 100644 --- a/authome/admin/admin.py +++ b/authome/admin/admin.py @@ -236,6 +236,16 @@ def _usergroups(self,obj): return mark_safe("{} ({})".format(result,usergroupnames)) _usergroups.short_description = "User Groups" + def _session_timeout(self,obj): + if not obj : + return "-" + else: + usergroups = models.UserGroup.find_groups(obj.email,cacheable=False)[0] + return models.UserGroup.get_session_timeout(usergroups) or "-" + _session_timeout.short_description = "Session Timeout" + + + def _usergroupnames(self,obj): if not obj : return "" @@ -283,12 +293,12 @@ def delete_queryset(self, request, queryset): self.delete_model(request,o) class UserAdmin(UserAuthorizationCheckMixin,UserGroupsMixin,DatetimeMixin,CatchModelExceptionMixin,auth.admin.UserAdmin): - list_display = ('username', 'email', 'first_name', 'last_name','is_active', 'is_staff','last_idp','_last_login') + list_display = ('username', 'email', 'first_name', 'last_name','is_active', 'is_staff','_session_timeout','last_idp','_last_login') list_filter = ( 'is_superuser',) - readonly_fields = ("_last_login","_date_joined","username","first_name","last_name","is_staff","is_superuser","_email","_usergroups","last_idp","_modified") + readonly_fields = ("_last_login","_date_joined","username","first_name","last_name","is_staff","is_superuser","_email","_usergroups","_session_timeout","last_idp","_modified") fieldsets = ( (None, {'fields': ('_email', )}), - ('Personal info', {'fields': ('username','first_name', 'last_name')}), + ('Personal info', {'fields': ('username','first_name', 'last_name',"_session_timeout")}), ('Permissions', { 'fields': ('is_active', 'is_staff', 'is_superuser',"_usergroups" ), }), diff --git a/authome/admin/monitoradmin.py b/authome/admin/monitoradmin.py index d48b2bf..a37c2d2 100644 --- a/authome/admin/monitoradmin.py +++ b/authome/admin/monitoradmin.py @@ -9,6 +9,7 @@ from . import admin from .. 
import models +from ..cache import cache logger = logging.getLogger(__name__) @@ -48,6 +49,20 @@ def _avg_time(self,obj): return round(obj.avg_time,2) _avg_time.short_description = "Avg Time" + def _redis_avg_time(self,obj): + if not obj or not obj.redis_avg_time: + return "" + else: + return round(obj.redis_avg_time,2) + _redis_avg_time.short_description = "Redis Avg Time" + + def _db_avg_time(self,obj): + if not obj or not obj.db_avg_time: + return "" + else: + return round(obj.db_avg_time,2) + _db_avg_time.short_description = "DB Avg Time" + def _domains(self,obj): if not obj or not obj.domains: return "" @@ -85,14 +100,62 @@ class SSOMethodTrafficDataInline(TrafficDataPropertyMixin,djangoadmin.TabularInl readonly_fields = ("sso_method","requests","_total_time","_min_time","_max_time","_avg_time","_status","_domains") fields = readonly_fields + def _domains(self,obj): + if not obj or not obj.domains: + return "" + else: + datas = [(k,v) for k,v in obj.domains.items()] + datas.sort(key=lambda o:((o[1].get("requests") or 0) * -1,o[0]) if isinstance(o[1],dict) else (o[1] * -1,o[0])) + return mark_safe("
{}".format("\r\n".join(" {} : {}".format(o[0],json.dumps(o[1],sort_keys=True,indent=4) if isinstance(o[1],dict) else o[1]) for o in datas))) + _domains.short_description = "Groups" + +if settings.REDIS_TRAFFIC_MONITOR_LEVEL > 0 and settings.DB_TRAFFIC_MONITOR_LEVEL > 0: + list_display_4_cluster = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions") + list_display = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time") + fields_4_cluster = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains","_batchid") + fields = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time","_status","_domains","_batchid") +elif settings.REDIS_TRAFFIC_MONITOR_LEVEL > 0: + list_display_4_cluster = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","get_remote_sessions","delete_remote_sessions") + list_display = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time") + fields_4_cluster = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains","_batchid") + fields = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","_status","_domains","_batchid") +elif settings.DB_TRAFFIC_MONITOR_LEVEL > 0: + list_display_4_cluster = 
("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions") + list_display = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time") + fields_4_cluster = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains","_batchid") + fields = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time","_status","_domains","_batchid") +else: + list_display_4_cluster = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions") + list_display = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time") + fields_4_cluster = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains","_batchid") + fields = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","_status","_domains","_batchid") + class TrafficDataAdmin(TrafficDataPropertyMixin,admin.DatetimeMixin,djangoadmin.ModelAdmin): - list_display = ("_start_time","_cluster","_servers","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions") - readonly_fields = ("_cluster","_start_time","_end_time","_serverlist","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains","_batchid") - fields = readonly_fields ordering = ("-start_time","clusterid") list_filter = ['clusterid'] inlines = [SSOMethodTrafficDataInline] + @property + def list_display(self): + if settings.AUTH2_CLUSTER_ENABLED and cache.auth2_clusters: + return 
list_display_4_cluster + else: + return list_display + + @property + def readonly_fields(self): + if settings.AUTH2_CLUSTER_ENABLED and cache.auth2_clusters: + return fields_4_cluster + else: + return fields + + @property + def fields(self): + if settings.AUTH2_CLUSTER_ENABLED and cache.auth2_clusters: + return fields_4_cluster + else: + return fields + def _subreports(self,obj): if not obj: return "" @@ -143,10 +206,41 @@ class SSOMethodTrafficReportInline(TrafficDataPropertyMixin,djangoadmin.TabularI readonly_fields = ("sso_method","requests","_total_time","_min_time","_max_time","_avg_time","_status","_domains") fields = readonly_fields + def _domains(self,obj): + if not obj or not obj.domains: + return "" + else: + datas = [(k,v) for k,v in obj.domains.items()] + datas.sort(key=lambda o:((o[1].get("requests") or 0) * -1,o[0]) if isinstance(o[1],dict) else (o[1] * -1,o[0])) + return mark_safe("
{}".format("\r\n".join(" {} : {}".format(o[0],json.dumps(o[1],sort_keys=True,indent=4) if isinstance(o[1],dict) else o[1]) for o in datas))) + _domains.short_description = "Groups" + +if settings.REDIS_TRAFFIC_MONITOR_LEVEL > 0 and settings.DB_TRAFFIC_MONITOR_LEVEL > 0: + report_list_display_4_cluster = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions","_subreports") + report_list_display = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time","_subreports") + + report_fields_4_cluster = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains") + report_fields = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","db_requests","_db_avg_time","_status","_domains") +elif settings.REDIS_TRAFFIC_MONITOR_LEVEL > 0: + report_list_display_4_cluster = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","get_remote_sessions","delete_remote_sessions","_subreports") + report_list_display = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","_subreports") + + report_fields_4_cluster = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains") + report_fields = 
("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","redis_requests","_redis_avg_time","_status","_domains") +elif settings.DB_TRAFFIC_MONITOR_LEVEL > 0: + report_list_display_4_cluster = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions","_subreports") + report_list_display = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time","_subreports") + + report_fields_4_cluster = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains") + report_fields = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","db_requests","_db_avg_time","_status","_domains") +else: + report_list_display_4_cluster = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions","_subreports") + report_list_display = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","_subreports") + + report_fields_4_cluster = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains") + report_fields = ("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","_status","_domains") + class TrafficReportAdmin(TrafficDataPropertyMixin,admin.DatetimeMixin,djangoadmin.ModelAdmin): - list_display = ("_report_type","_start_time","_cluster","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions","_subreports") - readonly_fields = 
("_cluster","_report_type","_start_time","_end_time","requests","_min_time","_max_time","_avg_time","get_remote_sessions","delete_remote_sessions","_status","_domains") - fields = readonly_fields ordering = ("report_type","-start_time",'clusterid') list_filter = ['clusterid',"report_type"] inlines = [SSOMethodTrafficReportInline] @@ -154,6 +248,27 @@ class TrafficReportAdmin(TrafficDataPropertyMixin,admin.DatetimeMixin,djangoadmi traffic_data_list_url_name = 'admin:{}_{}_changelist'.format(models.TrafficData._meta.app_label,models.TrafficData._meta.model_name) traffic_report_list_url_name = 'admin:{}_{}_changelist'.format(models.TrafficReport._meta.app_label,models.TrafficReport._meta.model_name) + @property + def list_display(self): + if settings.AUTH2_CLUSTER_ENABLED and cache.auth2_clusters: + return report_list_display_4_cluster + else: + return report_list_display + + @property + def readonly_fields(self): + if settings.AUTH2_CLUSTER_ENABLED and cache.auth2_clusters: + return report_fields_4_cluster + else: + return report_fields + + @property + def fields(self): + if settings.AUTH2_CLUSTER_ENABLED and cache.auth2_clusters: + return report_fields_4_cluster + else: + return report_fields + def _subreports(self,obj): if not obj: return "" diff --git a/authome/cache/cache.py b/authome/cache/cache.py index 74a2519..004be11 100644 --- a/authome/cache/cache.py +++ b/authome/cache/cache.py @@ -26,7 +26,6 @@ else: get_defaultcache = lambda :None - defaultcache = get_defaultcache() class TaskRunable(object): @@ -211,7 +210,6 @@ def __init__(self): @property def usergrouptree(self): if not self._usergrouptree: - logger.error("The usergrouptree cache is Empty, Try to refresh the data to bring the cache back to normal state") self.refresh_usergroups() return self._usergrouptree @@ -219,7 +217,6 @@ def usergrouptree(self): @property def usergroups(self): if not self._usergroups: - logger.error("The usergroups cache is Empty, Try to refresh the data to bring the cache back 
to normal state") self.refresh_usergroups() return self._usergroups @@ -244,7 +241,6 @@ def usergrouptree(self,value): @property def usergroupauthorization(self): if not self._usergroupauthorization: - logger.error("The usergroupauthorization cache is Empty, Try to refresh the data to bring the cache back to normal state") self.refresh_usergroupauthorization() return self._usergroupauthorization @@ -700,7 +696,14 @@ def healthy(self): return (False,msgs) if msgs else (True,["ok"]) if settings.TRAFFIC_MONITOR_LEVEL > 0: - class _BaseMemoryCacheWithTrafficMonitor(_BaseMemoryCache): + def _clean_traffic_data(data): + for key in data.keys(): + if isinstance(data[key],dict): + _clean_traffic_data(data[key]) + else: + data[key] = 0 + + class _MemoryCacheWithTrafficMonitor(_BaseMemoryCache): def __init__(self): super().__init__() self._traffic_data = None @@ -731,102 +734,90 @@ def _save_traffic_data(self,start): if self._traffic_data : self._traffic_data["starttime"] = data_starttime self._traffic_data["endtime"] = data_endtime + traffic_data = json.dumps(self._traffic_data) + + for data in self._traffic_data.values(): + if not isinstance(data,dict): + continue + _clean_traffic_data(data) + try: - length = self._client.rpush(self.traffic_data_key,json.dumps(self._traffic_data)) + length = self._client.rpush(self.traffic_data_key,traffic_data) except: from authome.models import DebugLog DebugLog.warning(DebugLog.ERROR,None,None,None,None,"Failed to save the traffic data to cache.{}".format(traceback.format_exc())) pass - if settings.TRAFFIC_MONITOR_LEVEL == 1: - class _MemoryCacheWithTrafficMonitor(_BaseMemoryCacheWithTrafficMonitor): - def __init__(self): - super().__init__() - logger.debug("Traffic monitor level 1 is enabled") - - def log_request(self,name,host,start,status_code): - if start >= self._traffic_data_next_ts: - self._save_traffic_data(start) - if self._traffic_data: - for data in self._traffic_data.values(): - if not isinstance(data,dict): - continue - for 
key in data.keys(): - if key == "status": - for domain in data[key].keys(): - data[key][domain] = 0 - else: - data[key] = 0 - - ptime = round((timezone.localtime() - start).total_seconds() * 1000,2) - try: - data = self._traffic_data[name] - except KeyError as ex: - # name not in _traffic_data - self._traffic_data[name] = { + def _log_request_1(self,name,group,start,status_code,groupname="domains"): + if start >= self._traffic_data_next_ts: + self._save_traffic_data(start) + + ptime = round((timezone.localtime() - start).total_seconds() * 1000,2) + try: + data = self._traffic_data[name] + except KeyError as ex: + # name not in _traffic_data + self._traffic_data[name] = { + "requests":1, + "totaltime":ptime, + "mintime":ptime, + "maxtime":ptime, + "status":{ + status_code:1 + } + } + return ptime + except: + #_traffic_data is None + self._traffic_data= { + "serverid":utils.get_processid(), + name: { "requests":1, "totaltime":ptime, "mintime":ptime, "maxtime":ptime, "status":{ - status_code:1 + status_code:1 } } - return ptime - except: - #_traffic_data is None - self._traffic_data= { - "serverid":utils.get_processid(), - name: { - "requests":1, - "totaltime":ptime, - "mintime":ptime, - "maxtime":ptime, - "status":{ - status_code:1 - } - } + } + return ptime + data["requests"] += 1 + data["totaltime"] += ptime + if not data["mintime"] or data["mintime"] > ptime: + data["mintime"] = ptime + if data["maxtime"] < ptime: + data["maxtime"] = ptime + data["status"][status_code] = data["status"].get(status_code,0) + 1 + return ptime + + def _log_request_2(self,name,group,start,status_code,groupname="domains"): + if start >= self._traffic_data_next_ts: + self._save_traffic_data(start) + + ptime = round((timezone.localtime() - start).total_seconds() * 1000,2) + try: + data = self._traffic_data[name] + except KeyError as ex: + # name not in _traffic_data + self._traffic_data[name] = { + "requests":1, + "totaltime":ptime, + "mintime":ptime, + "maxtime":ptime, + "status":{ + 
status_code:1 + }, + groupname: { + group : 1 } - return ptime - data["requests"] += 1 - data["totaltime"] += ptime - if not data["mintime"] or data["mintime"] > ptime: - data["mintime"] = ptime - if data["maxtime"] < ptime: - data["maxtime"] = ptime - data["status"][status_code] = data["status"].get(status_code,0) + 1 + } return ptime - elif settings.TRAFFIC_MONITOR_LEVEL == 2: - class _MemoryCacheWithTrafficMonitor(_BaseMemoryCacheWithTrafficMonitor): - def __init__(self): - super().__init__() - logger.debug("Traffic monitor level 2 is enabled") - - def log_request(self,name,host,start,status_code): - if start >= self._traffic_data_next_ts: - self._save_traffic_data(start) - if self._traffic_data: - for data in self._traffic_data.values(): - if not isinstance(data,dict): - continue - for key in data.keys(): - if key == "domains": - for domain in [d for d,v in data[key].items() if v == 0]: - del data[key][domain] - for domain in data[key].keys(): - data[key][domain] = 0 - elif key in ("status",): - for domain in data[key].keys(): - data[key][domain] = 0 - else: - data[key] = 0 - - ptime = round((timezone.localtime() - start).total_seconds() * 1000,2) - try: - data = self._traffic_data[name] - except KeyError as ex: - # name not in _traffic_data - self._traffic_data[name] = { + except: + #_traffic_data is None + self._traffic_data = { + "serverid":utils.get_processid(), + name: { "requests":1, "totaltime":ptime, "mintime":ptime, @@ -834,75 +825,57 @@ def log_request(self,name,host,start,status_code): "status":{ status_code:1 }, - "domains": { - host : 1 + groupname: { + group : 1 } } - return ptime - except: - #_traffic_data is None - self._traffic_data = { - "serverid":utils.get_processid(), - name: { + } + return ptime + data["requests"] += 1 + data["totaltime"] += ptime + if not data["mintime"] or data["mintime"] > ptime: + data["mintime"] = ptime + if data["maxtime"] < ptime: + data["maxtime"] = ptime + data["status"][status_code] = 
data["status"].get(status_code,0) + 1 + data[groupname][group] = data[groupname].get(group,0) + 1 + return ptime + + def _log_request_3(self,name,group,start,status_code,groupname="domains"): + if start >= self._traffic_data_next_ts: + self._save_traffic_data(start) + + ptime = round((timezone.localtime() - start).total_seconds() * 1000,2) + try: + data = self._traffic_data[name] + except KeyError as ex: + # name not in _traffic_data + self._traffic_data[name] = { + "requests":1, + "totaltime":ptime, + "mintime":ptime, + "maxtime":ptime, + "status":{ + status_code:1 + }, + groupname: { + group : { "requests":1, "totaltime":ptime, "mintime":ptime, "maxtime":ptime, "status":{ status_code:1 - }, - "domains": { - host : 1 } } } - return ptime - data["requests"] += 1 - data["totaltime"] += ptime - if not data["mintime"] or data["mintime"] > ptime: - data["mintime"] = ptime - if data["maxtime"] < ptime: - data["maxtime"] = ptime - data["status"][status_code] = data["status"].get(status_code,0) + 1 - data["domains"][host] = data["domains"].get(host,0) + 1 + } return ptime - else: - class _MemoryCacheWithTrafficMonitor(_BaseMemoryCacheWithTrafficMonitor): - def __init__(self): - super().__init__() - logger.debug("Traffic monitor level 3 is enabled") - - def log_request(self,name,host,start,status_code): - if start >= self._traffic_data_next_ts: - self._save_traffic_data(start) - if self._traffic_data: - for data in self._traffic_data.values(): - if not isinstance(data,dict): - continue - for key in data.keys(): - if key == "status": - for k in data[key].keys(): - data[key][k] = 0 - elif key == "domains": - for domain in [d for d,v in data[key].items() if v["requests"] == 0]: - del data[key][domain] - for domain_data in data[key].values(): - for k in domain_data.keys(): - if k == "status": - for k1 in domain_data[k].keys(): - domain_data[k][k1] = 0 - else: - domain_data[k] = 0 - else: - data[key] = 0 - - - ptime = round((timezone.localtime() - start).total_seconds() * 
1000,2) - try: - data = self._traffic_data[name] - except KeyError as ex: - # name not in _traffic_data - self._traffic_data[name] = { + except: + # _traffic_data is None + self._traffic_data = { + "serverid":utils.get_processid(), + name: { "requests":1, "totaltime":ptime, "mintime":ptime, @@ -910,8 +883,8 @@ def log_request(self,name,host,start,status_code): "status":{ status_code:1 }, - "domains": { - host : { + groupname: { + group : { "requests":1, "totaltime":ptime, "mintime":ptime, @@ -922,64 +895,68 @@ def log_request(self,name,host,start,status_code): } } } - return ptime - except: - # _traffic_data is None - self._traffic_data = { - "serverid":utils.get_processid(), - name: { - "requests":1, - "totaltime":ptime, - "mintime":ptime, - "maxtime":ptime, - "status":{ - status_code:1 - }, - "domains": { - host : { - "requests":1, - "totaltime":ptime, - "mintime":ptime, - "maxtime":ptime, - "status":{ - status_code:1 - } - } - } - } - } - return ptime - - data["requests"] += 1 - data["totaltime"] += ptime - if not data["mintime"] or data["mintime"] > ptime: - data["mintime"] = ptime - if data["maxtime"] < ptime: - data["maxtime"] = ptime - data["status"][status_code] = data["status"].get(status_code,0) + 1 - - try: - domain_data = data["domains"][host] - domain_data["requests"] += 1 - domain_data["totaltime"] += ptime - if not domain_data["mintime"] or domain_data["mintime"] > ptime: - domain_data["mintime"] = ptime - if domain_data["maxtime"] < ptime: - domain_data["maxtime"] = ptime - domain_data["status"][status_code] = domain_data["status"].get(status_code,0) + 1 - except: - domain_data = { - "requests":1, - "totaltime":ptime, - "mintime":ptime, - "maxtime":ptime, - "status":{ - status_code:1 - } - } - data["domains"][host] = domain_data - + } return ptime + + data["requests"] += 1 + data["totaltime"] += ptime + if not data["mintime"] or data["mintime"] > ptime: + data["mintime"] = ptime + if data["maxtime"] < ptime: + data["maxtime"] = ptime + 
data["status"][status_code] = data["status"].get(status_code,0) + 1 + try: + group_data = data[groupname][group] + group_data["requests"] += 1 + group_data["totaltime"] += ptime + if not group_data["mintime"] or group_data["mintime"] > ptime: + group_data["mintime"] = ptime + if group_data["maxtime"] < ptime: + group_data["maxtime"] = ptime + group_data["status"][status_code] = group_data["status"].get(status_code,0) + 1 + except: + group_data = { + "requests":1, + "totaltime":ptime, + "mintime":ptime, + "maxtime":ptime, + "status":{ + status_code:1 + } + } + data[groupname][group] = group_data + + return ptime + + if settings.TRAFFIC_MONITOR_LEVEL == 1: + logger.debug("Traffic monitor level 1 is enabled") + _MemoryCacheWithTrafficMonitor.log_request = _MemoryCacheWithTrafficMonitor._log_request_1 + elif settings.TRAFFIC_MONITOR_LEVEL == 2: + logger.debug("Traffic monitor level 2 is enabled") + _MemoryCacheWithTrafficMonitor.log_request = _MemoryCacheWithTrafficMonitor._log_request_2 + else: + logger.debug("Traffic monitor level 3 is enabled") + _MemoryCacheWithTrafficMonitor.log_request = _MemoryCacheWithTrafficMonitor._log_request_3 + + if settings.REDIS_TRAFFIC_MONITOR_LEVEL == 1: + logger.debug("Reids traffic monitor level 1 is enabled") + _MemoryCacheWithTrafficMonitor.log_redisrequest = _MemoryCacheWithTrafficMonitor._log_request_1 + elif settings.REDIS_TRAFFIC_MONITOR_LEVEL == 2: + logger.debug("Reids traffic monitor level 2 is enabled") + _MemoryCacheWithTrafficMonitor.log_redisrequest = _MemoryCacheWithTrafficMonitor._log_request_2 + elif settings.REDIS_TRAFFIC_MONITOR_LEVEL > 0: + logger.debug("Reids traffic monitor level 3 is enabled") + _MemoryCacheWithTrafficMonitor.log_redisrequest = _MemoryCacheWithTrafficMonitor._log_request_3 + + if settings.DB_TRAFFIC_MONITOR_LEVEL == 1: + logger.debug("DB traffic monitor level 1 is enabled") + _MemoryCacheWithTrafficMonitor.log_dbrequest = _MemoryCacheWithTrafficMonitor._log_request_1 + elif 
settings.DB_TRAFFIC_MONITOR_LEVEL == 2: + logger.debug("DB traffic monitor level 2 is enabled") + _MemoryCacheWithTrafficMonitor.log_dbrequest = _MemoryCacheWithTrafficMonitor._log_request_2 + elif settings.DB_TRAFFIC_MONITOR_LEVEL > 0: + logger.debug("DB traffic monitor level 3 is enabled") + _MemoryCacheWithTrafficMonitor.log_dbrequest = _MemoryCacheWithTrafficMonitor._log_request_3 if True or settings.SYNC_MODE: class MemoryCache(_MemoryCacheWithTrafficMonitor): @@ -1001,3 +978,4 @@ def log_request(self,name,host,start,status_code): else: class MemoryCache(_BaseMemoryCache): pass + diff --git a/authome/cache/clustercache.py b/authome/cache/clustercache.py index 22daabc..cf26734 100644 --- a/authome/cache/clustercache.py +++ b/authome/cache/clustercache.py @@ -68,24 +68,24 @@ def default_auth2_cluster(self): """ In cluster environment, default cluster can be null, but current auth2 cluster can't be null """ - if not self._current_auth2_cluster: + if self._current_auth2_cluster is None: self.refresh_auth2_clusters(True) return self._default_auth2_cluster @property def current_auth2_cluster(self): - if not self._current_auth2_cluster: + if self._current_auth2_cluster is None: self.refresh_auth2_clusters(True) return self._current_auth2_cluster @property def auth2_clusters(self): - if not self._auth2_clusters: + if self._current_auth2_cluster is None: self.refresh_auth2_clusters(True) return self._auth2_clusters def refresh_auth2_clusters(self,force=False): - if self._auth2_clusters_check_time.can_run() or force or not self._auth2_clusters: + if self._auth2_clusters_check_time.can_run() or force or self._current_auth2_cluster is None: from ..models import Auth2Cluster refreshtime = timezone.localtime() l1 = len(self._auth2_clusters) @@ -139,7 +139,7 @@ def _send_request_to_other_clusters(self,request,f_send_req,force_refresh=False) DebugLog.warning(DebugLog.INTERCONNECTION_TIMEOUT,None,o.clusterid,None,None,message="Accessing auth2 cluster({1}) times out({0} 
seconds),{2}".format(settings.AUTH2_INTERCONNECTION_TIMEOUT,o.clusterid,str(ex)),request=request) failed_clusters = utils.add_to_list(failed_clusters,(o,ex)) - if not force_refresh and (retry_clusters or not self._auth2_clusters): + if not force_refresh and (retry_clusters or self._current_auth2_cluster is None): #some clusters failed self.refresh_auth2_clusters(True) for o,ex in retry_clusters: @@ -500,7 +500,7 @@ def status(self): @property def healthy(self): health,msgs = super().healthy - if not self._auth2_clusters and not self._default_auth2_cluster: + if not self._auth2_clusters and not self._current_auth2_cluster: if health: health = False msgs = ["Auth2 cluster cache is empty"] diff --git a/authome/migrations/0047_alter_auth2cluster_options_and_more.py b/authome/migrations/0047_alter_auth2cluster_options_and_more.py new file mode 100644 index 0000000..ba47048 --- /dev/null +++ b/authome/migrations/0047_alter_auth2cluster_options_and_more.py @@ -0,0 +1,117 @@ +# Generated by Django 4.2.16 on 2024-10-28 07:00 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('authome', '0046_alter_user_comments'), + ] + + operations = [ + migrations.AlterModelOptions( + name='auth2cluster', + options={'verbose_name_plural': ' Auth2 Clusters'}, + ), + migrations.AlterModelOptions( + name='customizableuserflow', + options={'verbose_name_plural': ' Customizable Userflows'}, + ), + migrations.AlterModelOptions( + name='debuglog', + options={'verbose_name_plural': ' Auth2 Logs'}, + ), + migrations.AlterModelOptions( + name='identityprovider', + options={'verbose_name_plural': ' Identity Providers'}, + ), + migrations.AlterModelOptions( + name='normaluser', + options={'verbose_name': 'User', 'verbose_name_plural': ' Users'}, + ), + migrations.AlterModelOptions( + name='normalusertoken', + options={'verbose_name': 'System User', 'verbose_name_plural': ' User Tokens'}, + ), + migrations.AlterModelOptions( + 
name='systemuser', + options={'verbose_name': 'System User', 'verbose_name_plural': ' System Users'}, + ), + migrations.AlterModelOptions( + name='systemusertoken', + options={'verbose_name': 'System User', 'verbose_name_plural': ' System User Tokens'}, + ), + migrations.AlterModelOptions( + name='trafficdata', + options={'verbose_name_plural': ' Traffic Data'}, + ), + migrations.AlterModelOptions( + name='trafficreport', + options={'verbose_name_plural': ' Traffic Report'}, + ), + migrations.AlterModelOptions( + name='user', + options={'verbose_name': 'user', 'verbose_name_plural': ' Users'}, + ), + migrations.AlterModelOptions( + name='userauthorization', + options={'verbose_name_plural': ' User Authorizations'}, + ), + migrations.AlterModelOptions( + name='usergroup', + options={'verbose_name_plural': ' User Groups'}, + ), + migrations.AlterModelOptions( + name='usergroupauthorization', + options={'verbose_name_plural': ' User Group Authorizations'}, + ), + migrations.AlterModelOptions( + name='usertoken', + options={'verbose_name_plural': ' Access Tokens'}, + ), + migrations.AlterModelOptions( + name='usertotp', + options={'verbose_name_plural': ' User TOTPs'}, + ), + migrations.AddField( + model_name='trafficdata', + name='db_avg_time', + field=models.FloatField(editable=False, null=True), + ), + migrations.AddField( + model_name='trafficdata', + name='db_requests', + field=models.PositiveIntegerField(default=0, editable=False), + ), + migrations.AddField( + model_name='trafficdata', + name='redis_avg_time', + field=models.FloatField(editable=False, null=True), + ), + migrations.AddField( + model_name='trafficdata', + name='redis_requests', + field=models.PositiveIntegerField(default=0, editable=False), + ), + migrations.AddField( + model_name='trafficreport', + name='db_avg_time', + field=models.FloatField(editable=False, null=True), + ), + migrations.AddField( + model_name='trafficreport', + name='db_requests', + field=models.PositiveIntegerField(default=0, 
editable=False), + ), + migrations.AddField( + model_name='trafficreport', + name='redis_avg_time', + field=models.FloatField(editable=False, null=True), + ), + migrations.AddField( + model_name='trafficreport', + name='redis_requests', + field=models.PositiveIntegerField(default=0, editable=False), + ), + ] diff --git a/authome/models/models.py b/authome/models/models.py index 27502e1..406afe4 100644 --- a/authome/models/models.py +++ b/authome/models/models.py @@ -1665,6 +1665,11 @@ def refresh_cache(cls): refreshtime = timezone.localtime() for authorization in UserGroupAuthorization.objects.all().order_by("usergroup","sortkey"): size += 1 + #try to get the data from cache to avoid an extra db access + try: + authorization.usergroup = cache.usergroups[authorization.usergroup_id] + except: + pass if not previous_usergroup: usergroupauthorization[authorization.usergroup] = [authorization] diff --git a/authome/models/trafficmodels.py b/authome/models/trafficmodels.py index f7de90b..1c4f141 100644 --- a/authome/models/trafficmodels.py +++ b/authome/models/trafficmodels.py @@ -30,6 +30,10 @@ class TrafficData(models.Model): min_time = models.FloatField(null=True,editable=False) max_time = models.FloatField(null=True,editable=False) avg_time = models.FloatField(null=True,editable=False) + redis_requests = models.PositiveIntegerField(default=0,editable=False) + redis_avg_time = models.FloatField(null=True,editable=False) + db_requests = models.PositiveIntegerField(default=0,editable=False) + db_avg_time = models.FloatField(null=True,editable=False) get_remote_sessions = models.PositiveIntegerField(default=0,editable=False) delete_remote_sessions = models.PositiveIntegerField(default=0,editable=False) status = models.JSONField(null=True,editable=False) @@ -116,6 +120,10 @@ class TrafficReport(models.Model): max_time = models.FloatField(null=True,editable=False) avg_time = models.FloatField(null=True,editable=False) status = models.JSONField(null=True,editable=False) + 
redis_requests = models.PositiveIntegerField(default=0,editable=False) + redis_avg_time = models.FloatField(null=True,editable=False) + db_requests = models.PositiveIntegerField(default=0,editable=False) + db_avg_time = models.FloatField(null=True,editable=False) get_remote_sessions = models.PositiveIntegerField(default=0,editable=False) delete_remote_sessions = models.PositiveIntegerField(default=0,editable=False) domains = models.JSONField(null=True,editable=False) diff --git a/authome/monitordb.py b/authome/monitordb.py new file mode 100644 index 0000000..d51dcf6 --- /dev/null +++ b/authome/monitordb.py @@ -0,0 +1,80 @@ +import re +import logging + +from django.db import connections +from django.utils import timezone +from django.conf import settings +from django.db.backends.signals import connection_created + +from . import utils + +logger = logging.getLogger(__name__) + +if settings.DB_TRAFFIC_MONITOR_LEVEL > 0: + _cache = None + select_re = re.compile('select\\s.+\\sfrom\\s+("?[a-zA-Z0-9\\-_]+"?\\.)?"?(?P