From 592fdf97a3f4bc8d8fab93a24405ec7773723717 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 31 Oct 2024 08:30:33 +0000 Subject: [PATCH 1/9] Update repo structure --- PROJECT_STRUCTURE.md | 9 +++++++-- repo_structure.txt | 9 +++++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/PROJECT_STRUCTURE.md b/PROJECT_STRUCTURE.md index 23b5d82..d672dd6 100644 --- a/PROJECT_STRUCTURE.md +++ b/PROJECT_STRUCTURE.md @@ -225,19 +225,24 @@ │ ├── SQLFILE.sql │ ├── __pycache__/ │ │ ├── audio.cpython-311.pyc +│ │ ├── audio.cpython-312.pyc │ │ ├── detection.cpython-311.pyc -│ │ └── head_pose.cpython-311.pyc +│ │ ├── head_pose.cpython-311.pyc +│ │ └── head_pose.cpython-312.pyc │ ├── audio.py │ ├── detection.py │ ├── face-rec.py │ ├── graph.py │ ├── head_pose.py │ ├── logic.xlsx +│ ├── object_detection.py │ ├── peer_comparison_tool.py │ ├── processes.py │ ├── pyaudio_test.py │ ├── run.py -│ └── screen_recorder.py +│ ├── screen_recorder.py +│ ├── test-image.jpg +│ └── test-image2.jpg ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── GSSoC-Ext.png diff --git a/repo_structure.txt b/repo_structure.txt index 3864e1e..871b4aa 100644 --- a/repo_structure.txt +++ b/repo_structure.txt @@ -221,19 +221,24 @@ │ ├── SQLFILE.sql │ ├── __pycache__/ │ │ ├── audio.cpython-311.pyc +│ │ ├── audio.cpython-312.pyc │ │ ├── detection.cpython-311.pyc -│ │ └── head_pose.cpython-311.pyc +│ │ ├── head_pose.cpython-311.pyc +│ │ └── head_pose.cpython-312.pyc │ ├── audio.py │ ├── detection.py │ ├── face-rec.py │ ├── graph.py │ ├── head_pose.py │ ├── logic.xlsx +│ ├── object_detection.py │ ├── peer_comparison_tool.py │ ├── processes.py │ ├── pyaudio_test.py │ ├── run.py -│ └── screen_recorder.py +│ ├── screen_recorder.py +│ ├── test-image.jpg +│ └── test-image2.jpg ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── GSSoC-Ext.png From 0438ccaa2a2b4ba982c310b11d8e52afb322502e Mon Sep 17 00:00:00 2001 From: MANI Date: Thu, 31 Oct 2024 14:30:10 +0530 Subject: [PATCH 2/9] added the whole frontend --- frontend/index.html | 1003 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1003 insertions(+) create mode 100644 frontend/index.html diff --git a/frontend/index.html b/frontend/index.html new file mode 100644 index 0000000..8e60c9b --- /dev/null +++ b/frontend/index.html @@ -0,0 +1,1003 @@ + + + + + + SPROCTOR - Smart Proctoring System + + + + + + +
[Body of frontend/index.html (1,003 added lines). The HTML markup did not survive extraction; only the page's visible text is recoverable. The page is a "SPROCTOR - Smart Proctoring System" dashboard with four numbered panels: (1) Video Feed, with a "Live" badge over the camera feed and counters for 24 Active Students and 3 Active Exams; (2) Image Processing, showing Face Detection 98% and Eye Tracking 95%, monitoring for head movement, eye direction and multiple faces; (3) Suspicious Detection; (4) Results, showing Avg. Suspicious 12% and 5 Flagged. Below the panels is an "Active Examination Sessions" section with summary cards (24 Total Students, 18 Active Now, 3 High Risk, 6 Completed) and a table with columns Student ID, Name, Status, Duration, Suspicious Level, and Actions.]
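The dashboard markup above is entirely static (all counts and scores are hard-coded). Backend/proctor_api.py, which enters the repository tree in PATCH 8/9 and, judging by the jsonify/errorhandler lines touched in PATCH 9/9, is a Flask API, would be the natural data source for it. The sketch below is purely illustrative and is not part of this series: the /api/students route, the STUDENTS snapshot, and the field names are assumptions chosen to mirror the table columns above.

# Illustrative Flask endpoint (not part of these patches). The route, the
# field names, and the in-memory STUDENTS snapshot are assumptions made to
# mirror the dashboard's table columns; the real Backend/proctor_api.py is
# not shown in full in this series.
from flask import Flask, jsonify

app = Flask(__name__)

STUDENTS = [
    # In a real wiring these records would come from the DetectionSystem
    # metrics introduced in PATCH 9/9, not from a hard-coded list.
    {"student_id": "S-001", "name": "Example Student", "status": "active",
     "duration": "00:42:10", "suspicious_level": 0.12},
]

@app.route("/api/students")
def list_students():
    # One JSON object per row of the "Active Examination Sessions" table.
    return jsonify(STUDENTS)

if __name__ == "__main__":
    app.run(port=5000, debug=True)

The page could then poll such an endpoint and fill the table and summary cards client-side; none of that wiring exists in the patches shown here.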
+ + + + + + \ No newline at end of file From f40998a7bcb328fb9063f314d990dd8b1db6235b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 31 Oct 2024 09:00:32 +0000 Subject: [PATCH 3/9] Update repo structure --- PROJECT_STRUCTURE.md | 2 ++ repo_structure.txt | 2 ++ 2 files changed, 4 insertions(+) diff --git a/PROJECT_STRUCTURE.md b/PROJECT_STRUCTURE.md index d672dd6..adeae4c 100644 --- a/PROJECT_STRUCTURE.md +++ b/PROJECT_STRUCTURE.md @@ -265,6 +265,8 @@ │ ├── contributor.css │ ├── contributor.html │ └── contributor.js +├── frontend/ +│ └── index.html ├── heatmap_combined_20241009_144503.png ├── login.py ├── modle.png diff --git a/repo_structure.txt b/repo_structure.txt index 871b4aa..c6dc528 100644 --- a/repo_structure.txt +++ b/repo_structure.txt @@ -261,6 +261,8 @@ │ ├── contributor.css │ ├── contributor.html │ └── contributor.js +├── frontend/ +│ └── index.html ├── heatmap_combined_20241009_144503.png ├── login.py ├── modle.png From 804032aad8f8148b37a7a504bca0f92be2df3bdb Mon Sep 17 00:00:00 2001 From: MANI Date: Thu, 31 Oct 2024 14:57:00 +0530 Subject: [PATCH 4/9] Adding object detection model --- Backend/proctor_core.py | 121 +++++++++++++++++++++++++++++++++++++++ Backend/run.py | 124 +++++++++++++++++++++++++++++++++------- requirements.txt | 27 +++++---- 3 files changed, 239 insertions(+), 33 deletions(-) create mode 100644 Backend/proctor_core.py diff --git a/Backend/proctor_core.py b/Backend/proctor_core.py new file mode 100644 index 0000000..ed48076 --- /dev/null +++ b/Backend/proctor_core.py @@ -0,0 +1,121 @@ +# Backend/proctor_core.py + +import cv2 +import mediapipe as mp +import numpy as np +from typing import Dict, List, Tuple +import logging +import os + +# Local imports +from .detection import run_detection +from .head_pose import pose +from .object_detection import detect_objects +from .audio import process_audio +from .screen_recorder import capture_screen + +class ProctorCore: + def __init__(self): + self.mp_face_detection = mp.solutions.face_detection + self.mp_pose = mp.solutions.pose + self.face_detection = self.mp_face_detection.FaceDetection(min_detection_confidence=0.7) + self.pose_detection = self.mp_pose.Pose(min_detection_confidence=0.7) + self.logger = self._setup_logger() + + def _setup_logger(self) -> logging.Logger: + """Configure logging for the proctoring system""" + logger = logging.getLogger('ProctorCore') + logger.setLevel(logging.INFO) + handler = logging.StreamHandler() + formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + handler.setFormatter(formatter) + logger.addHandler(handler) + return logger + + def start_monitoring(self): + """Initialize and start all monitoring components""" + try: + # Start detection systems + detection_result = run_detection() + pose_result = pose() + screen_capture = capture_screen() + + # Process and combine results + combined_results = self._process_results( + detection_result, + pose_result, + screen_capture + ) + + return combined_results + + except Exception as e: + self.logger.error(f"Error in monitoring: {str(e)}") + raise + + def _process_results(self, detection_data, pose_data, screen_data) -> Dict: + """Process and combine results from different detection systems""" + results = { + 'timestamp': np.datetime64('now'), + 'detection': detection_data, + 'pose': pose_data, + 'screen': screen_data, + 'suspicious_level': 0.0 + } + + # Calculate suspicious level based on combined factors + suspicious_factors = [ + 
detection_data.get('suspicious_score', 0), + pose_data.get('deviation_score', 0), + screen_data.get('activity_score', 0) + ] + + results['suspicious_level'] = np.mean([x for x in suspicious_factors if x is not None]) + + return results + + def save_results(self, results: Dict, output_path: str = None): + """Save monitoring results to specified location""" + if output_path is None: + output_path = os.path.join( + os.path.dirname(__file__), + 'Dataset', + f'proctor_results_{np.datetime64("now")}.json' + ) + + try: + import json + with open(output_path, 'w') as f: + json.dump(results, f, indent=4, default=str) + self.logger.info(f"Results saved to {output_path}") + except Exception as e: + self.logger.error(f"Error saving results: {str(e)}") + + def analyze_behavior(self, results: Dict) -> Dict: + """Analyze monitored behavior and generate insights""" + analysis = { + 'timestamp': np.datetime64('now'), + 'overall_score': results.get('suspicious_level', 0), + 'warnings': [], + 'recommendations': [] + } + + # Generate warnings based on thresholds + if results.get('pose', {}).get('deviation_score', 0) > 0.7: + analysis['warnings'].append('Significant head movement detected') + + if results.get('detection', {}).get('suspicious_score', 0) > 0.7: + analysis['warnings'].append('Suspicious objects detected') + + if results.get('screen', {}).get('activity_score', 0) > 0.7: + analysis['warnings'].append('Unusual screen activity detected') + + return analysis + + def cleanup(self): + """Cleanup resources and close connections""" + try: + cv2.destroyAllWindows() + self.logger.info("Cleanup completed successfully") + except Exception as e: + self.logger.error(f"Error during cleanup: {str(e)}") \ No newline at end of file diff --git a/Backend/run.py b/Backend/run.py index 7fa04de..a6dad64 100644 --- a/Backend/run.py +++ b/Backend/run.py @@ -1,28 +1,110 @@ -import head_pose -import detection -import threading as th - -def run_threads(): - try: - # Create threads for each target function - head_pose_thread = th.Thread(target=head_pose.pose) - # audio_thread = th.Thread(target=audio.sound) # Uncomment if audio module is needed - detection_thread = th.Thread(target=detection.run_detection) +# Backend/run.py - # Start the threads - head_pose_thread.start() - # audio_thread.start() # Uncomment to start audio thread - detection_thread.start() +import threading as th +import logging +import os +from typing import Dict, List +import queue +from .proctor_core import ProctorCore - # Wait for the threads to complete - head_pose_thread.join() - # audio_thread.join() # Uncomment to wait for audio thread - detection_thread.join() +class ProctorManager: + def __init__(self): + self.result_queue = queue.Queue() + self.proctor = ProctorCore() + self.is_running = False + self.threads: List[th.Thread] = [] + self.logger = self._setup_logger() + + def _setup_logger(self): + logger = logging.getLogger('ProctorManager') + logger.setLevel(logging.INFO) + handler = logging.StreamHandler() + formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + handler.setFormatter(formatter) + logger.addHandler(handler) + return logger + + def _monitoring_worker(self): + """Worker function for continuous monitoring""" + while self.is_running: + try: + results = self.proctor.start_monitoring() + self.result_queue.put(results) + except Exception as e: + self.logger.error(f"Error in monitoring worker: {str(e)}") + break + + def _analysis_worker(self): + """Worker function for analyzing results""" + while 
self.is_running: + try: + results = self.result_queue.get(timeout=1) + if results: + analysis = self.proctor.analyze_behavior(results) + self.proctor.save_results(analysis) + except queue.Empty: + continue + except Exception as e: + self.logger.error(f"Error in analysis worker: {str(e)}") + break + + def start(self): + """Start the proctoring system""" + try: + self.is_running = True + + # Create worker threads + monitoring_thread = th.Thread(target=self._monitoring_worker) + analysis_thread = th.Thread(target=self._analysis_worker) + + # Start threads + self.threads = [monitoring_thread, analysis_thread] + for thread in self.threads: + thread.start() + + self.logger.info("Proctoring system started successfully") + + except Exception as e: + self.logger.error(f"Error starting proctoring system: {str(e)}") + self.stop() + + def stop(self): + """Stop the proctoring system""" + self.is_running = False + + # Wait for threads to complete + for thread in self.threads: + thread.join() + + # Cleanup resources + self.proctor.cleanup() + self.logger.info("Proctoring system stopped") +def main(): + # Setup logging + logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' + ) + logger = logging.getLogger(__name__) + + try: + # Initialize and start the proctoring system + manager = ProctorManager() + manager.start() + + # Keep running until interrupted + while True: + pass + + except KeyboardInterrupt: + logger.info("Received shutdown signal") except Exception as e: - print(f"An error occurred: {e}") + logger.error(f"Unexpected error: {str(e)}") finally: - print("All threads have been joined.") + if 'manager' in locals(): + manager.stop() + logger.info("Application shutdown complete") if __name__ == "__main__": - run_threads() + main() \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 3850801..0a37db7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,16 @@ +numpy==2.1.1 +opencv-python==4.10.0.84 +opencv-contrib-python==4.10.0.84 +mediapipe==0.10.14 +PyAudio==0.2.14 +sounddevice==0.5.0 +tensorflow>=2.14.0 +torch>=2.1.0 +mediapipe==0.10.14 +protobuf==4.25.5 +scipy==1.14.1 +matplotlib==3.9.2 +pillow==10.4.0 absl-py==2.1.0 attrs==24.2.0 cffi==1.17.1 @@ -9,23 +22,13 @@ glob2==0.7 jax==0.4.33 jaxlib==0.4.33 kiwisolver==1.4.7 -matplotlib==3.9.2 -mediapipe==0.10.14 ml_dtypes==0.5.0 -numpy==2.1.1 -opencv-contrib-python==4.10.0.84 -opencv-python==4.10.0.84 opt_einsum==3.4.0 packaging==24.1 -pillow==10.4.0 -protobuf==4.25.5 -PyAudio==0.2.14 pycparser==2.22 pyparsing==3.1.4 python-dateutil==2.9.0.post0 -pywin32==306 -scipy==1.14.1 six==1.16.0 -sounddevice==0.5.0 +pywin32==306 WMI==1.5.1 -tensor \ No newline at end of file +psutil>=5.9.0 \ No newline at end of file From 0d55b4fa166fc3eb81fa4e2f554647a5c2072710 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 31 Oct 2024 09:27:28 +0000 Subject: [PATCH 5/9] Update repo structure --- PROJECT_STRUCTURE.md | 1 + repo_structure.txt | 1 + 2 files changed, 2 insertions(+) diff --git a/PROJECT_STRUCTURE.md b/PROJECT_STRUCTURE.md index adeae4c..6fb00ae 100644 --- a/PROJECT_STRUCTURE.md +++ b/PROJECT_STRUCTURE.md @@ -238,6 +238,7 @@ │ ├── object_detection.py │ ├── peer_comparison_tool.py │ ├── processes.py +│ ├── proctor_core.py │ ├── pyaudio_test.py │ ├── run.py │ ├── screen_recorder.py diff --git a/repo_structure.txt b/repo_structure.txt index c6dc528..2755cc2 100644 --- a/repo_structure.txt +++ 
b/repo_structure.txt @@ -234,6 +234,7 @@ │ ├── object_detection.py │ ├── peer_comparison_tool.py │ ├── processes.py +│ ├── proctor_core.py │ ├── pyaudio_test.py │ ├── run.py │ ├── screen_recorder.py From c7099f88b951eafb36e16f28221a0ba9716cc958 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 06:15:36 +0000 Subject: [PATCH 6/9] Update repo structure --- PROJECT_STRUCTURE.md | 1 + repo_structure.txt | 1 + 2 files changed, 2 insertions(+) diff --git a/PROJECT_STRUCTURE.md b/PROJECT_STRUCTURE.md index 6fb00ae..397d1af 100644 --- a/PROJECT_STRUCTURE.md +++ b/PROJECT_STRUCTURE.md @@ -254,6 +254,7 @@ ├── LICENSE ├── PROJECT_STRUCTURE.md ├── ReadMe.md +├── SECURITY.md ├── Suggested-Issues.md ├── Userdb.sql ├── __pycache__/ diff --git a/repo_structure.txt b/repo_structure.txt index 2755cc2..5100cb8 100644 --- a/repo_structure.txt +++ b/repo_structure.txt @@ -250,6 +250,7 @@ ├── LICENSE ├── PROJECT_STRUCTURE.md ├── ReadMe.md +├── SECURITY.md ├── Suggested-Issues.md ├── Userdb.sql ├── __pycache__/ From 89089d01a0cc747b0be8a565e80183ee904088e0 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 10:36:22 +0000 Subject: [PATCH 7/9] Update repo structure --- PROJECT_STRUCTURE.md | 4 +++- repo_structure.txt | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/PROJECT_STRUCTURE.md b/PROJECT_STRUCTURE.md index 397d1af..006de22 100644 --- a/PROJECT_STRUCTURE.md +++ b/PROJECT_STRUCTURE.md @@ -260,7 +260,9 @@ ├── __pycache__/ │ ├── audio.cpython-311.pyc │ ├── detection.cpython-311.pyc -│ └── head_pose.cpython-311.pyc +│ ├── head_pose.cpython-311.pyc +│ └── tutorial/ +│ └── tutorial.html ├── calenderApp/ │ └── calender.html ├── contributor/ diff --git a/repo_structure.txt b/repo_structure.txt index 5100cb8..038311e 100644 --- a/repo_structure.txt +++ b/repo_structure.txt @@ -256,7 +256,9 @@ ├── __pycache__/ │ ├── audio.cpython-311.pyc │ ├── detection.cpython-311.pyc -│ └── head_pose.cpython-311.pyc +│ ├── head_pose.cpython-311.pyc +│ └── tutorial/ +│ └── tutorial.html ├── calenderApp/ │ └── calender.html ├── contributor/ From 1211f17c8c1fdc49202d1615fca3dec26dde59fb Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 17:31:46 +0000 Subject: [PATCH 8/9] Update repo structure --- PROJECT_STRUCTURE.md | 7 ++++++- repo_structure.txt | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/PROJECT_STRUCTURE.md b/PROJECT_STRUCTURE.md index 006de22..0852838 100644 --- a/PROJECT_STRUCTURE.md +++ b/PROJECT_STRUCTURE.md @@ -235,15 +235,19 @@ │ ├── graph.py │ ├── head_pose.py │ ├── logic.xlsx +│ ├── model_training.py +│ ├── mtcnn_face_detection.py │ ├── object_detection.py │ ├── peer_comparison_tool.py │ ├── processes.py +│ ├── proctor_api.py │ ├── proctor_core.py │ ├── pyaudio_test.py │ ├── run.py │ ├── screen_recorder.py │ ├── test-image.jpg -│ └── test-image2.jpg +│ ├── test-image2.jpg +│ └── train.py ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── GSSoC-Ext.png @@ -255,6 +259,7 @@ ├── PROJECT_STRUCTURE.md ├── ReadMe.md ├── SECURITY.md +├── SECURITYPOLICY.md ├── Suggested-Issues.md ├── Userdb.sql ├── __pycache__/ diff --git a/repo_structure.txt b/repo_structure.txt index 038311e..29c87ac 100644 --- a/repo_structure.txt +++ b/repo_structure.txt @@ -231,15 +231,19 @@ │ ├── graph.py │ ├── head_pose.py │ ├── logic.xlsx +│ ├── model_training.py +│ ├── 
mtcnn_face_detection.py │ ├── object_detection.py │ ├── peer_comparison_tool.py │ ├── processes.py +│ ├── proctor_api.py │ ├── proctor_core.py │ ├── pyaudio_test.py │ ├── run.py │ ├── screen_recorder.py │ ├── test-image.jpg -│ └── test-image2.jpg +│ ├── test-image2.jpg +│ └── train.py ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── GSSoC-Ext.png @@ -251,6 +255,7 @@ ├── PROJECT_STRUCTURE.md ├── ReadMe.md ├── SECURITY.md +├── SECURITYPOLICY.md ├── Suggested-Issues.md ├── Userdb.sql ├── __pycache__/ From 5aa67f0101e74d2d45663b157e390469a5b5b19f Mon Sep 17 00:00:00 2001 From: MANI Date: Sat, 9 Nov 2024 17:36:51 +0530 Subject: [PATCH 9/9] Added the functionality to track multiple students --- Backend/detection.py | 270 ++++++++++++++++++++----------------- Backend/proctor_api.py | 3 +- Backend/student_tracker.py | 88 ++++++++++++ 3 files changed, 235 insertions(+), 126 deletions(-) create mode 100644 Backend/student_tracker.py diff --git a/Backend/detection.py b/Backend/detection.py index a56dcfb..fdd098c 100644 --- a/Backend/detection.py +++ b/Backend/detection.py @@ -1,138 +1,158 @@ -import time -import audio -import head_pose -import matplotlib.pyplot as plt +import cv2 import numpy as np import logging +import time +import matplotlib.pyplot as plt +from typing import Dict, List, Tuple +from .student_tracker import MultiStudentTracker +from .face_recog import FaceRecognition # Configure logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') -PLOT_LENGTH = 200 - -# Placeholders -GLOBAL_CHEAT = 0 -PERCENTAGE_CHEAT = 0 -CHEAT_THRESH = 0.6 -XDATA = list(range(200)) -YDATA = [0] * 200 - -# Global flag to check if window is open -is_running = True - -def avg(current, previous): - if previous > 1: - return 0.65 - if current == 0: - if previous < 0.01: - return 0.01 - return previous / 1.01 - if previous == 0: - return current - return 1 * previous + 0.1 * current - -def process(): - global GLOBAL_CHEAT, PERCENTAGE_CHEAT, CHEAT_THRESH - - try: - if GLOBAL_CHEAT == 0: - if head_pose.X_AXIS_CHEAT == 0: - if head_pose.Y_AXIS_CHEAT == 0: - if audio.AUDIO_CHEAT == 0: - PERCENTAGE_CHEAT = avg(0, PERCENTAGE_CHEAT) - else: - PERCENTAGE_CHEAT = avg(0.2, PERCENTAGE_CHEAT) +class DetectionSystem: + def __init__(self): + self.face_recognition = FaceRecognition() + self.student_tracker = MultiStudentTracker() + self.active_tracking = False + + # Detection parameters + self.PLOT_LENGTH = 200 + self.CHEAT_THRESH = 0.6 + self.is_running = True + + # Initialize tracking data for each student + self.student_data = {} # Dictionary to store per-student metrics + + def avg(self, current: float, previous: float) -> float: + """Calculate weighted average of cheat probability""" + if previous > 1: + return 0.65 + if current == 0: + if previous < 0.01: + return 0.01 + return previous / 1.01 + if previous == 0: + return current + return 1 * previous + 0.1 * current + + def calculate_cheat_probability(self, student_metrics: Dict) -> float: + """ + Calculate cheat probability based on multiple metrics + """ + pose_data = student_metrics.get('pose_data', {}) + audio_cheat = student_metrics.get('audio_cheat', 0) + previous_cheat = student_metrics.get('previous_cheat', 0) + + x_axis_cheat = int(not pose_data.get('looking_straight', True)) + y_axis_cheat = int(pose_data.get('movement_detected', False)) + + # Complex decision tree for cheat probability + base_probability = 0 + + if x_axis_cheat == 0: + if y_axis_cheat == 0: + if audio_cheat == 0: + base_probability = 0 else: - if 
audio.AUDIO_CHEAT == 0: - PERCENTAGE_CHEAT = avg(0.2, PERCENTAGE_CHEAT) - else: - PERCENTAGE_CHEAT = avg(0.4, PERCENTAGE_CHEAT) + base_probability = 0.2 else: - if head_pose.Y_AXIS_CHEAT == 0: - if audio.AUDIO_CHEAT == 0: - PERCENTAGE_CHEAT = avg(0.1, PERCENTAGE_CHEAT) - else: - PERCENTAGE_CHEAT = avg(0.4, PERCENTAGE_CHEAT) + if audio_cheat == 0: + base_probability = 0.2 else: - if audio.AUDIO_CHEAT == 0: - PERCENTAGE_CHEAT = avg(0.15, PERCENTAGE_CHEAT) - else: - PERCENTAGE_CHEAT = avg(0.25, PERCENTAGE_CHEAT) + base_probability = 0.4 else: - if head_pose.X_AXIS_CHEAT == 0: - if head_pose.Y_AXIS_CHEAT == 0: - if audio.AUDIO_CHEAT == 0: - PERCENTAGE_CHEAT = avg(0, PERCENTAGE_CHEAT) - else: - PERCENTAGE_CHEAT = avg(0.55, PERCENTAGE_CHEAT) + if y_axis_cheat == 0: + if audio_cheat == 0: + base_probability = 0.1 else: - if audio.AUDIO_CHEAT == 0: - PERCENTAGE_CHEAT = avg(0.55, PERCENTAGE_CHEAT) - else: - PERCENTAGE_CHEAT = avg(0.85, PERCENTAGE_CHEAT) + base_probability = 0.4 else: - if head_pose.Y_AXIS_CHEAT == 0: - if audio.AUDIO_CHEAT == 0: - PERCENTAGE_CHEAT = avg(0.6, PERCENTAGE_CHEAT) - else: - PERCENTAGE_CHEAT = avg(0.85, PERCENTAGE_CHEAT) + if audio_cheat == 0: + base_probability = 0.15 else: - if audio.AUDIO_CHEAT == 0: - PERCENTAGE_CHEAT = avg(0.5, PERCENTAGE_CHEAT) - else: - PERCENTAGE_CHEAT = avg(0.85, PERCENTAGE_CHEAT) - - if PERCENTAGE_CHEAT > CHEAT_THRESH: - GLOBAL_CHEAT = 1 - print("CHEATING") - else: - GLOBAL_CHEAT = 0 - print("Cheat percent: ", PERCENTAGE_CHEAT, GLOBAL_CHEAT) - - except Exception as e: - logging.error(f"Error in process: {e}") - print("An error occurred during processing. Please check the logs.") - -def on_close(event): - global is_running - is_running = False - # Set flag to False when the window is closed - -def run_detection(): - global XDATA, YDATA, is_running - - try: - fig, axes = plt.subplots() - - axes.set_xlim(0, 200) - axes.set_ylim(0, 1) - line, = axes.plot(XDATA, YDATA, 'r-') - plt.title("Suspicious Behaviour Detection") - plt.xlabel("Time") - plt.ylabel("Cheat Probability") - - # Connect the close event to the callback - fig.canvas.mpl_connect('close_event', on_close) - - while is_running: - YDATA.pop(0) - YDATA.append(PERCENTAGE_CHEAT) - line.set_xdata(XDATA) - line.set_ydata(YDATA) - plt.draw() - plt.pause(1e-17) - process() - time.sleep(1 / 5) - - plt.close(fig) - - except Exception as e: - logging.error(f"Error in run_detection: {e}") - print("An error occurred while running the detection. 
Please check the logs.") - -if __name__ == "__main__": - try: - run_detection() - except KeyboardInterrupt: - logging.info("Detection interrupted by user.") - print("Terminated detection.") + base_probability = 0.25 + + return self.avg(base_probability, previous_cheat) + + def process_frame(self, frame: np.ndarray) -> Tuple[np.ndarray, List[Dict]]: + """ + Process a single frame with face recognition and student tracking + """ + if frame is None: + return None, [] + + try: + # Perform face recognition + recognized_frame = self.face_recognition.recognize_faces(frame) + + # Perform student tracking + tracked_frame, new_student_data = self.student_tracker.detect_and_track_students(recognized_frame) + + # Update student metrics + timestamp = time.time() + for student in new_student_data: + student_id = student['bbox'] # Use bbox as temporary ID + + if student_id not in self.student_data: + # Initialize new student data + self.student_data[student_id] = { + 'cheat_history': [0] * self.PLOT_LENGTH, + 'previous_cheat': 0, + 'global_cheat': 0, + 'audio_cheat': 0 # Placeholder for audio detection + } + + # Calculate cheat probability + cheat_prob = self.calculate_cheat_probability({ + 'pose_data': student['pose_data'], + 'audio_cheat': self.student_data[student_id]['audio_cheat'], + 'previous_cheat': self.student_data[student_id]['previous_cheat'] + }) + + # Update student metrics + self.student_data[student_id]['previous_cheat'] = cheat_prob + self.student_data[student_id]['cheat_history'].pop(0) + self.student_data[student_id]['cheat_history'].append(cheat_prob) + + # Update global cheat flag + if cheat_prob > self.CHEAT_THRESH: + self.student_data[student_id]['global_cheat'] = 1 + logging.info(f"Cheating detected for student at {student['bbox']}") + else: + self.student_data[student_id]['global_cheat'] = 0 + + # Add metrics to student data + student['cheat_probability'] = cheat_prob + student['global_cheat'] = self.student_data[student_id]['global_cheat'] + student['timestamp'] = timestamp + + return tracked_frame, new_student_data + + except Exception as e: + logging.error(f"Error in process_frame: {e}") + return frame, [] + + def start_tracking(self): + """Enable student tracking""" + self.active_tracking = True + self.is_running = True + + def stop_tracking(self): + """Disable student tracking""" + self.active_tracking = False + self.is_running = False + + def get_student_plot_data(self, student_id): + """Get plotting data for a specific student""" + if student_id in self.student_data: + return ( + list(range(self.PLOT_LENGTH)), + self.student_data[student_id]['cheat_history'] + ) + return None, None + + def cleanup(self): + """Cleanup resources""" + self.stop_tracking() + plt.close('all') \ No newline at end of file diff --git a/Backend/proctor_api.py b/Backend/proctor_api.py index 84e6888..262da79 100644 --- a/Backend/proctor_api.py +++ b/Backend/proctor_api.py @@ -263,4 +263,5 @@ def not_found(e): @proctor_api.errorhandler(500) def internal_error(e): - return jsonify({'error': 'Internal server error'}), 500 \ No newline at end of file + return jsonify({'error': 'Internal server error'}), 500 + diff --git a/Backend/student_tracker.py b/Backend/student_tracker.py new file mode 100644 index 0000000..24a7cc2 --- /dev/null +++ b/Backend/student_tracker.py @@ -0,0 +1,88 @@ +import cv2 +import mediapipe as mp +import numpy as np +from typing import List, Dict, Tuple + +class MultiStudentTracker: + def __init__(self): + self.mp_face_detection = mp.solutions.face_detection + self.face_detection = 
self.mp_face_detection.FaceDetection( + min_detection_confidence=0.7 + ) + self.suspicious_behaviors = { + 'looking_away': 0, + 'rapid_movement': 0, + 'out_of_frame': 0 + } + + def detect_and_track_students(self, frame: np.ndarray) -> Tuple[np.ndarray, List[Dict]]: + """ + Detect and track multiple students in the frame + Returns annotated frame and list of student tracking data + """ + rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + results = self.face_detection.process(rgb_frame) + student_data = [] + + if results.detections: + for detection in results.detections: + bbox = detection.location_data.relative_bounding_box + h, w, _ = frame.shape + + x = int(bbox.xmin * w) + y = int(bbox.ymin * h) + width = int(bbox.width * w) + height = int(bbox.height * h) + + # Track head pose and movement + pose_data = self.analyze_head_pose(frame[y:y+height, x:x+width]) + + # Calculate suspicion metrics + suspicion_score = self.calculate_suspicion_score(pose_data) + + student_info = { + 'bbox': (x, y, width, height), + 'pose_data': pose_data, + 'suspicion_score': suspicion_score, + 'timestamp': cv2.getTickCount() / cv2.getTickFrequency() + } + student_data.append(student_info) + + # Draw bounding box and suspicion score + color = self.get_alert_color(suspicion_score) + cv2.rectangle(frame, (x, y), (x + width, y + height), color, 2) + cv2.putText(frame, f"Score: {suspicion_score:.2f}", + (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, + 0.5, color, 2) + + return frame, student_data + + def analyze_head_pose(self, face_region: np.ndarray) -> Dict: + """ + Analyze head pose and movement patterns + """ + return { + 'looking_straight': True, + 'movement_detected': False + } + + def calculate_suspicion_score(self, pose_data: Dict) -> float: + """ + Calculate suspicion score based on pose analysis + """ + score = 0.0 + if not pose_data['looking_straight']: + score += 0.3 + if pose_data['movement_detected']: + score += 0.2 + return min(score, 1.0) + + def get_alert_color(self, score: float) -> Tuple[int, int, int]: + """ + Return color based on suspicion score + """ + if score < 0.3: + return (0, 255, 0) # Green + elif score < 0.7: + return (0, 255, 255) # Yellow + return (0, 0, 255) # Red \ No newline at end of file
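Taken together, the DetectionSystem added to Backend/detection.py and this MultiStudentTracker expose a frame-in, annotations-out interface: process_frame() returns the annotated frame plus one metrics dict per detected student. A minimal driver sketch follows; it is not part of the patch series, and it assumes that Backend is importable as a package (the new modules use relative imports) and that the face_recog module detection.py imports is available (the tree only lists face-rec.py, so that import may need adjusting).

# Hypothetical driver script (not included in the patches); assumes the
# Backend package imports in detection.py resolve when run from the repo root.
import cv2

from Backend.detection import DetectionSystem

def main() -> None:
    detector = DetectionSystem()
    detector.start_tracking()
    cap = cv2.VideoCapture(0)  # default webcam
    try:
        while detector.is_running:
            ok, frame = cap.read()
            if not ok:
                break
            annotated, students = detector.process_frame(frame)
            for s in students:
                # Fields filled in by DetectionSystem.process_frame / MultiStudentTracker.
                print(s["bbox"], f"cheat={s['cheat_probability']:.2f}",
                      "FLAGGED" if s["global_cheat"] else "ok")
            cv2.imshow("SPROCTOR monitor", annotated if annotated is not None else frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    finally:
        cap.release()
        detector.cleanup()

if __name__ == "__main__":
    main()

Pressing q stops the loop, releases the camera, and lets cleanup() close any open matplotlib figures.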