diff --git a/.gitignore b/.gitignore index cc19cb61..c79bb8e7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,3 @@ -## Ignore Visual Studio temporary files, build results, and -## files generated by popular Visual Studio add-ons. -## -## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore - # User-specific files *.rsuser *.suo @@ -33,7 +28,7 @@ bld/ [Ll]og/ [Ll]ogs/ -# Visual Studio 2015/2017 cache/options directory +# Visual Studio cache/options directory .vs/ # Uncomment if you have tasks that create the project's static files in wwwroot #wwwroot/ @@ -121,12 +116,6 @@ ipch/ # Visual Studio Trace Files *.e2e -# TFS 2012 Local Workspace -$tf/ - -# Guidance Automation Toolkit -*.gpState - # ReSharper is a .NET coding add-in _ReSharper*/ *.[Rr]e[Ss]harper @@ -246,9 +235,6 @@ orleans.codegen.cs # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) #bower_components/ -# RIA/Silverlight projects -Generated_Code/ - # Backup & report files from converting an old project file # to a newer Visual Studio version. Backup files are not needed, # because we have git ;-) @@ -259,11 +245,6 @@ UpgradeLog*.htm ServiceFabricBackup/ *.rptproj.bak -# SQL Server files -*.mdf -*.ldf -*.ndf - # Business Intelligence projects *.rdl.data *.bim.layout @@ -286,27 +267,6 @@ node_modules/ # Visual Studio 6 build log *.plg -# Visual Studio 6 workspace options file -*.opt - -# Visual Studio 6 auto-generated workspace file (contains which files were open etc.) -*.vbw - -# Visual Studio LightSwitch build output -**/*.HTMLClient/GeneratedArtifacts -**/*.DesktopClient/GeneratedArtifacts -**/*.DesktopClient/ModelManifest.xml -**/*.Server/GeneratedArtifacts -**/*.Server/ModelManifest.xml -_Pvt_Extensions - -# Paket dependency manager -.paket/paket.exe -paket-files/ - -# FAKE - F# Make -.fake/ - # CodeRush personal settings .cr/personal @@ -314,25 +274,9 @@ paket-files/ __pycache__/ *.pyc -# Cake - Uncomment if you are using it -# tools/** -# !tools/packages.config - -# Tabs Studio -*.tss - # Telerik's JustMock configuration file *.jmconfig -# BizTalk build output -*.btp.cs -*.btm.cs -*.odx.cs -*.xsd.cs - -# OpenCover UI analysis results -OpenCover/ - # Azure Stream Analytics local run output ASALocalRun/ @@ -342,24 +286,12 @@ ASALocalRun/ # NVidia Nsight GPU debugger configuration file *.nvuser -# MFractors (Xamarin productivity tool) working folder -.mfractor/ - # Local History for Visual Studio .localhistory/ -# BeatPulse healthcheck temp database -healthchecksdb - # Backup folder for Package Reference Convert tool in Visual Studio 2017 MigrationBackup/ -# Ionide (cross platform F# VS Code tools) working folder -.ionide/ - -# Fody - auto-generated XML schema -FodyWeavers.xsd - # Heat Generated Wix files. 
**/*Files.wxs @@ -382,33 +314,70 @@ FodyWeavers.xsd /Installers/Windows/PortraitFilter.Installer/PortraitFilterInstallFiles.wxs /Installers/Windows/CodeProjectAI.Server.Installer/DemoImangesFiles.wxs /Installers/Windows/Python39.Installer/Python39Files.wxs +/Installers/Windows/YoloNet.Installer/YoloNetInstallFiles.wxs /Installers/zlib123dllx64.zip /Installers/cudnn-windows-x86_64-8.5.0.96_cuda11-archive.zip /src/API/Server/FrontEnd/installconfig.json +/src/API/Server/FrontEnd/modules.json -/src/AnalysisLayer/FaceProcessing/assets -/src/AnalysisLayer/FaceProcessing/datastore/ -/src/AnalysisLayer/ObjectDetectionNet/assets/ -/src/AnalysisLayer/ObjectDetectionNet/custom-models -/src/AnalysisLayer/ObjectDetectionYolo/assets -/src/AnalysisLayer/ObjectDetectionYolo/custom-models - -/src/downloads/ -/src/module-downloads/ -/src/modules/downloads/ +# keep the folder structure but don't git commit the cached downloads +!/src/downloads +/src/downloads/* +!/src/downloads/modules +/src/downloads/modules/* +!/src/downloads/modules/readme.txt +# Downloaded assets for modules /src/modules/ALPR/paddleocr /src/modules/ALPR/plate.png /src/modules/BackgroundRemover/models +/src/modules/Cartooniser/weights +/src/modules/FaceProcessing/assets +/src/modules/FaceProcessing/datastore/ +/src/modules/ObjectDetectionCoral/assets +src/modules/ObjectDetectionCoral/edgetpu_runtime +src/modules/ObjectDetectionCoral/libedgetpu.* +/src/modules/ObjectDetectionNet/assets/ +/src/modules/ObjectDetectionNet/custom-models +/src/modules/ObjectDetectionNet/LocalNugets +src/modules/ObjectDetectionYoloRKNN/assets +src/modules/ObjectDetectionYoloRKNN/custom-models +/src/modules/ObjectDetectionTFLite/assets +/src/modules/ObjectDetectionYolo/assets +/src/modules/ObjectDetectionYolo/custom-models /src/modules/OCR/paddleocr /src/modules/PortraitFilter/runtimeconfig.template.Designer.cs /src/modules/SceneClassifier/assets +/src/modules/TrainingYoloV5/assets +/src/modules/TrainingYoloV5/datasets +/src/modules/TrainingYoloV5/fiftyone +/src/modules/TrainingYoloV5/training +/src/modules/TrainingYoloV5/train +/src/modules/TrainingYoloV5/zoo /src/modules/YOLOv5-3.1/custom-models /src/modules/YOLOv5-3.1/assets /src/modules/YOLOv5-3.1/windows_packages_cpu /src/modules/YOLOv5-3.1/windows_packages_gpu -/src/AnalysisLayer/BackgroundRemover/models/u2net.onnx -/src/AnalysisLayer/SceneClassifier/assets/categories_places365.txt -/src/AnalysisLayer/SceneClassifier/assets/scene.pt + +# Generated module packages +/src/modules/ALPR/ALPR-[0-9].[0-9].zip +/src/modules/BackgroundRemover/BackgroundRemover-[0-9].[0-9].zip +/src/modules/Cartooniser/Cartooniser-[0-9].[0-9].zip +/src/modules/FaceProcessing/FaceProcessing-[0-9].[0-9].zip +/src/modules/ObjectDetectionNet/ObjectDetectionNet-CPU-[0-9].[0-9].zip +/src/modules/ObjectDetectionNet/ObjectDetectionNet-OpenVINO-[0-9].[0-9].zip +/src/modules/ObjectDetectionNet/ObjectDetectionNet-DirectML-[0-9].[0-9].zip +/src/modules/ObjectDetectionNet/ObjectDetectionNet-CUDA-[0-9].[0-9].zip +/src/modules/ObjectDetectionNet/ObjectDetectionNet-CPU-[0-9].[0-9].zip +/src/modules/ObjectDetectionTFLite/ObjectDetectionTFLite-1.0.zip +/src/modules/ObjectDetectionYolo/ObjectDetectionYolo-[0-9].[0-9].zip +/src/modules/OCR/OCR-[0-9].[0-9].zip +/src/modules/PortraitFilter/PortraitFilter-[0-9].[0-9].zip +/src/modules/SceneClassifier/SceneClassifier-[0-9].[0-9].zip +/src/modules/SentimentAnalysis/SentimentAnalysis-[0-9].[0-9].zip +/src/modules/SuperResolution/SuperResolution-[0-9].[0-9].zip 
+/src/modules/TextSummary/TextSummary-[0-9].[0-9].zip +/src/modules/YOLOv5-3.1/YOLOv5-3.1-[0-9].[0-9].zip +/src/modules/ObjectDetectionNet/ObjectDetectionNet-1.1.zip diff --git a/.vscode/launch.docker.json b/.vscode/launch.docker.json new file mode 100644 index 00000000..8d4bd256 --- /dev/null +++ b/.vscode/launch.docker.json @@ -0,0 +1,335 @@ +{ + "version": "0.2.0", + "configurations": [ + + // Server ------------------------------------------------------------- + + { + "presentation": { + "group": "4 Launch", + "order": 1 + }, + "name": "Launch Server", + "type": "coreclr", + "request": "launch", + "program": "/app/server/CodeProject.AI.Server.dll", + "args": [], + "cwd": "/app/server/", + "stopAtEntry": false, + "requireExactSource": false, + "serverReadyAction": { + "action": "openExternally", + "pattern": "\\bNow listening on:\\s+(https?://\\S+)", + "uriFormat": "http://localhost:%s/swagger" + }, + "env": { + "ASPNETCORE_ENVIRONMENT": "Development", + "RUNNING_IN_VSCODE": "true", + "DOTNET_NOLOGO": "true" + } + }, + + { + "presentation": { + "group": "4 Launch", + "order": 100 + }, + "name": "Stop all Processes", + + "type": "python", + "code": "#", // dummy command + "console": "internalConsole", + "request": "launch", + + // "type": "coreclr", + // "program": "dotnet", // dummy command + // "args": [ "--version" ], // dummy command + + "preLaunchTask": "stop-all" + }, + + { + "presentation": { + "group": "4 Launch", + "order": 2 + }, + "name": "Launch Server without Modules", + "type": "coreclr", + "request": "launch", + "program": "/app/server/CodeProject.AI.Server.dll", + "args": [ + "--ModuleOptions:LaunchModules=false" + ], + "cwd": "/app/server/", + "stopAtEntry": false, + "serverReadyAction": { + "action": "openExternally", + "pattern": "\\bNow listening on:\\s+(https?://\\S+)", + "uriFormat": "http://localhost:%s/swagger" + }, + "env": { + "ASPNETCORE_ENVIRONMENT": "Development", + "RUNNING_IN_VSCODE": "true", + "DOTNET_NOLOGO": "true" + }, + "logging": { + "engineLogging": false, + "moduleLoad": false, + "exceptions": true, + "browserStdOut": false + } + }, + + // Launch Individual -------------------------------------------------- + + { + "presentation": { + "group": "5 Launch Individual", + "hidden": false + }, + "name": "ALPR", + "type": "python", + "python": "python3.8", + "request": "launch", + "program": "ALPR_adapter.py", + "console": "integratedTerminal", + "cwd": "/app/modules/ALPR", + "justMyCode": false, + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168" + } + }, + + { + "presentation": { + "group": "5 Launch Individual", + "hidden": false + }, + "name": "Cartooniser", + "type": "python", + "python": "python3.9", + "request": "launch", + "program": "cartooniser_adapter.py", + "console": "integratedTerminal", + "cwd": "/app/modules/Cartooniser", + "justMyCode": false, + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168", + "WEIGHTS_FOLDER": "/app/modules/Cartooniser/weights" + } + }, + + { + "presentation": { + "group": "5 Launch Individual", + "hidden": false + }, + "name": "Face Processing", + "type": "python", + "python": "python3.8", + "request": "launch", + "program": "intelligencelayer/face.py", + "console": "integratedTerminal", + "cwd": "/app/preinstalled-modules/FaceProcessing/", + "justMyCode": false, + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168" + } + }, + + { + "presentation": { + "group": "5 Launch Individual", + 
"hidden": false + }, + "name": "Object Detect .NET", + "type": "coreclr", + "request": "launch", + "program": "/app/preinstalled-modules/ObjectDetectionNet/ObjectDetectionNet.dll", + "args": [], + "cwd": "/app/modules/ObjectDetectionNet", + "stopAtEntry": false, + "console": "internalConsole", + "requireExactSource": false, + "justMyCode": false, + "env": { + "ASPNETCORE_ENVIRONMENT": "Development", + "RUNNING_IN_VSCODE": "true", + "DOTNET_NOLOGO": "true", + "CPAI_MODULE_QUEUENAME": "objectdetection_queue" + } + }, + + { + "presentation": { + "group": "5 Launch Individual", + "hidden": false + }, + "name": "Object Detect TFLite", + "type": "python", + "request": "launch", + "python": "python3.9", + "program": "objectdetection_tflite_adapter.py", + "cwd": "/app/modules/ObjectDetectionTFLite", + "console": "integratedTerminal", + "justMyCode": false, + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168", + "CPAI_MODULE_QUEUENAME": "objectdetection_queue" + } + }, + + { + "presentation": { + "group": "5 Launch Individual", + "hidden": false + }, + "name": "Object Detect YOLO 6.2", + "type": "python", + "python": "python3.8", + "request": "launch", + "program": "detect_adapter.py", + "console": "integratedTerminal", + "cwd": "/app/preinstalled-modules/ObjectDetectionYolo", + "justMyCode": false, + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168", + "CPAI_MODULE_QUEUENAME": "objectdetection_queue" + } + }, + + { + "presentation": { + "group": "5 Launch Individual", + "hidden": false + }, + "name": "Object Detect YOLO 3.1", + "type": "python", + "python": "/app/modules/YOLOv5-3.1/bin/linux/python38/venv/bin/python", + "request": "launch", + "program": "yolo_adapter.py", + "console": "integratedTerminal", + "cwd": "/app/modules/YOLOv5-3.1", + "justMyCode": false, + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168" + } + }, + + { + "presentation": { + "group": "5 Launch Individual", + "hidden": false + }, + "name": "OCR", + "type": "python", + "python": "/app/modules/OCR/bin/linux/python38/venv/bin/python", + "request": "launch", + "program": "OCR_adapter.py", + "console": "integratedTerminal", + "cwd": "/app/modules/OCR", + "justMyCode": false, + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168" + } + }, + + { + "presentation": { + "group": "5 Launch Individual", + "hidden": false + }, + "name": "Scene Classifier", + "type": "python", + "python": "/app/modules/SceneClassifier/bin/linux/python38/venv/bin/python", + "request": "launch", + "program": "scene_adapter.py", + "console": "integratedTerminal", + "cwd": "/app/modules/SceneClassifier", + "justMyCode": false, + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168" + } + }, + + { + "presentation": { + "group": "5 Launch Individual", + "hidden": false + }, + "name": "Super Resolution", + "type": "python", + "python": "/app/modules/SuperResolution/bin/linux/python38/venv/bin/python", + "request": "launch", + "program": "superres_adapter.py", + "console": "integratedTerminal", + "cwd": "/app/modules/SuperResolution", + "justMyCode": false, + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168" + } + }, + + { + "presentation": { + "group": "5 Launch Individual", + "hidden": false + }, + "name": "SentimentAnalysis", + "type": "coreclr", + "request": "launch", + "program": 
"/app/modules/SentimentAnalysis/SentimentAnalysis", + "args": [], + "cwd": "${workspaceFolder}", + // "cwd": "/app/modules/SentimentAnalysis", - causes an exception. WTF. See HACK. + "stopAtEntry": false, + "console": "internalConsole", + "requireExactSource": false, + "justMyCode": false, + "env": { + "ASPNETCORE_ENVIRONMENT": "Development", + "RUNNING_IN_VSCODE": "true", + "DOTNET_NOLOGO": "true", + "MODELS_DIR": "/app/modules/SentimentAnalysis/sentiment_model" + } + }, + + { + "presentation": { + "group": "5 Launch Individual", + "hidden": false + }, + "name": "TextSummary", + "type": "python", + "python": "/app/modules/TextSummary/bin/linux/python38/venv/bin/python", + "request": "launch", + "program": "summary_adapter.py", + "console": "integratedTerminal", + "cwd": "/app/modules/TextSummary", + "justMyCode": false, + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168" + } + } + ] +} \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json index 6a315c2d..d92d1a0b 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -31,14 +31,6 @@ "RUNNING_IN_VSCODE": "true", "DOTNET_NOLOGO": "true" } - /* - "logging": { - "engineLogging": false, - "moduleLoad": false, - "exceptions": false, - "browserStdOut": false - } - */ }, { "presentation": { @@ -232,7 +224,7 @@ // "program": "dotnet", // dummy command // "args": [ "--version" ], // dummy command - "preLaunchTask": "stop-all", + "preLaunchTask": "stop-all" }, { "presentation": { @@ -324,6 +316,35 @@ } }, + { + "presentation": { + "group": "5 Launch Individual", + "hidden": false + }, + "name": "Background Remover", + "type": "python", + "python": "${workspaceFolder}/src/modules/BackgroundRemover/bin/linux/python39/venv/bin/python", + "request": "launch", + "program": "rembg_adapter.py", + "console": "integratedTerminal", + "cwd": "${workspaceFolder}/src/modules/BackgroundRemover", + "justMyCode": false, + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168" + }, + "windows": { + "python": "${workspaceFolder}/src/modules/BackgroundRemover/bin/windows/python39/venv/Scripts/python.exe", + }, + "linux": { + "python": "${workspaceFolder}/src/modules/BackgroundRemover/bin/linux/python39/venv/bin/python", + }, + "osx": { + "python": "${workspaceFolder}/src/modules/BackgroundRemover/bin/macos/python39/venv/bin/python", + } + }, + { "presentation": { "group": "5 Launch Individual", @@ -384,6 +405,36 @@ } }, + { + "presentation": { + "group": "5 Launch Individual", + "hidden": false + }, + "name": "Object Detect Coral", + "type": "python", + "python": "${workspaceFolder}/src/modules/ObjectDetectionCoral/bin/linux/python39/venv/bin/python", + "request": "launch", + "program": "objectdetection_coral_adapter.py", + "console": "integratedTerminal", + "cwd": "${workspaceFolder}/src/modules/ObjectDetectionCoral", + "justMyCode": false, + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168", + "CPAI_MODULE_QUEUENAME": "objectdetection_queue" + }, + "windows": { + "python": "${workspaceFolder}/src/modules/ObjectDetectionCoral/bin/windows/python37/venv/Scripts/python.exe", + }, + "linux": { + "python": "${workspaceFolder}/src/modules/ObjectDetectionCoral/bin/linux/python39/venv/bin/python", + }, + "osx": { + "python": "${workspaceFolder}/src/modules/ObjectDetectionCoral/bin/macos/python39/venv/bin/python", + } + }, + { "presentation": { "group": "5 Launch Individual", @@ -397,7 +448,7 @@ "linux": { "program": 
"${workspaceFolder}/src/modules/ObjectDetectionNet/bin/Debug/net7.0/ObjectDetectionNet.dll", }, - "args": [], + // "args": [ "--selftest" ], "cwd": "${workspaceFolder}", // "cwd": "${workspaceFolder}/src/modules/ObjectDetectionNet", - causes an exception. WTF. See HACK. "stopAtEntry": false, @@ -412,6 +463,37 @@ } }, + + { + "presentation": { + "group": "5 Launch Individual", + "hidden": false + }, + "name": "Object Detect YOLO RKNN", + "type": "python", + "python": "${workspaceFolder}/src/modules/ObjectDetectionYoloRKNN/bin/windows/python39/venv/Scripts/python.exe", + "request": "launch", + "program": "objectdetection_fd_rknn_adapter.py", + "console": "integratedTerminal", + "cwd": "${workspaceFolder}/src/modules/ObjectDetectionYoloRKNN", + "justMyCode": false, + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168", + "CPAI_MODULE_QUEUENAME": "objectdetection_queue" + }, + "windows": { + "python": "${workspaceFolder}/src/modules/ObjectDetectionYoloRKNN/bin/windows/python39/venv/Scripts/python.exe", + }, + "linux": { + "python": "${workspaceFolder}/src/modules/ObjectDetectionYoloRKNN/bin/linux/python39/venv/bin/python", + }, + "osx": { + "python": "${workspaceFolder}/src/modules/ObjectDetectionYoloRKNN/bin/macos/python39/venv/bin/python", + } + }, + { "presentation": { "group": "5 Launch Individual", @@ -420,7 +502,7 @@ "name": "Object Detect TFLite", "type": "python", "request": "launch", - "python": "${workspaceFolder}\\src\\modules\\ObjectDetectionTFLite\\bin\\windows\\python39\\venv\\Scripts\\python.exe", + "python": "${workspaceFolder}/src/modules/ObjectDetectionTFLite/bin/windows/python39/venv/Scripts/python.exe", "program": "objectdetection_tflite_adapter.py", "cwd": "${workspaceFolder}/src/modules/ObjectDetectionTFLite", "console": "integratedTerminal", @@ -429,10 +511,11 @@ "DEBUG_IN_VSCODE": "True", "RUNNING_IN_VSCODE": "True", "CPAI_PORT": "32168", - "CPAI_MODULE_QUEUENAME": "objectdetection_queue" + "CPAI_MODULE_QUEUENAME": "objectdetection_queue", + "MODEL_SIZE": "tiny" }, "windows": { - "python": "${workspaceFolder}\\src\\modules\\ObjectDetectionTFLite\\bin\\windows\\python39\\venv\\Scripts\\python.exe", + "python": "${workspaceFolder}/src/modules/ObjectDetectionTFLite/bin/windows/python39/venv/Scripts/python.exe", }, "linux": { "python": "${workspaceFolder}/src/modules/ObjectDetectionTFLite/bin/linux/python39/venv/bin/python", @@ -449,12 +532,13 @@ }, "name": "Object Detect YOLO 6.2", "type": "python", - "python": "${workspaceFolder}/src/modules/bin/linux/python38/venv/bin/python", + "python": "${workspaceFolder}/src/runtimes/bin/linux/python38/venv/bin/python", "request": "launch", "program": "detect_adapter.py", "console": "integratedTerminal", "cwd": "${workspaceFolder}/src/modules/ObjectDetectionYolo", "justMyCode": false, + // "args": [ "--selftest" ], "env": { "DEBUG_IN_VSCODE": "True", "RUNNING_IN_VSCODE": "True", @@ -462,13 +546,13 @@ "CPAI_MODULE_QUEUENAME": "objectdetection_queue" }, "windows": { - "python": "${workspaceFolder}/src/modules/bin/windows/python37/venv/Scripts/python" + "python": "${workspaceFolder}/src/runtimes/bin/windows/python37/venv/Scripts/python" }, "linux": { - "python": "${workspaceFolder}/src/modules/bin/linux/python38/venv/bin/python", + "python": "${workspaceFolder}/src/runtimes/bin/linux/python38/venv/bin/python", }, "osx": { - "python": "${workspaceFolder}/src/modules/bin/macos/python38/venv/bin/python", + "python": "${workspaceFolder}/src/runtimes/bin/macos/python38/venv/bin/python", } }, @@ -508,7 
+592,7 @@ }, "name": "OCR", "type": "python", - "python": "${workspaceFolder}/src/modules/OCR/bin/linux/python38/venv/bin/python", + "python": "${workspaceFolder}/src/modules/OCR/bin/windows/python37/venv/Scripts/python", "request": "launch", "program": "OCR_adapter.py", "console": "integratedTerminal", @@ -520,14 +604,15 @@ "CPAI_PORT": "32168" }, "windows": { - "python": "${workspaceFolder}/src/modules/OCR/bin/windows/python37/venv/Scripts/python" - }, + "python": "${workspaceFolder}/src/modules/OCR/bin/windows/python37/venv/Scripts/python", + }/*, "linux": { "python": "${workspaceFolder}/src/modules/OCR/bin/linux/python38/venv/bin/python", }, "osx": { "python": "${workspaceFolder}/src/modules/OCR/bin/macos/python38/venv/bin/python", } + */ }, { @@ -537,7 +622,7 @@ }, "name": "Scene Classifier", "type": "python", - "python": "${workspaceFolder}/src/modules/bin/linux/python38/venv/bin/python", + "python": "${workspaceFolder}/src/modules/SceneClassifier/bin/linux/python38/venv/bin/python", "request": "launch", "program": "scene_adapter.py", "console": "integratedTerminal", @@ -549,13 +634,13 @@ "CPAI_PORT": "32168" }, "windows": { - "python": "${workspaceFolder}/src/modules/bin/windows/python37/venv/Scripts/python" + "python": "${workspaceFolder}/src/modules/SceneClassifier/bin/windows/python37/venv/Scripts/python" }, "linux": { - "python": "${workspaceFolder}/src/modules/bin/linux/python38/venv/bin/python", + "python": "${workspaceFolder}/src/modules/SceneClassifier/bin/linux/python38/venv/bin/python", }, "osx": { - "python": "${workspaceFolder}/src/modules/bin/macos/python38/venv/bin/python", + "python": "${workspaceFolder}/src/modules/SceneClassifier/bin/macos/python38/venv/bin/python", } }, @@ -632,7 +717,7 @@ "CPAI_PORT": "32168" }, "windows": { - "python": "${workspaceFolder}\\src\\modules\\TextSummary\\bin\\windows\\python37\\venv\\Scripts\\python.exe" + "python": "${workspaceFolder}/src/modules/TextSummary/bin/windows/python37/venv/Scripts/python.exe" }, "linux": { "python": "${workspaceFolder}/src/modules/TextSummary/bin/linux/python38/venv/bin/python", @@ -642,6 +727,64 @@ } }, + { + "presentation": { + "group": "5 Launch Individual", + "hidden": false + }, + "name": "Training YOLOv5", + "type": "python", + "python": "${workspaceFolder}/src/modules/TrainingYoloV5/bin/macos/python38/venv/bin/python", + "request": "launch", + "program": "TrainingYoloV5.py", + "console": "integratedTerminal", + "cwd": "${workspaceFolder}/src/modules/TrainingYoloV5", + // "args": [ "--selftest" ], + "justMyCode": false, + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168", + + "YOLOv5_AUTOINSTALL": "false", + "YOLOv5_VERBOSE": "false", + + "CPAI_MODULE_REQUIRED_MB": "7000", + + "YOLO_DATASETS_DIRNAME": "datasets", + "YOLO_TRAINING_DIRNAME": "training", + "YOLO_WEIGHTS_DIRNAME": "weights", + "YOLO_MODELS_DIRNAME": "assets", + "YOLO_DATASET_ZOO_DIRNAME": "zoo" + }, + "windows": { + "python": "${workspaceFolder}/src/modules/TrainingYoloV5/bin/windows/python39/venv/Scripts/python.exe" + }, + "linux": { + "python": "${workspaceFolder}/src/modules/TrainingYoloV5/bin/linux/python38/venv/bin/python", + }, + "osx": { + "python": "${workspaceFolder}/src/modules/TrainingYoloV5/bin/macos/python38/venv/bin/python", + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168", + + "CPAI_MODULE_SUPPORT_GPU": "False", // https://github.com/ultralytics/yolov5/issues/11235 + + "YOLOv5_AUTOINSTALL": "false", + "YOLOv5_VERBOSE": "false", + + 
"YOLO_DATASETS_DIRNAME": "datasets", + "YOLO_TRAINING_DIRNAME": "training", + "YOLO_WEIGHTS_DIRNAME": "weights", + "YOLO_MODELS_DIRNAME": "assets", + "YOLO_DATASET_ZOO_DIRNAME": "zoo" + } + } + }, + + // See https://code.visualstudio.com/docs/editor/variables-reference#_settings-command-variables-and-input-variables // For variables that can be used { @@ -764,6 +907,37 @@ "DEBUG_IN_VSCODE": "True", "RUNNING_IN_VSCODE": "True" } + }, + + + + { + "presentation": { + "group": "6 Launch Demo", + "hidden": false + }, + "name": "Racoon detector", + "type": "python", + "python": "${workspaceFolder}/src/runtimes/bin/windows/python38/venv/bin/python", + "request": "launch", + "program": "racoon_detect.py", + "console": "integratedTerminal", + "cwd": "${workspaceFolder}/demos/Python/ObjectDetect", + "justMyCode": false, + "env": { + "DEBUG_IN_VSCODE": "True", + "RUNNING_IN_VSCODE": "True", + "CPAI_PORT": "32168" + }, + "windows": { + "python": "${workspaceFolder}/src/runtimes/bin/windows/python39/venv/Scripts/python.exe" + }, + "linux": { + "python": "${workspaceFolder}/src/runtimes/bin/linux/python38/venv/bin/python", + }, + "osx": { + "python": "${workspaceFolder}/src/runtimes/bin/macos/python38/venv/bin/python", + } } ] } \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..4e7bfa8f --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,49 @@ +{ + "cSpell.words": [ + "alpr", + "appsettings", + "astype", + "aync", + "callbacktask", + "codeproject", + "CPAI", + "Cuda", + "Denoising", + "deskew", + "dtype", + "edgetpu", + "fiftyone", + "fouo", + "hostbuilder", + "hyps", + "imwrite", + "Initialises", + "installconfig", + "integerize", + "licence", + "logvals", + "modulesettings", + "objectdetection", + "Onnx", + "opencv", + "otsu", + "paddleocr", + "platenumber", + "pluralise", + "pycoral", + "QUEUENAME", + "reqid", + "reqtype", + "reso", + "runtimes", + "skia", + "selftest", + "textreader", + "tflite", + "ufeff", + "unclip", + "wwwroot", + "YOLO", + "yolov5" + ] +} \ No newline at end of file diff --git a/.vscode/tasks.docker.json b/.vscode/tasks.docker.json new file mode 100644 index 00000000..1de88405 --- /dev/null +++ b/.vscode/tasks.docker.json @@ -0,0 +1,16 @@ +{ + "version": "2.0.0", + "tasks": [ + + // Launch apps ================================================================================================ + + { + "label": "stop-all", // Builds and Launches the AI server for Linux + "group": "none", + "type": "process", + "command": "bash", + "args": [ "/app/SDK/Scripts/stop_all.sh" ], + "problemMatcher": "$msCompile" + } + ] +} \ No newline at end of file diff --git a/CodeProject.AI.sln b/CodeProject.AI.sln index c7ec75a3..d6463ba8 100644 --- a/CodeProject.AI.sln +++ b/CodeProject.AI.sln @@ -20,6 +20,8 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "demos", "demos", "{7F18EB64 EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "API", "API", "{2379A486-0D28-4CAD-BB13-E77FBA538E0D}" EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Installers", "Installers", "{D885EE64-C1BD-44D6-84D8-1E46806298D9}" +EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Javascript", "Javascript", "{3A860CDD-94B9-4002-BA08-87E8822DDE50}" ProjectSection(SolutionItems) = preProject demos\Javascript\Vision.html = demos\Javascript\Vision.html @@ -43,6 +45,8 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Frontend", "src\API\Server\ {033E966B-CBFD-4C79-AC06-78203C795EA8} = 
{033E966B-CBFD-4C79-AC06-78203C795EA8} {9CBBD8B2-B7CD-4DA0-A476-C1E858CAE5D7} = {9CBBD8B2-B7CD-4DA0-A476-C1E858CAE5D7} {C33D90E7-7570-46FB-9EB9-ED6B40A93A9B} = {C33D90E7-7570-46FB-9EB9-ED6B40A93A9B} + {F682C5F9-854F-4B4E-A7DE-33329F51A26B} = {F682C5F9-854F-4B4E-A7DE-33329F51A26B} + {F7056ECA-1C9C-4544-99CA-731C944651D6} = {F7056ECA-1C9C-4544-99CA-731C944651D6} EndProjectSection EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Backend", "src\API\Server\Backend\Backend.csproj", "{C93C22D7-4EB2-4EC0-A7F0-FBCFB9F6F72D}" @@ -58,18 +62,77 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "assets", "assets", "{CB26AB EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{460DB5C8-46F3-4407-A2DF-D9063D14493A}" ProjectSection(SolutionItems) = preProject + .editorconfig = .editorconfig global.json = global.json LICENCE.md = LICENCE.md README.md = README.md THIRD-PARTY-NOTICES.md = THIRD-PARTY-NOTICES.md EndProjectSection EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "_PRIVATE REPO", "_PRIVATE REPO", "{78509730-6FBA-44E5-98C0-083DB7F52027}" + ProjectSection(SolutionItems) = preProject + README.txt = README.txt + EndProjectSection +EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "SDK", "SDK", "{FF0C329F-41E8-4540-BCDB-97690911077D}" ProjectSection(SolutionItems) = preProject src\SDK\install.bat = src\SDK\install.bat src\SDK\install.sh = src\SDK\install.sh EndProjectSection EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Windows", "Windows", "{83C828B9-2B1E-4982-B4B7-69D173DFBB27}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Common", "Common", "{5F1052CB-8586-49CB-8F46-427A5F6901C2}" + ProjectSection(SolutionItems) = preProject + Installers\Windows\CodeProjectAI-install-BG.png = Installers\Windows\CodeProjectAI-install-BG.png + Installers\Windows\CodeProjectAI-install-LHS.png = Installers\Windows\CodeProjectAI-install-LHS.png + Installers\Windows\CodeProjectAI-install-sidebar.png = Installers\Windows\CodeProjectAI-install-sidebar.png + Installers\Windows\CodeProjectAI-install-topbanner.png = Installers\Windows\CodeProjectAI-install-topbanner.png + Installers\Windows\favicon.ico = Installers\Windows\favicon.ico + Installers\Windows\license.rtf = Installers\Windows\license.rtf + Installers\Windows\logo.png = Installers\Windows\logo.png + Installers\Windows\logoSide.png = Installers\Windows\logoSide.png + Installers\Windows\Sense-install-BG.png = Installers\Windows\Sense-install-BG.png + Installers\Windows\Sense-install-sidebar.png = Installers\Windows\Sense-install-sidebar.png + Installers\Windows\Sense-install-topbanner.png = Installers\Windows\Sense-install-topbanner.png + Installers\Windows\SharedDirectories.wxi = Installers\Windows\SharedDirectories.wxi + Installers\Windows\SharedProperties.wxi = Installers\Windows\SharedProperties.wxi + Installers\Windows\SharedUIConfiguration.wxi = Installers\Windows\SharedUIConfiguration.wxi + EndProjectSection +EndProject +Project("{930C7802-8A8C-48F9-8165-68863BCCD9DD}") = "CodeProject.AI.BootStrapper", "Installers\Windows\CodeProjectAI.BootStrapper\CodeProject.AI.BootStrapper.wixproj", "{C04BBD0D-FD36-4FA4-805B-106BCCD9BC79}" +EndProject +Project("{930C7802-8A8C-48F9-8165-68863BCCD9DD}") = "CodeProjectAI.Server.Installer", "Installers\Windows\CodeProjectAI.Server.Installer\CodeProjectAI.Server.Installer.wixproj", "{A1AFA75C-324E-4B79-BE13-5557E495FBBE}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = ".NET 
3.5.1", ".NET 3.5.1", "{8A423F72-C92C-4C8E-87D8-02849FD079E2}" + ProjectSection(SolutionItems) = preProject + Installers\Windows\.NET 3.5.1\Download .NET Framework 3.5 SP1.url = Installers\Windows\.NET 3.5.1\Download .NET Framework 3.5 SP1.url + EndProjectSection +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Docker", "Docker", "{FB0561D3-4AF8-415A-85B4-E4E9ADDC3DB2}" + ProjectSection(SolutionItems) = preProject + .wslconfig = .wslconfig + Installers\Docker\BuildAndPush.bat = Installers\Docker\BuildAndPush.bat + Installers\Docker\BuildAndPush.sh = Installers\Docker\BuildAndPush.sh + Installers\Docker\BuildArm64.bat = Installers\Docker\BuildArm64.bat + Installers\Docker\BuildArm64.sh = Installers\Docker\BuildArm64.sh + Installers\Docker\BuildCPU.bat = Installers\Docker\BuildCPU.bat + Installers\Docker\BuildCPU.sh = Installers\Docker\BuildCPU.sh + Installers\Docker\BuildGPU.bat = Installers\Docker\BuildGPU.bat + Installers\Docker\BuildGPU.sh = Installers\Docker\BuildGPU.sh + Installers\Docker\BuildRPi.bat = Installers\Docker\BuildRPi.bat + Installers\Docker\BuildRPi.sh = Installers\Docker\BuildRPi.sh + Installers\Docker\Dockerfile = Installers\Docker\Dockerfile + Installers\Docker\Dockerfile-Arm64 = Installers\Docker\Dockerfile-Arm64 + Installers\Docker\DockerFile-GPU = Installers\Docker\DockerFile-GPU + Installers\Docker\Dockerfile-GPU-cuDNN = Installers\Docker\Dockerfile-GPU-cuDNN + Installers\Docker\Dockerfile-RPi64 = Installers\Docker\Dockerfile-RPi64 + Installers\Docker\DockerPush.bat = Installers\Docker\DockerPush.bat + Installers\Docker\README = Installers\Docker\README + EndProjectSection +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "GetAspNetCoreVersionAction", "Installers\Windows\GetAspNetCoreVersionAction\GetAspNetCoreVersionAction.csproj", "{C3E39164-5120-41C6-8902-2598DD0EBCD0}" +EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "License plates", "License plates", "{D320EA6C-2388-41F7-A4D1-980192665A61}" ProjectSection(SolutionItems) = preProject demos\TestData\License plates\10.jpg = demos\TestData\License plates\10.jpg @@ -138,6 +201,8 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Objects", "Objects", "{4ED5 demos\TestData\Objects\traffic-pexels-photo-297927.jpeg = demos\TestData\Objects\traffic-pexels-photo-297927.jpeg EndProjectSection EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CudaVersionCustomAction", "Installers\Windows\CudaVersionCustomAction\CudaVersionCustomAction.csproj", "{214949E0-B56C-4F23-809A-07DA4DBDF925}" +EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "CodeProject.AI.API.Server.Backend.Tests", "tests\QueueServiceTests\CodeProject.AI.API.Server.Backend.Tests.csproj", "{031F17E0-BE84-42AF-B9FE-4F928CB03D1B}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Python", "Python", "{37533562-EC4C-4FB4-8C42-FE327D1D79BD}" @@ -154,6 +219,11 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "modules", "modules", "{1C7E src\modules\readme.txt = src\modules\readme.txt EndProjectSection EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "macOS", "macOS", "{31DA8C15-C038-4667-89AB-74FED47D7B51}" + ProjectSection(SolutionItems) = preProject + Installers\macOS\create macOS installer.html = Installers\macOS\create macOS installer.html + EndProjectSection +EndProject Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "ALPR", "src\modules\ALPR\ALPR.pyproj", "{6AE28B59-221B-4E3D-A66C-E255B26DAC82}" EndProject 
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "NET", "src\SDK\NET\NET.csproj", "{F7056ECA-1C9C-4544-99CA-731C944651D6}" @@ -188,21 +258,25 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Scripts", "Scripts", "{95BF EndProject Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "BackgroundRemover", "src\modules\BackgroundRemover\BackgroundRemover.pyproj", "{470D3417-36A4-49A4-B719-496466FA92FC}" EndProject -Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "SceneClassifier", "src\modules\SceneClassifier\SceneClassifier.pyproj", "{08A2DD62-D65A-47A8-AB9D-55D18DBC74D6}" +Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "Cartooniser", "src\modules\Cartooniser\Cartooniser.pyproj", "{470D3417-36A4-49A4-B719-496466FA92FE}" EndProject -Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "SuperResolution", "src\modules\SuperResolution\SuperResolution.pyproj", "{A472B309-3C77-4DE5-8F03-AA81938EEFB4}" +Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "FaceProcessing", "src\modules\FaceProcessing\intelligencelayer\FaceProcessing.pyproj", "{E5D27495-EE4F-4AAF-8749-A6BA848111E2}" EndProject -Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "TextSummary", "src\modules\TextSummary\TextSummary.pyproj", "{470D3417-36A4-49A4-B719-496466FA92FB}" +Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "ObjectDetectionCoral", "src\modules\ObjectDetectionCoral\ObjectDetectionCoral.pyproj", "{470D3417-36A4-49A4-B719-496477FA92FB}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ObjectDetectionNet", "src\modules\ObjectDetectionNet\ObjectDetectionNet.csproj", "{F682C5F9-854F-4B4E-A7DE-33329F51A26B}" EndProject Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "ObjectDetectionYolo", "src\modules\ObjectDetectionYolo\ObjectDetectionYolo.pyproj", "{B6A1D372-264E-4F66-B7FB-7FF19587476F}" EndProject -Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "FaceProcessing", "src\modules\FaceProcessing\intelligencelayer\FaceProcessing.pyproj", "{E5D27495-EE4F-4AAF-8749-A6BA848111E2}" +Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "ObjectDetectionTFLite", "src\modules\ObjectDetectionTFLite\ObjectDetectionTFLite.pyproj", "{4C40A443-6A02-43F1-BD33-8F1A73349CDA}" EndProject -Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "Cartooniser", "src\modules\Cartooniser\Cartooniser.pyproj", "{470D3417-36A4-49A4-B719-496466FA92FE}" +Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "SceneClassifier", "src\modules\SceneClassifier\SceneClassifier.pyproj", "{08A2DD62-D65A-47A8-AB9D-55D18DBC74D6}" EndProject -Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "ObjectDetectionTFLite", "src\modules\ObjectDetectionTFLite\ObjectDetectionTFLite.pyproj", "{470D3417-36A4-49A4-B719-496477FA92FB}" +Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "SuperResolution", "src\modules\SuperResolution\SuperResolution.pyproj", "{A472B309-3C77-4DE5-8F03-AA81938EEFB4}" +EndProject +Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "TextSummary", "src\modules\TextSummary\TextSummary.pyproj", "{470D3417-36A4-49A4-B719-496466FA92FB}" +EndProject +Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "TrainingYoloV5", "src\modules\TrainingYoloV5\TrainingYoloV5.pyproj", "{2DFDA382-189B-45D1-94D5-3004D1AEB73C}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution @@ -262,6 +336,51 @@ Global {C93C22D7-4EB2-4EC0-A7F0-FBCFB9F6F72D}.Release|ARM64.Build.0 = Release|ARM64 {C93C22D7-4EB2-4EC0-A7F0-FBCFB9F6F72D}.Release|x86.ActiveCfg = Release|Any CPU 
{C93C22D7-4EB2-4EC0-A7F0-FBCFB9F6F72D}.Release|x86.Build.0 = Release|Any CPU + {C04BBD0D-FD36-4FA4-805B-106BCCD9BC79}.Debug|Any CPU.ActiveCfg = Debug|x86 + {C04BBD0D-FD36-4FA4-805B-106BCCD9BC79}.Debug|ARM64.ActiveCfg = Debug|x86 + {C04BBD0D-FD36-4FA4-805B-106BCCD9BC79}.Debug|ARM64.Build.0 = Debug|x86 + {C04BBD0D-FD36-4FA4-805B-106BCCD9BC79}.Debug|x86.ActiveCfg = Debug|x86 + {C04BBD0D-FD36-4FA4-805B-106BCCD9BC79}.Debug|x86.Build.0 = Debug|x86 + {C04BBD0D-FD36-4FA4-805B-106BCCD9BC79}.Release|Any CPU.ActiveCfg = Release|x86 + {C04BBD0D-FD36-4FA4-805B-106BCCD9BC79}.Release|Any CPU.Build.0 = Release|x86 + {C04BBD0D-FD36-4FA4-805B-106BCCD9BC79}.Release|ARM64.ActiveCfg = Release|x86 + {C04BBD0D-FD36-4FA4-805B-106BCCD9BC79}.Release|ARM64.Build.0 = Release|x86 + {C04BBD0D-FD36-4FA4-805B-106BCCD9BC79}.Release|x86.ActiveCfg = Release|x86 + {C04BBD0D-FD36-4FA4-805B-106BCCD9BC79}.Release|x86.Build.0 = Release|x86 + {A1AFA75C-324E-4B79-BE13-5557E495FBBE}.Debug|Any CPU.ActiveCfg = Debug|x86 + {A1AFA75C-324E-4B79-BE13-5557E495FBBE}.Debug|ARM64.ActiveCfg = Debug|x86 + {A1AFA75C-324E-4B79-BE13-5557E495FBBE}.Debug|ARM64.Build.0 = Debug|x86 + {A1AFA75C-324E-4B79-BE13-5557E495FBBE}.Debug|x86.ActiveCfg = Debug|x86 + {A1AFA75C-324E-4B79-BE13-5557E495FBBE}.Debug|x86.Build.0 = Debug|x86 + {A1AFA75C-324E-4B79-BE13-5557E495FBBE}.Release|Any CPU.ActiveCfg = Release|x86 + {A1AFA75C-324E-4B79-BE13-5557E495FBBE}.Release|Any CPU.Build.0 = Release|x86 + {A1AFA75C-324E-4B79-BE13-5557E495FBBE}.Release|ARM64.ActiveCfg = Release|x86 + {A1AFA75C-324E-4B79-BE13-5557E495FBBE}.Release|ARM64.Build.0 = Release|x86 + {A1AFA75C-324E-4B79-BE13-5557E495FBBE}.Release|x86.ActiveCfg = Release|x86 + {A1AFA75C-324E-4B79-BE13-5557E495FBBE}.Release|x86.Build.0 = Release|x86 + {C3E39164-5120-41C6-8902-2598DD0EBCD0}.Debug|Any CPU.ActiveCfg = Debug|x86 + {C3E39164-5120-41C6-8902-2598DD0EBCD0}.Debug|Any CPU.Build.0 = Debug|x86 + {C3E39164-5120-41C6-8902-2598DD0EBCD0}.Debug|ARM64.ActiveCfg = Debug|ARM64 + {C3E39164-5120-41C6-8902-2598DD0EBCD0}.Debug|ARM64.Build.0 = Debug|ARM64 + {C3E39164-5120-41C6-8902-2598DD0EBCD0}.Debug|x86.ActiveCfg = Debug|x86 + {C3E39164-5120-41C6-8902-2598DD0EBCD0}.Debug|x86.Build.0 = Debug|x86 + {C3E39164-5120-41C6-8902-2598DD0EBCD0}.Release|Any CPU.ActiveCfg = Release|x86 + {C3E39164-5120-41C6-8902-2598DD0EBCD0}.Release|Any CPU.Build.0 = Release|x86 + {C3E39164-5120-41C6-8902-2598DD0EBCD0}.Release|ARM64.ActiveCfg = Release|ARM64 + {C3E39164-5120-41C6-8902-2598DD0EBCD0}.Release|ARM64.Build.0 = Release|ARM64 + {C3E39164-5120-41C6-8902-2598DD0EBCD0}.Release|x86.ActiveCfg = Release|x86 + {C3E39164-5120-41C6-8902-2598DD0EBCD0}.Release|x86.Build.0 = Release|x86 + {214949E0-B56C-4F23-809A-07DA4DBDF925}.Debug|Any CPU.ActiveCfg = Debug|x86 + {214949E0-B56C-4F23-809A-07DA4DBDF925}.Debug|ARM64.ActiveCfg = Debug|ARM64 + {214949E0-B56C-4F23-809A-07DA4DBDF925}.Debug|ARM64.Build.0 = Debug|ARM64 + {214949E0-B56C-4F23-809A-07DA4DBDF925}.Debug|x86.ActiveCfg = Debug|x86 + {214949E0-B56C-4F23-809A-07DA4DBDF925}.Debug|x86.Build.0 = Debug|x86 + {214949E0-B56C-4F23-809A-07DA4DBDF925}.Release|Any CPU.ActiveCfg = Release|x86 + {214949E0-B56C-4F23-809A-07DA4DBDF925}.Release|Any CPU.Build.0 = Release|x86 + {214949E0-B56C-4F23-809A-07DA4DBDF925}.Release|ARM64.ActiveCfg = Release|ARM64 + {214949E0-B56C-4F23-809A-07DA4DBDF925}.Release|ARM64.Build.0 = Release|ARM64 + {214949E0-B56C-4F23-809A-07DA4DBDF925}.Release|x86.ActiveCfg = Release|x86 + {214949E0-B56C-4F23-809A-07DA4DBDF925}.Release|x86.Build.0 = Release|x86 
{031F17E0-BE84-42AF-B9FE-4F928CB03D1B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {031F17E0-BE84-42AF-B9FE-4F928CB03D1B}.Debug|Any CPU.Build.0 = Debug|Any CPU {031F17E0-BE84-42AF-B9FE-4F928CB03D1B}.Debug|ARM64.ActiveCfg = Debug|ARM64 @@ -310,7 +429,7 @@ Global {0690D5F7-864F-4347-8E20-FA9903CE56EB}.Release|Any CPU.ActiveCfg = Release|Any CPU {0690D5F7-864F-4347-8E20-FA9903CE56EB}.Release|ARM64.ActiveCfg = Release|Any CPU {0690D5F7-864F-4347-8E20-FA9903CE56EB}.Release|x86.ActiveCfg = Release|Any CPU - {B6A1D372-264E-4F66-B7FB-7FF19587476E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {B6A1D372-264E-4F66-B7FB-7FF19587476E}.Debug|Any CPU.ActiveCfg = Release|Any CPU {B6A1D372-264E-4F66-B7FB-7FF19587476E}.Debug|ARM64.ActiveCfg = Debug|Any CPU {B6A1D372-264E-4F66-B7FB-7FF19587476E}.Debug|x86.ActiveCfg = Debug|Any CPU {B6A1D372-264E-4F66-B7FB-7FF19587476E}.Release|Any CPU.ActiveCfg = Release|Any CPU @@ -352,24 +471,24 @@ Global {470D3417-36A4-49A4-B719-496466FA92FC}.Release|Any CPU.ActiveCfg = Release|Any CPU {470D3417-36A4-49A4-B719-496466FA92FC}.Release|ARM64.ActiveCfg = Release|Any CPU {470D3417-36A4-49A4-B719-496466FA92FC}.Release|x86.ActiveCfg = Release|Any CPU - {08A2DD62-D65A-47A8-AB9D-55D18DBC74D6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {08A2DD62-D65A-47A8-AB9D-55D18DBC74D6}.Debug|ARM64.ActiveCfg = Debug|Any CPU - {08A2DD62-D65A-47A8-AB9D-55D18DBC74D6}.Debug|x86.ActiveCfg = Debug|Any CPU - {08A2DD62-D65A-47A8-AB9D-55D18DBC74D6}.Release|Any CPU.ActiveCfg = Release|Any CPU - {08A2DD62-D65A-47A8-AB9D-55D18DBC74D6}.Release|ARM64.ActiveCfg = Release|Any CPU - {08A2DD62-D65A-47A8-AB9D-55D18DBC74D6}.Release|x86.ActiveCfg = Release|Any CPU - {A472B309-3C77-4DE5-8F03-AA81938EEFB4}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {A472B309-3C77-4DE5-8F03-AA81938EEFB4}.Debug|ARM64.ActiveCfg = Debug|Any CPU - {A472B309-3C77-4DE5-8F03-AA81938EEFB4}.Debug|x86.ActiveCfg = Debug|Any CPU - {A472B309-3C77-4DE5-8F03-AA81938EEFB4}.Release|Any CPU.ActiveCfg = Release|Any CPU - {A472B309-3C77-4DE5-8F03-AA81938EEFB4}.Release|ARM64.ActiveCfg = Release|Any CPU - {A472B309-3C77-4DE5-8F03-AA81938EEFB4}.Release|x86.ActiveCfg = Release|Any CPU - {470D3417-36A4-49A4-B719-496466FA92FB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {470D3417-36A4-49A4-B719-496466FA92FB}.Debug|ARM64.ActiveCfg = Debug|Any CPU - {470D3417-36A4-49A4-B719-496466FA92FB}.Debug|x86.ActiveCfg = Debug|Any CPU - {470D3417-36A4-49A4-B719-496466FA92FB}.Release|Any CPU.ActiveCfg = Release|Any CPU - {470D3417-36A4-49A4-B719-496466FA92FB}.Release|ARM64.ActiveCfg = Release|Any CPU - {470D3417-36A4-49A4-B719-496466FA92FB}.Release|x86.ActiveCfg = Release|Any CPU + {470D3417-36A4-49A4-B719-496466FA92FE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {470D3417-36A4-49A4-B719-496466FA92FE}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {470D3417-36A4-49A4-B719-496466FA92FE}.Debug|x86.ActiveCfg = Debug|Any CPU + {470D3417-36A4-49A4-B719-496466FA92FE}.Release|Any CPU.ActiveCfg = Release|Any CPU + {470D3417-36A4-49A4-B719-496466FA92FE}.Release|ARM64.ActiveCfg = Release|Any CPU + {470D3417-36A4-49A4-B719-496466FA92FE}.Release|x86.ActiveCfg = Release|Any CPU + {E5D27495-EE4F-4AAF-8749-A6BA848111E2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {E5D27495-EE4F-4AAF-8749-A6BA848111E2}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {E5D27495-EE4F-4AAF-8749-A6BA848111E2}.Debug|x86.ActiveCfg = Debug|Any CPU + {E5D27495-EE4F-4AAF-8749-A6BA848111E2}.Release|Any CPU.ActiveCfg = Release|Any CPU + {E5D27495-EE4F-4AAF-8749-A6BA848111E2}.Release|ARM64.ActiveCfg = Release|Any CPU + 
{E5D27495-EE4F-4AAF-8749-A6BA848111E2}.Release|x86.ActiveCfg = Release|Any CPU + {470D3417-36A4-49A4-B719-496477FA92FB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {470D3417-36A4-49A4-B719-496477FA92FB}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {470D3417-36A4-49A4-B719-496477FA92FB}.Debug|x86.ActiveCfg = Debug|Any CPU + {470D3417-36A4-49A4-B719-496477FA92FB}.Release|Any CPU.ActiveCfg = Release|Any CPU + {470D3417-36A4-49A4-B719-496477FA92FB}.Release|ARM64.ActiveCfg = Release|Any CPU + {470D3417-36A4-49A4-B719-496477FA92FB}.Release|x86.ActiveCfg = Release|Any CPU {F682C5F9-854F-4B4E-A7DE-33329F51A26B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {F682C5F9-854F-4B4E-A7DE-33329F51A26B}.Debug|Any CPU.Build.0 = Debug|Any CPU {F682C5F9-854F-4B4E-A7DE-33329F51A26B}.Debug|ARM64.ActiveCfg = Debug|Any CPU @@ -388,24 +507,36 @@ Global {B6A1D372-264E-4F66-B7FB-7FF19587476F}.Release|Any CPU.ActiveCfg = Release|Any CPU {B6A1D372-264E-4F66-B7FB-7FF19587476F}.Release|ARM64.ActiveCfg = Release|Any CPU {B6A1D372-264E-4F66-B7FB-7FF19587476F}.Release|x86.ActiveCfg = Release|Any CPU - {E5D27495-EE4F-4AAF-8749-A6BA848111E2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {E5D27495-EE4F-4AAF-8749-A6BA848111E2}.Debug|ARM64.ActiveCfg = Debug|Any CPU - {E5D27495-EE4F-4AAF-8749-A6BA848111E2}.Debug|x86.ActiveCfg = Debug|Any CPU - {E5D27495-EE4F-4AAF-8749-A6BA848111E2}.Release|Any CPU.ActiveCfg = Release|Any CPU - {E5D27495-EE4F-4AAF-8749-A6BA848111E2}.Release|ARM64.ActiveCfg = Release|Any CPU - {E5D27495-EE4F-4AAF-8749-A6BA848111E2}.Release|x86.ActiveCfg = Release|Any CPU - {470D3417-36A4-49A4-B719-496466FA92FE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {470D3417-36A4-49A4-B719-496466FA92FE}.Debug|ARM64.ActiveCfg = Debug|Any CPU - {470D3417-36A4-49A4-B719-496466FA92FE}.Debug|x86.ActiveCfg = Debug|Any CPU - {470D3417-36A4-49A4-B719-496466FA92FE}.Release|Any CPU.ActiveCfg = Release|Any CPU - {470D3417-36A4-49A4-B719-496466FA92FE}.Release|ARM64.ActiveCfg = Release|Any CPU - {470D3417-36A4-49A4-B719-496466FA92FE}.Release|x86.ActiveCfg = Release|Any CPU - {470D3417-36A4-49A4-B719-496477FA92FB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {470D3417-36A4-49A4-B719-496477FA92FB}.Debug|ARM64.ActiveCfg = Debug|Any CPU - {470D3417-36A4-49A4-B719-496477FA92FB}.Debug|x86.ActiveCfg = Debug|Any CPU - {470D3417-36A4-49A4-B719-496477FA92FB}.Release|Any CPU.ActiveCfg = Release|Any CPU - {470D3417-36A4-49A4-B719-496477FA92FB}.Release|ARM64.ActiveCfg = Release|Any CPU - {470D3417-36A4-49A4-B719-496477FA92FB}.Release|x86.ActiveCfg = Release|Any CPU + {4C40A443-6A02-43F1-BD33-8F1A73349CDA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {4C40A443-6A02-43F1-BD33-8F1A73349CDA}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {4C40A443-6A02-43F1-BD33-8F1A73349CDA}.Debug|x86.ActiveCfg = Debug|Any CPU + {4C40A443-6A02-43F1-BD33-8F1A73349CDA}.Release|Any CPU.ActiveCfg = Release|Any CPU + {4C40A443-6A02-43F1-BD33-8F1A73349CDA}.Release|ARM64.ActiveCfg = Release|Any CPU + {4C40A443-6A02-43F1-BD33-8F1A73349CDA}.Release|x86.ActiveCfg = Release|Any CPU + {08A2DD62-D65A-47A8-AB9D-55D18DBC74D6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {08A2DD62-D65A-47A8-AB9D-55D18DBC74D6}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {08A2DD62-D65A-47A8-AB9D-55D18DBC74D6}.Debug|x86.ActiveCfg = Debug|Any CPU + {08A2DD62-D65A-47A8-AB9D-55D18DBC74D6}.Release|Any CPU.ActiveCfg = Release|Any CPU + {08A2DD62-D65A-47A8-AB9D-55D18DBC74D6}.Release|ARM64.ActiveCfg = Release|Any CPU + {08A2DD62-D65A-47A8-AB9D-55D18DBC74D6}.Release|x86.ActiveCfg = Release|Any CPU + {A472B309-3C77-4DE5-8F03-AA81938EEFB4}.Debug|Any 
CPU.ActiveCfg = Debug|Any CPU + {A472B309-3C77-4DE5-8F03-AA81938EEFB4}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {A472B309-3C77-4DE5-8F03-AA81938EEFB4}.Debug|x86.ActiveCfg = Debug|Any CPU + {A472B309-3C77-4DE5-8F03-AA81938EEFB4}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A472B309-3C77-4DE5-8F03-AA81938EEFB4}.Release|ARM64.ActiveCfg = Release|Any CPU + {A472B309-3C77-4DE5-8F03-AA81938EEFB4}.Release|x86.ActiveCfg = Release|Any CPU + {470D3417-36A4-49A4-B719-496466FA92FB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {470D3417-36A4-49A4-B719-496466FA92FB}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {470D3417-36A4-49A4-B719-496466FA92FB}.Debug|x86.ActiveCfg = Debug|Any CPU + {470D3417-36A4-49A4-B719-496466FA92FB}.Release|Any CPU.ActiveCfg = Release|Any CPU + {470D3417-36A4-49A4-B719-496466FA92FB}.Release|ARM64.ActiveCfg = Release|Any CPU + {470D3417-36A4-49A4-B719-496466FA92FB}.Release|x86.ActiveCfg = Release|Any CPU + {2DFDA382-189B-45D1-94D5-3004D1AEB73C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {2DFDA382-189B-45D1-94D5-3004D1AEB73C}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {2DFDA382-189B-45D1-94D5-3004D1AEB73C}.Debug|x86.ActiveCfg = Debug|Any CPU + {2DFDA382-189B-45D1-94D5-3004D1AEB73C}.Release|Any CPU.ActiveCfg = Release|Any CPU + {2DFDA382-189B-45D1-94D5-3004D1AEB73C}.Release|ARM64.ActiveCfg = Release|Any CPU + {2DFDA382-189B-45D1-94D5-3004D1AEB73C}.Release|x86.ActiveCfg = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -422,17 +553,26 @@ Global {C93C22D7-4EB2-4EC0-A7F0-FBCFB9F6F72D} = {93A23681-E9E8-4381-9EB5-8D178A0EE785} {CB26AB5B-DB85-4A59-A3AE-FA55A35D05B0} = {3A860CDD-94B9-4002-BA08-87E8822DDE50} {FF0C329F-41E8-4540-BCDB-97690911077D} = {A8B76501-496A-4011-9C37-8308A1EBDFA7} + {83C828B9-2B1E-4982-B4B7-69D173DFBB27} = {D885EE64-C1BD-44D6-84D8-1E46806298D9} + {5F1052CB-8586-49CB-8F46-427A5F6901C2} = {83C828B9-2B1E-4982-B4B7-69D173DFBB27} + {C04BBD0D-FD36-4FA4-805B-106BCCD9BC79} = {83C828B9-2B1E-4982-B4B7-69D173DFBB27} + {A1AFA75C-324E-4B79-BE13-5557E495FBBE} = {83C828B9-2B1E-4982-B4B7-69D173DFBB27} + {8A423F72-C92C-4C8E-87D8-02849FD079E2} = {83C828B9-2B1E-4982-B4B7-69D173DFBB27} + {FB0561D3-4AF8-415A-85B4-E4E9ADDC3DB2} = {D885EE64-C1BD-44D6-84D8-1E46806298D9} + {C3E39164-5120-41C6-8902-2598DD0EBCD0} = {83C828B9-2B1E-4982-B4B7-69D173DFBB27} {D320EA6C-2388-41F7-A4D1-980192665A61} = {B10B59B5-9F63-41C2-BFBB-6C7311DC4E99} {C5CC1B6F-14B1-41C1-A2F3-164B37BDCC0C} = {B10B59B5-9F63-41C2-BFBB-6C7311DC4E99} {49530738-22E7-4D2C-88FD-B20B68BF3A75} = {B10B59B5-9F63-41C2-BFBB-6C7311DC4E99} {FB31D291-AB9E-43E7-B92D-DBE33F6DD65A} = {B10B59B5-9F63-41C2-BFBB-6C7311DC4E99} {C2EFFA0A-E8EA-4AFE-8599-FC28CB7864FB} = {B10B59B5-9F63-41C2-BFBB-6C7311DC4E99} {4ED567B5-C28D-48BB-AEDC-864E2B2C7204} = {B10B59B5-9F63-41C2-BFBB-6C7311DC4E99} + {214949E0-B56C-4F23-809A-07DA4DBDF925} = {83C828B9-2B1E-4982-B4B7-69D173DFBB27} {031F17E0-BE84-42AF-B9FE-4F928CB03D1B} = {D982BD8C-2257-413B-8513-8043AB3035F3} {37533562-EC4C-4FB4-8C42-FE327D1D79BD} = {7F18EB64-C857-49C4-9380-70D3CCE6242B} {25750BF1-1502-4F65-8D69-CEA8C87D6446} = {37533562-EC4C-4FB4-8C42-FE327D1D79BD} {C2500118-FD99-49EF-B726-3E2A3B30A717} = {37533562-EC4C-4FB4-8C42-FE327D1D79BD} {1C7E0F81-1F4A-478B-80CE-4C41606DC087} = {A8B76501-496A-4011-9C37-8308A1EBDFA7} + {31DA8C15-C038-4667-89AB-74FED47D7B51} = {D885EE64-C1BD-44D6-84D8-1E46806298D9} {6AE28B59-221B-4E3D-A66C-E255B26DAC82} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} {F7056ECA-1C9C-4544-99CA-731C944651D6} = {FF0C329F-41E8-4540-BCDB-97690911077D} 
{0690D5F7-864F-4347-8E20-FA9903CE56EB} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} @@ -442,14 +582,16 @@ Global {0690D5F7-864F-4347-8E20-FA9903CE56EA} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} {95BF276E-3D5E-4CB2-9FEB-A77AF0C2728C} = {FF0C329F-41E8-4540-BCDB-97690911077D} {470D3417-36A4-49A4-B719-496466FA92FC} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} + {470D3417-36A4-49A4-B719-496466FA92FE} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} + {E5D27495-EE4F-4AAF-8749-A6BA848111E2} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} + {470D3417-36A4-49A4-B719-496477FA92FB} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} + {F682C5F9-854F-4B4E-A7DE-33329F51A26B} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} + {B6A1D372-264E-4F66-B7FB-7FF19587476F} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} + {4C40A443-6A02-43F1-BD33-8F1A73349CDA} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} {08A2DD62-D65A-47A8-AB9D-55D18DBC74D6} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} {A472B309-3C77-4DE5-8F03-AA81938EEFB4} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} {470D3417-36A4-49A4-B719-496466FA92FB} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} - {F682C5F9-854F-4B4E-A7DE-33329F51A26B} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} - {B6A1D372-264E-4F66-B7FB-7FF19587476F} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} - {E5D27495-EE4F-4AAF-8749-A6BA848111E2} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} - {470D3417-36A4-49A4-B719-496466FA92FE} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} - {470D3417-36A4-49A4-B719-496477FA92FB} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} + {2DFDA382-189B-45D1-94D5-3004D1AEB73C} = {1C7E0F81-1F4A-478B-80CE-4C41606DC087} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {83740BD9-AEEF-49C7-A722-D7703D3A38CB} diff --git a/WSL-README.md b/WSL-README.md new file mode 100644 index 00000000..f61bbd0d --- /dev/null +++ b/WSL-README.md @@ -0,0 +1,63 @@ +# Notes on developing under WSL + +This repository can be run under Windows or WSL at the same time. The setup.bat +and setup.sh scripts will install runtimes and virtual environments into specific +locations for the given OS. Virtual environments, for example, will live under +the /bin/windows folder for a module in Windows, and /bin/linux when under Linux. +This allows the same code and models to be worked on and tested under both +Operating Systems. + +Note that this means you will need VS Code installed in Windows and in the WSL +hosted Ubuntu (should you choose to use Ubuntu in WSL). This further requires +that each instance of VS Code has the necessary extensions installed. The +profile sync mechanism in VS Code makes this seamless. + +## Speed considerations + +To share the same files and code between WSL (Ubuntu) and Windows, you need to +have one environment point to files in another. Often this would mean that the +file system lives in Windows, and the WSL instance accesses the Windows hosted +files through the magic of WSL. + +Crossing the OS boundary will result in poor disk performance for the WSL +instance. Having a WSL instance of VS Code work on its own copy of this repo, +separate from the Windows instance, speeds disk access (eg PIP installs) +dramatically. + +## Space considerations + +If you choose to run separate copies of the code in WSL and Windows then it means +you are doubling up on the code, libraries, tools, and compiled executables. If +you have limited disk space this can be an issue. + +### To free up space + +To free up space you can use the clean.bat/clean.sh scripts under /src/SDK/Scripts. 
+
+To actually realise the freed-up space in WSL you will need to compact the VHD
+in which your WSL instance resides.
+
+In a Windows terminal:
+
+```cmd
+wsl --shutdown
+diskpart
+```
+
+This will shut down WSL and open a disk partition session in a new window. Locate
+the VHD file for WSL by heading to `%LOCALAPPDATA%\Packages` and looking for a
+folder similar to `CanonicalGroupLimited.Ubuntu_79rhkp1fndgsc` that contains a
+file `ext4.vhdx`.
+
+Within the session, enter the following (adjusting the `ext4.vhdx` location as needed):
+
+```text
+select vdisk file="%LOCALAPPDATA%\Packages\CanonicalGroupLimited.Ubuntu_79rhkp1fndgsc\LocalState\ext4.vhdx"
+attach vdisk readonly
+compact vdisk
+detach vdisk
+exit
+```
+
+Your WSL virtual hard drive should be smaller and the space that was used
+reclaimed by Windows.
\ No newline at end of file
diff --git a/demos/Python/Face/face.py b/demos/Python/Face/face.py
index c9ff6f2a..86e5e8fb 100644
--- a/demos/Python/Face/face.py
+++ b/demos/Python/Face/face.py
@@ -2,6 +2,7 @@
 import requests
 from PIL import Image
 from options import Options
+from .. import utils
 
 def main():
@@ -12,7 +13,7 @@ def main():
     image_data = open(filepath, "rb").read()
     image = Image.open(filepath).convert("RGB")
 
-    opts.cleanDetectedDir()
+    utils.cleanDir(opts.detectedDir)
 
     response = requests.post(opts.endpoint("vision/face"), files = {"image": image_data}).json()
diff --git a/demos/Python/Face/options.py b/demos/Python/Face/options.py
index 5586f5a6..1de16671 100644
--- a/demos/Python/Face/options.py
+++ b/demos/Python/Face/options.py
@@ -13,16 +13,3 @@ class Options:
 
     def endpoint(self, route) -> str:
         return self.serverUrl + route
-
-    def cleanDetectedDir(self) -> None:
-        # make sure the detected directory exists
-        if not os.path.exists(self.detectedDir):
-            os.mkdir(self.detectedDir)
-
-        # delete all the files in the output directory
-        filelist = os.listdir(self.detectedDir)
-        for filename in filelist:
-            try:
-                filepath = os.path.join(self.detectedDir, filename)
-                os.remove(filepath)
-            except:
-                pass
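The shared helper that replaces the deleted `cleanDetectedDir()` methods is not shown in this diff. As a rough sketch, `demos/Python/utils.py` presumably looks something like the following, reconstructed from the removed method body (the module path and function name are inferred from the `utils.cleanDir(...)` call sites):

```python
# Hypothetical demos/Python/utils.py -- not part of this diff; reconstructed
# from the cleanDetectedDir() implementation removed from options.py above.
import os

def cleanDir(directory: str) -> None:
    # make sure the directory exists
    if not os.path.exists(directory):
        os.mkdir(directory)

    # delete all the files in the directory
    for filename in os.listdir(directory):
        try:
            os.remove(os.path.join(directory, filename))
        except OSError:
            pass   # skip anything that can't be removed
```

Hoisting the cleanup out of each demo's `Options` class removes the duplicated implementations in `Face/options.py` and `ObjectDetect/options.py`, and lets a demo clean any directory rather than only `detectedDir`.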
import utils from options import Options def main(): @@ -9,7 +10,7 @@ def main(): minConfidence = 0.4 opts = Options() - opts.cleanDetectedDir() + utils.cleanDir(opts.detectedDir) imagedir = opts.imageDir + "/Objects" diff --git a/demos/Python/ObjectDetect/options.py b/demos/Python/ObjectDetect/options.py index d2fbb2ae..10773f78 100644 --- a/demos/Python/ObjectDetect/options.py +++ b/demos/Python/ObjectDetect/options.py @@ -11,25 +11,16 @@ class Options: # works for you rtsp_user = os.getenv("CPAI_RTSP_DEMO_USER", "User") rtsp_pass = os.getenv("CPAI_RTSP_DEMO_PASS", "Pass") - rtsp_IP = os.getenv("CPAI_RTSP_DEMO_IP", "10.0.0.204") + rtsp_IP = os.getenv("CPAI_RTSP_DEMO_IP", "10.0.0.198") rtsp_url = f"rtsp://{rtsp_user}:{rtsp_pass}@{rtsp_IP}/live" + email_server = os.getenv("CPAI_EMAIL_DEMO_SERVER", "smtp.gmail.com") + email_port = int(os.getenv("CPAI_EMAIL_DEMO_PORT", 587)) + email_acct = os.getenv("CPAI_EMAIL_DEMO_FROM", "me@gmail.com") + email_pwd = os.getenv("CPAI_EMAIL_DEMO_PWD", "password123") + # names of directories of interest detectedDir = "detected" def endpoint(self, route) -> str: return self.server_url + route - - def cleanDetectedDir(self) -> None: - # make sure the detected directory exists - if not os.path.exists(self.detectedDir): - os.mkdir(self.detectedDir) - - # delete all the files in the output directory - filelist = os.listdir(self.detectedDir) - for filename in filelist: - try: - filepath = os.path.join(self.detectedDir, filename) - os.remove(filepath) - except: - pass diff --git a/demos/Python/ObjectDetect/racoon_detect.py b/demos/Python/ObjectDetect/racoon_detect.py new file mode 100644 index 00000000..536952a6 --- /dev/null +++ b/demos/Python/ObjectDetect/racoon_detect.py @@ -0,0 +1,201 @@ +import base64 +from datetime import datetime +import io +from io import BytesIO +from typing import List, Tuple +import requests + +import imutils +from imutils.video import VideoStream +import cv2 + +import numpy as np +from PIL import Image, ImageDraw, ImageFont + +import smtplib +from email.mime.text import MIMEText +from email.mime.multipart import MIMEMultipart + +from options import Options +opts = Options() + +recipient = "alerts@acme_security.com" # Sucker who deals with the reports +model_name = "critters" # Model we'll use +intruders = [ "racoon", "squirrel", "skunk" ] # Things we care about +secs_between_checks = 5 # Min secs between sending a frame to CodeProject.AI +secs_between_alerts = 300 # Min secs between sending alerts (don't spam!) + +# Set to any time that's over an hour old +last_check_time = datetime(1999, 11, 15, 0, 0, 0) +last_alert_time = datetime(1999, 11, 15, 0, 0, 0) + +def do_detection(image: Image, intruders: List[str]) -> "(Image, str)": + + """ + Performs object detection on an image and returns an image with the objects + that were detected outlined, as well as a de-duped list of objects detected. 
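+ The image is POSTed to the server's custom-model endpoint ('vision/custom/' + model_name); see opts.endpoint() in options.py.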
+ If nothing detected, image and list of objects are both returned as None + """ + + # Convert to format suitable for a POST + buf = io.BytesIO() + image.save(buf, format='JPEG') + buf.seek(0) + + # Better to have a session object created once at the start and closed at + # the end, but we keep the code simpler here for demo purposes + with requests.Session() as session: + response = session.post(opts.endpoint("vision/custom/" + model_name), + files={"image": ('image.png', buf, 'image/png') }, + data={"min_confidence": 0.5}).json() + + # Get the predictions (but be careful of a null or missing return) + predictions = response.get("predictions", None) + + detected_list = [] + + if predictions: + # Draw each bounding box that was returned by the AI engine + # font = ImageFont.load_default() + font_size = 25 + padding = 5 + font = ImageFont.truetype("arial.ttf", font_size) + draw = ImageDraw.Draw(image) + + for prediction in predictions: + label = prediction["label"] + conf = prediction["confidence"] + y_max = int(prediction["y_max"]) + y_min = int(prediction["y_min"]) + x_max = int(prediction["x_max"]) + x_min = int(prediction["x_min"]) + + draw.rectangle([(x_min, y_min), (x_max, y_max)], outline="red", width=5) + draw.rectangle([(x_min, y_min - 2*padding - font_size), + (x_max, y_min)], fill="red", outline="red") + draw.text((x_min + padding, y_min - padding - font_size), + f"{label} {round(conf*100.0,0)}%", font=font) + + # We're looking for specific objects. Build a deduped list + # containing only the objects we're interested in. + if label in intruders and label not in detected_list: + detected_list.append(label) + + # All done. Did we find any objects we were interested in? + if detected_list: + return image, ', '.join(detected_list) + + return None, None + + +def report_intruder(image: Image, objects_detected: str, recipient: str) -> None: + + # time since we last sent an alert + global last_alert_time + seconds_since_last_alert = (datetime.now() - last_alert_time).total_seconds() + + # Only send an alert if there's been sufficient time since the last alert + if seconds_since_last_alert > secs_between_alerts: + + # Simple console output + timestamp = datetime.now().strftime("%d %b %Y %I:%M:%S %p") + print(f"{timestamp} Intruder or intruders detected: {objects_detected}") + + # Send an email alert as well + with BytesIO() as buffered: + image.save(buffered, format="JPEG") + img_dataB64_bytes : bytes = base64.b64encode(buffered.getvalue()) + img_dataB64 : str = img_dataB64_bytes.decode("ascii") + + message_html = "<html><body>An intruder was detected. Please review this image<br>" \ + f"<img src='data:image/jpeg;base64,{img_dataB64}'></body></html>" + message_text = "An intruder was detected. We're all doomed!" + + send_email(opts.email_acct, opts.email_pwd, recipient, "Intruder Alert!", + message_text, message_html) + + # Could send an SMS or a tweet. Whatever takes your fancy... + + last_alert_time = datetime.now() + + +def send_email(sender, pwd, recipient, subject, message_text, message_html): + + msg = MIMEMultipart('alternative') + msg['From'] = sender + msg['To'] = recipient + msg['Subject'] = subject + + text = MIMEText(message_text, 'plain') + html = MIMEText(message_html, 'html') + msg.attach(text) + msg.attach(html) + + server = None + try: + server = smtplib.SMTP(opts.email_server, opts.email_port) + server.ehlo() + server.starttls() + server.ehlo() + server.login(sender, pwd) + server.send_message(msg, sender, [recipient]) + except Exception as ex: + print(f"Error sending email: {ex}") + finally: + if server is not None: + server.quit() + + +def main(): + + # Open the RTSP stream + vs = VideoStream(opts.rtsp_url).start() + + while True: + + # Grab a frame at a time + frame = vs.read() + if frame is None: + continue + + objects_detected = "" + + # Let's not send an alert *every* time we see an object, otherwise we'll + # get an endless stream of emails, fractions of a second apart + global last_check_time + seconds_since_last_check = (datetime.now() - last_check_time).total_seconds() + + if seconds_since_last_check >= secs_between_checks: + # You may need to convert the colour space. + # image: Image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) + image: Image = Image.fromarray(frame) + (image, objects_detected) = do_detection(image, intruders) + + # Replace the webcam feed's frame with our image that includes the + # object bounding boxes + if image: + frame = np.asarray(image) + + last_check_time = datetime.now() + + # Resize and display the frame on the screen + if frame is not None: + frame = imutils.resize(frame, width = 1200) + cv2.imshow('WyzeCam', frame) + + if objects_detected: + # Shrink the image to reduce email size + frame = imutils.resize(frame, width = 600) + image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) + report_intruder(image, objects_detected, recipient) + + # Wait for the user to hit 'q' for quit + key = cv2.waitKey(1) & 0xFF + if key == ord('q'): + break + + # Clean up and we're outta here.
+ cv2.destroyAllWindows() + vs.stop() + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/demos/Python/utils.py b/demos/Python/utils.py new file mode 100644 index 00000000..1c35daa2 --- /dev/null +++ b/demos/Python/utils.py @@ -0,0 +1,15 @@ +import os + +def cleanDir(dir: str) -> None: + # make sure the detected directory exists + if not os.path.exists(dir): + os.mkdir(dir) + + # delete all the files in the output directory + filelist = os.listdir(dir) + for filename in filelist: + try: + filepath = os.path.join(dir, filename) + os.remove(filepath) + except: + pass diff --git a/demos/TestData/Objects/parrot.jpg b/demos/TestData/Objects/parrot.jpg new file mode 100644 index 00000000..1ced7b37 Binary files /dev/null and b/demos/TestData/Objects/parrot.jpg differ diff --git a/demos/TestData/Objects/pexels-photo-414476.jpeg b/demos/TestData/Objects/pexels-photo-414476.jpeg new file mode 100644 index 00000000..b44719a8 Binary files /dev/null and b/demos/TestData/Objects/pexels-photo-414476.jpeg differ diff --git a/demos/TestData/Objects/quail.JPG b/demos/TestData/Objects/quail.JPG new file mode 100644 index 00000000..4c2cf2f1 Binary files /dev/null and b/demos/TestData/Objects/quail.JPG differ diff --git a/demos/dotNet/CodeProject.AI.Explorer/CodeProject.AI.ApiClient.cs b/demos/dotNet/CodeProject.AI.Explorer/CodeProject.AI.ApiClient.cs index a10a7ec4..abf18a05 100644 --- a/demos/dotNet/CodeProject.AI.Explorer/CodeProject.AI.ApiClient.cs +++ b/demos/dotNet/CodeProject.AI.Explorer/CodeProject.AI.ApiClient.cs @@ -76,10 +76,11 @@ public async Task Ping() try { var content = new MultipartFormDataContent(); - using var httpResponse = await Client.GetAsync("status/ping"); + using var httpResponse = await Client.GetAsync("status/ping").ConfigureAwait(false); httpResponse.EnsureSuccessStatusCode(); - response = await httpResponse.Content.ReadFromJsonAsync(); + response = await httpResponse.Content.ReadFromJsonAsync() + .ConfigureAwait(false); response ??= new ErrorResponse("No response from the server"); } catch (Exception ex) @@ -112,10 +113,12 @@ public async Task DetectFaces(string image_path) request.Add(content, "image", Path.GetFileName(image_path)); - using var httpResponse = await Client.PostAsync("vision/face", request); + using var httpResponse = await Client.PostAsync("vision/face", request) + .ConfigureAwait(false); httpResponse.EnsureSuccessStatusCode(); - response = await httpResponse.Content.ReadFromJsonAsync(); + response = await httpResponse.Content.ReadFromJsonAsync() + .ConfigureAwait(false); response ??= new ErrorResponse("No response from the server"); } @@ -154,10 +157,12 @@ public async Task MatchFaces(string image1FileName, string image2F request.Add(new StreamContent(image1_data), "image1", Path.GetFileName(image1FileName)); request.Add(new StreamContent(image2_data), "image2", Path.GetFileName(image2FileName)); - using var httpResponse = await Client.PostAsync("vision/face/match", request); + using var httpResponse = await Client.PostAsync("vision/face/match", request) + .ConfigureAwait(false); httpResponse.EnsureSuccessStatusCode(); - response = await httpResponse.Content.ReadFromJsonAsync(); + response = await httpResponse.Content.ReadFromJsonAsync() + .ConfigureAwait(false); response ??= new ErrorResponse("No response from the server"); } catch (Exception ex) @@ -188,10 +193,12 @@ public async Task DetectScene(string image_path) request.Add(new StreamContent(image_data), "image", Path.GetFileName(image_path)); - using var httpResponse = await 
Client.PostAsync("vision/scene", request); + using var httpResponse = await Client.PostAsync("vision/scene", request) + .ConfigureAwait(false); httpResponse.EnsureSuccessStatusCode(); - response = await httpResponse.Content.ReadFromJsonAsync(); + response = await httpResponse.Content.ReadFromJsonAsync() + .ConfigureAwait(false); response ??= new ErrorResponse("No response from the server"); } catch (Exception ex) @@ -223,12 +230,16 @@ public async Task DetectObjects(string image_path) request.Add(new StreamContent(image_data), "image", Path.GetFileName(image_path)); - using var httpResponse = await Client.PostAsync("vision/detection", request); + using var httpResponse = await Client.PostAsync("vision/detection", request) + .ConfigureAwait(false); httpResponse.EnsureSuccessStatusCode(); - response = await httpResponse.Content.ReadFromJsonAsync(); - //var json = await httpResponse.Content.ReadAsStringAsync(); - //response = System.Text.Json.JsonSerializer.Deserialize(json); + response = await httpResponse.Content.ReadFromJsonAsync() + .ConfigureAwait(false); + + // var json = await httpResponse.Content.ReadAsStringAsync(); + // response = System.Text.Json.JsonSerializer.Deserialize(json); + response ??= new ErrorResponse("No response from the server"); } catch (Exception ex) @@ -260,11 +271,12 @@ public async Task CustomDetectObjects(string modelName, string ima request.Add(new StreamContent(image_data), "image", Path.GetFileName(image_path)); - using var httpResponse = await Client.PostAsync("vision/custom", request); + using var httpResponse = await Client.PostAsync("vision/custom", request) + .ConfigureAwait(false); httpResponse.EnsureSuccessStatusCode(); // response = await httpResponse.Content.ReadFromJsonAsync(); - var json = await httpResponse.Content.ReadAsStringAsync(); + var json = await httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false); response = System.Text.Json.JsonSerializer.Deserialize(json); response ??= new ErrorResponse("No response from the server"); } @@ -309,11 +321,12 @@ public async Task RegisterFace(string userId, Path.GetFileName(filename)); } - using var httpResponse = await Client.PostAsync("vision/face/register", request); + using var httpResponse = await Client.PostAsync("vision/face/register", request) + .ConfigureAwait(false); httpResponse.EnsureSuccessStatusCode(); // response = await httpResponse.Content.ReadFromJsonAsync(); - var json = await httpResponse.Content.ReadAsStringAsync(); + var json = await httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false); response = System.Text.Json.JsonSerializer.Deserialize(json); response ??= new ErrorResponse("No response from the server"); } @@ -351,10 +364,12 @@ public async Task RecognizeFace(string? 
filename, if (minConfidence.HasValue) request.Add(new StringContent(minConfidence.Value.ToString()), "min_confidence"); - using var httpResponse = await Client.PostAsync("vision/face/recognize", request); + using var httpResponse = await Client.PostAsync("vision/face/recognize", request) + .ConfigureAwait(false); httpResponse.EnsureSuccessStatusCode(); - response = await httpResponse.Content.ReadFromJsonAsync(); + response = await httpResponse.Content.ReadFromJsonAsync() + .ConfigureAwait(false); response ??= new ErrorResponse("No response from the server"); } catch (Exception ex) @@ -378,10 +393,12 @@ public async Task DeleteRegisteredFace(string userId) { new StringContent(userId), "userid" } }; - using var httpResponse = await Client.PostAsync("vision/face/delete", request); + using var httpResponse = await Client.PostAsync("vision/face/delete", request) + .ConfigureAwait(false); httpResponse.EnsureSuccessStatusCode(); - response = await httpResponse.Content.ReadFromJsonAsync(); + response = await httpResponse.Content.ReadFromJsonAsync() + .ConfigureAwait(false); response ??= new ErrorResponse("No response from the server"); } catch (Exception ex) @@ -398,12 +415,14 @@ public async Task ListRegisteredFaces() try { #pragma warning disable CS8625 // Cannot convert null literal to non-nullable reference type. - using var httpResponse = await Client.PostAsync("vision/face/list", null); + using var httpResponse = await Client.PostAsync("vision/face/list", null) + .ConfigureAwait(false); #pragma warning restore CS8625 // Cannot convert null literal to non-nullable reference type. httpResponse.EnsureSuccessStatusCode(); - response = await httpResponse.Content.ReadFromJsonAsync(); + response = await httpResponse.Content.ReadFromJsonAsync() + .ConfigureAwait(false); response ??= new ErrorResponse("No response from the server"); } catch (Exception ex) diff --git a/demos/dotNet/CodeProject.AI.Explorer/Form1.cs b/demos/dotNet/CodeProject.AI.Explorer/Form1.cs index 6700acf3..42680c3f 100644 --- a/demos/dotNet/CodeProject.AI.Explorer/Form1.cs +++ b/demos/dotNet/CodeProject.AI.Explorer/Form1.cs @@ -8,8 +8,7 @@ using System.Windows.Forms; using CodeProject.AI.API.Common; - -using SkiaSharp; +using CodeProject.AI.SDK; using SkiaSharp.Views.Desktop; namespace CodeProject.AI.Demo.Explorer @@ -39,7 +38,7 @@ public Form1() } private async void Ping(object sender, EventArgs e) { - var response = await _AIService.Ping(); + var response = await _AIService.Ping().ConfigureAwait(false); if (_serverLive != response.success) { _serverLive = response.success; @@ -153,7 +152,7 @@ private async void DetectFaceBtn_Click(object sender, EventArgs e) return; } - var result = await _AIService.DetectFaces(_imageFileName); + var result = await _AIService.DetectFaces(_imageFileName).ConfigureAwait(false); if (result is DetectFacesResponse detectedFaces) { Image? 
image = GetImage(_imageFileName); @@ -210,7 +209,8 @@ private async void CompareFacesBtn_Click(object sender, EventArgs e) return; } - var result = await _AIService.MatchFaces(_faceImageFileName1, _faceImageFileName2); + var result = await _AIService.MatchFaces(_faceImageFileName1, _faceImageFileName2) + .ConfigureAwait(false); if (result is MatchFacesResponse matchedFaces) { detectionResult.Text = $"Similarity: {Math.Round(matchedFaces.similarity, 4)}"; @@ -233,7 +233,7 @@ private async void DetectSceneBtn_Click(object sender, EventArgs e) return; } - var result = await _AIService.DetectScene(_imageFileName); + var result = await _AIService.DetectScene(_imageFileName).ConfigureAwait(false); if (result is DetectSceneResponse detectedScene) { var image = GetImage(_imageFileName); @@ -260,7 +260,7 @@ private async void DetectObjectsBtn_Click(object sender, EventArgs e) } Stopwatch stopwatch = Stopwatch.StartNew(); - var result = await _AIService.DetectObjects(_imageFileName); + ResponseBase result = await _AIService.DetectObjects(_imageFileName).ConfigureAwait(false); stopwatch.Stop(); if (result is DetectObjectsResponse detectedObjects) @@ -336,7 +336,8 @@ private async void RegisterFaceBtn_Click(object sender, EventArgs e) return; } - var result = await _AIService.RegisterFace(UserIdTextbox.Text, _registerFileNames); + var result = await _AIService.RegisterFace(UserIdTextbox.Text, _registerFileNames) + .ConfigureAwait(false); if (result is RegisterFaceResponse registeredFace) SetStatus("Registration complete"); else @@ -359,7 +360,7 @@ private async void RecognizeFaceBtn_Click(object sender, EventArgs e) if(float.TryParse(MinConfidence.Text, out float parsedConfidence)) minConfidence = parsedConfidence; - var result = await _AIService.RecognizeFace(filename, minConfidence); + var result = await _AIService.RecognizeFace(filename, minConfidence).ConfigureAwait(false); if (result is RecognizeFacesResponse recognizeFace) { try @@ -411,7 +412,7 @@ private async void ListFacesBtn_Click(object sender, EventArgs e) ClearResults(); SetStatus("Listing known faces"); - var result = await _AIService.ListRegisteredFaces(); + var result = await _AIService.ListRegisteredFaces().ConfigureAwait(false); if (result is ListRegisteredFacesResponse registeredFaces) { if (result?.success ?? false) @@ -456,7 +457,8 @@ private async void DeleteFaceBtn_Click(object sender, EventArgs e) ClearResults(); SetStatus("Deleting registered face"); - var result = await _AIService.DeleteRegisteredFace(UserIdTextbox.Text); + var result = await _AIService.DeleteRegisteredFace(UserIdTextbox.Text) + .ConfigureAwait(false); if (result?.success ?? false) SetStatus("Completed Face deletion"); else @@ -513,7 +515,7 @@ private async void RunBenchmark(bool useCustom) : _AIService.DetectObjects(_benchmarkFileName); taskList.Add(task); } - await Task.WhenAll(taskList); + await Task.WhenAll(taskList).ConfigureAwait(false); sw.Stop(); BenchmarkResults.Text = $"Benchmark: {Math.Round(nIterations / (sw.ElapsedMilliseconds/ 1000.0), 2)} FPS"; @@ -562,7 +564,7 @@ private void ProcessError(ResponseBase? result) /// SkiaSharp handles more image formats than System.Drawing. private Image? GetImage(string filename) { - var skiaImage = SKImage.FromEncodedData(filename); + var skiaImage = ImageUtils.GetImage(filename); if (skiaImage is null) return null; diff --git a/global.json b/global.json index 9a7235f0..e702da27 100644 --- a/global.json +++ b/global.json @@ -1,6 +1,17 @@ { "sdk": { - //"version": "7.0.202", + // BE CAREFUL: If you pin the version, then + // 1. The SDK you pin it to must be available on every supported platform + // That means Windows 10+, macOS (Intel and arm64), Ubuntu 20.04+ and RPi64. + // 2. You have to update the docs / article to ensure it is clear to developers + // which SDK they need + // 3. There should be a check in the dev setup script ensuring that users have + // this version, or higher, installed on their dev system. + // + // If you can't do 1 and 2 then you cannot pin the version. + // + // "version": "7.0.203", + //"rollForward": "latestFeature", "allowPrerelease": false } diff --git a/src/API/Common/BackendRouteMap.cs b/src/API/Common/BackendRouteMap.cs index 5e4fdefa..ff4d059e 100644 --- a/src/API/Common/BackendRouteMap.cs +++ b/src/API/Common/BackendRouteMap.cs @@ -148,7 +148,7 @@ public class RouteQueueInfo public string QueueName { get; private set; } /// - /// Gets the command identifier which distiguishes the backend operations to perform based + /// Gets the command identifier which distinguishes the backend operations to perform based /// on the frontend endpoint. /// public string Command { get; private set; } diff --git a/src/API/Common/ModuleCollection.cs b/src/API/Common/ModuleCollection.cs index 1883bc0c..55200bb8 100644 --- a/src/API/Common/ModuleCollection.cs +++ b/src/API/Common/ModuleCollection.cs @@ -25,9 +25,9 @@ public ModuleCollection() : base(StringComparer.OrdinalIgnoreCase) { } } /// - /// Holds information on which versions of a server a given module is compatible with. + /// Holds information on a given release of a module. /// - public class VersionCompatibility + public class ModuleRelease { /// /// The version of a module /// public string? ModuleVersion { get; set; } /// - /// The Inclusive range of server versions for which this module vresion can be installed on + /// The inclusive range of server versions on which this module version can be installed /// public string[]? ServerVersionRange { get; set; } /// /// The date this version was released /// - public string? ReleaseDate { get; set; } + public string? ReleaseDate { get; set; } + + /// + /// Any notes associated with this release + /// + public string? ReleaseNotes { get; set; } + + /// + /// Gets or sets a string indicating how important this update is. + /// + public string? Importance { get; set; } } /// @@ -66,6 +76,12 @@ public class ModuleBase /// public string[] Platforms { get; set; } = Array.Empty(); + /// + /// Gets or sets the number of MB of memory needed for this module to perform operations. + /// If null, then no checks are done. + /// + public int? RequiredMb { get; set; } + /// /// Gets or sets the Description for the module. /// @@ -80,7 +96,16 @@ public class ModuleBase /// Gets or sets the list of module versions and the server version that matches /// each of these versions. /// - public VersionCompatibility[] VersionCompatibililty { get; set; } = Array.Empty(); + public ModuleRelease[] ModuleReleases { get; set; } = Array.Empty(); + + /// + /// Gets or sets the legacy structure containing a list of module versions and the server + /// version that matches each of these versions.
This name is deprecated and is only here so + we can read old modulesettings files. Once read, these values will be transferred to + ModuleReleases. Deprecated not just because it was a bad name, but also because it was + a badly *spelled* name. + /// + public ModuleRelease[] VersionCompatibililty { get; set; } = Array.Empty(); /// /// Gets or sets the current version. @@ -261,8 +286,16 @@ public string SettingsSummary { get { + // Allow the module path to wrap. + // var path = ModulePath.Replace("\\", "\\"); + // path = path.Replace("/", "/"); + + // or not... + var path = ModulePath; + var summary = new StringBuilder(); summary.AppendLine($"Module '{Name}' (ID: {ModuleId})"); + summary.AppendLine($"Module Path: {path}"); summary.AppendLine($"AutoStart: {AutoStart}"); summary.AppendLine($"Queue: {Queue}"); summary.AppendLine($"Platforms: {string.Join(',', Platforms)}"); @@ -313,7 +346,7 @@ public static class ModuleConfigExtensions /// 'pre-install' them in situations like a Docker image. We pre-install modules in a /// separate folder than the downloaded and installed modules in order to avoid conflicts /// (in Docker) when a user maps a local folder to the modules dir. Doing this to the 'pre - /// insalled' dir would make the contents (the preinstalled modules) disappear. + /// installed' dir would make the contents (the preinstalled modules) disappear. public static void Initialise(this ModuleConfig module, string moduleId, string modulesPath, string preInstalledModulesPath) { @@ -333,11 +366,16 @@ public static void Initialise(this ModuleConfig module, string moduleId, string if (module.LogVerbosity == LogVerbosity.Unknown) module.LogVerbosity = LogVerbosity.Info; - // Transfer old legacy value to new replacement property if it exists, and no new value was set + // Transfer old legacy value to new replacement property if it exists, and no new value + // was set if (module.Activate is not null && module.AutoStart is null) module.AutoStart = module.Activate; + if ((module.VersionCompatibililty?.Length ?? 0) > 0 && (module.ModuleReleases?.Length ?? 0) == 0) + module!.ModuleReleases = module!.VersionCompatibililty!; - module.Activate = null; + // No longer used. These properties are still here to allow us to load legacy config files. + module.Activate = null; + module.VersionCompatibililty = Array.Empty(); } /// @@ -358,20 +396,20 @@ public static bool Available(this ModuleConfig module, string platform, string? bool versionOK = string.IsNullOrWhiteSpace(currentServerVersion); if (!versionOK) { - if (module.VersionCompatibililty?.Any() ?? false) + if (module.ModuleReleases?.Any() ?? false) { - foreach (VersionCompatibility version in module.VersionCompatibililty) + foreach (ModuleRelease release in module.ModuleReleases) { - if (version.ServerVersionRange is null || version.ServerVersionRange.Length < 2) + if (release.ServerVersionRange is null || release.ServerVersionRange.Length < 2) continue; - string? minServerVersion = version.ServerVersionRange[0]; - string? maxServerVersion = version.ServerVersionRange[1]; + string? minServerVersion = release.ServerVersionRange[0]; + string? maxServerVersion = release.ServerVersionRange[1]; if (string.IsNullOrEmpty(minServerVersion)) minServerVersion = "0.0"; if (string.IsNullOrEmpty(maxServerVersion)) maxServerVersion = currentServerVersion; - if (version.ModuleVersion == module.Version && + if (release.ModuleVersion == module.Version && VersionInfo.Compare(minServerVersion, currentServerVersion) <= 0 && VersionInfo.Compare(maxServerVersion, currentServerVersion) >= 0) { @@ -380,8 +418,10 @@ public static bool Available(this ModuleConfig module, string platform, string? } } } - else // old modules will not have VersionCompatibility, but we are backward compatible + else // old modules will not have ModuleReleases, but we are backward compatible + { versionOK = true; + } } // Second check: Is this module available on this platform? @@ -456,6 +496,49 @@ public static void UpsertSetting(this ModuleConfig module, string name, } } + /* Not possible until we have this in the same project as ModuleSettings (which is in + FrontEnd) or we refactor this extension class into a true class that is initialised + with a ref to the ModuleSettings class. + + /// + /// Gets a text summary of the settings for this module. + /// + public static string SettingsSummary(this ModuleConfig module, ModuleSettings moduleSettings, + string? currentModulePath = null) + { + var summary = new StringBuilder(); + summary.AppendLine($"Module '{module.Name}' (ID: {module.ModuleId})"); + summary.AppendLine($"AutoStart: {module.AutoStart}"); + summary.AppendLine($"Queue: {module.Queue}"); + summary.AppendLine($"Platforms: {string.Join(',', module.Platforms)}"); + summary.AppendLine($"GPU: Support {((module.SupportGPU == true)? "enabled" : "disabled")}"); + summary.AppendLine($"Parallelism: {module.Parallelism}"); + summary.AppendLine($"Accelerator: {module.AcceleratorDeviceName}"); + summary.AppendLine($"Half Precis.: {module.HalfPrecision}"); + summary.AppendLine($"Runtime: {module.Runtime}"); + summary.AppendLine($"Runtime Loc: {module.RuntimeLocation}"); + summary.AppendLine($"FilePath: {module.FilePath}"); + summary.AppendLine($"Pre installed: {module.PreInstalled}"); + //summary.AppendLine($"Module Dir: {module.ModulePath}"); + summary.AppendLine($"Start pause: {module.PostStartPauseSecs} sec"); + summary.AppendLine($"LogVerbosity: {module.LogVerbosity}"); + summary.AppendLine($"Valid: {module.Valid}"); + summary.AppendLine($"Environment Variables"); + + if (module.EnvironmentVariables is not null) + { + int maxLength = module.EnvironmentVariables.Max(x => x.Key.ToString().Length); + foreach (var envVar in module.EnvironmentVariables) + { + var value = moduleSettings.ExpandOption(envVar.Value, currentModulePath); + summary.AppendLine($" {envVar.Key.PadRight(maxLength)} = {value}"); + } + } + + return summary.ToString().Trim(); + } + */ + /// /// Sets or updates a value in the settings Json structure. /// @@ -469,63 +552,89 @@ public static bool UpsertSettings(JsonObject? settings, string moduleId, if (settings is null) return false; - if (!settings.ContainsKey("Modules") || settings["Modules"] is null) - settings["Modules"] = new JsonObject(); - - JsonObject?
allModules = settings["Modules"] as JsonObject; - allModules ??= new JsonObject(); - - if (!allModules.ContainsKey(moduleId) || allModules[moduleId] is null) - allModules[moduleId] = new JsonObject(); - - var moduleSettings = (JsonObject)allModules[moduleId]!; - - // Handle pre-defined global values first - if (name.EqualsIgnoreCase("Activate") || name.EqualsIgnoreCase("AutoStart")) - { - moduleSettings["AutoStart"] = value?.ToLower() == "true"; - } - else if (name.EqualsIgnoreCase("SupportGPU")) - { - moduleSettings["SupportGPU"] = value?.ToLower() == "true"; - } - else if (name.EqualsIgnoreCase("Parallelism")) + // Lots of try/catch since this has been a point of issue and it's good to narrow it down + try { - if (int.TryParse(value, out int parallelism)) - moduleSettings["Parallelism"] = parallelism; + if (!settings.ContainsKey("Modules") || settings["Modules"] is null) + settings["Modules"] = new JsonObject(); } - else if (name.EqualsIgnoreCase("UseHalfPrecision")) + catch (Exception e) { - moduleSettings["HalfPrecision"] = value; + Console.WriteLine($"Failed to create root modules object in settings: {e.Message}"); + return false; } - else if (name.EqualsIgnoreCase("AcceleratorDeviceName")) + + JsonObject? allModules = null; + try { - moduleSettings["AcceleratorDeviceName"] = value; + allModules = settings["Modules"] as JsonObject; + allModules ??= new JsonObject(); + + if (!allModules.ContainsKey(moduleId) || allModules[moduleId] is null) + allModules[moduleId] = new JsonObject(); } - else if (name.EqualsIgnoreCase("LogVerbosity")) + catch (Exception e) { - if (Enum.TryParse(value, out LogVerbosity verbosity)) - moduleSettings["LogVerbosity"] = verbosity.ToString(); + Console.WriteLine($"Failed to create module object in modules collection: {e.Message}"); + return false; } - else if (name.EqualsIgnoreCase("PostStartPauseSecs")) + + try { - if (int.TryParse(value, out int pauseSec)) - moduleSettings["PostStartPauseSecs"] = pauseSec; + var moduleSettings = (JsonObject)allModules[moduleId]!; + + // Handle pre-defined global values first + if (name.EqualsIgnoreCase("Activate") || name.EqualsIgnoreCase("AutoStart")) + { + moduleSettings["AutoStart"] = value?.ToLower() == "true"; + } + else if (name.EqualsIgnoreCase("SupportGPU")) + { + moduleSettings["SupportGPU"] = value?.ToLower() == "true"; + } + else if (name.EqualsIgnoreCase("Parallelism")) + { + if (int.TryParse(value, out int parallelism)) + moduleSettings["Parallelism"] = parallelism; + } + else if (name.EqualsIgnoreCase("UseHalfPrecision")) + { + moduleSettings["HalfPrecision"] = value; + } + else if (name.EqualsIgnoreCase("AcceleratorDeviceName")) + { + moduleSettings["AcceleratorDeviceName"] = value; + } + else if (name.EqualsIgnoreCase("LogVerbosity")) + { + if (Enum.TryParse(value, out LogVerbosity verbosity)) + moduleSettings["LogVerbosity"] = verbosity.ToString(); + } + else if (name.EqualsIgnoreCase("PostStartPauseSecs")) + { + if (int.TryParse(value, out int pauseSec)) + moduleSettings["PostStartPauseSecs"] = pauseSec; + } + else + { + if (moduleSettings["EnvironmentVariables"] is null) + moduleSettings["EnvironmentVariables"] = new JsonObject(); + + var environmentVars = (JsonObject)moduleSettings["EnvironmentVariables"]!; + environmentVars[name.ToUpper()] = value; + } + + // Clean up legacy values + if (moduleSettings["Activate"] is not null && moduleSettings["AutoStart"] is null) + moduleSettings["AutoStart"] = moduleSettings["Activate"]; + moduleSettings.Remove("Activate"); } - else + catch (Exception e) { - if 
(moduleSettings["EnvironmentVariables"] is null) - moduleSettings["EnvironmentVariables"] = new JsonObject(); - - var environmentVars = (JsonObject)moduleSettings["EnvironmentVariables"]!; - environmentVars[name.ToUpper()] = value; + Console.WriteLine($"Failed to update module setting: {e.Message}"); + return false; } - // Clean up legacy values - if (moduleSettings["Activate"] is not null && moduleSettings["AutoStart"] is null) - moduleSettings["AutoStart"] = moduleSettings["Activate"]; - moduleSettings.Remove("Activate"); - return true; } @@ -570,7 +679,7 @@ public static void AddEnvironmentVariables(this ModuleConfig module, if (string.IsNullOrWhiteSpace(dir)) return new JsonObject(); - string content = await File.ReadAllTextAsync(path); + string content = await File.ReadAllTextAsync(path).ConfigureAwait(false); // var settings = JsonSerializer.Deserialize>(content); var settings = JsonSerializer.Deserialize(content); @@ -588,7 +697,7 @@ public static void AddEnvironmentVariables(this ModuleConfig module, /// This set of module settings /// The path to save /// true on success; false otherwise - public async static Task SaveSettings(JsonObject? settings, string path) + public async static Task SaveSettingsAsync(JsonObject? settings, string path) { if (settings is null || string.IsNullOrWhiteSpace(path)) return false; @@ -605,7 +714,7 @@ public async static Task SaveSettings(JsonObject? settings, string path) var options = new JsonSerializerOptions { WriteIndented = true }; string configJson = JsonSerializer.Serialize(settings, options); - await File.WriteAllTextAsync(path, configJson); + await File.WriteAllTextAsync(path, configJson).ConfigureAwait(false); return true; } @@ -659,7 +768,7 @@ public async static Task SaveAllSettings(this ModuleCollection modules, st var options = new JsonSerializerOptions { WriteIndented = true }; string configJson = JsonSerializer.Serialize(modules, options); - await File.WriteAllTextAsync(path, configJson); + await File.WriteAllTextAsync(path, configJson).ConfigureAwait(false); return true; } @@ -691,22 +800,22 @@ public async static Task CreateModulesListing(this ModuleCollection module var moduleList = modules.Values .OrderBy(m => m.ModuleId) .Select(m => new { - ModuleId = m.ModuleId, - Name = m.Name, - Version = m.Version, - Description = m.Description, - Platforms = m.Platforms, - Runtime = m.Runtime, - VersionCompatibililty = m.VersionCompatibililty, - License = m.License, - LicenseUrl = m.LicenseUrl, - Downloads = 0 + ModuleId = m.ModuleId, + Name = m.Name, + Version = m.Version, + Description = m.Description, + Platforms = m.Platforms, + Runtime = m.Runtime, + ModuleReleases = m.ModuleReleases, + License = m.License, + LicenseUrl = m.LicenseUrl, + Downloads = 0 }); var options = new JsonSerializerOptions { WriteIndented = true }; string configJson = JsonSerializer.Serialize(moduleList, options); - await File.WriteAllTextAsync(path, configJson); + await File.WriteAllTextAsync(path, configJson).ConfigureAwait(false); return true; } diff --git a/src/API/Common/ModuleDescription.cs b/src/API/Common/ModuleDescription.cs index 5183c621..f4dafd59 100644 --- a/src/API/Common/ModuleDescription.cs +++ b/src/API/Common/ModuleDescription.cs @@ -115,16 +115,11 @@ public class ModuleDescription : ModuleBase public int Downloads { get; set; } /// - /// Gets or sets the version of this module currently installed. This value is not - /// deserialised, but instead must be set by the server. 
+ /// Gets or sets the ModuleRelease of the latest release of this module that is compatible + /// with the current server. This value is not deserialised, but instead must be set by the + /// server. /// - public string? CurrentInstalledVersion { get; set; } - - /// - /// Gets or sets the latest version of this module that is compatible with the current - /// server. This value is not deserialised, but instead must be set by the server. - /// - public string? LatestCompatibleVersion { get; set; } + public ModuleRelease? LatestRelease { get; set; } /// /// Gets or sets the release date of the latest compatible version of this module @@ -184,9 +179,9 @@ public static void Initialise(this ModuleDescription module, string currentServe SetLatestCompatibleVersion(module, currentServerVersion); // Set the status of all entries based on availability on this platform - module.Status = string.IsNullOrWhiteSpace(module.LatestCompatibleVersion) + module.Status = string.IsNullOrWhiteSpace(module?.LatestRelease?.ModuleVersion) || !module.IsAvailable(SystemInfo.Platform, currentServerVersion) - ? ModuleStatusType.NotAvailable : ModuleStatusType.Available; + ? ModuleStatusType.NotAvailable : ModuleStatusType.Available; } /// @@ -203,13 +198,14 @@ public static bool IsAvailable(this ModuleDescription module, string platform, s return false; // First check: Is there a version of this module that's compatible with the current server? - if (serverVersion is not null && string.IsNullOrWhiteSpace(module.LatestCompatibleVersion)) - SetLatestCompatibleVersion(module, serverVersion); + if (serverVersion is not null && string.IsNullOrWhiteSpace(module?.LatestRelease?.ModuleVersion)) + SetLatestCompatibleVersion(module!, serverVersion); - bool versionOK = serverVersion is null || !string.IsNullOrWhiteSpace(module.LatestCompatibleVersion); + bool versionOK = serverVersion is null || + !string.IsNullOrWhiteSpace(module?.LatestRelease?.ModuleVersion); // Second check: Is this module available on this platform? - return module.Valid && versionOK && + return module!.Valid && versionOK && ( module.Platforms!.Any(p => p.EqualsIgnoreCase("all")) || module.Platforms!.Any(p => p.EqualsIgnoreCase(platform)) ); } @@ -219,33 +215,34 @@ private static void SetLatestCompatibleVersion(ModuleDescription module, string // HACK: To be removed after CPAI 2.1 is released. The Versions array wasn't added to // the downloadable list of modules until server version 2.1. All modules pre-server // 2.1 are compatible with server 2.1+, so assume compatibility. - if (module.VersionCompatibililty is null || module.VersionCompatibililty.Count() == 0) + if ((module.ModuleReleases?.Length ?? 0) == 0) { - module.LatestCompatibleVersion = module.Version; - module.CompatibleVersionReleaseDate = "2022-03-20"; + module.LatestRelease = new ModuleRelease() + { + ModuleVersion = module.Version, + ReleaseDate = "2022-03-20" + }; + return; } - else + + foreach (ModuleRelease release in module!.ModuleReleases!) { - foreach (VersionCompatibility version in module.VersionCompatibililty) - { - if (version.ServerVersionRange is null || version.ServerVersionRange.Length < 2) - continue; + if (release.ServerVersionRange is null || release.ServerVersionRange.Length < 2) + continue; - string? minServerVersion = version.ServerVersionRange[0]; - string?
maxServerVersion = release.ServerVersionRange[1]; - if (string.IsNullOrEmpty(minServerVersion)) minServerVersion = "0.0"; - if (string.IsNullOrEmpty(maxServerVersion)) maxServerVersion = currentServerVersion; + if (string.IsNullOrEmpty(minServerVersion)) minServerVersion = "0.0"; + if (string.IsNullOrEmpty(maxServerVersion)) maxServerVersion = currentServerVersion; - if (VersionInfo.Compare(minServerVersion, currentServerVersion) <= 0 && - VersionInfo.Compare(maxServerVersion, currentServerVersion) >= 0) + if (VersionInfo.Compare(minServerVersion, currentServerVersion) <= 0 && + VersionInfo.Compare(maxServerVersion, currentServerVersion) >= 0) + { + if (module.LatestRelease is null || + VersionInfo.Compare(module.LatestRelease.ModuleVersion, release.ModuleVersion) <= 0) { - if (module.LatestCompatibleVersion is null || - VersionInfo.Compare(module.LatestCompatibleVersion, version.ModuleVersion) <= 0) - { - module.LatestCompatibleVersion = version.ModuleVersion; - module.CompatibleVersionReleaseDate = version.ReleaseDate; - } + module.LatestRelease = release; } } } diff --git a/src/API/Common/ProcessStatus.cs b/src/API/Common/ProcessStatus.cs index 8ef11b45..1fd1e80b 100644 --- a/src/API/Common/ProcessStatus.cs +++ b/src/API/Common/ProcessStatus.cs @@ -88,6 +88,11 @@ public class ProcessStatus /// public string? ModuleId { get; set; } + /// + /// Gets or sets the name of the queue this module is processing + /// + public string? Queue { get; set; } + /// /// Gets or sets the module name /// @@ -155,8 +160,15 @@ public string Summary StringBuilder summary = new StringBuilder(); // summary.AppendLine($"Process '{Name}' (ID: {ModuleId})"); - summary.AppendLine($"Started: {Started?.ToLocalTime().ToString("dd-MMM-yyyy h:mm:ss tt")}"); - summary.AppendLine($"LastSeen: {LastSeen?.ToLocalTime().ToString("dd-MMM-yyyy h:mm:ss tt")}"); + string timezone = TimeZoneInfo.Local.StandardName; + string format = "dd MMM yyyy h:mm:ss tt"; + string started = (Started is null) ? "Not seen" + : Started.Value.ToLocalTime().ToString(format) + " " + timezone; + string lastSeen = (LastSeen is null) ? 
"Not seen" + : LastSeen.Value.ToLocalTime().ToString(format) + " " + timezone; + + summary.AppendLine($"Started: {started}"); + summary.AppendLine($"LastSeen: {lastSeen}"); summary.AppendLine($"Status: {Status}"); summary.AppendLine($"Processed: {Processed}"); summary.AppendLine($"Provider: {ExecutionProvider}"); diff --git a/src/API/Server/Backend/CommandDispatcher.cs b/src/API/Server/Backend/CommandDispatcher.cs index ef73c46c..84754dab 100644 --- a/src/API/Server/Backend/CommandDispatcher.cs +++ b/src/API/Server/Backend/CommandDispatcher.cs @@ -66,7 +66,8 @@ public async Task QueueRequest(string queueName, RequestPayload payload, CancellationToken token = default) { var response = await _queueServices.SendRequestAsync(queueName.ToLower(), - new BackendRequest(payload), token); + new BackendRequest(payload), token) + .ConfigureAwait(false); return response; } } diff --git a/src/API/Server/Backend/QueueProcessingExtensions.cs b/src/API/Server/Backend/QueueProcessingExtensions.cs index 5f85b900..fa399781 100644 --- a/src/API/Server/Backend/QueueProcessingExtensions.cs +++ b/src/API/Server/Backend/QueueProcessingExtensions.cs @@ -11,6 +11,7 @@ public static IServiceCollection AddQueueProcessing(this IServiceCollection serv services.AddSingleton(); services.AddSingleton(); services.AddSingleton(); + services.AddSingleton(); return services; } } diff --git a/src/API/Server/Backend/QueueServices.cs b/src/API/Server/Backend/QueueServices.cs index 096fe79d..4b3db04a 100644 --- a/src/API/Server/Backend/QueueServices.cs +++ b/src/API/Server/Backend/QueueServices.cs @@ -77,19 +77,21 @@ public async ValueTask SendRequestAsync(string queueName, } // setup a request timeout. - using var cancelationSource = new CancellationTokenSource(_settings.ResponseTimeout); - var timeoutToken = cancelationSource.Token; + using var cancellationSource = new CancellationTokenSource(_settings.ResponseTimeout); + var timeoutToken = cancellationSource.Token; + using var linkedCTS = CancellationTokenSource.CreateLinkedTokenSource(token, timeoutToken); + CancellationToken theToken = linkedCTS.Token; + using var ctr = theToken.Register(() => { completion.TrySetCanceled(); }); try { - CancellationToken theToken = - CancellationTokenSource.CreateLinkedTokenSource(token, timeoutToken).Token; - // setup the timeout callback. - theToken.Register(() => { completion.TrySetCanceled(); }); - try { + // Add the request onto the queue (by writing to it) + // the request will be pulled from the queue by the backend module's + // request for a command. The backend module will send the command result + // back and this will be used to set the TaskCompletionResult result. 
await queue.Writer.WriteAsync(request, theToken).ConfigureAwait(false); _logger.LogTrace($"Client request '{request.reqtype}' in queue '{queueName}' (#reqid {request.reqid})"); } @@ -101,19 +103,20 @@ public async ValueTask SendRequestAsync(string queueName, return new BackendErrorResponse($"The request in '{queueName}' was canceled by caller (#reqid {request.reqid})"); } - var jsonString = await completion.Task; + // Await the result of the TaskCompletionSource for the command that was put on the queue + var jsonString = await completion.Task.ConfigureAwait(false); if (jsonString is null) return new BackendErrorResponse($"null json returned from backend (#reqid {request.reqid})"); - return jsonString; - } + return jsonString; + } catch (OperationCanceledException) { if (timeoutToken.IsCancellationRequested) return new BackendErrorResponse($"The request timed out (#reqid {request.reqid})"); - return new BackendErrorResponse($"The request was canceled by caller (#reqid {request.reqid})"); + return new BackendErrorResponse($"The request was canceled by caller (#reqid {request.reqid})"); } catch (JsonException) { @@ -143,6 +146,10 @@ private Channel GetOrCreateQueue(string queueName) /// public bool SetResult(string req_id, string? responseString) { + // Get the TaskCompletionSource associated with the command and set its + // result to the response returned from the backend module, thus + // completing the frontend request. if (!_pendingResponses.TryGetValue(req_id, out TaskCompletionSource? completion)) return false; @@ -194,9 +201,10 @@ public bool SetResult(string req_id, string? responseString) do { // setup a request timeout. - using var cancellationSource = new CancellationTokenSource(_settings.CommandDequeueTimeout); - var timeoutToken = cancellationSource.Token; - var theToken = CancellationTokenSource.CreateLinkedTokenSource(token, timeoutToken).Token; + using var cancellationSource = new CancellationTokenSource(_settings.CommandDequeueTimeout); + var timeoutToken = cancellationSource.Token; + using var linkedCancellationTokenSource = CancellationTokenSource.CreateLinkedTokenSource(token, timeoutToken); + var theToken = linkedCancellationTokenSource.Token; // NOTE FOR VS CODE users: In debug, you may want to uncheck "All Exceptions" under the // breakpoints section (the bottom section) of the Run and Debug tab. diff --git a/src/API/Server/Backend/TriggerTaskRunner.cs b/src/API/Server/Backend/TriggerTaskRunner.cs new file mode 100644 index 00000000..d7408207 --- /dev/null +++ b/src/API/Server/Backend/TriggerTaskRunner.cs @@ -0,0 +1,168 @@ + +using System; +using System.Diagnostics; +using System.IO; +using CodeProject.AI.SDK.Common; +using Microsoft.Extensions.Logging; + +namespace CodeProject.AI.API.Server.Backend +{ + /// + /// For managing the life cycle of commands triggered by inference. + /// + public class TriggerTaskRunner + { + private ILogger _logger; + + /// + /// Constructor + /// + /// The logger + public TriggerTaskRunner(ILogger logger) + { + _logger = logger; + } + + /// + /// Runs a task command + /// + /// The task to run + /// The working directory of this process + /// true if the process was started; false otherwise + public bool RunCommand(TriggerTask task, string workingDir = "") + { + if (task is null || string.IsNullOrWhiteSpace(task.Command)) + return false; + + Process? process = null; + try + { + var procStartInfo = string.IsNullOrEmpty(task.Args) + ?
new ProcessStartInfo(task.Command) + { + UseShellExecute = false, + WorkingDirectory = workingDir, + CreateNoWindow = false, + RedirectStandardOutput = true, + RedirectStandardError = true + } + : new ProcessStartInfo(task.Command, task.Args) + { + UseShellExecute = false, + WorkingDirectory = workingDir, + CreateNoWindow = false, + RedirectStandardOutput = true, + RedirectStandardError = true + }; + process = new Process + { + StartInfo = procStartInfo, + EnableRaisingEvents = true + }; + process.OutputDataReceived += SendOutputToLog; + process.ErrorDataReceived += SendErrorToLog; + + // Start the process + _logger.LogTrace($"Starting {Text.ShrinkPath(task.Command, 50)} {Text.ShrinkPath(task.Args, 50)}"); + + if (process.Start()) + { + process.BeginOutputReadLine(); + process.BeginErrorReadLine(); + return true; + } + + return false; + } + catch (Exception ex) + { + _logger.LogError(ex, $"Error trying to start {task.Command} in {workingDir}"); + _logger.LogError(ex.Message); + return false; + } + } + + private void SendOutputToLog(object sender, DataReceivedEventArgs data) + { + string? message = data?.Data; + + string filename = string.Empty; + if (sender is Process process) + { + filename = Path.GetFileName(process.StartInfo.Arguments.Replace("\"", "")); + if (string.IsNullOrWhiteSpace(filename)) + filename = Path.GetFileName(process.StartInfo.FileName.Replace("\"", "")); + + if (process.HasExited && string.IsNullOrEmpty(message)) + return; + } + + if (string.IsNullOrWhiteSpace(message)) + return; + + if (!string.IsNullOrEmpty(filename)) + filename += ": "; + + var testString = message.ToLower(); + + // We're picking up messages written to the console so let's provide a little help for + // messages that are trying to get themselves categorised properly. + // Optimisation: We probably should order these by info/trace/debug/warn/error/crit, but + // for sanity we'll keep them in order of anxiety. + if (testString.StartsWith("crit: ")) + _logger.LogCritical(filename + message.Substring("crit: ".Length)); + else if (testString.StartsWith("critical: ")) + _logger.LogCritical(filename + message.Substring("critical: ".Length)); + else if (testString.StartsWith("err: ")) + _logger.LogError(filename + message.Substring("err: ".Length)); + else if (testString.StartsWith("error: ")) + _logger.LogError(filename + message.Substring("error: ".Length)); + else if (testString.StartsWith("warn: ")) + _logger.LogWarning(filename + message.Substring("warn: ".Length)); + else if (testString.StartsWith("warning: ")) + _logger.LogWarning(filename + message.Substring("warning: ".Length)); + else if (testString.StartsWith("info: ")) + _logger.LogInformation(filename + message.Substring("info: ".Length)); + else if (testString.StartsWith("information: ")) + _logger.LogInformation(filename + message.Substring("information: ".Length)); + else if (testString.StartsWith("dbg: ")) + _logger.LogDebug(filename + message.Substring("dbg: ".Length)); + else if (testString.StartsWith("debug: ")) + _logger.LogDebug(filename + message.Substring("debug: ".Length)); + else if (testString.StartsWith("trc: ")) + _logger.LogTrace(filename + message.Substring("trc: ".Length)); + else if (testString.StartsWith("trace: ")) + _logger.LogTrace(filename + message.Substring("trace: ".Length)); + else + _logger.LogInformation(filename + message); + } + + private void SendErrorToLog(object sender, DataReceivedEventArgs data) + { + string? 
error = data?.Data; + + string filename = string.Empty; + if (sender is Process process) + { + filename = Path.GetFileName(process.StartInfo.Arguments.Replace("\"", "")); + if (string.IsNullOrWhiteSpace(filename)) + filename = Path.GetFileName(process.StartInfo.FileName.Replace("\"", "")); + + // This same logic (and output) is sent to stdout so no need to duplicate here. + // if (process.HasExited && string.IsNullOrEmpty(error)) + // error = "has exited"; + } + + if (string.IsNullOrWhiteSpace(error)) + return; + + if (!string.IsNullOrEmpty(filename)) + filename += ": "; + + if (string.IsNullOrEmpty(error)) + error = "No error provided"; + + _logger.LogError(filename + error); + } + } +} \ No newline at end of file diff --git a/src/API/Server/Backend/TriggersConfig.cs b/src/API/Server/Backend/TriggersConfig.cs new file mode 100644 index 00000000..4ca5590a --- /dev/null +++ b/src/API/Server/Backend/TriggersConfig.cs @@ -0,0 +1,266 @@ +using System.Runtime.Serialization; + +namespace CodeProject.AI.API.Server.Backend +{ + /// + /// Defines the comparison to be checked between a reported and an expected property value + /// + public enum TriggerComparison + { + /// + /// Value observed is equal to the value in the trigger + /// + [EnumMember(Value = "Equals")] + Equals, + + /// + /// Value observed is less than the value in the trigger + /// + [EnumMember(Value = "LessThan")] + LessThan, + + /// + /// Value observed is less than or equal to the value in the trigger + /// + [EnumMember(Value = "LessThanOrEquals")] + LessThanOrEquals, + + /// + /// Value observed is greater than the value in the trigger + /// + [EnumMember(Value = "GreaterThan")] + GreaterThan, + + /// + /// Value observed is greater than or equal to the value in the trigger + /// + [EnumMember(Value = "GreaterThanOrEquals")] + GreaterThanOrEquals, + + /// + /// Value observed is not equal to the value in the trigger + /// + [EnumMember(Value = "NotEquals")] + NotEquals + } + + /// + /// The type of task to be run when a trigger is triggered + /// + public enum TriggerTaskType + { + /// + /// Execute a command locally + /// + Command + } + + /// + /// The type of task to be run when a trigger is triggered + /// + public class TriggerTask + { + /// + /// The type of the task, e.g. execute a shell command locally + /// + public TriggerTaskType Type { get; set; } + + /// + /// The command string + /// + public string? Command { get; set; } + + /// + /// The command arguments + /// + public string? Args { get; set; } + } + + /// + /// The set of tasks for each platform that should be executed + /// + public class PlatformTasks + { + /// + /// The task for Windows x64 + /// + public TriggerTask? Windows { get; set; } + + /// + /// The task for Windows arm64 + /// + public TriggerTask? WindowsArm64 { get; set; } + + /// + /// The task for Linux x64 + /// + public TriggerTask? Linux { get; set; } + + /// + /// The task for Linux arm64 + /// + public TriggerTask? LinuxArm64 { get; set; } + + /// + /// The task for macOS x64 + /// + public TriggerTask? MacOS { get; set; } + + /// + /// The task for macOS arm64 + /// + public TriggerTask? MacOSArm64 { get; set; } + } + + /// + /// Defines a trigger event + /// + public class Trigger + { + /// + /// Gets or sets the queue to be watched for this trigger + /// + public string? Queue { get; set; } + + /// + /// Gets or sets the name of the collection in the module's response that holds the + /// collection of predictions. If this is null then it's assumed the response is just + /// label/confidence for a single prediction.
If there is a collection of predictions, + /// each prediction will hold its own label/confidence + /// + public string? PredictionsCollectionName { get; set; } + + /// + /// Gets or sets the property to be tested for this trigger + /// + public string? PropertyName { get; set; } + + /// + /// Gets or sets the value to be checked for this trigger to be triggered + /// + public object? PropertyValue { get; set; } + + /// + /// Gets or sets how the value of this trigger is to be checked against the observed value + /// + public TriggerComparison? PropertyComparison { get; set; } + + /// + /// The value of the inference confidence to test for this trigger. A value of null means + /// do not check confidence + /// + public float? Confidence { get; set; } + + /// + /// Gets or sets how the value of confidence (if provided) is to be checked against the + /// observed confidence + /// + public TriggerComparison? ConfidenceComparison { get; set; } + + /// + /// Gets or sets the task for each platform to be run when triggered + /// + public PlatformTasks? PlatformTasks { get; set; } + + /// + /// Gets the task for the given platform + /// + /// The name of the platform + /// A task + public TriggerTask? GetTask(string platform) => platform.ToLower() switch + { + "windows" => PlatformTasks?.Windows, + "windowsarm64" => PlatformTasks?.WindowsArm64, + "linux" => PlatformTasks?.Linux, + "linuxarm64" => PlatformTasks?.LinuxArm64, + "macos" => PlatformTasks?.MacOS, + "macosarm64" => PlatformTasks?.MacOSArm64, + _ => null + }; + + /// + /// Tests whether the given property value satisfies the trigger + /// + /// The property value + /// The confidence of this value + /// true if the test passes; false otherwise + public bool Test(string? value, float confidence) + { + // Test must pass the confidence test first. + if (Confidence is not null) + { + if (ConfidenceComparison == TriggerComparison.Equals) + if (confidence != Confidence) + return false; + + if (ConfidenceComparison == TriggerComparison.GreaterThan) + if (confidence <= Confidence) + return false; + + if (ConfidenceComparison == TriggerComparison.GreaterThanOrEquals) + if (confidence < Confidence) + return false; + + if (ConfidenceComparison == TriggerComparison.LessThan) + if (confidence >= Confidence) + return false; + + if (ConfidenceComparison == TriggerComparison.LessThanOrEquals) + if (confidence > Confidence) + return false; + + if (ConfidenceComparison == TriggerComparison.NotEquals) + if (confidence == Confidence) + return false; + } + + string? 
propertyValue = PropertyValue?.ToString(); + + if (value is null || propertyValue is null) + { + if (PropertyComparison == TriggerComparison.Equals) + return value is null && propertyValue is null; + + if (PropertyComparison == TriggerComparison.NotEquals) + return value is null && propertyValue is not null || + value is not null && propertyValue is null; + + return false; + } + + if (PropertyComparison == TriggerComparison.Equals) + return value == propertyValue; + + if (PropertyComparison == TriggerComparison.GreaterThan) + return value.CompareTo(propertyValue) > 0; + + if (PropertyComparison == TriggerComparison.GreaterThanOrEquals) + return value.CompareTo(propertyValue) >= 0; + + if (PropertyComparison == TriggerComparison.LessThan) + return value.CompareTo(propertyValue) < 0; + + if (PropertyComparison == TriggerComparison.LessThanOrEquals) + return value.CompareTo(propertyValue) <= 0; + + if (PropertyComparison == TriggerComparison.NotEquals) + return value != propertyValue; + + return false; + } + } + + /// + /// Triggers config values. + /// + public class TriggersConfig + { + public static string TriggersCfgFilename = "triggers.json"; + public static string TriggersCfgSection = "triggersSection"; + + /// + /// Gets or sets the version info + /// + public Trigger[]? Triggers { get; set; } + } +} diff --git a/src/API/Server/FrontEnd/AiModules/AiModuleInstaller.cs b/src/API/Server/FrontEnd/AiModules/AiModuleInstaller.cs index 2d4d4436..836b0ba6 100644 --- a/src/API/Server/FrontEnd/AiModules/AiModuleInstaller.cs +++ b/src/API/Server/FrontEnd/AiModules/AiModuleInstaller.cs @@ -91,7 +91,7 @@ public async Task InstallInitialModules() // } // Just because we need at least one await - await Task.Delay(1); + await Task.Delay(1).ConfigureAwait(false); // Add the initial installed tasks here // eg var result = await InstallModuleAsync("TextSummary", "1.1"); @@ -106,11 +106,12 @@ public async Task InstallInitialModules() { try { - _logger.LogInformation($"** Installing initial module {idVersion.Key}."); + _logger.LogInformation($"** Installing initial module {idVersion.Key}."); - (bool success, string error) = await DownloadAndInstallModuleAsync(idVersion.Key, idVersion.Value); + var downloadTask = DownloadAndInstallModuleAsync(idVersion.Key, idVersion.Value); + (bool success, string error) = await downloadTask.ConfigureAwait(false); if (!success) - _logger.LogError($"Unable to install {idVersion.Key} ({idVersion.Value}): " + error); + _logger.LogInformation($"Unable to install {idVersion.Key}: " + error); } catch (Exception ex) { @@ -125,7 +126,7 @@ public async Task InstallInitialModules() { try { - _logger.LogInformation($"** Installing initial module {idVersion.Key}."); + _logger.LogInformation($"** Installing initial module {idVersion.Key}."); installTasks.Add(DownloadAndInstallModuleAsync(idVersion.Key, idVersion.Value)); } catch (Exception ex) @@ -138,7 +139,7 @@ public async Task InstallInitialModules() { try { - var result = await task; + var result = await task.ConfigureAwait(false); if (!result.success) _logger.LogError(result.message ?? 
"Unknown Error Installing Initial Modules."); } @@ -190,19 +191,17 @@ public static ModuleDescription ModuleDescriptionFromModuleConfig(ModuleConfig m { var moduleDescription = new ModuleDescription() { - ModuleId = module.ModuleId, - Name = module.Name, - Version = module.Version, + ModuleId = module.ModuleId, + Name = module.Name, + Version = module.Version, - Description = module.Description, - Platforms = module.Platforms, - License = module.License, - LicenseUrl = module.LicenseUrl, + Description = module.Description, + Platforms = module.Platforms, + License = module.License, + LicenseUrl = module.LicenseUrl, - PreInstalled = module.PreInstalled, - - VersionCompatibililty = module.VersionCompatibililty, - CurrentInstalledVersion = module.Version + PreInstalled = module.PreInstalled, + ModuleReleases = module.ModuleReleases }; // Set initial properties. Most importantly it sets the status. @@ -238,7 +237,8 @@ public async Task> GetDownloadableModules() _lastDownloadableModuleCheckTime = DateTime.Now; // Download the list of downloadable modules as a JSON string, then deserialise - string downloads = await _packageDownloader.DownloadTextFileAsync(_moduleOptions.ModuleListUrl!); + string downloads = await _packageDownloader.DownloadTextFileAsync(_moduleOptions.ModuleListUrl!) + .ConfigureAwait(false); var options = new JsonSerializerOptions { @@ -256,8 +256,10 @@ public async Task> GetDownloadableModules() { int basUrlLength = _moduleOptions.ModuleListUrl!.Length - "modules.json".Length; string baseDownloadUrl = _moduleOptions.ModuleListUrl![..basUrlLength]; + if (baseDownloadUrl == "file://") + baseDownloadUrl = _moduleSettings.DownloadedModulePackagesPath; foreach (var module in moduleList) - module.DownloadUrl = baseDownloadUrl + $"downloads/{module.ModuleId}-{module.Version}.zip"; + module.DownloadUrl = baseDownloadUrl + $"\\{module.ModuleId}-{module.Version}.zip"; } string currentServerVersion = _versionConfig.VersionInfo?.Version ?? string.Empty; @@ -281,7 +283,6 @@ public async Task> GetDownloadableModules() if (downloadableModule is not null) { downloadableModule.Status = ModuleStatusType.Installed; - downloadableModule.CurrentInstalledVersion = module.Version; if (VersionInfo.Compare(downloadableModule.Version, module.Version) > 0) downloadableModule.Status = ModuleStatusType.UpdateAvailable; @@ -358,9 +359,13 @@ public async Task> GetDownloadableModules() /// /// The module to install /// The version of the module to install + /// Whether or not to ignore the download cache. If true, the module + /// will always be freshly downloaded /// A Tuple containing true for success; false otherwise, and a string containing /// the error message if the operation was not successful. - public async Task<(bool, string)> DownloadAndInstallModuleAsync(string moduleId, string version) + public async Task<(bool, string)> DownloadAndInstallModuleAsync(string moduleId, + string version, + bool noCache = false) { // if (SystemInfo.RuntimeEnvironment == RuntimeEnvironment.Development) // return (false, $"Can't install modules when running in Development"); @@ -370,7 +375,7 @@ public async Task> GetDownloadableModules() _logger.LogInformation($"Preparing to install module '{moduleId}'"); - ModuleDescription? moduleDownload = await GetCurrentModuleDescription(moduleId); + ModuleDescription? 
moduleDownload = await GetCurrentModuleDescription(moduleId).ConfigureAwait(false); if (moduleDownload is null) return (false, $"Unable to find the download info for '{moduleId}'"); @@ -391,10 +396,10 @@ public async Task> GetDownloadableModules() if (module is not null && module.Valid) { if (VersionInfo.Compare(moduleDownload.Version, module.Version) <= 0) - return (false, $"The same, or a newer version, of Module {moduleId} is already installed"); + return (false, $"{moduleId} is already installed"); // If current module is a lower version then uninstall first - (bool success, string uninstallError) = await UninstallModuleAsync(moduleId); + (bool success, string uninstallError) = await UninstallModuleAsync(moduleId).ConfigureAwait(false); if (!success) return (false, $"Unable to uninstall older version of {moduleId}: {uninstallError}"); } @@ -411,15 +416,15 @@ public async Task> GetDownloadableModules() bool downloaded = false; string error = string.Empty; - if (System.IO.File.Exists(downloadPath)) + if (!noCache && System.IO.File.Exists(downloadPath)) { _logger.LogInformation($" (using cached download for '{moduleId}')"); downloaded = true; } else { - (downloaded, error) = await _packageDownloader.DownloadFileAsync(moduleDownload.DownloadUrl!, - downloadPath); + (downloaded, error) = await _packageDownloader.DownloadFileAsync(moduleDownload.DownloadUrl!, downloadPath) + .ConfigureAwait(false); } if (downloaded && !System.IO.File.Exists(downloadPath)) @@ -435,7 +440,7 @@ public async Task> GetDownloadableModules() return (false, $"Unable to download module '{moduleId}' from {moduleDownload.DownloadUrl}. Error: {error}"); } - return await InstallModuleAsync(downloadPath, moduleId); + return await InstallModuleAsync(downloadPath, moduleId).ConfigureAwait(false); } /// @@ -464,7 +469,7 @@ public async Task> GetDownloadableModules() } else { - moduleDownload = await GetCurrentModuleDescription(moduleId); + moduleDownload = await GetCurrentModuleDescription(moduleId).ConfigureAwait(false); if (moduleDownload is not null) { moduleDownload.Status = ModuleStatusType.Unpacking; @@ -505,7 +510,8 @@ to work. Doing it manually means we are assuming a modulesettings.json always string? settingsModuleId = null; try { - string content = await File.ReadAllTextAsync(Path.Combine(moduleDir, "modulesettings.json")); + string content = await File.ReadAllTextAsync(Path.Combine(moduleDir, "modulesettings.json")) + .ConfigureAwait(false); var documentOptions = new JsonDocumentOptions { @@ -572,7 +578,7 @@ to work. Doing it manually means we are assuming a modulesettings.json always moduleDownload.Status = ModuleStatusType.Installing; ProcessStartInfo procStartInfo; - if (SystemInfo.OperatingSystem.EqualsIgnoreCase("Windows")) + if (SystemInfo.IsWindows) procStartInfo = new ProcessStartInfo(_moduleSettings.ModuleInstallerScriptPath); else procStartInfo = new ProcessStartInfo("bash", _moduleSettings.ModuleInstallerScriptPath); @@ -609,18 +615,20 @@ to work. Doing it manually means we are assuming a modulesettings.json always // Wait for the Process to complete before exiting the method or else the // Process may be killed at some random time when the process variable is GC. 
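The hunk below replaces the hard-coded ten-minute cap with the configurable ModuleOptions.ModuleInstallTimeout introduced later in this diff. As a minimal sketch of the pattern (the exact appsettings key shape is an assumption, not shown in this diff; a TimeSpan option binds from a string such as "00:10:00"):

    // Sketch only: assumes "ModuleInstallTimeout": "00:10:00" bound onto ModuleOptions.
    using var cts = new CancellationTokenSource(_moduleOptions.ModuleInstallTimeout);
    try
    {
        await process.WaitForExitAsync(cts.Token).ConfigureAwait(false);
    }
    catch (OperationCanceledException)
    {
        // Cancellation here means the timeout elapsed; the surrounding code treats
        // this as "Timed out attempting to install Module ...".
    }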
-                       using var cts = new CancellationTokenSource(TimeSpan.FromMinutes(10));
-                       await process.WaitForExitAsync(cts.Token);
+                       using var cts = new CancellationTokenSource(_moduleOptions.ModuleInstallTimeout);
+                       await process.WaitForExitAsync(cts.Token).ConfigureAwait(false);

                        _logger.LogInformation($"Installer exited with code {process.ExitCode}");
-                       await logWriter.WriteLineAsync($"Installer exited with code {process.ExitCode}");
+                       await logWriter.WriteLineAsync($"Installer exited with code {process.ExitCode}")
+                                      .ConfigureAwait(false);
                    }
                    else
                    {
                        if (moduleDownload is not null)
                            moduleDownload.Status = ModuleStatusType.FailedInstall;

-                       await logWriter.WriteLineAsync($"Unable to start the Module installer for '{moduleId}'");
+                       await logWriter.WriteLineAsync($"Unable to start the Module installer for '{moduleId}'")
+                                      .ConfigureAwait(false);
                        return (false, $"Unable to start the Module installer for '{moduleId}'");
                    }
                }
@@ -632,7 +640,8 @@ to work. Doing it manually means we are assuming a modulesettings.json always
                if (moduleDownload is not null)
                    moduleDownload.Status = ModuleStatusType.FailedInstall;

-               await logWriter.WriteLineAsync($"Timed out attempting to install Module '{moduleId}'");
+               await logWriter.WriteLineAsync($"Timed out attempting to install Module '{moduleId}'")
+                              .ConfigureAwait(false);
                return (false, $"Timed out attempting to install Module '{moduleId}' ({e.Message})");
            }
            catch (Exception e)
            {
                if (moduleDownload is not null)
                    moduleDownload.Status = ModuleStatusType.FailedInstall;

-               await logWriter.WriteLineAsync($"Unable to install Module '{moduleId}' (${e.Message})");
+               await logWriter.WriteLineAsync($"Unable to install Module '{moduleId}' ({e.Message})")
+                              .ConfigureAwait(false);
                return (false, $"Unable to install Module '{moduleId}' ({e.Message})");
            }
@@ -673,7 +683,7 @@ to work. Doing it manually means we are assuming a modulesettings.json always
            if (!Directory.Exists(moduleDir))
                return (false, $"Unable to find {moduleId}'s install directory {moduleDir ?? "null"}");

-           ModuleDescription? moduleDownload = await GetCurrentModuleDescription(moduleId);
+           ModuleDescription? moduleDownload = await GetCurrentModuleDescription(moduleId).ConfigureAwait(false);

            // If the module to be uninstalled is no longer a download, create an entry and add it
            // to the download list so at least we can provide updates on it disappearing.
@@ -692,12 +702,12 @@ to work. Doing it manually means we are assuming a modulesettings.json always
                // Console.WriteLine("Setting ModuleStatusType.Uninstalling");
                moduleDownload.Status = ModuleStatusType.Uninstalling;

-               if (!await _moduleProcessService.KillProcess(module))
+               if (!await _moduleProcessService.KillProcess(module).ConfigureAwait(false))
                {
                    Console.WriteLine("Setting ModuleStatusType.Unknown");
                    RefreshDownloadableModuleList();

-                   moduleDownload = await GetCurrentModuleDescription(moduleId);
+                   moduleDownload = await GetCurrentModuleDescription(moduleId).ConfigureAwait(false);
                    if (moduleDownload is not null)
                        moduleDownload.Status = ModuleStatusType.Unknown;

@@ -711,7 +721,7 @@ to work.
Doing it manually means we are assuming a modulesettings.json always Directory.Delete(moduleDir, true); Console.WriteLine("Setting newly deleted module to ModuleStatusType.Available"); - moduleDownload = await GetCurrentModuleDescription(moduleId); + moduleDownload = await GetCurrentModuleDescription(moduleId).ConfigureAwait(false); if (moduleDownload is not null) moduleDownload.Status = ModuleStatusType.Available; } @@ -719,13 +729,13 @@ to work. Doing it manually means we are assuming a modulesettings.json always { _logger.LogError($"Unable to delete install folder for {moduleId} ({e.Message})"); _logger.LogInformation("Will wait a moment: sometimes a delete just needs time to complete"); - await Task.Delay(3); + await Task.Delay(3).ConfigureAwait(false); } if (Directory.Exists(moduleDir)) // shouldn't actually be possible to get here if delete failed { Console.WriteLine("Setting ModuleStatusType.UninstallFailed"); - moduleDownload = await GetCurrentModuleDescription(moduleId); + moduleDownload = await GetCurrentModuleDescription(moduleId).ConfigureAwait(false); if (moduleDownload is not null) moduleDownload.Status = ModuleStatusType.UninstallFailed; @@ -735,7 +745,7 @@ to work. Doing it manually means we are assuming a modulesettings.json always } if (_moduleCollection.ContainsKey(moduleId) && - !_moduleCollection.TryRemove(moduleId, out ModuleConfig _)) + !_moduleCollection.TryRemove(moduleId, out _)) { if (moduleDownload is not null) moduleDownload.Status = ModuleStatusType.UninstallFailed; @@ -762,7 +772,7 @@ to work. Doing it manually means we are assuming a modulesettings.json always /// A ModuleDescription object, or null if not found. private async Task GetCurrentModuleDescription(string moduleId) { - List moduleList = await GetDownloadableModules(); + List moduleList = await GetDownloadableModules().ConfigureAwait(false); return moduleList.FirstOrDefault(m => m.ModuleId?.EqualsIgnoreCase(moduleId) == true); } @@ -772,7 +782,10 @@ private void SendOutputToLog(TextWriter log, object sender, DataReceivedEventArg if (!string.IsNullOrWhiteSpace(message)) { - log.WriteLine(Text.StripXTermColors(message)); + message = Text.StripSpinnerChars(message); + + string timestamp = DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss: "); + log.WriteLine(timestamp + Text.StripXTermColors(message)); string? moduleId = GetModuleIdFromEventSender(sender); if (moduleId is not null) @@ -788,7 +801,10 @@ private void SendErrorToLog(TextWriter log, object sender, DataReceivedEventArgs if (!string.IsNullOrWhiteSpace(message)) { - log.WriteLine(Text.StripXTermColors(message)); + message = Text.StripSpinnerChars(message); + + string timestamp = DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss: "); + log.WriteLine(timestamp + Text.StripXTermColors(message)); string? moduleId = GetModuleIdFromEventSender(sender); if (moduleId is not null) @@ -797,7 +813,7 @@ private void SendErrorToLog(TextWriter log, object sender, DataReceivedEventArgs _logger.LogError(message); } } - + /// /// Gets a module ID from an event /// @@ -828,7 +844,7 @@ private async void ModuleInstallComplete(object? sender, EventArgs e) return; } - ModuleDescription? moduleDownload = await GetCurrentModuleDescription(moduleId); + ModuleDescription? moduleDownload = await GetCurrentModuleDescription(moduleId).ConfigureAwait(false); if (moduleDownload is null) { _logger.LogError("Unable to find recently installed module in downloadable module list"); @@ -862,7 +878,7 @@ private async void ModuleInstallComplete(object? 
sender, EventArgs e)
            _moduleCollection.TryAdd(moduleId, moduleConfig);

            // StartProcess does more than just start the module so we need to call it
            // even if the module's AutoStart is false.
-           if (await _moduleProcessService.StartProcess(moduleConfig))
+           if (await _moduleProcessService.StartProcess(moduleConfig).ConfigureAwait(false))
                _logger.LogInformation($"Module {moduleId} started successfully.");
            else if (!(moduleConfig.AutoStart ?? false))
                _logger.LogInformation($"Module {moduleId} not configured to AutoStart.");
diff --git a/src/API/Server/FrontEnd/AiModules/AiModuleRunner.cs b/src/API/Server/FrontEnd/AiModules/AiModuleRunner.cs
index 4f4c70cf..3d23e003 100644
--- a/src/API/Server/FrontEnd/AiModules/AiModuleRunner.cs
+++ b/src/API/Server/FrontEnd/AiModules/AiModuleRunner.cs
@@ -130,6 +130,7 @@ public AiModuleRunner(IOptions versionOptions,
                {
                    ModuleId = module.ModuleId,
                    Name     = module.Name,
+                   Queue    = module.Queue,
                    Status   = status
                });
            }
@@ -149,7 +150,7 @@ public AiModuleRunner(IOptions versionOptions,
        public async override Task StartAsync(CancellationToken cancellationToken)
        {
            _logger.LogTrace("ModuleRunner Start");
-           await base.StartAsync(cancellationToken);
+           await base.StartAsync(cancellationToken).ConfigureAwait(false);
        }

        ///
@@ -161,15 +162,15 @@ public override async Task StopAsync(CancellationToken cancellationToken)
            foreach (var module in _modules.Values)
                tasks.Add(KillProcess(module));

-           await Task.WhenAll(tasks);
+           await Task.WhenAll(tasks).ConfigureAwait(false);

-           await base.StopAsync(cancellationToken);
+           await base.StopAsync(cancellationToken).ConfigureAwait(false);
        }

        ///
        protected override async Task ExecuteAsync(CancellationToken stoppingToken)
        {
-           await Task.Delay(100); // let everything else start up as well
+           await Task.Delay(100).ConfigureAwait(false); // let everything else start up as well

            if (_modules is null)
            {
@@ -207,7 +208,8 @@ protected override async Task ExecuteAsync(CancellationToken stoppingToken)
            else
            {
                // Let's make sure the front end is up and running before we start the backend
                // analysis services
-               await Task.Delay(TimeSpan.FromSeconds(preLaunchModuleDelaySecs), stoppingToken);
+               await Task.Delay(TimeSpan.FromSeconds(preLaunchModuleDelaySecs), stoppingToken)
+                         .ConfigureAwait(false);

                foreach (var entry in _modules!)
                {
@@ -224,19 +226,16 @@ protected override async Task ExecuteAsync(CancellationToken stoppingToken)
                    if (status == null)
                        continue;

-                   await StartProcess(module);
+                   await StartProcess(module).ConfigureAwait(false);
                }
            }

            // Install Initial Modules last so already installed modules will run
            // while the installations are happening.
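This diff repeatedly swaps string tests such as SystemInfo.OperatingSystem.EqualsIgnoreCase("Windows") and SystemInfo.ExecutionEnvironment == ExecutionEnvironment.Docker for the flags SystemInfo.IsWindows and SystemInfo.IsDocker. The SDK's SystemInfo class is not itself part of this diff, but from the call sites the helpers would reduce to something like this sketch (assumed, not confirmed against the SDK source):

    // Inferred from the replaced expressions in this diff; the real SystemInfo may differ.
    public static bool IsWindows => OperatingSystem.EqualsIgnoreCase("Windows");
    public static bool IsDocker  => ExecutionEnvironment == ExecutionEnvironment.Docker;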
- if (SystemInfo.ExecutionEnvironment != ExecutionEnvironment.Docker) - { - _logger.LogInformation("Installing Initial Modules."); - await _moduleInstaller.InstallInitialModules(); - } + if (!SystemInfo.IsDocker) + await _moduleInstaller.InstallInitialModules().ConfigureAwait(false); - await Task.Delay(Timeout.Infinite, stoppingToken); + await Task.Delay(Timeout.Infinite, stoppingToken).ConfigureAwait(false); _logger.LogInformation("ModuleRunner Stopped"); } diff --git a/src/API/Server/FrontEnd/AiModules/ModuleProcessServices.cs b/src/API/Server/FrontEnd/AiModules/ModuleProcessServices.cs index fd5c2d8d..0b152c09 100644 --- a/src/API/Server/FrontEnd/AiModules/ModuleProcessServices.cs +++ b/src/API/Server/FrontEnd/AiModules/ModuleProcessServices.cs @@ -118,7 +118,7 @@ public IEnumerable ListProcessStatuses() public bool RemoveProcessStatus(string moduleId) { if (_processStatuses.ContainsKey(moduleId) && - !_processStatuses.TryRemove(moduleId, out ProcessStatus _)) + !_processStatuses.TryRemove(moduleId, out _)) { return false; } @@ -160,11 +160,15 @@ public async Task KillProcess(ModuleConfig module) // Send a 'Quit' request but give it time to wrap things up before we step in further var payload = new RequestPayload("Quit"); payload.SetValue("moduleId", module.ModuleId); - await _queueServices.SendRequestAsync(module.Queue!, new BackendRequest(payload)); + await _queueServices.SendRequestAsync(module.Queue!, new BackendRequest(payload)) + .ConfigureAwait(false); int shutdownServerDelaySecs = _moduleSettings.DelayAfterStoppingModulesSecs; if (shutdownServerDelaySecs > 0) - await Task.Delay(TimeSpan.FromSeconds(shutdownServerDelaySecs)); + { + await Task.Delay(TimeSpan.FromSeconds(shutdownServerDelaySecs)) + .ConfigureAwait(false); + } try { @@ -172,7 +176,14 @@ public async Task KillProcess(ModuleConfig module) { _logger.LogInformation($"Forcing shutdown of {process.ProcessName}/{module.ModuleId}"); process.Kill(true); - await process.WaitForExitAsync(); + + Stopwatch stopWatch = Stopwatch.StartNew(); + _logger.LogDebug($"Waiting for {module.ModuleId} to end."); + + await process.WaitForExitAsync().ConfigureAwait(false); + + stopWatch.Stop(); + _logger.LogDebug($"{module.ModuleId} ended after {stopWatch.ElapsedMilliseconds} ms"); } else _logger.LogInformation($"{module.ModuleId} went quietly"); @@ -213,6 +224,7 @@ public async Task StartProcess(ModuleConfig module) { ModuleId = module.ModuleId, Name = module.Name, + Queue = module.Queue, Status = ProcessStatusType.Unknown }; _processStatuses.TryAdd(module.ModuleId, status); @@ -235,7 +247,8 @@ public async Task StartProcess(ModuleConfig module) { ProcessStartInfo procStartInfo = CreateProcessStartInfo(module); - _logger.LogInformation($"Attempting to start {module.ModuleId} with {procStartInfo.FileName} {procStartInfo.Arguments}"); + _logger.LogDebug(""); + _logger.LogDebug($"Attempting to start {module.ModuleId} with {procStartInfo.FileName} {procStartInfo.Arguments}"); process = new Process { @@ -279,7 +292,7 @@ public async Task StartProcess(ModuleConfig module) // Trying to reduce startup CPU and instantaneous memory use for low resource // environments such as Docker or RPi - await Task.Delay(TimeSpan.FromSeconds(postStartPauseSecs)); + await Task.Delay(TimeSpan.FromSeconds(postStartPauseSecs)).ConfigureAwait(false); status.Status = ProcessStatusType.Started; } else @@ -296,7 +309,7 @@ public async Task StartProcess(ModuleConfig module) _logger.LogError(ex.StackTrace); #if DEBUG _logger.LogError($" *** Did you setup the Development 
environment?"); - if (SystemInfo.Platform == "Windows") + if (SystemInfo.IsWindows) _logger.LogError($" Run \\src\\setup.bat"); else _logger.LogError($" In /src, run 'bash setup.sh'"); @@ -308,7 +321,7 @@ public async Task StartProcess(ModuleConfig module) } if (process is null) - _runningProcesses.TryRemove(module.ModuleId, out Process _); + _runningProcesses.TryRemove(module.ModuleId, out _); return process != null; } @@ -335,7 +348,7 @@ public async Task RestartProcess(ModuleConfig module) if (_runningProcesses.TryGetValue(module.ModuleId, out Process? process) && process != null) { status.Status = ProcessStatusType.Stopping; - await KillProcess(module); + await KillProcess(module).ConfigureAwait(false); status.Status = ProcessStatusType.Stopped; } else @@ -345,7 +358,7 @@ public async Task RestartProcess(ModuleConfig module) if (module.AutoStart == false || !module.Available(SystemInfo.Platform, _versionConfig.VersionInfo?.Version)) return true; - return await StartProcess(module); + return await StartProcess(module).ConfigureAwait(false); } /// @@ -415,7 +428,7 @@ private ProcessStartInfo CreateProcessStartInfo(ModuleConfig module) string filePath = _moduleSettings.GetFilePath(module); string? command = _moduleSettings.GetCommandPath(module); - _logger.LogDebug($"Command : {command}"); + _logger.LogTrace($"Command: {command}"); // Setup the process we're going to launch #if Windows @@ -470,17 +483,41 @@ private void SendOutputToLog(object sender, DataReceivedEventArgs data) if (string.IsNullOrWhiteSpace(message)) return; - // if (string.IsNullOrEmpty(filename)) - // filename = "Process"; - if (!string.IsNullOrEmpty(filename)) filename += ": "; - // Force ditch the MS logging scoping headings - if (!message.StartsWith("info: ") && !message.EndsWith("[0]")) + var testString = message.ToLower(); + + // We're picking up messages written to the console so let's provide a little help for + // messages that are trying to get themselves categorised properly. + // Optimisation: We probably should order these by info/trace/debug/warn/error/crit, but + // for sanity we'll keep them in order of anxiety. 
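The chain below maps conventional severity prefixes on redirected console output onto ILogger levels. The "Optimisation" comment above hints at a table-driven form; a compact sketch of that alternative (illustrative only, and the diff itself keeps the explicit chain; the field and method names here are invented):

    private static readonly (string Prefix, LogLevel Level)[] _severityPrefixes =
    {
        ("crit: ", LogLevel.Critical), ("critical: ",    LogLevel.Critical),
        ("err: ",  LogLevel.Error),    ("error: ",       LogLevel.Error),
        ("warn: ", LogLevel.Warning),  ("warning: ",     LogLevel.Warning),
        ("info: ", LogLevel.Information), ("information: ", LogLevel.Information),
        ("dbg: ",  LogLevel.Debug),    ("debug: ",       LogLevel.Debug),
        ("trc: ",  LogLevel.Trace),    ("trace: ",       LogLevel.Trace)
    };

    private void LogWithSeverityPrefix(string testString, string filename, string message)
    {
        // First matching prefix wins; the prefix itself is stripped from the output.
        foreach ((string prefix, LogLevel level) in _severityPrefixes)
        {
            if (testString.StartsWith(prefix))
            {
                _logger.Log(level, filename + message.Substring(prefix.Length));
                return;
            }
        }
        _logger.LogInformation(filename + message); // no recognised prefix
    }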
+ if (testString.StartsWith("crit: ")) + _logger.LogCritical(filename + message.Substring("crit: ".Length)); + else if (testString.StartsWith("critical: ")) + _logger.LogCritical(filename + message.Substring("critical: ".Length)); + else if (testString.StartsWith("err: ")) + _logger.LogError(filename + message.Substring("err: ".Length)); + else if (testString.StartsWith("error: ")) + _logger.LogError(filename + message.Substring("error: ".Length)); + else if (testString.StartsWith("warn: ")) + _logger.LogWarning(filename + message.Substring("warn: ".Length)); + else if (testString.StartsWith("warning: ")) + _logger.LogWarning(filename + message.Substring("warning: ".Length)); + else if (testString.StartsWith("info: ")) + _logger.LogInformation(filename + message.Substring("info: ".Length)); + else if (testString.StartsWith("information: ")) + _logger.LogInformation(filename + message.Substring("information: ".Length)); + else if (testString.StartsWith("dbg: ")) + _logger.LogDebug(filename + message.Substring("dbg: ".Length)); + else if (testString.StartsWith("debug: ")) + _logger.LogDebug(filename + message.Substring("debug: ".Length)); + else if (testString.StartsWith("trc: ")) + _logger.LogTrace(filename + message.Substring("trc: ".Length)); + else if (testString.StartsWith("trace: ")) + _logger.LogTrace(filename + message.Substring("trace: ".Length)); + else _logger.LogInformation(filename + message); - - // Console.WriteLine("REDIRECT STDOUT: " + filename + message); } private void SendErrorToLog(object sender, DataReceivedEventArgs data) @@ -502,31 +539,13 @@ private void SendErrorToLog(object sender, DataReceivedEventArgs data) if (string.IsNullOrWhiteSpace(error)) return; - // if (string.IsNullOrEmpty(filename)) - // filename = "Process"; if (!string.IsNullOrEmpty(filename)) filename += ": "; if (string.IsNullOrEmpty(error)) error = "No error provided"; - if (error.Contains("LoadLibrary failed with error 126") && - error.Contains("onnxruntime_providers_cuda.dll")) - { - error = "Attempted to load ONNX runtime CUDA provider. No luck, moving on..."; - _logger.LogInformation(filename + error); - } - else if (error != "info: Microsoft.Hosting.Lifetime[0]") - { - // TOTAL HACK. ONNX/Tensorflow output is WAY too verbose for an error - if (error.Contains("I tensorflow/cc/saved_model/reader.cc:") || - error.Contains("I tensorflow/cc/saved_model/loader.cc:")) - _logger.LogInformation(filename + error); - else - _logger.LogError(filename + error); - }; - - // Console.WriteLine("REDIRECT ERROR: " + filename + error); + _logger.LogError(filename + error); } /// @@ -546,11 +565,11 @@ private void ModuleExecutionComplete(object? sender, EventArgs e) return; } - _logger.LogInformation($"Module {moduleId} has shutdown"); + _logger.LogInformation($"** Module {moduleId} has shutdown"); // Remove this from the list of running processes - if (_runningProcesses.TryGetValue(moduleId, out Process _)) - _runningProcesses.TryRemove(moduleId, out Process _); + if (_runningProcesses.TryGetValue(moduleId, out _)) + _runningProcesses.TryRemove(moduleId, out _); } } @@ -584,13 +603,27 @@ private void ModuleExecutionComplete(object? 
sender, EventArgs e)
        processEnvironmentVars.TryAdd("CPAI_MODULE_ID",   module.ModuleId);
        processEnvironmentVars.TryAdd("CPAI_MODULE_NAME", module.Name);
        processEnvironmentVars.TryAdd("CPAI_MODULE_PATH", _moduleSettings.GetModulePath(module));
-       processEnvironmentVars.TryAdd("CPAI_MODULE_QUEUENAME", module.Queue);
        processEnvironmentVars.TryAdd("CPAI_MODULE_PARALLELISM", module.Parallelism.ToString());
+       processEnvironmentVars.TryAdd("CPAI_MODULE_QUEUENAME", module.Queue);
+       if ((module.RequiredMb ?? 0) > 0)
+           processEnvironmentVars.TryAdd("CPAI_MODULE_REQUIRED_MB", module.RequiredMb?.ToString());
        processEnvironmentVars.TryAdd("CPAI_MODULE_SUPPORT_GPU", (module.SupportGPU ?? false).ToString());
        processEnvironmentVars.TryAdd("CPAI_ACCEL_DEVICE_NAME", module.AcceleratorDeviceName);
        processEnvironmentVars.TryAdd("CPAI_HALF_PRECISION", module.HalfPrecision);
        processEnvironmentVars.TryAdd("CPAI_LOG_VERBOSITY", (module.LogVerbosity ?? LogVerbosity.Info).ToString());

+       // Make sure the runtime environment variables used by the server are passed to the
+       // child process. Otherwise the .NET module may start in Production mode. We *hope* the
+       // environment vars are passed down to spawned processes, but we'll add these two
+       // just in case.
+       var aspnetEnv = Environment.GetEnvironmentVariable("ASPNETCORE_ENVIRONMENT");
+       if (aspnetEnv != null)
+           processEnvironmentVars.TryAdd("ASPNETCORE_ENVIRONMENT", aspnetEnv);
+
+       var dotnetEnv = Environment.GetEnvironmentVariable("DOTNET_ENVIRONMENT");
+       if (dotnetEnv != null)
+           processEnvironmentVars.TryAdd("DOTNET_ENVIRONMENT", dotnetEnv);
+
        return processEnvironmentVars;
    }
}
diff --git a/src/API/Server/FrontEnd/AiModules/ModuleSettings.cs b/src/API/Server/FrontEnd/AiModules/ModuleSettings.cs
index 737a9c06..8ba9ac74 100644
--- a/src/API/Server/FrontEnd/AiModules/ModuleSettings.cs
+++ b/src/API/Server/FrontEnd/AiModules/ModuleSettings.cs
@@ -81,7 +81,7 @@ public string ModuleInstallerScriptPath
    {
        get
        {
-           if (SystemInfo.OperatingSystem.EqualsIgnoreCase("windows"))
+           if (SystemInfo.IsWindows)
                return _moduleOptions.ModuleInstallerScriptsPath + "\\setup.bat";

            return _moduleOptions.ModuleInstallerScriptsPath + "/setup.sh";
@@ -172,42 +172,63 @@ public static void LoadModuleSettings(IConfigurationBuilder config, string modul
        string settingsFile = Path.Combine(modulePath, "modulesettings.json");
        if (File.Exists(settingsFile))
+       {
+           Console.WriteLine($"Trace: Loading {settingsFile}");
            config.AddJsonFile(settingsFile, optional: true, reloadOnChange: reloadOnChange);
+       }

        if (!string.IsNullOrEmpty(runtimeEnv))
        {
            settingsFile = Path.Combine(modulePath, $"modulesettings.{runtimeEnv}.json");
            if (File.Exists(settingsFile))
+           {
+               Console.WriteLine($"Trace: Loading {settingsFile}");
                config.AddJsonFile(settingsFile, optional: true, reloadOnChange: reloadOnChange);
+           }
        }

        settingsFile = Path.Combine(modulePath, $"modulesettings.{os}.json");
        if (File.Exists(settingsFile))
+       {
+           Console.WriteLine($"Trace: Loading {settingsFile}");
            config.AddJsonFile(settingsFile, optional: true, reloadOnChange: reloadOnChange);
+       }

        if (!string.IsNullOrEmpty(runtimeEnv))
        {
            settingsFile = Path.Combine(modulePath, $"modulesettings.{os}.{runtimeEnv}.json");
            if (File.Exists(settingsFile))
+           {
+               Console.WriteLine($"Trace: Loading {settingsFile}");
                config.AddJsonFile(settingsFile, optional: true, reloadOnChange: reloadOnChange);
+           }
        }

        settingsFile = Path.Combine(modulePath, $"modulesettings.{os}.{architecture}.json");
        if (File.Exists(settingsFile))
+       {
+           Console.WriteLine($"Trace: Loading
{settingsFile}"); config.AddJsonFile(settingsFile, optional: true, reloadOnChange: reloadOnChange); + } if (!string.IsNullOrEmpty(runtimeEnv)) { settingsFile = Path.Combine(modulePath, $"modulesettings.{os}.{architecture}.{runtimeEnv}.json"); if (File.Exists(settingsFile)) + { + Console.WriteLine($"Trace: Loading {settingsFile}"); config.AddJsonFile(settingsFile, optional: true, reloadOnChange: reloadOnChange); + } } - if (SystemInfo.ExecutionEnvironment == ExecutionEnvironment.Docker) + if (SystemInfo.IsDocker) { settingsFile = Path.Combine(modulePath, $"modulesettings.docker.json"); if (File.Exists(settingsFile)) + { + Console.WriteLine($"Trace: Loading {settingsFile}"); config.AddJsonFile(settingsFile, optional: true, reloadOnChange: reloadOnChange); + } } } @@ -236,6 +257,7 @@ public ModuleSettings(IConfiguration config, /// /// Returns a string that represents the current directory a module lives in. Note that a /// module's folder is always the same name as its Id. + /// REVIEW: [Matthew] module.ModulePath is set safely and can be used instead of this if you wish /// /// The module to launch /// A string object @@ -249,15 +271,8 @@ public string GetModulePath(ModuleBase module) /// /// Returns a string that represents the working directory for a module. + /// REVIEW: [Matthew] module.WorkingDirectory is set safely and can be used instead of this if you wish /// - /// - /// REVIEW: [Mattew] module.WorkingDirectory is set safely and can be used instead of this if you wish - /// The working directory isn't necessarily the dir the executed file is in. eg. .NET - /// exes can be buried deep in /bin/Debug/net6/net6.0-windows. The working directory also - /// isn't the Module directory, since the actual executable code for a module could be in a - /// subdirectory of that module. So we start by assuming it's the path where the executed - /// file is, but allow for an override (in the case of .NET development) if provided. - /// /// The module to launch /// A string object public string GetWorkingDirectory(ModuleBase module) @@ -313,14 +328,14 @@ public string GetFilePath(ModuleConfig module) // If it is "Python" then use our default Python location (in this case, python 3.7 or // 3.8 if Linux/macOS) if (runtime == "python") - runtime = SystemInfo.OperatingSystem == "Windows" ? "python37" : "python38"; + runtime = SystemInfo.IsWindows ? "python37" : "python38"; // HACK: In Docker, Python installs for downloaded modules can be local for downloaded // modules, or shared for pre-installed modules. For preinstalled/shared the python // command is in the format of python3.N because we don't install Python in the runtimes // folder, but in the OS itself. In Docker this means we call "python3.8", rather than // "/runtimes/bin/linux/python38/venv/bin/python3 - if (SystemInfo.ExecutionEnvironment == ExecutionEnvironment.Docker && + if (SystemInfo.IsDocker && module.RuntimeLocation == "Shared" && runtime.StartsWith("python")) { if (!runtime.StartsWith("python3.")) diff --git a/src/API/Server/FrontEnd/Config/ModuleOptions.cs b/src/API/Server/FrontEnd/Config/ModuleOptions.cs index bd4c5a98..2c1dcee9 100644 --- a/src/API/Server/FrontEnd/Config/ModuleOptions.cs +++ b/src/API/Server/FrontEnd/Config/ModuleOptions.cs @@ -1,4 +1,5 @@ -using System.Collections.Generic; +using System; +using System.Collections.Generic; namespace CodeProject.AI.API.Server.Frontend { @@ -12,6 +13,11 @@ public class ModuleOptions /// public string? 
ModuleListUrl { get; set; } + /// + /// Gets or sets the timeout for installing a module + /// + public TimeSpan ModuleInstallTimeout { get; set; } + /// /// The password that must be provided when uploading a new module for installation via /// the API. diff --git a/src/API/Server/FrontEnd/Config/PersistedOverrideSettings.cs b/src/API/Server/FrontEnd/Config/PersistedOverrideSettings.cs index d386491b..934ff6b0 100644 --- a/src/API/Server/FrontEnd/Config/PersistedOverrideSettings.cs +++ b/src/API/Server/FrontEnd/Config/PersistedOverrideSettings.cs @@ -1,5 +1,4 @@ using System.IO; -using System; using System.Text.Json.Nodes; using System.Threading.Tasks; @@ -44,20 +43,22 @@ public PersistedOverrideSettings(string storagePath) else settingsFilePath = Path.Combine(_storagePath, SettingsFilename); - return await ModuleConfigExtensions.LoadSettings(settingsFilePath); + return await ModuleConfigExtensions.LoadSettings(settingsFilePath) + .ConfigureAwait(false); } /// /// Saves the persisted override settings of the current setup to file. /// /// A JsonObject containing the settings - public async Task SaveSettings(JsonObject? settings) + public async Task SaveSettingsAsync(JsonObject? settings) { string settingsFilePath = SystemInfo.RuntimeEnvironment == RuntimeEnvironment.Development ? Path.Combine(_storagePath, DevSettingsFilename) : Path.Combine(_storagePath, SettingsFilename); - return await ModuleConfigExtensions.SaveSettings(settings, settingsFilePath); + return await ModuleConfigExtensions.SaveSettingsAsync(settings, settingsFilePath) + .ConfigureAwait(false); } } } diff --git a/src/API/Server/FrontEnd/Controllers/LogController.cs b/src/API/Server/FrontEnd/Controllers/LogController.cs index 02d2a544..3c58310f 100644 --- a/src/API/Server/FrontEnd/Controllers/LogController.cs +++ b/src/API/Server/FrontEnd/Controllers/LogController.cs @@ -21,7 +21,7 @@ public class LogController : ControllerBase /// /// Constructor /// - /// + /// The logger public LogController(ILogger logger) { _logger = logger; @@ -53,13 +53,6 @@ public ResponseBase AddLog([FromForm] string? entry, if (!string.IsNullOrWhiteSpace(label)) msg += "{{" + label + "}}"; - if (entry.Contains("LoadLibrary failed with error 126") && - entry.Contains("onnxruntime_providers_cuda.dll")) - { - entry = "Attempted to load ONNX runtime CUDA provider. No luck, moving on..."; - log_level = LogLevel.Information; - } - // strip out any terminal colourisation entry = Regex.Replace(entry, "\\[\\d+(;\\d+)\\d+m", string.Empty); diff --git a/src/API/Server/FrontEnd/Controllers/ModuleController.cs b/src/API/Server/FrontEnd/Controllers/ModuleController.cs index 8946ac82..f84338bd 100644 --- a/src/API/Server/FrontEnd/Controllers/ModuleController.cs +++ b/src/API/Server/FrontEnd/Controllers/ModuleController.cs @@ -83,11 +83,12 @@ public async Task UploadModule() using (Stream fileStream = new FileStream(downloadPath, FileMode.Create, FileAccess.Write)) { - await uploadedFile.CopyToAsync(fileStream); + await uploadedFile.CopyToAsync(fileStream).ConfigureAwait(false); fileStream.Close(); } - (bool success, string error) = await _moduleInstaller.InstallModuleAsync(downloadPath, null); + (bool success, string error) = await _moduleInstaller.InstallModuleAsync(downloadPath, null) + .ConfigureAwait(false); return success? new SuccessResponse() : CreateErrorResponse("Unable install module: " + error); } @@ -122,7 +123,8 @@ public async Task ListInstalledModules() .ToList() ?? 
new List(); // Mark those modules that can't be downloaded - List downloadables = await _moduleInstaller.GetDownloadableModules(); + List downloadables = await _moduleInstaller.GetDownloadableModules() + .ConfigureAwait(false); foreach (ModuleDescription module in modules) { if (!downloadables.Any(download => download.ModuleId == module.ModuleId)) @@ -145,7 +147,8 @@ public async Task ListInstalledModules() [ProducesResponseType(StatusCodes.Status400BadRequest)] public async Task ListAvailableModules() { - List moduleList = await _moduleInstaller.GetDownloadableModules(); + List moduleList = await _moduleInstaller.GetDownloadableModules() + .ConfigureAwait(false); return new ModuleListResponse() { @@ -164,7 +167,8 @@ public async Task ListAvailableModules() [ProducesResponseType(StatusCodes.Status400BadRequest)] public async Task ListAllModules() { - List downloadableModules = await _moduleInstaller.GetDownloadableModules() + List downloadableModules = await _moduleInstaller.GetDownloadableModules() + .ConfigureAwait(false) ?? new List(); string currentServerVersion = _versionConfig.VersionInfo!.Version; @@ -203,14 +207,18 @@ public async Task ListAllModules() /// /// The module to install /// The version of the module to install + /// Whether or not to ignore the download cache. If true, the module + /// will always be freshly downloaded /// A Response Object. - [HttpPost("install/{moduleId}/{version}", Name = "Install Module")] + [HttpPost("install/{moduleId}/{version}/{nocache:bool?}", Name = "Install Module")] [Produces("application/json")] [ProducesResponseType(StatusCodes.Status200OK)] [ProducesResponseType(StatusCodes.Status400BadRequest)] - public async Task InstallModuleAsync(string moduleId, string version) + public async Task InstallModuleAsync(string moduleId, string version, + bool noCache = false) { - (bool success, string error) = await _moduleInstaller.DownloadAndInstallModuleAsync(moduleId, version); + var downloadTask = _moduleInstaller.DownloadAndInstallModuleAsync(moduleId, version, noCache); + (bool success, string error) = await downloadTask.ConfigureAwait(false); return success? new SuccessResponse() : CreateErrorResponse(error); } @@ -225,7 +233,8 @@ public async Task InstallModuleAsync(string moduleId, string versi [ProducesResponseType(StatusCodes.Status400BadRequest)] public async Task UninstallModuleAsync(string moduleId) { - (bool success, string error) = await _moduleInstaller.UninstallModuleAsync(moduleId); + (bool success, string error) = await _moduleInstaller.UninstallModuleAsync(moduleId) + .ConfigureAwait(false); return success? 
new SuccessResponse() : CreateErrorResponse(error);
        }
diff --git a/src/API/Server/FrontEnd/Controllers/ProxyController.cs b/src/API/Server/FrontEnd/Controllers/ProxyController.cs
index 6dcfc383..5591d123 100644
--- a/src/API/Server/FrontEnd/Controllers/ProxyController.cs
+++ b/src/API/Server/FrontEnd/Controllers/ProxyController.cs
@@ -1,37 +1,38 @@
 using System;
 using System.Collections.Generic;
+using System.Diagnostics;
 using System.IO;
 using System.Globalization;
 using System.Linq;
 using System.Text;
 using System.Text.Json;
+using System.Text.Json.Nodes;
 using System.Threading.Tasks;

 using Microsoft.AspNetCore.Http;
 using Microsoft.AspNetCore.Mvc;
 using Microsoft.Extensions.Options;

-using CodeProject.AI.SDK;
 using CodeProject.AI.API.Server.Backend;
 using CodeProject.AI.API.Common;
-using System.Text.Json.Nodes;
-using System.Diagnostics;
+using CodeProject.AI.SDK;
+using CodeProject.AI.SDK.Common;

 namespace CodeProject.AI.API.Server.Frontend.Controllers
 {
    // ------------------------------------------------------------------------------
-   // When a backend analysis module starts it will register itself with the main CodeProject.AI
+   // When a backend analysis module starts it will register itself with the main
    // Server. It does this by posting a Register request to the Server which
    //  - provides the end part of the url for the request
    //  - the name of the queue that the request will be sent to.
    //  - the command string that will be associated with the payload sent to the queue.
    //
-   // To initiate an AI operation, the client will post a payload to the
+   // To initiate an AI operation, the client will post a payload to the server
    // This is accomplished by
    //  - getting the url ending.
    //  - using this to get the queue name and command name
-   //  - sending the above and payload to the queue
-   //  - await the respons
+   //  - sending the above, plus a payload, to the queue
+   //  - await the response
    //  - return the response to the caller.
    // ------------------------------------------------------------------------------

@@ -47,6 +48,8 @@ public class ProxyController : ControllerBase
        private readonly CommandDispatcher _dispatcher;
        private readonly BackendRouteMap   _routeMap;
        private readonly ModuleCollection  _modules;
+       private readonly TriggersConfig    _triggersConfig;
+       private readonly TriggerTaskRunner _commandRunner;

        ///
        /// Initializes a new instance of the ProxyController class.
        ///
        /// The Command Dispatcher instance.
/// The Route Manager /// Contains the Collection of modules - public ProxyController(CommandDispatcher dispatcher, BackendRouteMap routeMap, - IOptions modulesConfig) + /// Contains the triggers + /// The command runner + public ProxyController(CommandDispatcher dispatcher, + BackendRouteMap routeMap, + IOptions modulesConfig, + IOptions triggersConfig, + TriggerTaskRunner commandRunner) { - _dispatcher = dispatcher; - _routeMap = routeMap; - _modules = modulesConfig.Value; + _dispatcher = dispatcher; + _routeMap = routeMap; + _modules = modulesConfig.Value; + _triggersConfig = triggersConfig.Value; + _commandRunner = commandRunner; } /// @@ -75,7 +85,8 @@ public async Task Post(string path) Stopwatch sw = Stopwatch.StartNew(); - var response = await _dispatcher.QueueRequest(routeInfo!.QueueName, payload); + var response = await _dispatcher.QueueRequest(routeInfo!.QueueName, payload) + .ConfigureAwait(false); long analysisRoundTripMs = sw.ElapsedMilliseconds; @@ -89,6 +100,9 @@ public async Task Post(string path) jsonResponse ??= new JsonObject(); jsonResponse["analysisRoundTripMs"] = analysisRoundTripMs; + // Check for, and execute if needed, triggers + ProcessTriggers(routeInfo!.QueueName, jsonResponse); + // Wrap it back up responseString = JsonSerializer.Serialize(jsonResponse) as string; return new ContentResult @@ -137,7 +151,7 @@ public IActionResult ApiSummary() string category = index > 0 ? routeInfo.Path.Substring(0, index) : routeInfo.Path; string route = index > 0 ? routeInfo.Path.Substring(index + 1) : string.Empty; - // string path = $"/{version}/{category}/{route}"; + // string path = $"/{version}/{category}/{route}"; string path = $"{version}/{routeInfo.Path}"; if (category != currentCategory) @@ -281,5 +295,59 @@ private byte[] GetFileData(IFormFile x) return data; } + + private void ProcessTriggers(string queueName, JsonObject response) + { + if (_triggersConfig.Triggers is null || _triggersConfig.Triggers.Length == 0) + return; + + string platform = SystemInfo.Platform; + + try + { + foreach (Trigger trigger in _triggersConfig.Triggers) + { + // If the trigger is queue specific, check + if (!string.IsNullOrWhiteSpace(trigger.Queue) && + !trigger.Queue.EqualsIgnoreCase(queueName)) + continue; + + // Is there a task to run on this platform, and a property to look for? + TriggerTask? task = trigger.GetTask(platform); + if (string.IsNullOrEmpty(trigger.PropertyName) || task is null || + string.IsNullOrEmpty(task.Command)) + continue; + + if (string.IsNullOrWhiteSpace(trigger.PredictionsCollectionName)) + { + float.TryParse(response["confidence"]?.ToString(), out float confidence); + string? value = response[trigger.PropertyName]?.ToString(); + if (trigger.Test(value, confidence)) + _commandRunner.RunCommand(task); + } + else + { + var predictions = response[trigger.PredictionsCollectionName]; + if (predictions is not null) + { + foreach (var prediction in predictions.AsArray()) + { + if (prediction is null) + continue; + + float.TryParse(prediction["confidence"]?.ToString(), out float confidence); + string? 
value = prediction[trigger.PropertyName]?.ToString(); + if (trigger.Test(value, confidence)) + _commandRunner.RunCommand(task); + } + } + } + } + } + catch (Exception ex) + { + Console.WriteLine(ex); + } + } } } diff --git a/src/API/Server/FrontEnd/Controllers/QueueController.cs b/src/API/Server/FrontEnd/Controllers/QueueController.cs index ee586a89..147bca4b 100644 --- a/src/API/Server/FrontEnd/Controllers/QueueController.cs +++ b/src/API/Server/FrontEnd/Controllers/QueueController.cs @@ -1,5 +1,7 @@ using System; using System.IO; +using System.Text.Json; +using System.Text.Json.Nodes; using System.Threading; using System.Threading.Tasks; @@ -14,7 +16,7 @@ namespace CodeProject.AI.API.Server.Frontend.Controllers { /// - /// Handles pulling requests from the Command Queue and returning reponses to the calling method. + /// Handles pulling requests from the Command Queue and returning responses to the calling method. /// [Route("v1/queue")] [ApiController] @@ -40,7 +42,7 @@ public QueueController(QueueServices queueService, /// /// The name of the Queue. /// The ID of the module making the request - /// The excution provider, typically the GPU library in use + /// The execution provider, typically the GPU library in use /// The aborted request token. /// The Request Object. [HttpGet("{name}", Name = "GetRequestFromQueue")] @@ -52,8 +54,8 @@ public async Task GetQueue([FromRoute] string name, [FromQuery] string? executionProvider, CancellationToken token) { - - BackendRequestBase? request = await _queueService.DequeueRequestAsync(name, token); + BackendRequestBase? request = await _queueService.DequeueRequestAsync(name, token) + .ConfigureAwait(false); bool shuttingDown = false; @@ -79,26 +81,34 @@ public async Task GetQueue([FromRoute] string name, /// the named queue if available. /// /// The id of the request the response is for. - /// The ID of the module making the request - /// The hardware accelerator execution provider. /// The Request Object. [HttpPost("{reqid}", Name = "SetResponseInQueue")] [ProducesResponseType(StatusCodes.Status200OK)] [ProducesResponseType(StatusCodes.Status400BadRequest)] - public async Task SetResponse(string reqid, [FromQuery] string moduleId, - [FromQuery] string? executionProvider = null) + public async Task SetResponse(string reqid) { - string? response = null; + string? responseString = null; using var bodyStream = HttpContext.Request.Body; - if (bodyStream != null) + if (bodyStream != null) { using var textreader = new StreamReader(bodyStream); - response = await textreader.ReadToEndAsync(); + responseString = await textreader.ReadToEndAsync().ConfigureAwait(false); } - UpdateProcessStatus(moduleId, true, executionProvider: executionProvider); + var response = JsonSerializer.Deserialize(responseString ?? ""); + + string? command = response?["command"]?.ToString(); + string? moduleId = response?["moduleId"]?.ToString(); + string? 
executionProvider = response?["executionProvider"]?.ToString(); + + if (!string.IsNullOrWhiteSpace(moduleId)) + { + bool incrementProcessCount = command is not null && !command.EqualsIgnoreCase("status"); + UpdateProcessStatus(moduleId, incrementProcessCount: incrementProcessCount, + executionProvider: executionProvider); + } - var success = _queueService.SetResult(reqid, response); + var success = _queueService.SetResult(reqid, responseString); if (!success) return BadRequest("failure to set response."); @@ -115,6 +125,7 @@ private void UpdateProcessStatus(string moduleId, bool incrementProcessCount = f { if (status!.Status != ProcessStatusType.Stopping) status.Status = shuttingDown? ProcessStatusType.Stopping : ProcessStatusType.Started; + status.Started ??= DateTime.UtcNow; status.LastSeen = DateTime.UtcNow; diff --git a/src/API/Server/FrontEnd/Controllers/SettingsController.cs b/src/API/Server/FrontEnd/Controllers/SettingsController.cs index aea47e66..2d379506 100644 --- a/src/API/Server/FrontEnd/Controllers/SettingsController.cs +++ b/src/API/Server/FrontEnd/Controllers/SettingsController.cs @@ -123,7 +123,7 @@ public async Task UpsertSettingAsync(string moduleId, [FromForm] s // Special case if (settings.Name.EqualsIgnoreCase("Restart")) { - success = await _moduleProcessServices.RestartProcess(module); + success = await _moduleProcessServices.RestartProcess(module).ConfigureAwait(false); } else { @@ -131,15 +131,16 @@ public async Task UpsertSettingAsync(string moduleId, [FromForm] s module.UpsertSetting(settings.Name, settings.Value); // Restart the module and persist the settings - if (await _moduleProcessServices.RestartProcess(module)) + if (await _moduleProcessServices.RestartProcess(module).ConfigureAwait(false)) { var settingStore = new PersistedOverrideSettings(_storagePath); - var overrideSettings = await settingStore.LoadSettings(); + var overrideSettings = await settingStore.LoadSettings().ConfigureAwait(false); if (ModuleConfigExtensions.UpsertSettings(overrideSettings, module.ModuleId!, settings.Name, settings.Value)) { - success = await settingStore.SaveSettings(overrideSettings); + success = await settingStore.SaveSettingsAsync(overrideSettings) + .ConfigureAwait(false); } } } @@ -166,7 +167,7 @@ public async Task UpsertSettingsAsync([FromBody] SettingsDict sett // Load up the current persisted settings so we can update and re-save them var settingStore = new PersistedOverrideSettings(_storagePath); - var overrideSettings = await settingStore.LoadSettings(); + var overrideSettings = await settingStore.LoadSettings().ConfigureAwait(false); // Keep tabs on which modules need to be restarted List? moduleIdsToRestart = new(); @@ -212,11 +213,15 @@ public async Task UpsertSettingsAsync([FromBody] SettingsDict sett { ModuleConfig? 
module = _moduleCollection.GetModule(moduleId);
                if (module is not null)
-                   restartSuccess = await _moduleProcessServices.RestartProcess(module) && restartSuccess;
+               {
+                   var restartTask = _moduleProcessServices.RestartProcess(module);
+                   restartSuccess  = await restartTask.ConfigureAwait(false) && restartSuccess;
+               }
            }

            // Only persist these override settings if all modules restarted successfully
-           bool success = restartSuccess && await settingStore.SaveSettings(overrideSettings);
+           bool success = restartSuccess && await settingStore.SaveSettingsAsync(overrideSettings)
+                                                              .ConfigureAwait(false);

            return new ResponseBase { success = success };
        }
diff --git a/src/API/Server/FrontEnd/Controllers/StatusController.cs b/src/API/Server/FrontEnd/Controllers/StatusController.cs
index 6cfe2b93..598c5f6f 100644
--- a/src/API/Server/FrontEnd/Controllers/StatusController.cs
+++ b/src/API/Server/FrontEnd/Controllers/StatusController.cs
@@ -3,13 +3,13 @@
 using System.Threading.Tasks;
 using System.Text;

+using Microsoft.AspNetCore.Hosting;
 using Microsoft.AspNetCore.Http;
 using Microsoft.AspNetCore.Mvc;
-using Microsoft.AspNetCore.Hosting;
+using Microsoft.Extensions.Options;

 using CodeProject.AI.API.Common;
 using CodeProject.AI.SDK.Common;
-using Microsoft.Extensions.Options;

 namespace CodeProject.AI.API.Server.Frontend.Controllers
 {
@@ -25,6 +25,7 @@ public class StatusController : ControllerBase
        ///
        private readonly ServerVersionService  _versionService;
        private readonly ModuleSettings        _moduleSettings;
+       private readonly ServerOptions         _serverOptions;
        private readonly ModuleProcessServices _moduleProcessService;
        private readonly ModuleCollection      _moduleCollection;

@@ -33,15 +34,18 @@ public class StatusController : ControllerBase
        ///
        /// The Version instance.
        /// The module settings instance
+       /// The server options
        /// The Module Process Services.
        /// The Module Collection.
        public StatusController(ServerVersionService versionService,
                                ModuleSettings moduleSettings,
+                               IOptions serverOptions,
                                ModuleProcessServices moduleProcessService,
                                IOptions moduleCollection)
        {
            _versionService       = versionService;
            _moduleSettings       = moduleSettings;
+           _serverOptions        = serverOptions.Value;
            _moduleProcessService = moduleProcessService;
            _moduleCollection     = moduleCollection.Value;
        }
@@ -97,12 +101,15 @@ public ResponseBase GetVersion()
        [ProducesResponseType(StatusCodes.Status400BadRequest)]
        public async Task GetSystemStatus()
        {
-           // run these in parallel as they have a Task.Delay(1000) in them.
-           var cpuUsageTask     = SystemInfo.GetCpuUsage();
-           var gpuUsageTask     = SystemInfo.GetGpuUsage();
-           var gpuInfoTask      = SystemInfo.GetGpuUsageInfo();
-           var gpuVideoInfoTask = SystemInfo.GetVideoAdapterInfo();
-           var serverVersion    = _versionService.VersionConfig?.VersionInfo?.Version ?? string.Empty;
+           var serverVersion = _versionService.VersionConfig?.VersionInfo?.Version ?? string.Empty;
+
+           // Each of these calls contains a Task.Delay(1000), so they are awaited one after the other here.
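Before this change the GPU probes were started together and awaited afterwards; the rewrite awaits each call in turn. If the overlapped behaviour were ever wanted again, a sketch using only the Async method names introduced in this diff (illustrative, not part of the change):

    var gpuInfoTask      = SystemInfo.GetGpuUsageInfoAsync();
    var gpuUsageTask     = SystemInfo.GetGpuUsageAsync();
    var gpuVideoInfoTask = SystemInfo.GetVideoAdapterInfoAsync();
    var gpuMemUsageTask  = SystemInfo.GetGpuMemoryUsageAsync();

    // Each probe contains a Task.Delay(1000); overlapping them bounds the wait at roughly one second.
    await Task.WhenAll(gpuInfoTask, gpuUsageTask, gpuVideoInfoTask, gpuMemUsageTask)
              .ConfigureAwait(false);

    string gpuInfo  = gpuInfoTask.Result;
    int    gpuUsage = gpuUsageTask.Result;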
+ string gpuInfo = await SystemInfo.GetGpuUsageInfoAsync().ConfigureAwait(false); + int gpuUsage = await SystemInfo.GetGpuUsageAsync().ConfigureAwait(false); + string gpuVideoInfo = await SystemInfo.GetVideoAdapterInfoAsync().ConfigureAwait(false); + ulong gpuMemUsage = await SystemInfo.GetGpuMemoryUsageAsync().ConfigureAwait(false); + int cpuUsage = SystemInfo.GetCpuUsage(); + ulong systemMemUsage = SystemInfo.GetSystemMemoryUsage(); var systemStatus = new StringBuilder(); systemStatus.AppendLine($"Server version: {serverVersion}"); @@ -110,11 +117,11 @@ public async Task GetSystemStatus() systemStatus.AppendLine(); systemStatus.AppendLine(); - systemStatus.AppendLine(await gpuInfoTask); + systemStatus.AppendLine(gpuInfo); systemStatus.AppendLine(); systemStatus.AppendLine(); - systemStatus.AppendLine(await gpuVideoInfoTask); + systemStatus.AppendLine(gpuVideoInfo); systemStatus.AppendLine(); systemStatus.AppendLine(); @@ -135,10 +142,10 @@ public async Task GetSystemStatus() var response = new { - CpuUsage = await cpuUsageTask, - SystemMemUsage = await SystemInfo.GetSystemMemoryUsage(), - GpuUsage = await gpuUsageTask, - GpuMemUsage = await SystemInfo.GetGpuMemoryUsage(), + CpuUsage = cpuUsage, + SystemMemUsage = systemMemUsage, + GpuUsage = gpuUsage, + GpuMemUsage = gpuMemUsage, ServerStatus = systemStatus.ToString() }; @@ -175,7 +182,7 @@ public ObjectResult GetPaths([FromServices] IWebHostEnvironment env) [ProducesResponseType(StatusCodes.Status400BadRequest)] public async Task GetUpdateAvailable() { - VersionInfo? latest = await _versionService.GetLatestVersion(); + VersionInfo? latest = await _versionService.GetLatestVersion().ConfigureAwait(false); if (latest is null) { return new VersionUpdateResponse @@ -230,12 +237,24 @@ public ResponseBase ListAnalysisStatus() foreach (ProcessStatus process in _moduleProcessService.ListProcessStatuses()) { - if (!string.IsNullOrEmpty(process.ModuleId)) + ModuleConfig? module = string.IsNullOrEmpty(process.ModuleId) ? null + : _moduleCollection.GetModule(process.ModuleId); + + if (module is not null) { - ModuleConfig? module = _moduleCollection.GetModule(process.ModuleId); - process.StartupSummary = module?.SettingsSummary ?? string.Empty; + process.StartupSummary = module.SettingsSummary ?? string.Empty; if (string.IsNullOrEmpty(process.StartupSummary)) + { Console.WriteLine($"Unable to find module for {process.ModuleId}"); + } + else + { + // Expanding out the macros causes the display to be too wide + // process.StartupSummary = _moduleSettings.ExpandOption(process.StartupSummary, + // module.ModulePath); + string appRoot = _serverOptions.ApplicationRootPath!; + process.StartupSummary = process.StartupSummary.Replace(appRoot, "<root>"); + } } } diff --git a/src/API/Server/FrontEnd/Frontend.csproj b/src/API/Server/FrontEnd/Frontend.csproj index 6bda01ec..a9ede97d 100644 --- a/src/API/Server/FrontEnd/Frontend.csproj +++ b/src/API/Server/FrontEnd/Frontend.csproj @@ -7,12 +7,20 @@ https://learn.microsoft.com/en-us/visualstudio/msbuild/property-functions?view=v - + API Server + CodeProject.AI Server + A Service hosting the CodeProject.AI WebAPI for face detection and recognition, object detection, and scene classification, and other AI operations. 
+ CodeProject + CodeProject + 2.1.12 + + + + CodeProject.AI.API.Server.Frontend CodeProject.AI.Server - 2.1.0 disable enable 14515168-17dd-49db-9023-0749bb408a37 @@ -21,27 +29,22 @@ https://learn.microsoft.com/en-us/visualstudio/msbuild/property-functions?view=v AnyCPU net7.0 false - - - - - Linux - ..\..\..\.. - codeproject/ai-server + false + true - CodeProject.AI Server - CodeProject - CodeProject - true - false SSPL-1.0 - A Service hosting the CodeProject.AI WebAPI for face detection and recognition, object detection, and scene classification, and other AI operations. https://www.codeproject.com/ai codeproject125x125.png - net7.0 + + + + + Linux + ..\..\..\.. + codeproject/ai-server diff --git a/src/API/Server/FrontEnd/Logging/ServerLogger.cs b/src/API/Server/FrontEnd/Logging/ServerLogger.cs index 140b1525..631e7cf5 100644 --- a/src/API/Server/FrontEnd/Logging/ServerLogger.cs +++ b/src/API/Server/FrontEnd/Logging/ServerLogger.cs @@ -122,18 +122,31 @@ public void Log(LogLevel logLevel, EventId eventId, TState state, string message = formatter(state, exception); string label = string.Empty; - // Trim the category down a little - if (!string.IsNullOrEmpty(_categoryName)) + // We could create a dictionary of search/replace/new log level but then we run into + // issues such as "contains X AND contains Y" so just hardcode it here. + + // This is more or less expected as we test for ONNXruntime. It's info, not a crash + if (message.Contains("LoadLibrary failed with error 126") && + message.Contains("onnxruntime_providers_cuda.dll")) + { + message = "Attempted to load ONNX runtime CUDA provider. No luck, moving on..."; + logLevel = LogLevel.Information; + } + // Annoying + else if (message.Contains("Failed to read environment variable [DOTNET_ROOT]")) + { + logLevel = LogLevel.Debug; + } + // Pointless + else if (message.Contains("Microsoft.Hosting.Lifetime[0]")) + { + return; + } + // ONNX/Tensorflow output is WAY too verbose for an error + else if (message.Contains("I tensorflow/cc/saved_model/reader.cc:") || + message.Contains("I tensorflow/cc/saved_model/loader.cc:")) { - /* - var parts = _categoryName.Split('.', StringSplitOptions.RemoveEmptyEntries); - category = parts[^1]; - if (parts.Any(p => p == "CodeProject")) - category = "CodeProject." + category; - */ - - // if (_categoryName.StartsWithIgnoreCase("CodeProject.")) - // category = "Server"; + logLevel = LogLevel.Information; } // We're using the .NET logger which means we don't have a huge amount of control diff --git a/src/API/Server/FrontEnd/Program.cs b/src/API/Server/FrontEnd/Program.cs index 2f1a32b6..951ea955 100644 --- a/src/API/Server/FrontEnd/Program.cs +++ b/src/API/Server/FrontEnd/Program.cs @@ -8,9 +8,11 @@ using System.Net.Sockets; using System.Reflection; using System.Runtime.InteropServices; +using System.Threading; using System.Threading.Tasks; using CodeProject.AI.API.Common; +using CodeProject.AI.API.Server.Backend; using CodeProject.AI.SDK.Common; using Microsoft.AspNetCore.Hosting; @@ -26,9 +28,13 @@ namespace CodeProject.AI.API.Server.Frontend /// public class Program { + const int defaultPort = 32168; + const int legacyPort = 5000; + const int legacyPortOsx = 5500; + static private ILogger? _logger = null; - static int _port = 32168; + static int _port = defaultPort; // static int _sPort = 5001; - eventually for SSL /// @@ -37,137 +43,147 @@ public class Program /// The command line args. 
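+ // Usage: run with no arguments to start the server. On Windows, a single argument of
+ // /Install, /Uninstall, /Start or /Stop manages the Windows Service (handled below).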
public static async Task Main(string[] args) { - // TODO: Pull these from the correct location - const string company = "CodeProject"; - const string product = "AI"; - - await SystemInfo.InitializeAsync(); + const string productCategory = "AI"; - // lower cased as Linux has case sensitive file names - string os = SystemInfo.OperatingSystem.ToLower(); - string architecture = SystemInfo.Architecture.ToLower(); - string? runtimeEnv = SystemInfo.RuntimeEnvironment == SDK.Common.RuntimeEnvironment.Development || - SystemInfo.IsDevelopmentCode ? "development" : string.Empty; + bool isWindows = RuntimeInformation.IsOSPlatform(OSPlatform.Windows); var assembly = Assembly.GetExecutingAssembly(); - var assemblyName = (assembly.GetName().Name ?? string.Empty) - + (os == "windows" ? ".exe" : ".dll"); - var serviceName = assembly.GetCustomAttribute()?.Product + var assemblyName = (assembly.GetName().Name ?? string.Empty) + (isWindows ? ".exe" : ".dll"); + var companyName = assembly.GetCustomAttribute()?.Company + ?? "CodeProject"; + var productName = assembly.GetCustomAttribute()?.Product ?? assemblyName.Replace(".", " "); + + var serviceName = productName; var servicePath = Path.Combine(AppContext.BaseDirectory, assemblyName); var serviceDescription = assembly.GetCustomAttribute()?.Description - ?? string.Empty; + ?? string.Empty; - if (args.Length == 1) + // Prevent this app from starting more than one instance + using (var mutex = new Mutex(false, serviceName)) { - if (args[0].EqualsIgnoreCase("/Install")) + if (!mutex.WaitOne(0)) { - WindowsServiceInstaller.Install(servicePath, serviceName, serviceDescription); + Console.WriteLine("This application is already running."); return; } - else if (args[0].EqualsIgnoreCase("/Uninstall")) + + await SystemInfo.InitializeAsync().ConfigureAwait(false); + + // lower cased as Linux has case sensitive file names + string os = SystemInfo.OperatingSystem.ToLower(); + string architecture = SystemInfo.Architecture.ToLower(); + string? runtimeEnv = SystemInfo.RuntimeEnvironment == SDK.Common.RuntimeEnvironment.Development || + SystemInfo.IsDevelopmentCode ? "development" : string.Empty; + + + if (args.Length == 1) { - WindowsServiceInstaller.Uninstall(serviceName); - KillOrphanedProcesses(runtimeEnv); - return; + if (args[0].EqualsIgnoreCase("/Install")) + { + WindowsServiceInstaller.Install(servicePath, serviceName, serviceDescription); + return; + } + else if (args[0].EqualsIgnoreCase("/Uninstall")) + { + WindowsServiceInstaller.Uninstall(serviceName); + KillOrphanedProcesses(runtimeEnv); + return; + } + else if (args[0].EqualsIgnoreCase("/Start")) + { + WindowsServiceInstaller.Start(serviceName); + return; + } + else if (args[0].EqualsIgnoreCase("/Stop")) + { + WindowsServiceInstaller.Stop(serviceName); + KillOrphanedProcesses(runtimeEnv); + return; + } } - else if (args[0].EqualsIgnoreCase("/Start")) + + // make sure any processes that didn't get killed on the Service shutdown get killed now. + KillOrphanedProcesses(runtimeEnv); + + // Get a directory for the given platform that allows modules to store persisted data + string programDataDir = Environment.GetFolderPath(Environment.SpecialFolder.CommonApplicationData); + string applicationDataDir = $"{programDataDir}\\{companyName}\\{productCategory}".Replace('\\', Path.DirectorySeparatorChar); + + // .NET's suggestions for macOS and Linux aren't great. Let's do something different.
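+ // (For illustration: with the default company/product values above these resolve to
+ //  C:\ProgramData\CodeProject\AI on a typical Windows install,
+ //  /Library/Application Support/CodeProject/AI on macOS, and /etc/codeproject/ai on Linux.)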
+ if (SystemInfo.IsMacOS) { - WindowsServiceInstaller.Start(serviceName); - return; + applicationDataDir = $"/Library/Application Support/{companyName}/{productCategory}"; } - else if (args[0].EqualsIgnoreCase("/Stop")) + else if (SystemInfo.IsLinux) { - WindowsServiceInstaller.Stop(serviceName); - KillOrphanedProcesses(runtimeEnv); - return; + applicationDataDir = $"/etc/{companyName.ToLower()}/{productCategory.ToLower()}"; } - } - - // make sure any processes that didn't get killed on the Service shutdown get killed now. - KillOrphanedProcesses(runtimeEnv); - - // GetProcessStatus a directory for the given platform that allows momdules to store persisted data - string programDataDir = Environment.GetFolderPath(Environment.SpecialFolder.CommonApplicationData); - string applicationDataDir = $"{programDataDir}\\{company}\\{product}".Replace('\\', Path.DirectorySeparatorChar); - - // .NET's suggestion for macOS and Linux aren't great. Let's do something different. - if (os == "macos") - { - applicationDataDir = $"/Library/Application Support/{company}/{product}"; - } - else if (os == "linux") - { - applicationDataDir = $"/etc/{company.ToLower()}/{product.ToLower()}"; - } - - // Store this dir in the config settings so we can get to it later. - var inMemoryConfigData = new Dictionary { - { "ApplicationDataDir", applicationDataDir } - }; - bool reloadConfigOnChange = SystemInfo.ExecutionEnvironment != ExecutionEnvironment.Docker; + // Store this dir in the config settings so we can get to it later. + var inMemoryConfigData = new Dictionary { + { "ApplicationDataDir", applicationDataDir } + }; - // Setup our custom Configuration Loader pipeline and build the configuration. - IHost? host = CreateHostBuilder(args) - .ConfigureAppConfiguration(SetupConfigurationLoaders(args, os, architecture, - runtimeEnv, applicationDataDir, - inMemoryConfigData, - reloadConfigOnChange)) - .Build() - ; + bool reloadConfigOnChange = !SystemInfo.IsDocker; - _logger = host.Services.GetService>(); + // Setup our custom Configuration Loader pipeline and build the configuration. + IHost? host = CreateHostBuilder(args) + .ConfigureAppConfiguration(SetupConfigurationLoaders(args, os, architecture, + runtimeEnv, applicationDataDir, + inMemoryConfigData, + reloadConfigOnChange)) + .Build() + ; - if (_logger != null) - { - string systemInfo = SystemInfo.GetSystemInfo(); - foreach (string line in systemInfo.Split('\n')) - _logger.LogInformation("** " + line.TrimEnd()); + _logger = host.Services.GetService>(); - _logger.LogInformation($"** App DataDir: {applicationDataDir}"); + if (_logger != null) + { + string systemInfo = SystemInfo.GetSystemInfo(); + foreach (string line in systemInfo.Split('\n')) + _logger.LogInformation("** " + line.TrimEnd()); - string info = await SystemInfo.GetGpuUsageInfo(); - foreach (string line in info.Split('\n')) - _logger.LogInformation(line.TrimEnd()); + _logger.LogInformation($"** App DataDir: {applicationDataDir}"); - info = await SystemInfo.GetVideoAdapterInfo(); - foreach (string line in info.Split('\n')) - _logger.LogInformation(line.TrimEnd()); - } + string info = await SystemInfo.GetVideoAdapterInfoAsync().ConfigureAwait(false); + foreach (string line in info.Split('\n')) + _logger.LogInformation(line.TrimEnd()); + } - Task? hostTask; - hostTask = host.RunAsync(); -#if DEBUG - try - { - OpenBrowser($"http://localhost:{_port}/"); - } - catch (Exception ex) - { - _logger?.LogError(ex, "Unable to open Dashboard on startup."); - } -#endif - try - { - await hostTask; + Task? 
hostTask; + hostTask = host.RunAsync(); + #if DEBUG + try + { + OpenBrowser($"http://localhost:{_port}/"); + } + catch (Exception ex) + { + _logger?.LogError(ex, "Unable to open Dashboard on startup."); + } + #endif + try + { + await hostTask.ConfigureAwait(false); - Console.WriteLine("Shutting down"); - } - catch (Exception ex) - { - // TODO: Host is gone, so no logger ?? - Console.WriteLine($"\n\nUnable to start the server: {ex.Message}.\n" + - "Check that another instance is not running on the same port."); - Console.Write("Press Enter to close."); - Console.ReadLine(); + Console.WriteLine("Shutting down"); + } + catch (Exception ex) + { + // TODO: Host is gone, so no logger ?? + Console.WriteLine($"\n\nUnable to start the server: {ex.Message}.\n" + + "Check that another instance is not running on the same port."); + Console.Write("Press Enter to close."); + Console.ReadLine(); + } } } private static void KillOrphanedProcesses(string? runtimeEnv) { - if (SystemInfo.OperatingSystem.EqualsIgnoreCase("Windows")) + if (SystemInfo.IsWindows) { try { @@ -216,7 +232,7 @@ private static Action SetupConfigurat // RemoveProcessStatus the default sources and rebuild it. config.Sources.Clear(); - // add in the default appsetting.json file and its variants + // add in the default appsettings.json file and its variants // In order // appsettings.json // appsettings.development.json @@ -259,7 +275,7 @@ private static Action SetupConfigurat config.AddJsonFile(settingsFile, optional: true, reloadOnChange: reloadConfigOnChange); } - if (SystemInfo.ExecutionEnvironment == ExecutionEnvironment.Docker) + if (SystemInfo.IsDocker) { settingsFile = Path.Combine(baseDir, $"appsettings.docker.json"); if (File.Exists(settingsFile)) @@ -284,13 +300,17 @@ private static Action SetupConfigurat config.AddJsonFile(Path.Combine(baseDir, VersionConfig.VersionCfgFilename), reloadOnChange: reloadConfigOnChange, optional: true); + // Load the triggers.json file to load the triggers + config.AddJsonFile(Path.Combine(baseDir, TriggersConfig.TriggersCfgFilename), + reloadOnChange: reloadConfigOnChange, optional: true); + // Load the modulesettings.json files to get analysis module settings LoadModulesConfiguration(config, runtimeEnv); // Load the last saved config values as set by the user LoadUserOverrideConfiguration(config, applicationDataDir, runtimeEnv, reloadConfigOnChange); - // Load Envinronmnet Variables into Configuration + // Load Environment Variables into Configuration config.AddEnvironmentVariables(); // Add command line back in to force it to have full override powers. @@ -316,7 +336,7 @@ private static Action SetupConfigurat // things. To be done at a later date. private static void LoadModulesConfiguration(IConfigurationBuilder config, string? runtimeEnv) { - bool reloadOnChange = SystemInfo.ExecutionEnvironment != ExecutionEnvironment.Docker; + bool reloadOnChange = !SystemInfo.IsDocker; IConfiguration configuration = config.Build(); (var modulesPath, var preInstalledModulesPath) = EnsureDirectories(configuration, runtimeEnv); @@ -534,44 +554,49 @@ public static IHostBuilder CreateHostBuilder(string[] args) _port = GetServerPort(hostbuilderContext); bool foundPort = false; - if (IsPortAvailable(_port)) + // Listen on the port that the appsettings defines (we force the + // use of the default port. IsPortAvailable can sometimes be too + // conservative) + if (_port == defaultPort || IsPortAvailable(_port)) { serverOptions.Listen(IPAddress.IPv6Any, _port); foundPort = true; } - // We always want this port. 
- if (_port != 32168 && IsPortAvailable(32168)) + // If we aren't listening to the default port (32168), then listen + // to it! (and don't bother asking if it's available. Just try it.) + if (_port != defaultPort /* && IsPortAvailable(defaultPort)*/) { if (!foundPort) - _port = 32168; + _port = defaultPort; - serverOptions.Listen(IPAddress.IPv6Any, 32168); + serverOptions.Listen(IPAddress.IPv6Any, defaultPort); foundPort = true; } if (!disableLegacyPort) { - // Add some legacy ports + // Add some legacy ports. First macOS (port 5500) if (RuntimeInformation.IsOSPlatform(OSPlatform.OSX)) { - if (_port != 5500 && IsPortAvailable(5500)) + if (_port != legacyPortOsx && IsPortAvailable(legacyPortOsx)) { if (!foundPort) - _port = 5500; + _port = legacyPortOsx; - serverOptions.Listen(IPAddress.IPv6Any, 5500); + serverOptions.Listen(IPAddress.IPv6Any, legacyPortOsx); foundPort = true; } } + // Then everything else (port 5000) else { - if (_port != 5000 && IsPortAvailable(5000)) + if (_port != legacyPort && IsPortAvailable(legacyPort)) { if (!foundPort) - _port = 5000; + _port = legacyPort; - serverOptions.Listen(IPAddress.IPv6Any, 5000); + serverOptions.Listen(IPAddress.IPv6Any, legacyPort); foundPort = true; } } @@ -614,7 +639,7 @@ public static IHostBuilder CreateHostBuilder(string[] args) } /// - /// Checks as to whether a given port on this machine is avaialble for use. + /// Checks as to whether a given port on this machine is available for use. /// /// The port number /// true if the port is available; false otherwise diff --git a/src/API/Server/FrontEnd/Properties/launchSettings.json b/src/API/Server/FrontEnd/Properties/launchSettings.json index 3017f1c8..4aa36d3c 100644 --- a/src/API/Server/FrontEnd/Properties/launchSettings.json +++ b/src/API/Server/FrontEnd/Properties/launchSettings.json @@ -4,7 +4,8 @@ "commandName": "Project", "launchUrl": "http://localhost:32168", "environmentVariables": { - "ASPNETCORE_ENVIRONMENT": "Development" + "ASPNETCORE_ENVIRONMENT": "Development", + "YOLOv5_VERBOSE": "false" }, "applicationUrl": "http://localhost:32168", "dotnetRunMessages": true diff --git a/src/API/Server/FrontEnd/Startup.cs b/src/API/Server/FrontEnd/Startup.cs index c2edc887..c6c6f4a8 100644 --- a/src/API/Server/FrontEnd/Startup.cs +++ b/src/API/Server/FrontEnd/Startup.cs @@ -106,6 +106,8 @@ public void ConfigureServices(IServiceCollection services) services.AddVersionProcessRunner(Configuration); + services.Configure(Configuration.GetSection(TriggersConfig.TriggersCfgSection)); + // Configure the shutdown timeout to 60s instead of 2 services.Configure( opts => opts.ShutdownTimeout = TimeSpan.FromSeconds(60)); @@ -115,7 +117,7 @@ public void ConfigureServices(IServiceCollection services) /// Configures the application pipeline. /// /// The Application Builder. - /// The Hosting Evironment. + /// The Hosting Environment. /// The logger /// The installation instance config values. 
/// The Version Configuration @@ -150,6 +152,18 @@ public void Configure(IApplicationBuilder app, app.UseDefaultFiles(); app.UseStaticFiles(); + /* Should we choose to provide a folder in which we can dump items such as files + generated by a module, we could do: + app.UseStaticFiles(new StaticFileOptions + { + FileProvider = new Microsoft.Extensions.FileProviders.PhysicalFileProvider( + Path.Combine(env.ContentRootPath, "modules/moduleId/models")); + }); + + that is, if you can decipher + https://learn.microsoft.com/en-us/aspnet/core/fundamentals/static-files?view=aspnetcore-7.0#serve-files-from-multiple-locations + */ + app.UseRouting(); app.UseCors("allowAllOrigins"); @@ -170,7 +184,7 @@ private void InitializeInstallConfig() _installConfig.Id = Guid.NewGuid(); } - // if this is a new install or replacing a pre V2.1.0 version + // if this is a new install or replacing a pre V2.1 version if (string.IsNullOrEmpty(_installConfig.Version)) AiModuleInstaller.QueueInitialModulesInstallation(); @@ -178,11 +192,9 @@ try { - var configValues = new { install = _installConfig }; - + var configValues = new { install = _installConfig }; string appDataDir = Configuration["ApplicationDataDir"] ?? throw new ArgumentNullException("ApplicationDataDir is not defined in configuration"); - string configFilePath = Path.Combine(appDataDir, InstallConfig.InstallCfgFilename); if (!Directory.Exists(appDataDir)) diff --git a/src/API/Server/FrontEnd/Utilities/PackageDownloader.cs b/src/API/Server/FrontEnd/Utilities/PackageDownloader.cs index 0c5bcfdd..2c7ddeb0 100644 --- a/src/API/Server/FrontEnd/Utilities/PackageDownloader.cs +++ b/src/API/Server/FrontEnd/Utilities/PackageDownloader.cs @@ -51,7 +51,7 @@ public async Task DownloadTextFileAsync(string uri) { // remove file:// and then convert /dir -> C:\dir or c:\dir -> /dir as needed uri = uri.Substring("file://".Length); - if (SystemInfo.OperatingSystem.EqualsIgnoreCase("Windows")) + if (SystemInfo.IsWindows) { if (uri.StartsWith("/")) uri = "C:" + uri;
@@ using System; -using System.Configuration; using System.Threading; using System.Threading.Tasks; @@ -26,7 +25,7 @@ public class ServerVersionProcessRunner : BackgroundService /// The Queue management service. /// The logger public ServerVersionProcessRunner(ServerVersionService versionService, - ILogger logger) + ILogger logger) { _versionService = versionService; _logger = logger; @@ -36,17 +35,17 @@ public ServerVersionProcessRunner(ServerVersionService versionService, protected override async Task ExecuteAsync(CancellationToken stoppingToken) { // Let's make sure the front end is up and running before we start the version process - await Task.Delay(TimeSpan.FromSeconds(5), stoppingToken); + await Task.Delay(TimeSpan.FromSeconds(5), stoppingToken).ConfigureAwait(false); - CheckCurrentVersion(); + await CheckCurrentVersion().ConfigureAwait(false); } - private async void CheckCurrentVersion() + private async Task CheckCurrentVersion() { // Grab the latest version info if (_versionService != null) { - VersionInfo? latest = await _versionService.GetLatestVersion(); + VersionInfo? latest = await _versionService.GetLatestVersion().ConfigureAwait(false); if (latest != null && _versionService.VersionConfig?.VersionInfo != null) { _logger.LogDebug($"Current Version is {_versionService.VersionConfig.VersionInfo.Version}"); @@ -55,14 +54,14 @@ private async void CheckCurrentVersion() if (compare < 0) { if (latest.SecurityUpdate ?? false) - _logger.LogInformation($" *** A SECURITY UPDATE {latest.Version} is available ** "); + _logger.LogInformation($"*** A SECURITY UPDATE {latest.Version} is available"); else - _logger.LogInformation($" *** A new version {latest.Version} is available ** "); + _logger.LogInformation($"*** A new version {latest.Version} is available"); } else if (compare == 0) _logger.LogInformation("Server: This is the latest version"); else - _logger.LogInformation("Server: This is a new, unreleased version"); + _logger.LogInformation("*** Server: This is a new, unreleased version"); } } } diff --git a/src/API/Server/FrontEnd/Version/ServerVersionService.cs b/src/API/Server/FrontEnd/Version/ServerVersionService.cs index 8276bafd..a6f0d73d 100644 --- a/src/API/Server/FrontEnd/Version/ServerVersionService.cs +++ b/src/API/Server/FrontEnd/Version/ServerVersionService.cs @@ -22,7 +22,7 @@ public class ServerVersionService private readonly ServerOptions _serverOptions; /// - /// Initializs a new instance of the Startup class. + /// Initializes a new instance of the ServerVersionService class. /// /// The version Options instance. /// The install Options instance. @@ -85,6 +85,7 @@ public ServerVersionService(IOptions versionOptions, // is purely things like OS / GPU. string currentVersion = VersionConfig.VersionInfo?.Version ??
string.Empty; _client.DefaultRequestHeaders.Add("X-CPAI-Server-Version", currentVersion); + var sysProperties = SystemInfo.Summary; var systemInfoJson = JsonSerializer.Serialize(sysProperties); _client.DefaultRequestHeaders.Add("X-CPAI-Server-SystemInfo", systemInfoJson); @@ -95,7 +96,8 @@ public ServerVersionService(IOptions versionOptions, try { - string data = await _client.GetStringAsync(_serverOptions.ServerVersionCheckUrl); + string data = await _client.GetStringAsync(_serverOptions.ServerVersionCheckUrl) + .ConfigureAwait(false); if (!string.IsNullOrWhiteSpace(data)) { var options = new JsonSerializerOptions diff --git a/src/API/Server/FrontEnd/appsettings.Development.json b/src/API/Server/FrontEnd/appsettings.Development.json index 842b57ca..527f524e 100644 --- a/src/API/Server/FrontEnd/appsettings.Development.json +++ b/src/API/Server/FrontEnd/appsettings.Development.json @@ -9,7 +9,7 @@ "ModuleOptions": { // Will we be launching the backend analysis modules when the server starts? (handy to disable // for debugging the modules separately) - "LaunchModules": true, + "LaunchModules": false, // This needs to be set to allow module uploads and installs via the API "InstallPassword": "demo-password", @@ -17,7 +17,7 @@ // Location of the Json list of modules that can be downloaded // For debugging: choose either local host or local file system - "ModuleListUrl": "file://modules.json", // From local json file + "ModuleListUrl": "file://modules.json", // From local json file // "ModuleListUrl": "http://localhost:9001/ai/modules/list", // For a Local CodeProject.com install // For testing module downloads without fear of your existing modules getting nuked diff --git a/src/API/Server/FrontEnd/appsettings.json b/src/API/Server/FrontEnd/appsettings.json index 5ca047eb..458c5f6f 100644 --- a/src/API/Server/FrontEnd/appsettings.json +++ b/src/API/Server/FrontEnd/appsettings.json @@ -73,6 +73,10 @@ // This needs to be set to allow module uploads and installs via the API "InstallPassword": null, + // The time allowed for a module to be installed. 20 mins should be plenty, but for a Raspberry + // or Orange Pi, or slow internet, it will need longer. 
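+ // (The value binds to a .NET TimeSpan, "hh:mm:ss"; for example "00:45:00" would
+ // allow 45 minutes.)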
+ "ModuleInstallTimeout": "00:20:00", + // Location of the Json list of modules that can be downloaded "ModuleListUrl": "https://www.codeproject.com/ai/modules/list", diff --git a/src/API/Server/FrontEnd/modules.json b/src/API/Server/FrontEnd/modules.json index 060692f6..45dfc5b8 100644 --- a/src/API/Server/FrontEnd/modules.json +++ b/src/API/Server/FrontEnd/modules.json @@ -2,8 +2,8 @@ { "ModuleId": "ALPR", "Name": "License Plate Reader", - "Version": "2.2", - "Description": "Detects and readers licence plates using YOLO object detection and the PaddleOCR toolkit", + "Version": "2.5", + "Description": "Detects and readers single-line and multi-line licence plates using YOLO object detection and the PaddleOCR toolkit", "Platforms": [ "windows", "linux", @@ -11,14 +11,16 @@ "macos-arm64" ], "Runtime": "python37", - "VersionCompatibililty": [ + "ModuleReleases": [ { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], - "ReleaseDate": "2022-11-01" + "ReleaseDate": "2022-11-01", + "ReleaseNotes": null, + "Importance": null }, { "ModuleVersion": "2.1", @@ -26,7 +28,9 @@ "2.0.9", "2.0.9" ], - "ReleaseDate": "2022-12-01" + "ReleaseDate": "2022-12-01", + "ReleaseNotes": null, + "Importance": null }, { "ModuleVersion": "2.2", @@ -34,7 +38,39 @@ "2.1", "" ], - "ReleaseDate": "2023-03-20" + "ReleaseDate": "2023-03-20", + "ReleaseNotes": null, + "Importance": null + }, + { + "ModuleVersion": "2.3", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-04-20", + "ReleaseNotes": "Updated module settings", + "Importance": "Minor" + }, + { + "ModuleVersion": "2.4", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-05-10", + "ReleaseNotes": "PaddlePaddle install more reliable", + "Importance": "Minor" + }, + { + "ModuleVersion": "2.5", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-06-04", + "ReleaseNotes": "Updated PaddlePaddle", + "Importance": null } ], "License": "SSPL", @@ -44,7 +80,7 @@ { "ModuleId": "BackgroundRemover", "Name": "Background Remover", - "Version": "1.2", + "Version": "1.4", "Description": "Automatically removes the background from a picture", "Platforms": [ "windows", @@ -52,14 +88,16 @@ "macos-arm64" ], "Runtime": "python39", - "VersionCompatibililty": [ + "ModuleReleases": [ { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], - "ReleaseDate": "2022-11-01" + "ReleaseDate": "2022-11-01", + "ReleaseNotes": null, + "Importance": null }, { "ModuleVersion": "1.1", @@ -67,15 +105,39 @@ "1.6.9", "2.0.8" ], - "ReleaseDate": "2022-11-01" + "ReleaseDate": "2022-11-01", + "ReleaseNotes": null, + "Importance": null }, { "ModuleVersion": "1.2", + "ServerVersionRange": [ + "2.1.0", + "2.1.6" + ], + "ReleaseDate": "2023-03-20", + "ReleaseNotes": "Updated for CodeProject.AI Server 2.1", + "Importance": null + }, + { + "ModuleVersion": "1.3", + "ServerVersionRange": [ + "2.1.0", + "2.1.6" + ], + "ReleaseDate": "2023-04-20", + "ReleaseNotes": "Install improved for GPU enabled systems", + "Importance": null + }, + { + "ModuleVersion": "1.4", "ServerVersionRange": [ "2.1", "" ], - "ReleaseDate": "2023-03-20" + "ReleaseDate": "2023-08-05", + "ReleaseNotes": "Bugs in error reporting corrected", + "Importance": "Minor" } ], "License": "SSPL", @@ -85,7 +147,7 @@ { "ModuleId": "Cartooniser", "Name": "Cartooniser", - "Version": "1.0", + "Version": "1.1", "Description": "Convert a photo into an anime style cartoon", "Platforms": [ "windows", @@ -95,14 +157,26 @@ "macos-arm64" ], "Runtime": "python39", - "VersionCompatibililty": 
[ + "ModuleReleases": [ { "ModuleVersion": "1.0", "ServerVersionRange": [ "2.1", "" ], - "ReleaseDate": "2023-03-28" + "ReleaseDate": "2023-03-28", + "ReleaseNotes": null, + "Importance": null + }, + { + "ModuleVersion": "1.1", + "ServerVersionRange": [ + "2.1.7", + "" + ], + "ReleaseDate": "2023-04-29", + "ReleaseNotes": "Updated module settings", + "Importance": "Minor" } ], "License": "MIT", @@ -112,7 +186,7 @@ { "ModuleId": "FaceProcessing", "Name": "Face Processing", - "Version": "1.2", + "Version": "1.5", "Description": "A number of Face image APIs including detect, recognize, and compare.", "Platforms": [ "windows", @@ -122,14 +196,16 @@ "macos-arm64" ], "Runtime": "python37", - "VersionCompatibililty": [ + "ModuleReleases": [ { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], - "ReleaseDate": "2022-03-01" + "ReleaseDate": "2022-03-01", + "ReleaseNotes": null, + "Importance": null }, { "ModuleVersion": "1.2", @@ -137,17 +213,108 @@ "2.1", "" ], - "ReleaseDate": "2023-03-20" + "ReleaseDate": "2023-03-20", + "ReleaseNotes": null, + "Importance": null + }, + { + "ModuleVersion": "1.3", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-05-17", + "ReleaseNotes": null, + "Importance": null + }, + { + "ModuleVersion": "1.4", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-08-05", + "ReleaseNotes": "Bugs in error reporting corrected", + "Importance": "Minor" + }, + { + "ModuleVersion": "1.5", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-08-12", + "ReleaseNotes": "PyTorch version downgrade", + "Importance": null } ], "License": "GPL-3.0", "LicenseUrl": "https://opensource.org/licenses/GPL-3.0", "Downloads": 0 }, + { + "ModuleId": "ObjectDetectionCoral", + "Name": "ObjectDetection (Coral)", + "Version": "1.3", + "Description": "The object detection module uses the Coral TPU to locate and classify the objects the models have been trained on.", + "Platforms": [ + "windows", + "linux", + "linux-arm64", + "macos", + "macos-arm64" + ], + "Runtime": "python37", + "ModuleReleases": [ + { + "ModuleVersion": "1.0", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-07-11", + "ReleaseNotes": null, + "Importance": null + }, + { + "ModuleVersion": "1.1", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-07-12", + "ReleaseNotes": null, + "Importance": null + }, + { + "ModuleVersion": "1.2", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-07-12", + "ReleaseNotes": null, + "Importance": null + }, + { + "ModuleVersion": "1.3", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-08-11", + "ReleaseNotes": "installer corrections, macOS/Ubuntu support improved", + "Importance": null + } + ], + "License": "Apache-2.0", + "LicenseUrl": "https://opensource.org/licenses/Apache-2.0", + "Downloads": 0 + }, { "ModuleId": "ObjectDetectionNet", "Name": "Object Detection (YOLOv5 .NET)", - "Version": "1.2", + "Version": "1.5", "Description": "Provides Object Detection using YOLOv5 ONNX models with DirectML. 
This module is best for those on Windows and Linux without CUDA enabled GPUs", "Platforms": [ "windows", @@ -156,31 +323,67 @@ "macos", "macos-arm64" ], - "Runtime": "dotnet", - "VersionCompatibililty": [ + "Runtime": "execute", + "ModuleReleases": [ { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], - "ReleaseDate": "2022-06-01" + "ReleaseDate": "2022-06-01", + "ReleaseNotes": null, + "Importance": null }, { "ModuleVersion": "1.1", "ServerVersionRange": [ - "2.1", - "2.1" + "2.1.0", + "2.1.0" ], - "ReleaseDate": "2023-03-20" + "ReleaseDate": "2023-03-20", + "ReleaseNotes": "Updated for CodeProject.AI Server 2.1", + "Importance": null }, { "ModuleVersion": "1.2", "ServerVersionRange": [ - "2.1", + "2.1.0", + "2.1.6" + ], + "ReleaseDate": "2023-04-09", + "ReleaseNotes": "Corrected installer issues", + "Importance": null + }, + { + "ModuleVersion": "1.3", + "ServerVersionRange": [ + "2.1.0", + "2.1.6" + ], + "ReleaseDate": "2023-04-20", + "ReleaseNotes": "Corrected module launch command", + "Importance": null + }, + { + "ModuleVersion": "1.4", + "ServerVersionRange": [ + "2.1.8", + "2.1.8" + ], + "ReleaseDate": "2023-04-20", + "ReleaseNotes": "Minor changes in module setup", + "Importance": "Minor" + }, + { + "ModuleVersion": "1.5", + "ServerVersionRange": [ + "2.1.9", "" ], - "ReleaseDate": "2023-04-09" + "ReleaseDate": "2023-05-04", + "ReleaseNotes": "Updated module settings", + "Importance": "Minor" } ], "License": "MIT", @@ -190,8 +393,8 @@ { "ModuleId": "ObjectDetectionTFLite", "Name": "ObjectDetection (TF-Lite)", - "Version": "1.2", - "Description": "The object detection module Tensorflow Lite to locate and classify the objects the models have been trained on.", + "Version": "1.4", + "Description": "The object detection module uses Tensorflow Lite to locate and classify the objects the models have been trained on.", "Platforms": [ "windows", "linux", @@ -200,30 +403,56 @@ "macos-arm64" ], "Runtime": "python39", - "VersionCompatibililty": [ + "ModuleReleases": [ { - "ModuleVersion": "1.2", + "ModuleVersion": "1.0", "ServerVersionRange": [ "2.1", "" ], - "ReleaseDate": "2023-04-10" + "ReleaseDate": "2023-03-20", + "ReleaseNotes": null, + "Importance": null }, { "ModuleVersion": "1.1", "ServerVersionRange": [ "2.1", - "2.1" + "" ], - "ReleaseDate": "2023-04-03" + "ReleaseDate": "2023-04-03", + "ReleaseNotes": null, + "Importance": null }, { - "ModuleVersion": "1.0", + "ModuleVersion": "1.2", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-04-10", + "ReleaseNotes": null, + "Importance": null + }, + { + "ModuleVersion": "1.3", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-04-10", + "ReleaseNotes": "Updated Windows installer", + "Importance": null + }, + { + "ModuleVersion": "1.4", "ServerVersionRange": [ "2.1", - "2.1" + "" ], - "ReleaseDate": "2023-03-20" + "ReleaseDate": "2023-05-17", + "ReleaseNotes": "Updated module settings", + "Importance": "Minor" } ], "License": "Apache-2.0", @@ -233,38 +462,97 @@ { "ModuleId": "ObjectDetectionYolo", "Name": "Object Detection (YOLOv5 6.2)", - "Version": "1.2", - "Description": "Provides Object Detection using YOLOv5 v6.2 library with support for CPUs and CUDA enabled GPUs.", + "Version": "1.4", + "Description": "Provides Object Detection using YOLOv5 6.2 targeting CUDA 11.7/Torch 1.13 for newer GPUs.", "Platforms": [ "all" ], "Runtime": "python37", - "VersionCompatibililty": [ + "ModuleReleases": [ { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], - "ReleaseDate": 
"2022-03-01" + "ReleaseDate": "2022-03-01", + "ReleaseNotes": null, + "Importance": null }, { "ModuleVersion": "1.2", + "ServerVersionRange": [ + "2.1.0", + "2.1.6" + ], + "ReleaseDate": "2023-03-20", + "ReleaseNotes": "Updated for CodeProject.AI Server 2.1", + "Importance": null + }, + { + "ModuleVersion": "1.3", + "ServerVersionRange": [ + "2.1.7", + "" + ], + "ReleaseDate": "2023-04-29", + "ReleaseNotes": "Updated module settings", + "Importance": "Minor" + }, + { + "ModuleVersion": "1.4", "ServerVersionRange": [ "2.1", "" ], - "ReleaseDate": "2023-03-20" + "ReleaseDate": "2023-08-12", + "ReleaseNotes": "PyTorch version downgrade", + "Importance": null } ], "License": "GPL-3.0", "LicenseUrl": "https://opensource.org/licenses/GPL-3.0", "Downloads": 0 }, + { + "ModuleId": "ObjectDetectionYoloRKNN", + "Name": "Object Detection (YOLOv5 RKNN)", + "Version": "1.1", + "Description": "Provides Object Detection using YOLOv5 RKNN models. This module only works with Rockchip RK3588/RK3588S NPUs like the Orange Pi 5/5B/5 Plus", + "Platforms": [ + "linux-arm64" + ], + "Runtime": "python39", + "ModuleReleases": [ + { + "ModuleVersion": "1.0", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-08-06", + "ReleaseNotes": null, + "Importance": null + }, + { + "ModuleVersion": "1.1", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-08-06", + "ReleaseNotes": "Corrected installer in docker environment", + "Importance": null + } + ], + "License": "Apache-2.0", + "LicenseUrl": "https://opensource.org/licenses/Apache-2.0", + "Downloads": 0 + }, { "ModuleId": "OCR", "Name": "Optical Character Recognition", - "Version": "1.2", + "Version": "1.4", "Description": "Provides OCR support using the PaddleOCR toolkit", "Platforms": [ "windows", @@ -273,14 +561,16 @@ "macos-arm64" ], "Runtime": "python37", - "VersionCompatibililty": [ + "ModuleReleases": [ { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], - "ReleaseDate": "2022-11-01" + "ReleaseDate": "2022-11-01", + "ReleaseNotes": null, + "Importance": null }, { "ModuleVersion": "1.2", @@ -288,7 +578,29 @@ "2.1", "" ], - "ReleaseDate": "2023-03-20" + "ReleaseDate": "2023-03-20", + "ReleaseNotes": "Updated for CodeProject.AI Server 2.1", + "Importance": null + }, + { + "ModuleVersion": "1.3", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-05-15", + "ReleaseNotes": "Updated module settings", + "Importance": "Minor" + }, + { + "ModuleVersion": "1.4", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-05-10", + "ReleaseNotes": "PaddlePaddle install more reliable", + "Importance": "Minor" } ], "License": "Apache 2.0", @@ -298,20 +610,62 @@ { "ModuleId": "PortraitFilter", "Name": "Portrait Filter", - "Version": "1.1", + "Version": "1.4", "Description": "Provides a depth-of-field (bokeh) effect on images. 
Great for selfies.", "Platforms": [ "windows" ], "Runtime": "execute", - "VersionCompatibililty": [ + "ModuleReleases": [ + { + "ModuleVersion": "1.0", + "ServerVersionRange": [ + "1.0", + "2.0.8" + ], + "ReleaseDate": "2022-06-01", + "ReleaseNotes": null, + "Importance": null + }, { "ModuleVersion": "1.1", + "ServerVersionRange": [ + "2.1", + "2.1.6" + ], + "ReleaseDate": "2023-03-20", + "ReleaseNotes": "Updated for CodeProject.AI Server 2.1", + "Importance": null + }, + { + "ModuleVersion": "1.2", + "ServerVersionRange": [ + "2.1", + "2.1.7" + ], + "ReleaseDate": "2023-04-20", + "ReleaseNotes": "Updated launch command", + "Importance": null + }, + { + "ModuleVersion": "1.3", + "ServerVersionRange": [ + "2.1", + "2.1.8" + ], + "ReleaseDate": "2023-05-03", + "ReleaseNotes": "Minor module initialisation changes", + "Importance": null + }, + { + "ModuleVersion": "1.4", "ServerVersionRange": [ "2.1", "" ], - "ReleaseDate": "2023-03-20" + "ReleaseDate": "2023-05-17", + "ReleaseNotes": "Updated module settings", + "Importance": "Minor" } ], "License": "MIT", @@ -321,7 +675,7 @@ { "ModuleId": "SceneClassifier", "Name": "Scene Classification", - "Version": "1.2", + "Version": "1.3", "Description": "Classifies an image according to one of 365 pre-trained scenes", "Platforms": [ "windows", @@ -331,22 +685,46 @@ "macos-arm64" ], "Runtime": "python37", - "VersionCompatibililty": [ + "ModuleReleases": [ { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], - "ReleaseDate": "2022-03-01" + "ReleaseDate": "2022-03-01", + "ReleaseNotes": null, + "Importance": null + }, + { + "ModuleVersion": "1.1", + "ServerVersionRange": [ + "2.1", + "2.1.6" + ], + "ReleaseDate": "2023-03-20", + "ReleaseNotes": "Updated for CodeProject.AI Server 2.1", + "Importance": null }, { "ModuleVersion": "1.2", + "ServerVersionRange": [ + "2.1", + "2.1.8" + ], + "ReleaseDate": "2023-05-03", + "ReleaseNotes": "Minor module initialisation changes", + "Importance": null + }, + { + "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1", "" ], - "ReleaseDate": "2023-03-20" + "ReleaseDate": "2023-05-17", + "ReleaseNotes": "Updated module settings", + "Importance": "Minor" } ], "License": "Apache 2.0", @@ -356,21 +734,53 @@ { "ModuleId": "SentimentAnalysis", "Name": "Sentiment Analysis", - "Version": "1.1", - "Description": "Provides an alaysis of the sentiment of a piece of text. Positive or negative?", + "Version": "1.3", + "Description": "Provides an analysis of the sentiment of a piece of text. 
Positive or negative?", "Platforms": [ "windows", "macos" ], "Runtime": "execute", - "VersionCompatibililty": [ + "ModuleReleases": [ + { + "ModuleVersion": "1.0", + "ServerVersionRange": [ + "1.0", + "2.0.8" + ], + "ReleaseDate": "2022-06-01", + "ReleaseNotes": null, + "Importance": null + }, { "ModuleVersion": "1.1", + "ServerVersionRange": [ + "2.1", + "2.1.6" + ], + "ReleaseDate": "2023-03-20", + "ReleaseNotes": "Updated for CodeProject.AI Server 2.1", + "Importance": null + }, + { + "ModuleVersion": "1.2", + "ServerVersionRange": [ + "2.1", + "2.1.8" + ], + "ReleaseDate": "2023-05-03", + "ReleaseNotes": "Minor module initialisation changes", + "Importance": null + }, + { + "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1", "" ], - "ReleaseDate": "2023-03-20" + "ReleaseDate": "2023-05-17", + "ReleaseNotes": "Updated module settings", + "Importance": "Minor" } ], "License": "CC-BY-4.0", @@ -380,7 +790,7 @@ { "ModuleId": "SuperResolution", "Name": "Super Resolution", - "Version": "1.3", + "Version": "1.5", "Description": "Increases the resolution of an image using AI", "Platforms": [ "windows", @@ -390,14 +800,16 @@ "macos-arm64" ], "Runtime": "python39", - "VersionCompatibililty": [ + "ModuleReleases": [ { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.6.8" ], - "ReleaseDate": "2022-03-01" + "ReleaseDate": "2022-03-01", + "ReleaseNotes": null, + "Importance": null }, { "ModuleVersion": "1.1", @@ -405,15 +817,19 @@ "2.6.9", "2.0.8" ], - "ReleaseDate": "2022-11-01" + "ReleaseDate": "2022-11-01", + "ReleaseNotes": null, + "Importance": null }, { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", - "2.1" + "" ], - "ReleaseDate": "2023-03-20" + "ReleaseDate": "2023-03-20", + "ReleaseNotes": "Updated for CodeProject.AI Server 2.1", + "Importance": null }, { "ModuleVersion": "1.3", @@ -421,7 +837,29 @@ "2.1", "" ], - "ReleaseDate": "2023-04-11" + "ReleaseDate": "2023-04-11", + "ReleaseNotes": "Missing assets restored", + "Importance": null + }, + { + "ModuleVersion": "1.4", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-04-11", + "ReleaseNotes": "Corrected inferenceMs type", + "Importance": null + }, + { + "ModuleVersion": "1.5", + "ServerVersionRange": [ + "2.1", + "" + ], + "ReleaseDate": "2023-05-17", + "ReleaseNotes": "Updated module settings", + "Importance": "Minor" } ], "License": "Apache 2.0", @@ -431,7 +869,7 @@ { "ModuleId": "TextSummary", "Name": "Text Summary", - "Version": "1.2", + "Version": "1.3", "Description": "Summarizes text content by selecting a number of sentences that are most representitive of the content.", "Platforms": [ "windows", @@ -441,14 +879,16 @@ "macos-arm64" ], "Runtime": "python37", - "VersionCompatibililty": [ + "ModuleReleases": [ { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "1.6.8" ], - "ReleaseDate": "2022-11-01" + "ReleaseDate": "2022-11-01", + "ReleaseNotes": null, + "Importance": null }, { "ModuleVersion": "1.1", @@ -456,26 +896,75 @@ "1.6.9", "2.0.8" ], - "ReleaseDate": "2022-11-01" + "ReleaseDate": "2022-11-01", + "ReleaseNotes": null, + "Importance": null }, { "ModuleVersion": "1.2", + "ServerVersionRange": [ + "2.1", + "2.1.6" + ], + "ReleaseDate": "2023-03-20", + "ReleaseNotes": "Updated for CodeProject.AI Server 2.1", + "Importance": null + }, + { + "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1", "" ], - "ReleaseDate": "2023-03-20" + "ReleaseDate": "2023-05-17", + "ReleaseNotes": "Updated module settings", + "Importance": "Minor" } ], "License": "No License", "LicenseUrl": 
"https://github.com/edubey/text-summarizer", "Downloads": 0 }, + { + "ModuleId": "TrainingYoloV5", + "Name": "Training for YoloV5 6.2", + "Version": "1.1", + "Description": "Train custom models for YOLOv5 v6.2 with support for CPUs, CUDA enabled GPUs, and Apple Silicon.", + "Platforms": [ + "all" + ], + "Runtime": "python39", + "ModuleReleases": [ + { + "ModuleVersion": "1.0", + "ServerVersionRange": [ + "2.1.10", + "" + ], + "ReleaseDate": "2022-08-02", + "ReleaseNotes": null, + "Importance": null + }, + { + "ModuleVersion": "1.1", + "ServerVersionRange": [ + "2.1.11", + "" + ], + "ReleaseDate": "2023-08-12", + "ReleaseNotes": "Added \u0027patience\u0027, \u0027workers\u0027 as parameters", + "Importance": null + } + ], + "License": "GPL-3.0", + "LicenseUrl": "https://opensource.org/licenses/GPL-3.0", + "Downloads": 0 + }, { "ModuleId": "YOLOv5-3.1", "Name": "Object Detection (YOLOv5 3.1)", - "Version": "1.2", - "Description": "The object detection module uses YOLO (You Only Look Once) to locate and classify the objects the models have been trained on. At this point there are 80 different types of objects that can be detected.", + "Version": "1.3", + "Description": "Provides Object Detection using YOLOv5 3.1 targeting CUDA 10.2/Torch 1.7 for older GPUs.", "Platforms": [ "windows", "linux", @@ -483,22 +972,36 @@ "macos" ], "Runtime": "python37", - "VersionCompatibililty": [ + "ModuleReleases": [ { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], - "ReleaseDate": "2022-11-01" + "ReleaseDate": "2022-11-01", + "ReleaseNotes": null, + "Importance": null }, { "ModuleVersion": "1.2", + "ServerVersionRange": [ + "2.1", + "2.1.6" + ], + "ReleaseDate": "2023-03-20", + "ReleaseNotes": "Updated for CodeProject.AI Server 2.1", + "Importance": null + }, + { + "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1", "" ], - "ReleaseDate": "2023-03-20" + "ReleaseDate": "2023-05-17", + "ReleaseNotes": "Updated module settings", + "Importance": "Minor" } ], "License": "GPL-3.0", diff --git a/src/API/Server/FrontEnd/triggers.json b/src/API/Server/FrontEnd/triggers.json new file mode 100644 index 00000000..c0b1feff --- /dev/null +++ b/src/API/Server/FrontEnd/triggers.json @@ -0,0 +1,23 @@ +{ + "triggersSection": { + "triggers": [ + /* + { + "Queue" : "objectdetection_queue", + "PredictionsCollectionName" : "predictions", + "PropertyName" : "label", + "PropertyValue" : "car", + "PropertyComparison" : "equals", + "Confidence" : 0.5, + "ConfidenceComparison" : "greaterthan", + "PlatformTasks" : { + "Windows" : { "Command": "cmd", "Args": "/c echo Hi Windows. I see a car", "Type": "Command" }, + "Linux" : { "Command": "bash", "Args": "echo Hi Linux. I see a car", "Type": "Command" }, + "LinuxArm64" : { "Command": "bash", "Args": "echo Hi Linux. I see a car", "Type": "Command" }, + "macOS" : { "Command": "zsh", "Args": "echo Hi Linux. I see a car", "Type": "Command" }, + } + } + */ + ] + } +} \ No newline at end of file diff --git a/src/API/Server/FrontEnd/version.json b/src/API/Server/FrontEnd/version.json index 42d496f9..81b7e950 100644 --- a/src/API/Server/FrontEnd/version.json +++ b/src/API/Server/FrontEnd/version.json @@ -3,12 +3,12 @@ "versionInfo": { "Major": 2, "Minor": 1, - "Patch": 0, + "Patch": 12, "Build": 0, "PreRelease": "Beta", "SecurityUpdate": false, - "File": "CodeProject.AI.Server-2.1.0.zip", - "ReleaseNotes": "New and Improved module system, Performance improvements, Raspberry Pi Coral.AI support and bugs fixes." 
+ "File": "CodeProject.AI.Server-2.1.12.zip", + "ReleaseNotes": "Improvements to module installers." } } } \ No newline at end of file diff --git a/src/API/Server/FrontEnd/wwwroot/Index.html b/src/API/Server/FrontEnd/wwwroot/Index.html index f2d42391..692bfc23 100644 --- a/src/API/Server/FrontEnd/wwwroot/Index.html +++ b/src/API/Server/FrontEnd/wwwroot/Index.html @@ -16,8 +16,8 @@ - - + + @@ -1208,7 +1544,7 @@
[The Index.html hunks here (@@ -16,8 +16,8 @@, @@ -1208,7 +1544,7 @@, @@ -1294,9 +1610,9 @@ and @@ -1325,7 +1641,11 @@) were garbled in extraction and their markup is gone. What survives shows the dashboard markup being refreshed for this release, and the "Model Size" options row now being emitted for any module whose queue is "objectdetection_queue", replacing the old hardcoded test against moduleId == "ObjectDetectionYolo" || "YOLOv5-3.1" || "ObjectDetectionNet" || "ObjectDetectionTFLite". The file header of the BackendClient.cs diff that follows was also lost; it is reconstructed below.]
diff --git a/src/SDK/NET/Analysis/BackendClient.cs b/src/SDK/NET/Analysis/BackendClient.cs
public class BackendClient { + private static readonly JsonSerializerOptions jsonSerializerOptions = + new JsonSerializerOptions(JsonSerializerDefaults.Web); + private record LoggingData(string message, string category, LogLevel logLevel, string label); private static HttpClient? _httpClient; @@ -57,7 +61,7 @@ public BackendClient(string url, TimeSpan timeout = default, CancellationToken t // RequestUri = new Uri(requestUri), // Method = HttpMethod.Get, // }; - // request.DefaultRequestHeaders.Add("X-CPAI-Moduleid", moduleid); + // request.DefaultRequestHeaders.Add("X-CPAI-Moduleid", moduleid); // if (executionProvider != null) // request.DefaultRequestHeaders.Add("X-CPAI-ExecutionProvider", executionProvider); // httpResponse = await _httpClient!.SendAsync(request, token).ConfigureAwait(false); @@ -67,30 +71,25 @@ public BackendClient(string url, TimeSpan timeout = default, CancellationToken t requestUri += $"&executionProvider={executionProvider}"; BackendRequest? request = null; - HttpResponseMessage? httpResponse = null; try { - httpResponse = await _httpClient!.GetAsync(requestUri, token).ConfigureAwait(false); - if (httpResponse?.StatusCode == System.Net.HttpStatusCode.OK) - { - _errorPauseSecs = 0; - - var jsonString = await httpResponse.Content.ReadAsStringAsync(token) - .ConfigureAwait(false); - - request = JsonSerializer.Deserialize(jsonString, - new JsonSerializerOptions(JsonSerializerDefaults.Web)); - } + request = await _httpClient!.GetFromJsonAsync(requestUri, token) + .ConfigureAwait(false); } - catch + catch (JsonException) { + // This is probably due to timing out and therefore no JSON to parse. + } + catch (Exception ex) + { + Debug.WriteLine(ex); Console.WriteLine($"Unable to get request from {queueName} for {moduleId}"); _errorPauseSecs = Math.Min(_errorPauseSecs > 0 ? _errorPauseSecs * 2 : 5, 60); if (!token.IsCancellationRequested && _errorPauseSecs > 0) { Console.WriteLine($"Pausing on error for {_errorPauseSecs} secs."); - await Task.Delay(_errorPauseSecs * 1_000, token); + await Task.Delay(_errorPauseSecs * 1_000, token).ConfigureAwait(false); } } @@ -101,37 +100,21 @@ public BackendClient(string url, TimeSpan timeout = default, CancellationToken t /// Sends a response for a request to the CodeProject.AI Server. /// /// The Request ID. - /// The Id of the module making this request + /// The module sending this response. /// The content to send. /// A Cancellation Token. - /// The hardware acceleration execution provider /// A Task. public async Task SendResponse(string reqid, string moduleId, HttpContent content, - CancellationToken token, string?
executionProvider = null) + CancellationToken token) { - // TODO: A better way to pass this is via header: - // string requestUri = $"v1/queue/{reqid}"; - // var request = new HttpRequestMessage() { - // RequestUri = new Uri(requestUri), - // Method = HttpMethod.Post, - // }; - // request.DefaultRequestHeaders.Add("X-CPAI-Moduleid", moduleid); - // if (executionProvider != null) - // request.DefaultRequestHeaders.Add("X-CPAI-ExecutionProvider", executionProvider); - // httpResponse = await _httpClient!.SendAsync(request, token).ConfigureAwait(false); - - string requestUri = $"v1/queue/{reqid}?moduleid={moduleId}"; - if (executionProvider != null) - requestUri += $"&executionProvider={executionProvider}"; - try { - await _httpClient!.PostAsync(requestUri, content, token) + await _httpClient!.PostAsync($"v1/queue/{reqid}", content, token) .ConfigureAwait(false); } catch { - Console.WriteLine($"Unable to send response to request for {moduleId} (#reqid {reqid})"); + Console.WriteLine($"Unable to send response from module {moduleId} (#reqid {reqid})"); } } @@ -152,21 +135,26 @@ public ValueTask LogToServer(string message, string category, return ValueTask.CompletedTask; } + /// + /// Called to process the logging data pulled off a queue by a background task. See the + /// LogToServer method above. + /// + /// + /// + /// A Task private async Task SendLoggingData(LoggingData data, CancellationToken token) { var form = new FormUrlEncodedContent(new[] { - new KeyValuePair("entry", data.message), - new KeyValuePair("category", data.category), - new KeyValuePair("label", data.label), + new KeyValuePair("entry", data.message), + new KeyValuePair("category", data.category), + new KeyValuePair("label", data.label), new KeyValuePair("log_level", data.logLevel.ToString()) }); - /*var response = */ try { - await _httpClient!.PostAsync($"v1/log", form, token) - .ConfigureAwait(false); + await _httpClient!.PostAsync($"v1/log", form, token).ConfigureAwait(false); } catch { @@ -178,16 +166,18 @@ private async Task ProcessLoggingQueue(CancellationToken token = default) { while(!token.IsCancellationRequested) { - LoggingData data = await _loggingQueue.Reader.ReadAsync(token); + LoggingData data = await _loggingQueue.Reader.ReadAsync(token).ConfigureAwait(false); if (!token.IsCancellationRequested) + { try { - await SendLoggingData(data, token); + await SendLoggingData(data, token).ConfigureAwait(false); } catch(Exception e) { Debug.Write(e); } + } } _loggingQueue.Writer.Complete(); diff --git a/src/SDK/NET/Analysis/BackendRequests.cs b/src/SDK/NET/Analysis/BackendRequests.cs index b3dfd197..21f53907 100644 --- a/src/SDK/NET/Analysis/BackendRequests.cs +++ b/src/SDK/NET/Analysis/BackendRequests.cs @@ -67,7 +67,7 @@ public class RequestPayload /// public IEnumerable? files { get; set; } - // The additional segements at the end of the url path. + // The additional segments at the end of the url path. public string[] urlSegments { get; set; } = Array.Empty(); /// @@ -91,7 +91,7 @@ public RequestPayload(string command) /// /// The name of the value. /// The default value to return if not present or. - /// If true, and the key already exists, then the value + /// If true, and the key already exists, then the value /// for this key will be overwritten. Otherwise the value will be added to that /// key public void SetValue(string key, string value, bool overwrite = true) @@ -241,12 +241,7 @@ public class RequestFormFile /// The image, or null if conversion fails. public SKImage?
AsImage() { - // Using SkiaSharp as it handles more formats and mostly cross-platform. - if (data == null) - return null; - - var skiaImage = SKImage.FromEncodedData(data); - return skiaImage; + return ImageUtils.GetImage(data); } } diff --git a/src/SDK/NET/Analysis/BackendResponses.cs b/src/SDK/NET/Analysis/BackendResponses.cs index b75d7bd8..22b8e833 100644 --- a/src/SDK/NET/Analysis/BackendResponses.cs +++ b/src/SDK/NET/Analysis/BackendResponses.cs @@ -14,19 +14,36 @@ public class BackendResponseBase public int code { get; set; } = 200; /// - /// The number of milliseconds required to perform the AI inference operation(s) for - /// this response. + /// Gets or sets the optional command associated with this request + /// + public string? command { get; set; } + + /// + /// Gets or sets the Id of the Module handling this request + /// + public string? moduleId { get; set; } + + /// + /// Gets or sets the execution provider (hardware or library) that handles the AI + /// acceleration. + /// + public string? executionProvider { get; set; } + + /// + /// Gets or sets the number of milliseconds required to perform the AI inference operation(s) + /// for this response. /// public long inferenceMs { get; set; } /// - /// The number of milliseconds required to perform the AI processing for this response. - /// This includes the inference, as well as any pre- and post-processing. + /// Gets or sets the number of milliseconds required to perform the AI processing for this + /// response. This includes the inference, as well as any pre- and post-processing. /// public long processMs { get; set; } /// - /// The number of milliseconds required to run the full task in processing this response. + /// Gets or sets the number of milliseconds required to run the full task in processing this + /// response. /// public long analysisRoundTripMs { get; set; } } diff --git a/src/SDK/NET/Analysis/CommandQueueWorker.cs b/src/SDK/NET/Analysis/CommandQueueWorker.cs index 54af5311..44743d2f 100644 --- a/src/SDK/NET/Analysis/CommandQueueWorker.cs +++ b/src/SDK/NET/Analysis/CommandQueueWorker.cs @@ -18,7 +18,7 @@ public abstract class CommandQueueWorker : BackgroundService private readonly string _halfPrecision = "enable"; // Can be enable, disable or force private readonly string _logVerbosity = "info"; // Can be Quiet, Info or Loud - private readonly BackendClient _codeprojectAI; + private readonly BackendClient _aiClient; private readonly CancellationTokenSource _cancellationTokenSource = new(); private readonly ILogger _logger; @@ -86,7 +86,7 @@ public CommandQueueWorker(ILogger logger, IConfiguration configuration) var token = _cancellationTokenSource.Token; #if DEBUG - _codeprojectAI = new BackendClient($"http://localhost:{port}/", TimeSpan.FromSeconds(30), token); + _aiClient = new BackendClient($"http://localhost:{port}/", TimeSpan.FromSeconds(30), token); /* _logger.LogInformation($"CPAI_PORT: {port}"); @@ -95,10 +95,38 @@ public CommandQueueWorker(ILogger logger, IConfiguration configuration) _logger.LogInformation($"CPAI_MODULE_SUPPORT_GPU: {_supportGPU}"); */ #else - _codeprojectAI = new BackendClient($"http://localhost:{port}/", token: token); + _aiClient = new BackendClient($"http://localhost:{port}/", token: token); #endif } + /// + /// Stop the process. + /// + /// The stopping cancellation token. 
+ ///
+ ///
+ public override async Task StopAsync(CancellationToken token)
+ {
+ _cancelled = true;
+
+ await _aiClient.LogToServer($"Shutting down {_moduleId}", _moduleId!,
+ LogLevel.Information, string.Empty, token)
+ .ConfigureAwait(false);
+
+ _cancellationTokenSource.Cancel();
+
+ await base.StopAsync(token).ConfigureAwait(false);
+ }
+
+ ///
+ /// Disposes of this class's resources
+ ///
+ public override void Dispose()
+ {
+ _cancellationTokenSource?.Dispose();
+ base.Dispose();
+ GC.SuppressFinalize(this);
+ }
+
 protected string GetModuleDirectory()
 {
 string moduleDir = AppContext.BaseDirectory;
@@ -125,6 +153,8 @@ protected string GetModuleDirectory()
 return moduleDir;
 }
+ /* No longer really needed in this form
+
 ///
 /// Sniff the hardware in use so we can report to the API server
 ///
@@ -143,7 +173,7 @@ protected string GetModuleDirectory()
 /// happening. Or we dive in and plug into each module some hardware sniffing code.
 ///
 ///
- /// The detection and selection of CPU/GPU suppport is a very tricky and complex issue.
+ /// The detection and selection of CPU/GPU support is a very tricky and complex issue.
 ///
 ///
 ///
@@ -156,14 +186,14 @@ protected string GetModuleDirectory()
 /// a subset of the OnnxRuntime API.
 ///
 ///
- /// OnnxRuntime.DirectML on Windows and WSL theorectically should handle all this, but
+ /// OnnxRuntime.DirectML on Windows and WSL theoretically should handle all this, but
 /// if fails to execute some models.
 ///
 ///
- /// There is a NuGet for OnnxRuntime for OpenVINO, but it is not publically available.
+ /// There is a NuGet for OnnxRuntime for OpenVINO, but it is not publicly available.
 /// We were able to use this to verify that the Execution Providers to be used could be
 /// selected at runtime, so a GPU=Intel|AMD|NVIDIA|M1 could be an option if we can find
- /// or build the Execution Providers for our requirements. There are more publically
+ /// or build the Execution Providers for our requirements. There are more publicly
 /// available packages for Python and C/C++ than C#/NET.
 ///
 ///
@@ -174,7 +204,7 @@ protected string GetModuleDirectory()
 /// With PyTorch and TensorFlow in Python, you need to install the specific flavor(s)
 /// of PyTorch and/or TensorFlow specific to your CPU and GPU, and have the appropriate
 /// libraries and drivers for the GPU installed as well. It won't be easy, or
- /// necessarily even possible, or practical given dowwnload sizes, to install all the
+ /// necessarily even possible, or practical given download sizes, to install all the
 /// packages at install time and then select them at runtime.
 ///
 ///
@@ -193,37 +223,44 @@ protected string GetModuleDirectory()
 /// correct version based on sniffed hardware.
 ///
 ///
- protected async virtual void GetHardwareInfo()
+ protected virtual void GetHardwareInfo()
 {
- GpuInfo? gpu = await SystemInfo.GetGpuInfo();
+ GpuInfo? gpu = SystemInfo.GetGpuInfo();
 if (gpu is not null)
 {
 HardwareType = "GPU";
 ExecutionProvider = gpu.Vendor;
 }
 }
+ */
 ///
 /// Called before the main processing loops are started
 ///
 protected virtual void InitModule()
 {
- GetHardwareInfo();
 }
+ ///
+ /// Processes the request received from the server queue.
+ ///
+ /// The Request data.
+ /// An object to serialize back to the server.
+ protected abstract BackendResponseBase ProcessRequest(BackendRequest request);
+
 ///
 /// Start the process.
 ///
 /// The cancellation token.
- ///
+ /// A Task
 protected override async Task ExecuteAsync(CancellationToken token)
 {
 _cancelled = false;
 await Task.Delay(1_000, token).ConfigureAwait(false);
- await _codeprojectAI.LogToServer($"{ModuleName} module started.", $"{ModuleName}",
- LogLevel.Information, string.Empty, token);
+ await _aiClient.LogToServer($"{ModuleName} module started.", $"{ModuleName}",
+ LogLevel.Information, string.Empty, token).ConfigureAwait(false);
 InitModule();
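Modules consume this base class by subclassing it and overriding the abstract ProcessRequest declared above. A minimal hypothetical subclass as a sketch (the EchoWorker name and its response are illustrative only; the constructor shape follows the logger/configuration pair shown in the hunk above, and using directives for the SDK types are omitted since their namespace isn't visible in this diff):

using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;

// Hypothetical module: does no inference, just acknowledges the command.
public class EchoWorker : CommandQueueWorker
{
    public EchoWorker(ILogger<EchoWorker> logger, IConfiguration config)
        : base(logger, config)
    {
    }

    protected override BackendResponseBase ProcessRequest(BackendRequest request)
    {
        // The worker loop stamps moduleId, executionProvider and command onto
        // this response before serializing it back to the server.
        return new BackendResponseBase { code = 200 };
    }
}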
@@ -236,8 +273,8 @@ await _codeprojectAI.LogToServer($"{ModuleName} module started.", $"{ModuleName}
 private async Task ProcessQueue(CancellationToken token)
 {
- Task requestTask = _codeprojectAI.GetRequest(_queueName!, _moduleId!,
- token, ExecutionProvider);
+ Task requestTask = _aiClient.GetRequest(_queueName!, _moduleId!,
+ token, ExecutionProvider);
 Task? responseTask = null;
 BackendRequest? request;
@@ -245,18 +282,18 @@ private async Task ProcessQueue(CancellationToken token)
 {
 try
 {
- request = await requestTask;
- requestTask = _codeprojectAI.GetRequest(_queueName!, _moduleId!, token,
- ExecutionProvider);
+ request = await requestTask.ConfigureAwait(false);
+ requestTask = _aiClient.GetRequest(_queueName!, _moduleId!, token,
+ ExecutionProvider);
 if (request is null)
 continue;
 // Special shutdown request
 string? requestModuleId = request.payload?.GetValue("moduleId");
- if (request.reqtype?.EqualsIgnoreCase("quit") == true && // Or, request.payload.command.EqualsIgnoreCase("quit") ...
+ if (request.payload?.command?.EqualsIgnoreCase("quit") == true &&
 requestModuleId?.EqualsIgnoreCase(_moduleId) == true)
 {
- await StopAsync(token);
+ await StopAsync(token).ConfigureAwait(false);
 return;
 }
@@ -265,25 +302,24 @@ private async Task ProcessQueue(CancellationToken token)
 stopWatch.Stop();
 long processMs = stopWatch.ElapsedMilliseconds;
- response.processMs = processMs;
-
- // We recheck. Maybe hardware utilisation has changed based on the requeest or
- // the environment (power, performance, GPU temp)
- GetHardwareInfo();
+ response.processMs = processMs;
+ response.moduleId = _moduleId;
+ response.executionProvider = ExecutionProvider ?? string.Empty;
+ response.command = request.payload?.command ?? string.Empty;
 HttpContent content = JsonContent.Create(response, response.GetType());
 // Slightly faster as we don't wait for the request to complete before moving
 // on to the next.
 if (responseTask is not null)
- await responseTask;
+ await responseTask.ConfigureAwait(false);
- responseTask = _codeprojectAI.SendResponse(request.reqid, _moduleId!, content,
- token, ExecutionProvider);
+ responseTask = _aiClient.SendResponse(request.reqid, _moduleId!, content, token);
- await _codeprojectAI.LogToServer($"Command completed in {response.processMs} ms.",
- $"{ModuleName}", LogLevel.Information,
- "command timing", token);
+ await _aiClient.LogToServer($"Command completed in {response.processMs} ms.",
+ $"{ModuleName}", LogLevel.Information,
+ "command timing", token)
+ .ConfigureAwait(false);
 }
 catch (TaskCanceledException) when (_cancelled)
 {
@@ -301,29 +337,5 @@ await _codeprojectAI.LogToServer($"Command completed in {response.processMs} ms.
 _cancellationTokenSource.Cancel();
 }
-
- ///
- /// Processes the request receive from the server queue.
- ///
- /// The Request data.
- /// An object to serialize back to the server.
- public abstract BackendResponseBase ProcessRequest(BackendRequest request);
-
- ///
- /// Stop the process.
- ///
- /// The stopping cancellation token.
- ///
- public override async Task StopAsync(CancellationToken token)
- {
- _cancelled = true;
-
- await _codeprojectAI.LogToServer($"Shutting down {_moduleId}", _moduleId!,
- LogLevel.Information, string.Empty, token);
-
- _cancellationTokenSource.Cancel();
-
- await base.StopAsync(token);
- }
 }
}
diff --git a/src/SDK/NET/Analysis/ImageUtils.cs b/src/SDK/NET/Analysis/ImageUtils.cs
new file mode 100644
index 00000000..8d05c700
--- /dev/null
+++ b/src/SDK/NET/Analysis/ImageUtils.cs
@@ -0,0 +1,69 @@
+
+using SkiaSharp;
+using SkiaSharp.Views.Desktop;
+
+namespace CodeProject.AI.SDK
+{
+ ///
+ /// Provides utility methods for loading images using SkiaSharp.
+ ///
+ public class ImageUtils
+ {
+ ///
+ /// Loads an image from a file.
+ ///
+ /// The file name.
+ /// The SKImage, or null.
+ /// SkiaSharp handles more image formats than System.Drawing.
+ public static SKImage? GetImage(string? filename)
+ {
+ if (string.IsNullOrWhiteSpace(filename))
+ return null;
+
+ // TODO: Add error handling and port this to Maui
+ var skiaImage = SKImage.FromEncodedData(filename);
+ if (skiaImage is null)
+ return null;
+
+ return skiaImage; //.ToBitmap();
+ }
+
+ // Using SkiaSharp as it handles more formats.
+ public static SKImage? GetImage(byte[]? imageData)
+ {
+ if (imageData == null)
+ return null;
+
+ var skiaImage = SKImage.FromEncodedData(imageData);
+ if (skiaImage is null)
+ return null;
+
+ return skiaImage; //.ToBitmap();
+ }
+
+ ///
+ /// Gets an image from a stream
+ ///
+ /// The stream
+ /// A SKImage object
+ ///
+ /// With this we don't have to extract the bytes into a byte[], SkiaSharp can work with the
+ /// stream from the IFormFile directly, and handles multiple formats. A big space and time
+ /// savings.
+ /// TODO: update the code in the NNNQueueWorkers to use this. Will need to update
+ /// RequestFormFile to hold the stream. This will require RFF and any holder of RFF to be
+ /// IDisposable.
+ ///
+ public static SKImage? GetImage(Stream imageStream)
+ {
+ if (imageStream == null)
+ return null;
+
+ var skiaImage = SKImage.FromEncodedData(imageStream);
+ if (skiaImage is null)
+ return null;
+
+ return skiaImage; //.ToBitmap();
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/SDK/NET/Common/SystemInfo.cs b/src/SDK/NET/Common/SystemInfo.cs
index 898fc20b..3e849170 100644
--- a/src/SDK/NET/Common/SystemInfo.cs
+++ b/src/SDK/NET/Common/SystemInfo.cs
@@ -21,9 +21,10 @@ public class CpuCollection: List
 ///
 public class CpuInfo
 {
- public string? Name { get; set; }
- public uint NumberOfCores { get; set; }
- public uint LogicalProcessors { get; set; }
+ public string? Name { get; set; }
+ public string? HardwareVendor { get; set; }
+ public uint NumberOfCores { get; set; }
+ public uint LogicalProcessors { get; set; }
 };
 ///
@@ -31,7 +32,8 @@ public class CpuInfo
 ///
 public class MemoryProperties
 {
- public ulong Total { get; set; }
+ public ulong Total { get; set; }
+ public ulong Used { get; set; }
 public ulong Available { get; set; }
 };
@@ -45,7 +47,7 @@ public class GpuInfo
 ///
 /// Gets or sets the card's vendor
 ///
- public string? Vendor { get; set; }
+ public string?
HardwareVendor { get; set; } /// /// Gets or sets the driver version string @@ -81,8 +83,8 @@ public virtual string Description if (TotalMemory > 0) info.Append($" ({SystemInfo.FormatSizeBytes(TotalMemory, 0)})"); - if (!string.IsNullOrWhiteSpace(Vendor)) - info.Append($" ({Vendor})"); + if (!string.IsNullOrWhiteSpace(HardwareVendor)) + info.Append($" ({HardwareVendor})"); if (!string.IsNullOrWhiteSpace(DriverVersion)) info.Append($" Driver: {DriverVersion}"); @@ -96,10 +98,19 @@ public class NvidiaInfo : GpuInfo { public NvidiaInfo() : base() { - Vendor = "NVidia"; + HardwareVendor = "NVIDIA"; } - public string? CudaVersion { get; set; } + /// + /// The CUDA version that the current driver is capable of using + /// + public string? CudaVersionCapability { get; set; } + + /// + /// The actual version of CUDA that's installed + /// + public string? CudaVersionInstalled { get; set; } + public string? ComputeCapacity { get; internal set; } /// @@ -110,13 +121,13 @@ public override string Description { get { - var info = base.Description - + " CUDA: " + CudaVersion + var info = base.Description + + $" CUDA: {CudaVersionInstalled} (capable: {CudaVersionCapability})" + " Compute: " + ComputeCapacity; return info; } - } + } } public enum RuntimeEnvironment @@ -181,9 +192,19 @@ public class SystemInfo { // The underlying object that does the investigation into the properties. // The other properties mostly rely on this creature for their worth. - private static HardwareInfo? _hardwareInfo = null; - private static bool? _isDevelopment; - private static bool? _hasNvidiaCard; + private static HardwareInfo _hardwareInfo = new HardwareInfo(); + private static bool? _isDevelopment; + private static bool? _hasNvidiaCard; + private static bool _isWSL; + + private static TimeSpan _nvidiaInfoRefreshTime = TimeSpan.FromSeconds(10); + private static TimeSpan _systemInfoRefreshTime = TimeSpan.FromSeconds(1); + + private static Task? _monitorSystemUsageTask; + private static Task? _monitoryGpuUsageTask; + private static bool _monitoringStartedWarningIssued; + private static int _cpuUsage; + private static string? _hardwareVendor; /// /// Gets the CPU properties for this system @@ -193,9 +214,11 @@ public class SystemInfo /// /// Gets the Memory properties for this system /// - public static MemoryProperties? Memory { get; private set; } + public static MemoryProperties Memory { get; private set; } = new MemoryProperties(); + /// /// Gets the GPU properties for this system + /// public static GpuInfo? GPU { get; private set; } /// @@ -207,7 +230,7 @@ public class SystemInfo /// Whether or not this system contains an Nvidia card. If the value is /// null it means we've not been able to determine. /// - public static bool? HasNvidiaGPU => _hasNvidiaCard; + public static bool? HasNvidiaGPU => _hasNvidiaCard; /// /// Gets a value indicating whether we are running development code. @@ -247,8 +270,13 @@ public static RuntimeEnvironment RuntimeEnvironment get { if (IsDevelopmentCode || - Environment.GetEnvironmentVariable("ASPNETCORE_ENVIRONMENT").EqualsIgnoreCase("true")) + // Really should use the IHostEnvironment.IsDevelopment method, but needs a reference to + // IHostEnvironment. 
+ Environment.GetEnvironmentVariable("ASPNETCORE_ENVIRONMENT").EqualsIgnoreCase("Development") || + Environment.GetEnvironmentVariable("DOTNET_ENVIRONMENT").EqualsIgnoreCase("Development")) + { return RuntimeEnvironment.Development; + } return RuntimeEnvironment.Production; } @@ -344,34 +372,90 @@ public static string Architecture // if (RuntimeInformation.ProcessArchitecture == System.Runtime.InteropServices.Architecture.Wasm) // return "Wasm"; - return ""; + return string.Empty; + } + } + /// + /// Gets the name of the system under which we're running. Windows, WSL, macOS, Docker, + /// Raspberry Pi, Orange Pi, Jetson, Linux, macOS. + /// + public static string SystemName + { + get + { + if (IsWindows) + return IsWSL? "WSL" : "Windows"; + + if (HardwareVendor == "Raspberry Pi" || HardwareVendor == "Jetson" || + HardwareVendor == "Orange Pi") + return HardwareVendor; + + if (IsDocker) + return "Docker"; + + return OperatingSystem; } } /// - /// Gets the hardware vendor. + /// Gets a value indicating whether we are currently running under WSL. + /// + public static bool IsWSL + { + get { return _isWSL; } + } + + /// + /// Gets the hardware vendor of the current system. /// - public static string Vendor + public static string HardwareVendor { get { - if (OperatingSystem == "macOS") - return "Apple"; + if (_hardwareVendor is not null) + return _hardwareVendor; - if (OperatingSystem == "Linux") + if (IsMacOS) + _hardwareVendor = "Apple"; + + if (IsLinux) { try { - string cpuInfo = File.ReadAllText("/proc/cpuinfo"); + // string cpuInfo = File.ReadAllText("/proc/cpuinfo"); - no good for OrangePi + string cpuInfo = File.ReadAllText("/sys/firmware/devicetree/base/model"); if (cpuInfo.Contains("Raspberry Pi")) - return "Raspberry Pi"; + _hardwareVendor = "Raspberry Pi"; + else if (cpuInfo.ContainsIgnoreCase("Orange Pi")) + _hardwareVendor = "Orange Pi"; } catch {} + + if (_hardwareVendor is null) + { + try + { + string cpuInfo = File.ReadAllText("/proc/device-tree/model"); + if (cpuInfo.Contains("Jetson")) + _hardwareVendor = "NVIDIA Jetson"; + } + catch {} + } } - // Intel and AMD chips... + // Intel and AMD chips are generic, so just report them. 
+ if (_hardwareVendor is null && CPU is not null) + { + if (CPU[0].HardwareVendor == "Intel") + _hardwareVendor = "Intel"; + else if (CPU[0].HardwareVendor == "AMD") + _hardwareVendor = "AMD"; + } - return "Unknown"; + if (_hardwareVendor is null) + _hardwareVendor = "Unknown"; + + return _hardwareVendor; } } @@ -400,6 +484,31 @@ public static string OperatingSystem } } + /// + /// Gets a value indicating whether the current OS is Windows + /// + public static bool IsWindows => RuntimeInformation.IsOSPlatform(OSPlatform.Windows); + + /// + /// Gets a value indicating whether the current OS is Linux + /// + public static bool IsLinux => RuntimeInformation.IsOSPlatform(OSPlatform.Linux); + + /// + /// Gets a value indicating whether the current OS is macOS + /// + public static bool IsMacOS => RuntimeInformation.IsOSPlatform(OSPlatform.OSX); + + /// + /// Gets a value indicating whether the current OS is FreeBSD + /// + public static bool IsFreeBSD => RuntimeInformation.IsOSPlatform(OSPlatform.FreeBSD); + + /// + /// Gets a value indicating whether we are currently running in Docker + /// + public static bool IsDocker => ExecutionEnvironment == ExecutionEnvironment.Docker; + /// /// Returns the Operating System description, with corrections for Windows 11 /// @@ -411,7 +520,7 @@ public static string OperatingSystemDescription // C'mon guys: technically the version may be 10.x, but stick to the branding that // the rest of the world understands. if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows) && - Environment.OSVersion.Version.Major >= 10 && + Environment.OSVersion.Version.Major >= 10 && Environment.OSVersion.Version.Build >= 22000) return RuntimeInformation.OSDescription.Replace("Windows 10.", "Windows 11 version 10."); @@ -428,41 +537,34 @@ public static string OperatingSystemVersion } /// - /// Static constructor + /// Returns a value indicating whether system resources (Memory, CPU) are being monitored. /// - static SystemInfo() - { - // InitializeAsync(); - } + public static bool IsResourceUsageMonitoring => _monitorSystemUsageTask != null; /// - /// Initializes the SystemInfo class async. This method is here in case we want to avoid a - /// static constructor that makes potentially blocking calls to async methods. The static - /// method would be removed, and this method would be called at the start of whatever app - /// used this class. + /// Initializes the SystemInfo class. /// - /// public static async Task InitializeAsync() { - // Not necessary. - // await Task.Run(() => - // { - try - { - _hardwareInfo = new HardwareInfo(); - _hardwareInfo.RefreshCPUList(false); // false = no CPU %. Saves 21s delay on first use - _hardwareInfo.RefreshMemoryStatus(); - _hardwareInfo.RefreshVideoControllerList(); - } - catch - { - } - // }); + try + { + _hardwareInfo.RefreshCPUList(false); // false = no CPU %. Saves 21s delay on first use + _hardwareInfo.RefreshMemoryStatus(); + _hardwareInfo.RefreshVideoControllerList(); + } + catch + { + } - GPU = await GetGpuInfo(); // <- This await is the reason we have async messiness. 
- CPU = GetCpuInfo(); - Memory = GetMemoryInfo(); + CPU = GetCpuInfo(); + GPU = GetGpuInfo(); + await GetMemoryInfoAsync().ConfigureAwait(false); + + await CheckForWslAsync().ConfigureAwait(false); + _monitorSystemUsageTask = MonitorSystemUsageAsync(); + _monitoryGpuUsageTask = MonitorNvidiaGpuUsageAsync(); + InitSummary(); } @@ -471,7 +573,7 @@ public static async Task InitializeAsync() /// /// A string object public static string GetSystemInfo() - { + { var info = new StringBuilder(); string? gpuDesc = GPU?.Description; @@ -482,7 +584,12 @@ public static string GetSystemInfo() { var cpus = new StringBuilder(); if (!string.IsNullOrEmpty(CPU[0].Name)) - cpus.Append(CPU[0].Name + "\n "); + { + cpus.Append(CPU[0].Name); + if (!string.IsNullOrWhiteSpace(CPU[0].HardwareVendor)) + cpus.Append($" ({CPU[0].HardwareVendor})"); + cpus.Append("\n "); + } cpus.Append(CPU.Count + " CPU"); if (CPU.Count != 1) @@ -519,12 +626,12 @@ public static string GetSystemInfo() } /// - /// Returns GPU usage info for the current system + /// Returns GPU idle info for the current system /// - public static async ValueTask GetGpuUsageInfo() + public static async ValueTask GetGpuUsageInfoAsync() { - int gpu3DUsage = await GetGpuUsage(); - string gpuMemUsage = FormatSizeBytes(await GetGpuMemoryUsage(), 1); + int gpu3DUsage = await GetGpuUsageAsync().ConfigureAwait(false); + string gpuMemUsage = FormatSizeBytes(await GetGpuMemoryUsageAsync().ConfigureAwait(false), 1); var info = new StringBuilder(); info.AppendLine("System GPU info:"); @@ -539,7 +646,7 @@ public static async ValueTask GetGpuUsageInfo() /// Returns the CPU temperature in C. /// /// The temperature in C - public static async ValueTask GetCpuTemp() + public static async ValueTask GetCpuTempAsync() { return await new ValueTask(0); @@ -550,7 +657,7 @@ public static async ValueTask GetCpuTemp() /// /// Returns Video adapter info for the current system /// - public static async ValueTask GetVideoAdapterInfo() + public static async ValueTask GetVideoAdapterInfoAsync() { var info = new StringBuilder(); @@ -569,164 +676,56 @@ public static async ValueTask GetVideoAdapterInfo() } } - return await new ValueTask(info.ToString().Trim()); + return await new ValueTask(info.ToString().Trim()).ConfigureAwait(false); } /// - /// Gets the current GPU utilisation as a % + /// Gets the CPU Usage for this system. /// - /// A float representing bytes - public async static ValueTask GetCpuUsage() + /// An int representing the percentage of CPU capacity used + /// This method could be (and was) a property, but to stick to the format we had + /// where we use 'Get' to query an instantaneous value it's been switched to a method. + public static int GetCpuUsage() { - int usage = 0; - - // ANNOYANCE: We have Windows, Linux and macOS defined as constants in the Common.targets - // file in /SDK/NET. These constants work great in Windows. Sometimes in Linux. Never in - // macOS on Apple Silicon. So we work around it. - try - { -// #if Windows - if (OperatingSystem == "Windows") - { - // Easier, but this incurs a 21 sec delay at startup, and after 15 min idle. 
- // _hardwareInfo.RefreshCPUList(true); - // int usage = (int) _hardwareInfo.CpuList.Average(cpu => (float)cpu.PercentProcessorTime); - - List utilization = GetCounters("Processor", - "% Processor Time", - "_Total"); - utilization.ForEach(x => x.NextValue()); - await Task.Delay(1000); - usage = (int)utilization.Sum(x => x.NextValue()); - } -// #elif Linux - else if (OperatingSystem == "Linux") - { - // Easier but not yet tested - // _hardwareInfo.RefreshCPUList(true); - // int usage = (int) _hardwareInfo.CpuList.Average(cpu => (float)cpu.PercentProcessorTime); - - // Output is in the form: - // top - 08:38:12 up 1:20, 0 users, load average: 0.00, 0.00, 0.00 - // Tasks: 5 total, 1 running, 4 sleeping, 0 stopped, 0 zombie - // %Cpu(s): 0.0 us, 0.0 sy, 0.0 ni, ... <-- this line, sum of values 1-3 - - var results = await GetProcessInfo("/bin/bash", "-c \"top -b -n 1\""); - var lines = results["output"]?.Split("\n"); - - if (lines is not null && lines.Length > 2) - { - string pattern = @"(?[\d.]+)\s*us,\s*(?[\d.]+)\s*sy,\s*(?[\d.]+)\s*ni"; - Match match = Regex.Match(lines[2], pattern, RegexOptions.ExplicitCapture); - var userTime = match.Groups["userTime"].Value; - var systemTime = match.Groups["systemTime"].Value; - var niceTime = match.Groups["niceTime"].Value; - - usage = (int)(float.Parse(userTime) + float.Parse(systemTime) + float.Parse(niceTime)); - } - } -// #elif macOS - else if (OperatingSystem == "macOS") - { - // oddly, hardware.info hasn't yet added CPU usage for macOS + CheckMonitoringStarted(); - // Output is in the form: - // CPU usage: 12.33% user, 13.63% sys, 74.2% idle - string pattern = @"CPU usage:\s+(?[\d.]+)%\s+user,\s*(?[\d.]+)%\s*sys,\s*(?[\d.]+)%\s*idle"; - var results = await GetProcessInfo("/bin/bash", "-c \"top -l 1 | grep -E '^CPU'\"", - pattern); - usage = (int)(float.Parse(results["userTime"]) + float.Parse(results["systemTime"])); - } -// #else - else - { - Console.WriteLine("WARNING: Getting CPU usage for unknown OS: " + OperatingSystem); - await Task.Delay(0); - } -// #endif - } - catch (Exception e) - { - Console.WriteLine("Failed to get CPU use: " + e); - } - - return (int) usage; + return _cpuUsage; } /// /// Gets the amount of System memory currently in use /// /// A long representing bytes - public async static ValueTask GetSystemMemoryUsage() + /// This method could be (and was) a property, but to stick to the format we had + /// where we use 'Get' to query an instantaneous value it's been switched to a method. + public static ulong GetSystemMemoryUsage() { - ulong memoryUsed = 0; - - // ANNOYANCE: We have Windows, Linux and macOS defined as constants in the Common.targets - // file in /SDK/NET. These constants work great in Windows. Sometimes in Linux. Never in - // macOS on Apple Silicon. So we work around it. 
- -// #if Windows - if (OperatingSystem == "Windows") - { - var processes = Process.GetProcesses(); - memoryUsed = (ulong)processes.Sum(p => p.WorkingSet64); - } -// #elif Linux - else if (OperatingSystem == "Linux") - { - // Easier but not yet tested - // _hardwareInfo.RefreshMemoryStatus(); - // return _hardwareInfo.MemoryStatus.TotalPhysical - _hardwareInfo.MemoryStatus.AvailablePhysical; - - // Output is in the form: - // total used free - // Mem: XXX YYY ZZZ <- We want tokens 1 - 3 from this line - // Swap: xxx yyy zzz - - var results = await GetProcessInfo("/bin/bash", "-c \"free -b\""); - var lines = results["output"]?.Split("\n"); + CheckMonitoringStarted(); - if (lines is not null && lines.Length > 1) - { - var memory = lines[1].Split(" ", StringSplitOptions.RemoveEmptyEntries); - memoryUsed = ulong.Parse(memory[2]); - // ulong totalMemory = ulong.Parse(memory[1]); - // ulong memoryFree = ulong.Parse(memory[3]); - } - } -// #elif macOS - else if (OperatingSystem == "macOS") + lock (Memory) { - var results = await GetProcessInfo("/bin/bash", - "-c \"ps -caxm -orss= | awk '{ sum += $1 } END { print sum * 1024 }'\"", - "(?[\\d.]+)"); - ulong.TryParse(results["memused"], out memoryUsed); - } -// #else - else - { - Console.WriteLine("WARNING: Getting memory usage for unknown OS: " + OperatingSystem); - } -// #endif - return memoryUsed; + return Memory.Used; + }; } /// /// Gets the current GPU utilisation as a % /// /// An int representing bytes - public async static ValueTask GetGpuUsage() + public async static ValueTask GetGpuUsageAsync() { - NvidiaInfo? gpuInfo = await ParseNvidiaSmi(); + CheckMonitoringStarted(); + + // NVIDIA cards are continuously monitored. Grab the latest and go. + NvidiaInfo? gpuInfo = GPU as NvidiaInfo; if (gpuInfo is not null) return gpuInfo.Utilization; int usage = 0; -// #if Windows try { - if (OperatingSystem == "Windows") + if (IsWindows) { List utilization = GetCounters("GPU Engine", "Utilization Percentage", @@ -737,28 +736,47 @@ public async static ValueTask GetGpuUsage() // read operation returns 0.0. The recommended delay time between calls to the // NextValue method is one second utilization.ForEach(x => x.NextValue()); - await Task.Delay(1000); + await Task.Delay(_systemInfoRefreshTime).ConfigureAwait(false); usage = (int)utilization.Sum(x => x.NextValue()); } -// #else - else if (Vendor == "Raspberry Pi") + else if (SystemName == "Raspberry Pi") { - var results = await GetProcessInfo("vcgencmd", "get_config core_freq", @"=(?\d+)"); - if (ulong.TryParse(results["maxFreq"], out ulong maxFreq)) + ulong maxFreq = 0; + ulong freq = 0; + + string args = "get_config core_freq"; + var pattern = @"=(?\d+)"; + var results = await GetProcessInfoAsync("vcgencmd", args, pattern).ConfigureAwait(false); + if ((results?.Count ?? 0) > 0 && ulong.TryParse(results!["maxFreq"], out maxFreq)) maxFreq *= 1_000_000; - results = await GetProcessInfo("vcgencmd", "measure_clock core", @"=(?\d+)"); - ulong.TryParse(results["freq"], out ulong freq); + args = "measure_clock core"; + pattern = @"=(?\d+)"; + results = await GetProcessInfoAsync("vcgencmd", args, pattern).ConfigureAwait(false); + if ((results?.Count ?? 0) > 0) + ulong.TryParse(results!["freq"], out freq); if (maxFreq > 0) usage = (int)(freq * 100 / maxFreq); } + else if (HardwareVendor == "NVIDIA Jetson") + { + // NVIDIA card, so we won't even reach here + } + else if (IsLinux) // must come after Pi, Jetson + { + // ... 
+ } + else if (IsMacOS) + { + // macOS doesn't provide non-admin access to GPU info + } } catch { } -// #endif + return usage; } @@ -766,55 +784,79 @@ public async static ValueTask GetGpuUsage() /// Gets the amount of GPU memory currently in use /// /// A long representing bytes - public async static ValueTask GetGpuMemoryUsage() + public async static ValueTask GetGpuMemoryUsageAsync() { - NvidiaInfo? gpuInfo = await ParseNvidiaSmi(); + CheckMonitoringStarted(); + + // NVIDIA cards are continuously monitored. Grab the latest and go. + NvidiaInfo? gpuInfo = GPU as NvidiaInfo; if (gpuInfo is not null) return gpuInfo.MemoryUsed; - ulong memoryUsed = 0; + ulong gpuMemoryUsed = 0; try { -// #if Windows - if (OperatingSystem == "Windows") + if (IsWindows) { List counters = GetCounters("GPU Process Memory", "Dedicated Usage", null); - // gpuCounters.ForEach(x => x.NextValue()); - // Thread.Sleep(1000); - - memoryUsed = (ulong)counters.Sum(x => (long)x.NextValue()); + gpuMemoryUsed = (ulong)counters.Sum(x => (long)x.NextValue()); } - if (Vendor == "Raspberry Pi") + else if (SystemName == "Raspberry Pi") + { + /* + vcgencmd get_mem + Where type is: + arm: total memory assigned to arm (incorrect on systems > 16GB) + gpu: total memory assigned to gpu + malloc_total: total memory assigned to gpu malloc heap + malloc: free gpu memory in malloc heap + reloc_total: total memory assigned to gpu relocatable heap + reloc: free gpu memory in relocatable heap + */ + + string args = "get_mem malloc_total"; + var pattern = @"=(?\d+)"; + var results = await GetProcessInfoAsync("vcgencmd", args, pattern).ConfigureAwait(false); + if ((results?.Count ?? 0) > 0 && ulong.TryParse(results!["memused"], out ulong memUsed)) + gpuMemoryUsed = memUsed * 1024 * 1024; + } + else if (SystemName == "NVIDIA Jetson") + { + // NVIDIA card, so we won't even reach here + } + else if (IsLinux) // must come after Pi, Jetson { - var results = await GetProcessInfo("vcgencmd", "get_mem malloc_total", @"=(?\d+)"); - if (ulong.TryParse(results["memused"], out ulong memUsed)) - memoryUsed = memUsed * 1024 * 1024; + // ... + } + else if (IsMacOS) + { + // macOS doesn't provide non-admin access to GPU info } -// #endif } catch { } - return memoryUsed; + return gpuMemoryUsed; } - public async static ValueTask GetGpuInfo() + public static GpuInfo? GetGpuInfo() { - if (Architecture == "Arm64" && OperatingSystem == "macOS") + if (Architecture == "Arm64" && IsMacOS) { return new GpuInfo { - Name = "Apple Silicon", - Vendor = "Apple" + Name = "Apple Silicon", + HardwareVendor = "Apple" }; } - GpuInfo? gpu = await ParseNvidiaSmi(); - if (gpu is null && _hardwareInfo != null) + // We may already have a GPU object set via the continuous NVIDIA monitoring. + GpuInfo? gpu = GPU; + if (gpu is null) { foreach (var videoController in _hardwareInfo.VideoControllerList) { @@ -823,10 +865,10 @@ public async static ValueTask GetGpuMemoryUsage() gpu = new GpuInfo { - Name = videoController.Name, - Vendor = videoController.Manufacturer, - DriverVersion = videoController.DriverVersion, - TotalMemory = videoController.AdapterRAM + Name = videoController.Name, + HardwareVendor = videoController.Manufacturer, + DriverVersion = videoController.DriverVersion, + TotalMemory = videoController.AdapterRAM }; break; @@ -837,31 +879,262 @@ public async static ValueTask GetGpuMemoryUsage() } /// - /// Returns information on the first GPU found (TODO: Extend to multiple GPUs) + /// A task that constantly updates the GPU usage for NVIDIA cards in the background. 
Note
+ /// that this method ONLY monitors NVIDIA cards. If there is no NVIDIA card it will return.
+ ///
+ /// This method sets the static GPU object with whatever it finds, which is then used in
+ /// methods like GetGpuUsage. If there's an NVIDIA card, GetGpuUsage will use the NVIDIA
+ /// info, otherwise it will fall through and perform whatever platform specific lookups it
+ /// needs to in order to get the non-NVIDIA info it needs.
 ///
- /// An NvidiaInfo object
- private async static ValueTask ParseNvidiaSmi()
+ /// A Task
+ private static async Task MonitorNvidiaGpuUsageAsync()
 {
- // Do an initial fast check to see if we have an Nvidia card. This saves a process call
- // and exception in the case there's not Nvidia card. If _hardwareInfo is null then we
+ while (true)
+ {
+ GpuInfo? gpuInfo;
+ if (HardwareVendor == "NVIDIA Jetson")
+ gpuInfo = await ParseJetsonTegraStatsAsync().ConfigureAwait(false);
+ else
+ gpuInfo = await ParseNvidiaSmiAsync().ConfigureAwait(false);
+
+ if (gpuInfo is not null)
+ GPU = gpuInfo;
+
+ if (HasNvidiaGPU == false)
+ break;
+
+ await Task.Delay(_nvidiaInfoRefreshTime).ConfigureAwait(false);
+ }
+ }
+
+ private static void CheckMonitoringStarted()
+ {
+ if ( !IsResourceUsageMonitoring && !_monitoringStartedWarningIssued)
+ {
+ _monitoringStartedWarningIssued = true;
+
+ // Setting both fore and background colour so this is safe in Light and Dark mode.
+ ConsoleColor oldForeColour = Console.ForegroundColor;
+ ConsoleColor oldBackColour = Console.BackgroundColor;
+ Console.BackgroundColor = ConsoleColor.DarkRed;
+ Console.ForegroundColor = ConsoleColor.White;
+
+ Console.WriteLine("Warning: To monitor CPU and GPU resource usage you must call SystemInfo.Initialize");
+
+ Console.ForegroundColor = oldForeColour;
+ Console.BackgroundColor = oldBackColour;
+ }
+ }
+
+ ///
+ /// A task that constantly updates the System usage (CPU and memory) in the background
+ /// TODO: break this into classes for each OS
+ ///
+ /// A Task
+ private static async Task MonitorSystemUsageAsync()
+ {
+ if (IsWindows)
+ {
+ var cpuIdleCounter = new PerformanceCounter("Processor", "% Idle Time", "_Total");
+
+ var idleOld = (int)cpuIdleCounter.NextValue();
+ while (true)
+ {
+ // CPU%
+ await Task.Delay(_systemInfoRefreshTime).ConfigureAwait(false);
+ var idle = (int)cpuIdleCounter.NextValue();
+
+ // Take the average of previous and current measurements
+ _cpuUsage = 100 - (idle + idleOld) / 2;
+ idleOld = idle;
+
+ // Memory Info
+ await GetMemoryInfoAsync().ConfigureAwait(false);
+ }
+ }
+ else if (HardwareVendor == "NVIDIA Jetson")
+ {
+ // Jetson board is continuously monitored, so nothing to do here
+ }
+ else if (IsLinux)
+ {
+ while (true)
+ {
+ // Easier but not yet tested
+ // _hardwareInfo.RefreshCPUList(true);
+ // int usage = (int) _hardwareInfo.CpuList.Average(cpu => (float)cpu.PercentProcessorTime);
+
+ // Output is in the form:
+ // top - 08:38:12 up 1:20, 0 users, load average: 0.00, 0.00, 0.00
+ // Tasks: 5 total, 1 running, 4 sleeping, 0 stopped, 0 zombie
+ // %Cpu(s): 0.0 us, 0.0 sy, 0.0 ni, ... <-- this line, sum of values 1-3
+
+ var results = await GetProcessInfoAsync("/bin/bash", "-c \"top -b -n 1\"").ConfigureAwait(false);
+ if ((results?.Count ??
0) > 0) + { + var lines = results!["output"]?.Split("\n"); + + if (lines is not null && lines.Length > 2) + { + string pattern = @"(?[\d.]+)\s*us,\s*(?[\d.]+)\s*sy,\s*(?[\d.]+)\s*ni"; + Match match = Regex.Match(lines[2], pattern, RegexOptions.ExplicitCapture); + var userTime = match.Groups["userTime"].Value; + var systemTime = match.Groups["systemTime"].Value; + var niceTime = match.Groups["niceTime"].Value; + + _cpuUsage = (int)(float.Parse(userTime) + float.Parse(systemTime) + float.Parse(niceTime)); + } + } + + // Memory Info + await GetMemoryInfoAsync().ConfigureAwait(false); + + await Task.Delay(_systemInfoRefreshTime).ConfigureAwait(false); + } + } + else if (IsMacOS) + { + while (true) + { + // oddly, hardware.info hasn't yet added CPU usage for macOS + + // Output is in the form: + // CPU usage: 12.33% user, 13.63% sys, 74.2% idle + string pattern = @"CPU usage:\s+(?[\d.]+)%\s+user,\s*(?[\d.]+)%\s*sys,\s*(?[\d.]+)%\s*idle"; + var results = await GetProcessInfoAsync("/bin/bash", "-c \"top -l 1 | grep -E '^CPU'\"", + pattern).ConfigureAwait(false); + if ((results?.Count ?? 0) > 0) + _cpuUsage = (int)(float.Parse(results!["userTime"]) + float.Parse(results["systemTime"])); + + // Memory Info + await GetMemoryInfoAsync().ConfigureAwait(false); + + await Task.Delay(_systemInfoRefreshTime).ConfigureAwait(false); + } + } + else + { + Console.WriteLine("WARNING: Getting CPU usage for unknown OS: " + OperatingSystem); + await Task.Delay(0).ConfigureAwait(false); + } + } + + /// + /// Returns information on the first NVIDIA GPU found on the Jetson boards using the + /// tegrastats utility. If an NVIDIA card is found, the GPU property of this class will + /// contain the info we found. Otherwise, this method will return null and GPU will + /// remain unchanged. + /// + /// A nullable NvidiaInfo object + private async static ValueTask ParseJetsonTegraStatsAsync() + { + if (HasNvidiaGPU == false) + return null; + + NvidiaInfo? gpu = null; + + try + { + // Get CUDA version + // cat /usr/local/cuda/version.txt + // Output: CUDA Version 10.2.89 + string cudaVersion = string.Empty; + string pattern = @"CUDA Version (?[\d.]+)"; + var results = await GetProcessInfoAsync("/bin/bash", "-c \"cat /usr/local/cuda/version.txt\"", + pattern).ConfigureAwait(false); + if ((results?.Count ?? 0) > 0) + cudaVersion = results!["cudaVersion"]; + + // Get hardware stats + var info = new ProcessStartInfo("tegrastats"); + info.RedirectStandardOutput = true; + + using var process = Process.Start(info); + if (process?.StandardOutput is null) + return null; + + // We just need one line + string? 
output = await process.StandardOutput.ReadLineAsync()
+ .ConfigureAwait(false);
+ process.Kill();
+
+ if (!string.IsNullOrWhiteSpace(output))
+ {
+ // the format of the output is
+ // RAM 2893/3956MB (lfb 5x2MB) SWAP 233/1978MB (cached 2MB) CPU [21%@102,15%@102,21%@102,19%@102]
+ // EMC_FREQ 0% GR3D_FREQ 0% PLL@18C CPU@20.5C PMIC@100C GPU@20C AO@26C thermal@20C
+ // POM_5V_IN 2056/2056 POM_5V_GPU 40/40 POM_5V_CPU 161/161
+ pattern = @"RAM (?\d+?)/(?\d+?).*CPU \[(?\d+?)%.*GR3D_FREQ (?\d+?)%";
+ Match valueMatch = Regex.Match(output, pattern, RegexOptions.ExplicitCapture);
+
+ ulong.TryParse(valueMatch.Groups[1].Value, out ulong memoryUsedMiB);
+ ulong.TryParse(valueMatch.Groups[2].Value, out ulong totalMemoryMiB);
+ int.TryParse(valueMatch.Groups[3].Value, out _cpuUsage);
+ int.TryParse(valueMatch.Groups[4].Value, out int gpuUsage);
+
+ gpu = new NvidiaInfo
+ {
+ Name = "NVIDIA Jetson",
+ DriverVersion = "",
+ CudaVersionCapability = cudaVersion,
+ CudaVersionInstalled = cudaVersion,
+ Utilization = gpuUsage,
+ MemoryUsed = memoryUsedMiB * 1024UL * 1024UL,
+ TotalMemory = totalMemoryMiB * 1024UL * 1024UL,
+ ComputeCapacity = JetsonComputeCapability(HardwareVendor),
+ };
+ }
+ }
+ catch (Exception ex)
+ {
+ _hasNvidiaCard = false;
+ Debug.WriteLine(ex.ToString());
+ return null;
+ }
+
+ return gpu;
+ }
+
+ ///
+ /// Returns information on the first NVIDIA GPU found (TODO: Extend to multiple GPUs). If an
+ /// NVIDIA card is found, the GPU property of this class will contain the info we found.
+ /// Otherwise, this method will return null and GPU will remain unchanged.
+ ///
+ /// A nullable NvidiaInfo object
+ private async static ValueTask ParseNvidiaSmiAsync()
+ {
+ // Do an initial fast check to see if we have an NVIDIA card. This saves a process call
+ // and exception in the case there's no NVIDIA card. If _hardwareInfo is null then we
 // just plow on ahead regardless.
+ /* LET'S NOT. This doesn't work in Docker, even though smi is available.
 if (_hasNvidiaCard is null && _hardwareInfo is not null)
 {
 _hasNvidiaCard = false;
 foreach (var videoController in _hardwareInfo.VideoControllerList)
 {
- if (videoController.Manufacturer.ContainsIgnoreCase("NVidia"))
+ if (videoController.Manufacturer.ContainsIgnoreCase("NVIDIA"))
 _hasNvidiaCard = true;
 }
 }
-
- if (_hasNvidiaCard == false)
+ */
+
+ if (HasNvidiaGPU == false)
 return null;
-
- // TODO: Cache this value once a second
+
+ NvidiaInfo? gpu = null;
 try
 {
+ string gpuName = string.Empty;
+ string driverVersion = string.Empty;
+ string computeCapacity = string.Empty;
+ string cudaVersion = string.Empty;
+ string cudaVersionInstalled = string.Empty;
+ ulong memoryFreeMiB = 0;
+ ulong totalMemoryMiB = 0;
+ int gpuUtilPercent = 0;
+
 // Example call and response
 // nvidia-smi --query-gpu=count,name,driver_version,memory.total,memory.free,utilization.gpu,compute_cap --format=csv,noheader
 // 1, NVIDIA GeForce RTX 3060, 512.96, 12288 MiB, 10473 MiB, 4 %, 8.6
@@ -869,13 +1142,23 @@ public async static ValueTask GetGpuMemoryUsage()
 string args = "--query-gpu=count,name,driver_version,memory.total,memory.free,utilization.gpu --format=csv,noheader";
 string pattern = @"\d+,\s+(?.+?),\s+(?[\d.]+),\s+(?\d+)\s*MiB,\s+(?\d+)\s*MiB,\s+(?\d+)\s*%";
- var results = await GetProcessInfo("nvidia-smi", args, pattern);
+ var results = await GetProcessInfoAsync("nvidia-smi", args, pattern).ConfigureAwait(false);
+
+ // Failure to run nvidia-smi = no NVIDIA card installed
+ if (results is null)
+ {
+ _hasNvidiaCard = false;
+ return null;
+ }
- var gpuName = results["gpuname"];
- var driverVersion = results["driver"];
- ulong.TryParse(results["memfree"], out ulong memoryFreeMiB);
- ulong.TryParse(results["memtotal"], out ulong totalMemoryMiB);
- int.TryParse(results["gpuUtil"], out int gpuUtilPercent);
+ if (results.Count > 0)
+ {
+ gpuName = results["gpuname"];
+ driverVersion = results["driver"];
+ ulong.TryParse(results["memfree"], out memoryFreeMiB);
+ ulong.TryParse(results["memtotal"], out totalMemoryMiB);
+ int.TryParse(results["gpuUtil"], out gpuUtilPercent);
+ }
 ulong memoryUsedMiB = totalMemoryMiB - memoryFreeMiB;
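The named capture groups in the pattern above are what feed the results dictionary (the lookups a few lines up imply groups named gpuname, driver, memtotal, memfree and gpuUtil). As a self-contained illustration, here is how that parse behaves against the sample nvidia-smi line quoted in the comment (a sketch; the group names are inferred from the dictionary keys in this diff):

using System;
using System.Text.RegularExpressions;

// Sample line from:
// nvidia-smi --query-gpu=count,name,driver_version,memory.total,memory.free,utilization.gpu --format=csv,noheader
string line = "1, NVIDIA GeForce RTX 3060, 512.96, 12288 MiB, 10473 MiB, 4 %";

string pattern = @"\d+,\s+(?<gpuname>.+?),\s+(?<driver>[\d.]+),\s+" +
                 @"(?<memtotal>\d+)\s*MiB,\s+(?<memfree>\d+)\s*MiB,\s+(?<gpuUtil>\d+)\s*%";

// Named groups are still captured under ExplicitCapture; only unnamed groups are skipped.
Match match = Regex.Match(line, pattern, RegexOptions.ExplicitCapture);
if (match.Success)
{
    ulong total = ulong.Parse(match.Groups["memtotal"].Value);  // MiB
    ulong free  = ulong.Parse(match.Groups["memfree"].Value);   // MiB
    Console.WriteLine($"{match.Groups["gpuname"].Value}: " +
                      $"{total - free} MiB used, {match.Groups["gpuUtil"].Value}% utilisation");
    // Prints: NVIDIA GeForce RTX 3060: 1815 MiB used, 4% utilisation
}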
@@ -884,8 +1167,9 @@ public async static ValueTask GetGpuMemoryUsage()
 // 8.6
 args = "--query-gpu=compute_cap --format=csv,noheader";
 pattern = @"(?[\d\.]*)";
- results = await GetProcessInfo("nvidia-smi", args, pattern);
- string computeCapacity = results["computecap"];
+ results = await GetProcessInfoAsync("nvidia-smi", args, pattern).ConfigureAwait(false);
+ if ((results?.Count ?? 0) > 0)
+ computeCapacity = results!["computecap"];
 // Get CUDA info. Output is in the form:
 // Thu Dec 8 08:45:30 2022
@@ -893,30 +1177,49 @@ public async static ValueTask GetGpuMemoryUsage()
 // | NVIDIA-SMI 512.96 Driver Version: 512.96 CUDA Version: 11.6 |
 // |-------------------------------+----------------------+----------------------+
 pattern = @"Driver Version:\s+(?[\d.]+)\s*CUDA Version:\s+(?[\d.]+)";
- results = await GetProcessInfo("nvidia-smi", "", pattern);
- string cudaVersion = results["cuda"];
-
- // If we've reached this point we definitely have an Nvidia card.
+ results = await GetProcessInfoAsync("nvidia-smi", "", pattern).ConfigureAwait(false);
+ if ((results?.Count ?? 0) > 0)
+ cudaVersion = cudaVersionInstalled = results!["cuda"];
+
+
+ // Get actual installed CUDA info. Form is:
+ // nvcc: NVIDIA (R) Cuda compiler driver
+ // Copyright (c) 2005-2022 NVIDIA Corporation
+ // Built on Tue_May__3_19:00:59_Pacific_Daylight_Time_2022
+ // Cuda compilation tools, release 11.7, V11.7.64
+ // Build cuda_11.7.r11.7/compiler.31294372_0
+ pattern = @"Cuda compilation tools, release [\d.]+, V(?[\d.]+)";
+ results = await GetProcessInfoAsync("nvcc", "--version", pattern).ConfigureAwait(false);
+ if ((results?.Count ?? 0) > 0)
+ cudaVersionInstalled = results!["cuda"];
+
+ // If we've reached this point we definitely have an NVIDIA card.
_hasNvidiaCard = true; - return new NvidiaInfo + gpu = new NvidiaInfo { - Name = gpuName, - DriverVersion = driverVersion, - CudaVersion = cudaVersion, - Utilization = gpuUtilPercent, - MemoryUsed = memoryUsedMiB * 1024 * 1024, - TotalMemory = totalMemoryMiB * 1024 * 1024, - ComputeCapacity = computeCapacity, + Name = gpuName, + DriverVersion = driverVersion, + CudaVersionCapability = cudaVersion, + CudaVersionInstalled = cudaVersionInstalled, + Utilization = gpuUtilPercent, + MemoryUsed = memoryUsedMiB * 1024 * 1024, + TotalMemory = totalMemoryMiB * 1024 * 1024, + ComputeCapacity = computeCapacity, }; } - catch(Exception ex) + catch (Exception ex) { _hasNvidiaCard = false; Debug.WriteLine(ex.ToString()); return null; } - } + + // We need to be careful. In between us setting GPU here, another call to GetGpuInfo may + // have set GPU to null or GpuInfo (Sure, I can't actually picture how, but it's possible) + // return GPU as NvidiaInfo; + return gpu; + } /// /// Format Size from bytes to a Kb, MiB or GiB string, where 1KiB = 1024 bytes. @@ -992,13 +1295,89 @@ public static string FormatSizeBytes(ulong bytes, int rounding, bool useBinaryMu return result; } - private static MemoryProperties GetMemoryInfo() + private static async Task GetMemoryInfoAsync() { - return new MemoryProperties + ulong memoryTotal = 0; + ulong memoryFree = 0; + ulong memoryUsed = 0; + + if (IsWindows) { - Total = _hardwareInfo?.MemoryStatus.TotalPhysical ?? 0, - Available = _hardwareInfo?.MemoryStatus.AvailablePhysical ?? 0 - }; + /* + // An alternative to using _hardwareInfo + var gcMemoryInfo = GC.GetGCMemoryInfo(); + lock (Memory) + { + memoryTotal = (ulong)gcMemoryInfo.TotalAvailableMemoryBytes; + memoryUsed = (ulong)gcMemoryInfo.MemoryLoadBytes; + memoryFree = memoryTotal - memoryUsed; + } + */ + _hardwareInfo.RefreshMemoryStatus(); + + memoryFree = _hardwareInfo?.MemoryStatus?.AvailablePhysical ?? 0; + memoryTotal = _hardwareInfo?.MemoryStatus?.TotalPhysical ?? 0; + memoryUsed = memoryTotal - memoryFree; + } + else if (IsLinux) + { + // Not tested (maybe?) + // _hardwareInfo.RefreshMemoryStatus(); + // memoryFree = _hardwareInfo?.MemoryStatus?.AvailablePhysical ?? 0; + // memoryTotal = _hardwareInfo?.MemoryStatus?.TotalPhysical ?? 0; + // memoryUsed = memoryTotal - memoryFree; + + // Output is in the form: + // total used free + // Mem: XXX YYY ZZZ <- We want tokens 1 - 3 from this line + // Swap: xxx yyy zzz + + var results = await GetProcessInfoAsync("/bin/bash", "-c \"free -b\"").ConfigureAwait(false); + if ((results?.Count ?? 0) > 0) + { + var lines = results!["output"]?.Split("\n"); + if (lines is not null && lines.Length > 1) + { + var memory = lines[1].Split(" ", StringSplitOptions.RemoveEmptyEntries); + memoryTotal = ulong.Parse(memory[1]); + memoryUsed = ulong.Parse(memory[2]); + memoryFree = ulong.Parse(memory[3]); + } + } + } + else if (IsMacOS) + { + // _hardwareInfo returns bogus data on macOS + // You can try sysctl hw.memsize hw.physmem hw.usermem + // But it returns (without commas - added for readability): + // hw.memsize: 17,179,869,184 + // hw.physmem: 2,147,483,648 + // hw.usermem: 1,392,398,336 + // On a 16Gb machine. memsize and usermem are fine, but everything else bad + + // This seems to return proper total memory + _hardwareInfo.RefreshMemoryStatus(); + memoryTotal = _hardwareInfo?.MemoryStatus?.TotalPhysical ?? 
0; + + // and this returns used + string args = "-c \"ps -caxm -orss= | awk '{ sum += $1 } END { print sum * 1024 }'\""; + var results = await GetProcessInfoAsync("/bin/bash", args, "(?[\\d.]+)").ConfigureAwait(false); + if ((results?.Count ?? 0) > 0) + ulong.TryParse(results!["memused"], out memoryUsed); + + memoryFree = memoryTotal - memoryUsed; + } + else + { + Console.WriteLine("WARNING: Getting memory usage for unknown OS: " + OperatingSystem); + } + + lock (Memory) + { + Memory.Total = memoryTotal; + Memory.Used = memoryUsed; + Memory.Available = memoryFree; + } } private static CpuCollection GetCpuInfo() @@ -1015,6 +1394,19 @@ private static CpuCollection GetCpuInfo() LogicalProcessors = cpu.NumberOfLogicalProcessors }; + if (Architecture == "Arm64" && OperatingSystem == "macOS") + cpuInfo.HardwareVendor = "Apple"; + else if (cpu.Name.Contains("Raspberry")) + cpuInfo.HardwareVendor = "Raspberry Pi"; + else if (cpu.Name.Contains("Jetson")) + cpuInfo.HardwareVendor = "NVIDIA"; + else if (cpu.Name.Contains("Intel")) + cpuInfo.HardwareVendor = "Intel"; + else if (cpu.Name.Contains("AMD")) + cpuInfo.HardwareVendor = "AMD"; + else if (SystemName == "Orange Pi") + cpuInfo.HardwareVendor = "Rockchip"; + cpus.Add(cpuInfo); } } @@ -1026,6 +1418,20 @@ private static CpuCollection GetCpuInfo() return cpus; } + private async static Task CheckForWslAsync() + { + _isWSL = false; + + if (IsLinux) + { + // Output is in the form: + // Linux MachineName 5.15.90.1-microsoft-standard-WSL2 #1 SMP Fri Jan 27 02:56:13... + var results = await GetProcessInfoAsync("/bin/bash", "-c \"uname -a\"", null).ConfigureAwait(false); + if (results is not null) + _isWSL = results["output"]?.ContainsIgnoreCase("-microsoft-standard-WSL") == true; + } + } + private static void InitSummary() { Summary = new @@ -1050,15 +1456,15 @@ private static void InitSummary() : new Object[] { GPU is NvidiaInfo? new { Name = GPU.Name, - Vendor = GPU.Vendor, + Vendor = GPU.HardwareVendor, Memory = GPU.TotalMemory, DriverVersion = GPU.DriverVersion, - CUDAVersion = (GPU as NvidiaInfo)?.CudaVersion, + CUDAVersion = (GPU as NvidiaInfo)?.CudaVersionCapability, ComputeCapacity = (GPU as NvidiaInfo)?.ComputeCapacity } : new { Name = GPU.Name, - Vendor = GPU.Vendor, + Vendor = GPU.HardwareVendor, Memory = GPU.TotalMemory, DriverVersion = GPU.DriverVersion } @@ -1091,7 +1497,6 @@ private static List GetCounters(string category, string metr .SelectMany(counterName => perfCategory.GetCounters(counterName)) .Where(counter => counter.CounterName.Equals(metricName)) .ToList(); - } /// @@ -1113,9 +1518,9 @@ private static List GetCounters(string category, string metr /// "output" is always added as the first item in the dictionary, and is the full text output /// by the process call. The names "major", and "minor" were pulled from the regex pattern. /// - private async static ValueTask> GetProcessInfo(string command, - string args, - string? pattern = null) + private async static ValueTask?> GetProcessInfoAsync(string command, + string args, + string? pattern = null) { var values = new Dictionary(); @@ -1128,7 +1533,8 @@ private async static ValueTask> GetProcessInfo(string if (process?.StandardOutput is null) return values; - string? output = await process.StandardOutput.ReadToEndAsync() ?? string.Empty; + string? output = await process.StandardOutput.ReadToEndAsync() + .ConfigureAwait(false) ?? 
string.Empty;
 // Raw output
 values["output"] = output;
@@ -1152,13 +1558,35 @@ private async static ValueTask> GetProcessInfo(string
 }
 }
 }
+#if DEBUG
 catch (Exception e)
 {
 Console.WriteLine(e);
+ return null;
 }
-
+#else
+ catch
+ {
+ return null;
+ }
+#endif
 return values;
 }
+
+ private static string JetsonComputeCapability(string hardwareName)
+ {
+ return hardwareName switch
+ {
+ "Jetson AGX Orin" => "8.7",
+ "Jetson Orin NX" => "8.7",
+ "Jetson Orin Nano" => "8.7",
+ "Jetson AGX Xavier" => "7.2",
+ "Jetson Xavier NX" => "7.2",
+ "Jetson TX2" => "6.2",
+ "Jetson Nano" => "5.3",
+ _ => "5.3"
+ };
+ }
 }
}
#pragma warning restore CA1416 // Validate platform compatibility
\ No newline at end of file
diff --git a/src/SDK/NET/Common/Text.cs b/src/SDK/NET/Common/Text.cs
index ba8672c7..19b80eee 100644
--- a/src/SDK/NET/Common/Text.cs
+++ b/src/SDK/NET/Common/Text.cs
@@ -83,12 +83,21 @@ public static string StripXTermColors(string text)
 // Strip the reset code. [0m
 text = Regex.Replace(text, "\u001b\\[0m", string.Empty, RegexOptions.Compiled);
- // Bonus: Strip the 'spin' animation. |,/,-,\ + backspace
- text = Regex.Replace(text, "(\\||\\-|\\\\|\\/|\\s)[\\b]", string.Empty, RegexOptions.Compiled);
-
 return text;
 }
+ ///
+ /// Removes the spinner animation characters from a string.
+ ///
+ /// The string
+ /// A string
+ public static string StripSpinnerChars(string text)
+ {
+ // Strip the 'spin' animation. |,/,-,\ + backspace
+ // text = Regex.Replace(text, "([-\\\\\\|\\/])?[\\b]", string.Empty, RegexOptions.Compiled);
+ return Regex.Replace(text, "([\\-\\\\\\|\\/])?[\\b]", string.Empty, RegexOptions.Compiled);
+ }
+
 ///
 /// Corrects the direction of the slashes in a directory path so it's correct for the
 /// current OS.
@@ -109,9 +118,9 @@ public static string FixSlashes(string? path)
 /// The path
 /// The max length of the resultant string
 /// A string
- public static string ShrinkPath(string path, int maxLength)
+ public static string? ShrinkPath(string?
path, int maxLength) { - if (path.Length <= maxLength) + if (path is null || path.Length <= maxLength) return path; var parts = new List(path.Split(new char[] { '\\', '/' })); diff --git a/src/SDK/Python/Python.pyproj b/src/SDK/Python/Python.pyproj index a54f3144..d03973bc 100644 --- a/src/SDK/Python/Python.pyproj +++ b/src/SDK/Python/Python.pyproj @@ -15,13 +15,12 @@ False False True + True - 10.0 - @@ -34,15 +33,12 @@ - - + - - - + \ No newline at end of file diff --git a/src/SDK/Python/analysis/codeprojectai.py b/src/SDK/Python/analysis/codeprojectai.py index 04fb7791..62edf191 100644 --- a/src/SDK/Python/analysis/codeprojectai.py +++ b/src/SDK/Python/analysis/codeprojectai.py @@ -229,7 +229,7 @@ async def main_init(self): await self.log_async(LogMethod.Info | LogMethod.Server, { "message": self.module_name + " started.", - "loglevel": "information" + "loglevel": "trace" }) await asyncio.gather(*tasks) @@ -322,7 +322,7 @@ async def main_loop(self, task_id) -> None: send_response_task = asyncio.create_task(self.send_response(data.request_id, output)) except Exception: - print(f"An exception occured sending the inference response (#reqid {data.request_id})") + print(f"An exception occurred sending the inference response (#reqid {data.request_id})") # reset for next command that we retrieved start_time = time.perf_counter() diff --git a/src/SDK/Python/common.py b/src/SDK/Python/common.py index 12954da2..04515e80 100644 --- a/src/SDK/Python/common.py +++ b/src/SDK/Python/common.py @@ -1,5 +1,6 @@ from typing import Dict, List, Union +import os # Define a Json type to allow type hints to be sensible. # See https://adamj.eu/tech/2021/06/14/python-type-hints-3-somewhat-unexpected-uses-of-typing-any-in-pythons-standard-library/ @@ -8,6 +9,44 @@ ] JSON = Union[_PlainJSON, Dict[str, "JSON"], List["JSON"]] +def timedelta_format(td_object): + """Formats a time delta value in human readable format""" + + seconds = int(td_object.total_seconds()) + periods = [ + # label, #seconds, spacer, pluralise, 0-pad Always show + ('yr', 60*60*24*365, ', ', True, False, False), # years + ('mth', 60*60*24*30, ', ', True, False, False), # months + ('d', 60*60*24, ' ', False, False, False), # days + ('', 60*60, ':', False, False, True), # hours + ('', 60, ':', False, True, True), # minutes + ('', 1, '', False, True, True) # seconds + ] + + result='' + for label, period_seconds, spacer, pluralise, zero_pad, show_always in periods: + if show_always or seconds > period_seconds: + period_value, seconds = divmod(seconds, period_seconds) + if pluralise and period_value != 0: + label += 's' + if zero_pad: + result += f"{period_value:02}{label}{spacer}" + else: + result += f"{period_value}{label}{spacer}" + + return result + +def get_folder_size(folder): + # eg. 
print "Size: " + str(getFolderSize(".")) + total_size = os.path.getsize(folder) + for item in os.listdir(folder): + itempath = os.path.join(folder, item) + if os.path.isfile(itempath): + total_size += os.path.getsize(itempath) + elif os.path.isdir(itempath): + total_size += get_folder_size(itempath) + return total_size + def shorten(text: str, max_length: int) -> str: """ diff --git a/src/SDK/Python/module_logging.py b/src/SDK/Python/module_logging.py index e8c247a8..a31d0d4e 100644 --- a/src/SDK/Python/module_logging.py +++ b/src/SDK/Python/module_logging.py @@ -217,6 +217,18 @@ async def do_log(self, logMethod: LogMethod, data: JSON) -> None: if logMethod & LogMethod.Info or self.defaultLogging & LogMethod.Info: if no_server_log: + loglevel = loglevel.lower() + if loglevel == "critical": + entry = "critical: " + entry + elif loglevel == "error": + entry = "error: " + entry + elif loglevel == "warning": + entry = "warning: " + entry + elif loglevel == "debug": + entry = "debug: " + entry + elif loglevel == "trace": + entry = "trace: " + entry + print(entry, file=sys.stdout, flush=True) if not unimportant: diff --git a/src/SDK/Python/module_options.py b/src/SDK/Python/module_options.py index d4f44c13..e2921c92 100644 --- a/src/SDK/Python/module_options.py +++ b/src/SDK/Python/module_options.py @@ -8,7 +8,7 @@ def _get_env_var(name: str, default: any = "") -> any: value = os.getenv(name, "") if value == "" and default != "": value = default - print(f"{name} not found. Setting to default {str(default)}") + print(f"Debug: {name} not found. Setting to default {str(default)}") return value @@ -17,11 +17,17 @@ class ModuleOptions: Helper methods to access options passed to modules """ + # TODO: make these instance, not class variables (ie do all of this inside + # an __init__ method). This allows us to import this class without this + # machinery being invoked until the caller needs it + # You can't call ModuleOptions.getEnvVariable at the root of this class, # so the only option is to pull the guts of this method out and hack. @staticmethod def getEnvVariable(name: str, default: any = "") -> any: + """ Returns the value of the environment with the given name """ return _get_env_var(name, default) + # Needed in a moment... current_working_dir = os.getcwd() @@ -52,6 +58,9 @@ def getEnvVariable(name: str, default: any = "") -> any: # How many tasks to spin up for a module parallelism = _get_env_var("CPAI_MODULE_PARALLELISM", "0"); + # How much RAM is needed to perform tasks in this module? + required_MB = _get_env_var("CPAI_MODULE_REQUIRED_MB", "0"); + # Whether to *allow* support for GPU. Doesn't mean it's possibly it can or will # support GPU. More often used to disable GPU when a GPU causes problems support_GPU = _get_env_var("CPAI_MODULE_SUPPORT_GPU", "True") diff --git a/src/SDK/Python/module_runner.py b/src/SDK/Python/module_runner.py index 587a0b64..2ca83943 100644 --- a/src/SDK/Python/module_runner.py +++ b/src/SDK/Python/module_runner.py @@ -3,6 +3,7 @@ import json import os import platform +from platform import uname import sys import time import traceback @@ -10,7 +11,7 @@ # TODO: All I/O should be async, non-blocking so that logging doesn't impact # the throughput of the requests. Switching to HTTP2 or some persisting -# connection mechanism would speed thing up as well. +# connection mechanism would speed things up as well. 
# The purpose of inserting the path is so the Python import system looks in the
@@ -46,28 +47,43 @@ async def initialise(self) -> None:

     async def process(self, data: RequestData) -> JSON:
         """
-        Called each time a request is retrieved from this module's queue is to be
-        processed. To be overridden by child classes
+        Called each time a request retrieved from this module's queue is to
+        be processed. To be overridden by child classes

         self - This ModuleRunner
-        data - The RequestData retrieved from the processing queue. It contains all
-               the info needed for this module to satisfy the request
+        data - The RequestData retrieved from the processing queue. It contains
+               all the info needed for this module to satisfy the request

         returns: A JSON package containing the results of this request.
         """
         pass

+    def status(self, data: RequestData = None) -> JSON:
+        """
+        Called when this module has been asked to provide its current status.
+        Helpful for modules that have long-running operations such as training
+        or generative AI.
+        """
+        pass
+
+    def selftest(self) -> JSON:
+        """
+        Called to run general tests against this module to ensure it's in good
+        working order. Typically this should run unit and/or integration tests
+        and report back the results. Used for post-install checks.
+        """
+        pass
+
     def shutdown(self) -> None:
         """
-        Called when this module has been asked to shutdown. To be overridden by child
-        classes
+        Called when this module has been asked to shut down. To be overridden
+        by child classes
         """
         pass

     def __init__(self):
-        #
-        # """
-        # Constructor.
-        # """
+        """
+        Constructor.
+        """

         # Constants
         self._error_pause_secs = 1.0      # For general errors
@@ -81,6 +97,7 @@ def __init__(self):
         self._current_error_pause_secs = 0

         self._hasTorchCuda          = None
+        self._hasTorchDirectML      = None
         self._hasTorchHalfPrecision = None
         self._hasTorchMPS           = None
         self._hasONNXRuntime        = None
@@ -88,12 +105,13 @@ def __init__(self):
         self._hasOpenVINO           = None
         self._hasPaddleGPU          = None
         self._hasCoralTPU           = None
+        self._hasFastDeployRockNPU  = None

         # Public fields -------------------------------------------------------

         # A note about the use of ModuleOptions. ModuleOptions is simply a way
         # to hide all the calls to _get_env_var behind a simple class. While
-        # there is a lot of repitition in self.property = ModuleOptions.property,
+        # there is a lot of repetition in self.property = ModuleOptions.property,
         # it means we have the means of keeping the initial values the module
         # had at launch separate from the working values, which may change during
         # execution. It's tempting to remove all values that ModuleOptions supplies, and
@@ -119,6 +137,7 @@ def __init__(self):
         self.launched_by_server = ModuleOptions.launched_by_server

         # Hardware / accelerator info
+        self.required_MB       = int(ModuleOptions.required_MB or 0)
         self.support_GPU       = ModuleOptions.support_GPU
         self.accel_device_name = ModuleOptions.accel_device_name
         self.half_precision    = ModuleOptions.half_precision
@@ -137,13 +156,19 @@ def __init__(self):

         # What system are we running on?
self.system = { 'Linux': 'Linux', 'Darwin': 'macOS', 'Windows': 'Windows'}[platform.system()] + self.in_WSL = self.system == 'Linux' and 'microsoft-standard-WSL' in uname().release - # Further tests for RaspberryPi + # Further tests for Micro devices if self.system == 'Linux': try: import io with io.open('/sys/firmware/devicetree/base/model', 'r') as m: - if 'raspberry pi' in m.read().lower(): self.system = 'RaspberryPi' + model_info = m.read().lower() + if 'raspberry pi' in model_info: + self.system = 'Raspberry Pi' + elif 'orange pi' in model_info: + self.system = 'Orange Pi' + except Exception: pass # Need to hold off until we're ready to create the main logging loop. @@ -172,36 +197,11 @@ def __init__(self): # Private fields self._base_queue_url = self.base_api_url + "queue/" - - """ - @staticmethod - def supports_half_precision(gpu_card_name: str) -> bool: - " "" - Returns a True or False depending on whether the aupplies card - supports half-precision operations. - This is terribly flaky. The docs seem to suggest anything with - a compute capability >= 6.0 supports half, so we'll roll with that - Half-precision is supported by Pascal architecture and above. - https://en.wikipedia.org/wiki/CUDA#GPUs_supported - GPUs by architecture - https://www.eatyourbytes.com/list-of-gpus-by-processing-power-half-precision/ - half precision speeds - " "" - no_half = ["TU102","TU104","TU106","TU116", "TU117", - "GeForce 960", - "GeoForce GT 1030", "GeForce GTX 1050","GeForce GTX 1060", - "GeForce GTX 1060","GeForce GTX 1070","GeForce GTX 1080", - "GeForce RTX 2060", "GeForce RTX 2070", "GeForce RTX 2080", - "GeForce GTX 1650", "GeForce GTX 1660", "MX550", "MX450", - "Quadro RTX 8000", "Quadro RTX 6000", "Quadro RTX 5000", "Quadro RTX 4000" - # "Quadro P1000", - this works with half! - "Quadro P620", "Quadro P400", - "T1000", "T600", "T400","T1200","T500","T2000", - "Tesla T4"] - - return not any(check_name in gpu_card_name for check_name in no_half) - """ @property def hasTorchCuda(self): + """ Is CUDA support via PyTorch available? """ + if self._hasTorchCuda == None: self._hasTorchCuda = False try: @@ -210,8 +210,24 @@ def hasTorchCuda(self): except: pass return self._hasTorchCuda + @property + def hasTorchDirectML(self): + """ Is DirectML support via PyTorch available? """ + + if self._hasTorchDirectML == None: + self._hasTorchDirectML = False + if self.in_WSL or self.system == "Windows": + try: + import torch + import torch_directml + self._hasTorchDirectML = True + except: pass + return self._hasTorchDirectML + @property def hasTorchHalfPrecision(self): + """ Can this (assumed) NVIDIA GPU support half-precision operations? """ + if self._hasTorchHalfPrecision == None: self._hasTorchHalfPrecision = False try: @@ -219,11 +235,40 @@ def hasTorchHalfPrecision(self): # capability 6.0 and above import torch self._hasTorchHalfPrecision = torch.cuda.get_device_capability()[0] >= 6 + + # Except...that's not the case in practice. 
Below are the cards that
+                # also seem to have issues
+                if self._hasTorchHalfPrecision:
+                    problem_children = [
+                        # FAILED:
+                        #   GeForce GTX 1650, GeForce GTX 1660
+                        #   T400, T600, T1000
+
+                        # WORKING:
+                        #   Quadro P400, P600
+                        #   GeForce GT 1030, GeForce GTX 1050 Ti, 1060, 1070, and 1080
+                        #   GeForce RTX 2060 and 2070 (and we assume GeForce RTX 2080)
+                        #   Quadro RTX 4000 (and we assume Quadro RTX 5, 6, and 8000)
+                        #   Tesla T4
+
+                        # Pascal - Compute Capability 6.1
+                        "MX450", "MX550",                                   # unknown
+
+                        # Turing - Compute Capability 7.5
+                        "GeForce GTX 1650", "GeForce GTX 1660",             # known failures
+                        "T400", "T500", "T600", "T1000", "T1200", "T2000",  # T400, T600, T1000 known failures
+                        "TU102", "TU104", "TU106", "TU116", "TU117"         # unknown
+                    ]
+                    card_name = torch.cuda.get_device_name()
+
+                    self._hasTorchHalfPrecision = not any(check_name in card_name for check_name in problem_children)
             except: pass

         return self._hasTorchHalfPrecision

     @property
     def hasONNXRuntime(self):
+        """ Is the ONNX runtime available? """
+
         if self._hasONNXRuntime == None:
             self._hasONNXRuntime = False
             try:
@@ -235,6 +280,8 @@ def hasONNXRuntime(self):

     @property
     def hasONNXRuntimeGPU(self):
+        """ Is the ONNX runtime available and is there a GPU that will support it? """
+
         if self._hasONNXRuntimeGPU == None:
             self._hasONNXRuntimeGPU = False
             try:
@@ -245,6 +292,8 @@ def hasONNXRuntimeGPU(self):

     @property
     def hasOpenVINO(self):
+        """ Is OpenVINO available? """
+
         if self._hasOpenVINO == None:
             self._hasOpenVINO = False
             try:
@@ -256,6 +305,8 @@ def hasOpenVINO(self):

     @property
     def hasTorchMPS(self):
+        """ Are we running on Apple Silicon and is MPS support in PyTorch available? """
+
         if self._hasTorchMPS == None:
             self._hasTorchMPS = False
             if self.cpu_vendor == 'Apple' and self.cpu_arch == 'arm64':
@@ -267,6 +318,8 @@ def hasTorchMPS(self):

     @property
     def hasPaddleGPU(self):
+        """ Is PaddlePaddle available and is there a GPU that supports it? """
+
         if self._hasPaddleGPU == None:
             self._hasPaddleGPU = False
             try:
@@ -277,6 +330,8 @@ def hasPaddleGPU(self):

     @property
     def hasCoralTPU(self):
+        """ Is there a Coral.AI TPU connected and are the libraries in place to support it? """
+
         if self._hasCoralTPU == None:
             self._hasCoralTPU = False

@@ -300,23 +355,37 @@ def hasCoralTPU(self):
             # On Windows, the interpreter.__init__ method accepts experimental
             # delegates. These are used in self._interpreter.ModifyGraphWithDelegate,
             # which fails on Windows
-            import platform
-            if platform.system() != "Windows":
-                delegate = {
-                    'Linux': 'libedgetpu.so.1',
-                    'Darwin': 'libedgetpu.1.dylib',
-                    'Windows': 'edgetpu.dll'}[platform.system()]
-                delegates = [load_delegate(delegate)]
-                self._hasCoralTPU = len(delegates) > 0
-            return self._hasCoralTPU
+            delegate = {
+                'Linux': 'libedgetpu.so.1',
+                'Darwin': 'libedgetpu.1.dylib',
+                'Windows': 'edgetpu.dll'}[platform.system()]
+            delegates = [load_delegate(delegate)]
+            self._hasCoralTPU = len(delegates) > 0
+
+            return self._hasCoralTPU
         except Exception as ex:
             pass

         return self._hasCoralTPU

+    @property
+    def hasFastDeployRockNPU(self):
+        """ Is the Rockchip NPU present (ie. on an Orange Pi) and supported by
+            the fastdeploy library? """
+
+        if self._hasFastDeployRockNPU == None:
+            self._hasFastDeployRockNPU = False
+            try:
+                from fastdeploy import RuntimeOption
+                RuntimeOption().use_rknpu2()
+                self._hasFastDeployRockNPU = True
+            except: pass
+
+        return self._hasFastDeployRockNPU
+
     @property
     def execution_provider(self):
-        """ Gets the execution provider """
+        """ Gets the execution provider (eg. CPU, GPU, TPU, NPU etc) """
         return self._execution_provider

     @execution_provider.setter
@@ -337,7 +406,6 @@ def execution_provider(self, provider):

     def start_loop(self):
-
         """
         Starts the tasks that will run the execution loops that check the
         command queue and forwards commands to the module. Each task runs
@@ -345,6 +413,23 @@ def start_loop(self):
         command queue and sending commands to the (same) callback function.
         """

+        # SMOKE TEST:
+        # If this module has been called from the command line and a smoke test
+        # has been requested, then we'll run that test and exit immediately,
+        # rather than firing up the loop to handle messages.
+        # We could call this from the __init__ method to be cleaner, but child
+        # modules would then need to ensure they called super().__init__ at the
+        # *end* of their __init__ call, rather than at the start, and that's
+        # just fragile.
+
+        if len(sys.argv) > 1 and sys.argv[1] == "--selftest":
+            self._logger = ModuleLogger(self.port, self.server_root_path)
+            self.initialise()
+            self.selftest()
+            quit()
+
+        # No smoke test, so on to the main show
+
         try:
             asyncio.run(self.main_init())
         except Exception as ex:
@@ -377,10 +462,9 @@ def start_loop(self):

     async def main_init(self):
-
         """
         Initialises the set of tasks for this module. Each task will contain a
-        a loop that will queury the command queue and forward commands to the
+        loop that will query the command queue and forward commands to the
         callback function.

         This method also sets up the shared logging task.
@@ -397,7 +481,7 @@ async def main_init(self):

         self._logger.log(LogMethod.Info | LogMethod.Server, {
             "filename": __file__,
-            "loglevel": "information",
+            "loglevel": "trace",
             "method": "main_init",
             "message": f"Running init for {self.module_name}"
         })
@@ -430,7 +514,7 @@ async def main_init(self):

         await self.log_async(LogMethod.Info | LogMethod.Server, {
             "message": self.module_name + " started.",
-            "loglevel": "information"
+            "loglevel": "trace"
         })

         await asyncio.gather(*tasks)

@@ -439,6 +523,17 @@
     # Main loop
     async def main_loop(self, task_id) -> None:
+        """
+        This is the main request processing loop. It continually polls the
+        queue that this module is servicing, and each time it sees a request
+        it will grab the request data, send it to the `process` method, then
+        gather the results and post them back to the queue. The server is
+        responsible for placing requests from the calling client onto the
+        queue, and for taking responses off the queue and returning them to
+        the client.
+
+        Special requests, such as quit, status and selftest, are handled
+        separately.
+        """

         get_command_task = asyncio.create_task(self.get_command(task_id))
         send_response_task = None

@@ -452,52 +547,58 @@ async def main_loop(self, task_id) -> None:
             if len(queue_entries) == 0:
                 continue

-            # print(f"Found a queue entry")
-
             # In theory we may get back multiple command requests. In practice
             # it's always just 1 at a time. At the moment.
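To make the new status/selftest hooks concrete, here is a minimal sketch of a child module; the module, class and file names are hypothetical and not part of this changeset:

    # echo_module.py (hypothetical)
    from module_runner import ModuleRunner
    from request_data import RequestData

    class EchoModule(ModuleRunner):
        def process(self, data: RequestData):
            return { "success": True }

        def selftest(self):
            # Post-install check: push a synthetic request through process()
            result = self.process(RequestData())
            print("selftest " + ("passed" if result.get("success") else "failed"))
            return result

    if __name__ == "__main__":
        EchoModule().start_loop()

Launched as python3 echo_module.py --selftest, start_loop calls initialise() and selftest() and then exits, rather than entering the queue-polling loop.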
for queue_entry in queue_entries: - data: RequestData = RequestData(queue_entry) + + suppress_timing_log = False - # Special shutdown request - if data.command and data.command.lower() == "quit" and \ - self.module_id == data.get_value("moduleId"): + data: RequestData = RequestData(queue_entry) - await self.log_async(LogMethod.Info | LogMethod.File | LogMethod.Server, { - "process": self.module_name, - "filename": __file__, - "method": "main_loop", - "loglevel": "info", - "message": "Shutting down" - }) - self._cancelled = True - break + # The method to call to process this request + method_to_call = self.process - process_name = f"Queue request for {self.module_name}" + # Special requests if data.command: - process_name += f" command '{data.command}' (#reqid {data.request_id})" - - timer: Tuple[str, float] = self.start_timer(process_name) + + if self.module_id == data.get_value("moduleId") and data.command.lower() == "quit": + await self.log_async(LogMethod.Info | LogMethod.File | LogMethod.Server, { + "process": self.module_name, + "filename": __file__, + "method": "main_loop", + "loglevel": "info", + "message": "Shutting down" + }) + self._cancelled = True + break + elif data.command.lower() == "status": + method_to_call = self.status + suppress_timing_log = True + elif data.command.lower() == "selftest": + method_to_call = self.selftest + + if not suppress_timing_log: + process_name = f"Rec'd request for {self.module_name}" + if data.command: + process_name += f" command '{data.command}'" + process_name += f" (#reqid {data.request_id})" + timer: Tuple[str, float] = self.start_timer(process_name) output: JSON = {} try: # Overriding issue here: We need to await self.process in the # asyncio loop. This means we can't just 'await self.process' - # print(f"About to run process") - - if asyncio.iscoroutinefunction(self.process): + if asyncio.iscoroutinefunction(method_to_call): # if process is async, then it's a coroutine. In this # case we create an awaitable asyncio task to execute # this method. - callbacktask = asyncio.create_task(self.process(data)) + callbacktask = asyncio.create_task(method_to_call(data)) else: # If the method is not async, then we wrap it in an # awaitable method which we await. loop = asyncio.get_running_loop() - callbacktask = loop.run_in_executor(None, self.process, data) - - # print(f"Process task created") + callbacktask = loop.run_in_executor(None, method_to_call, data) # Await output = await callbacktask @@ -524,21 +625,24 @@ async def main_loop(self, task_id) -> None: }) finally: - self.end_timer(timer, "command timing") + if not suppress_timing_log: + self.end_timer(timer, "command timing", data.command) try: if send_response_task != None: # print("awaiting old send task") await send_response_task - # Legacy code. 
Deprecated
-                    output["code"] = 200 if output["success"] == True else 500
+                    output["code"]              = 200 if output["success"] else 500   # Deprecated
+                    output["command"]           = data.command or ''
+                    output["moduleId"]          = self.module_id
+                    output["executionProvider"] = self.execution_provider or 'CPU'

                     # print("creating new send task")
                     send_response_task = asyncio.create_task(self.send_response(data.request_id, output))
                 except Exception:
-                    print(f"An exception occured sending the inference response (#reqid {data.request_id})")
+                    print(f"An exception occurred sending the inference response (#reqid {data.request_id})")

         # Cleanup
         self.shutdown()

@@ -559,7 +663,7 @@ def start_timer(self, desc: str) -> Tuple[str, float]:

         return (desc, time.perf_counter())

-    def end_timer(self, timer : Tuple[str, float], label: str = "timing") -> None:
+    def end_timer(self, timer : Tuple[str, float], label: str = "timing", command: str = None) -> None:
         """
         Ends a timing session and logs the time taken along with the initial
         description if the variable logTimingEvents = True
@@ -568,7 +672,7 @@ def end_timer(self, timer : Tuple[str, float], label: str = "timing") -> None:
         (desc, start_time) = timer
         elapsedMs = (time.perf_counter() - start_time) * 1000

-        if (self._log_timing_events):
+        if self._log_timing_events and command not in { "status" }: # exclude some timing events
             self.log(LogMethod.Info|LogMethod.Server, {
                 "message": f"{desc} took {elapsedMs:.0f}ms",
                 "loglevel": "information",
@@ -624,17 +728,17 @@ async def get_command(self, task_id) -> "list[str]":
             if self.execution_provider:
                 url += "&executionProvider=" + self.execution_provider

-            # Send the request to query the queue and wait up to 30 seconds
-            # for a response. We're basically long-polling here
+            # Send a request to query the queue and wait up to 30 seconds for a
+            # response. We're basically long-polling here
             async with self._request_session.get(
                 url,
                 timeout = 30
-                #, verify  = False
+                #, verify = False
             ) as session_response:

                 if session_response.ok:
                     content = await session_response.text()
-                    if (content):
+                    if content:

                         # This method allows multiple commands to be returned, but to
                         # keep things simple we're only ever returning a single command
@@ -818,6 +922,10 @@ async def send_response(self, request_id : str, body : JSON) -> bool:


     async def call_api(self, method:str, files=None, data=None) -> str:
+        """
+        Provides the means to make a call to a CodeProject.AI API. Handy if this
+        module wishes to make use of another module's functionality.
+        """

         url = self.base_api_url + method

@@ -835,7 +943,10 @@ async def call_api(self, method:str, files=None, data=None) -> str:


     def report_error(self, exception: Exception, filename: str, message: str = None) -> None:
-
+        """
+        Shortcut method provided solely to allow a module to report an error
+        """
+
         if not message and exception:
             message = "".join(traceback.TracebackException.from_exception(exception).format())

diff --git a/src/SDK/Python/request_data.py b/src/SDK/Python/request_data.py
index dfc65e3f..80a3e5ce 100644
--- a/src/SDK/Python/request_data.py
+++ b/src/SDK/Python/request_data.py
@@ -1,13 +1,12 @@
 import base64
 import io
-import json
-import base64
 from io import BytesIO
+import json

 from PIL import Image

-# from common import JSON
+from common import JSON
 # from logging import LogMethod

 class RequestData:
@@ -17,26 +16,47 @@ class RequestData:
     """

     # Constructor
-    def __init__(self, json_request_data):
+    def __init__(self, json_request_data: str = None):

         self._verbose_exceptions = True

-        self.request_data = json.JSONDecoder().decode(json_request_data)
-
-        self.request_id   = self.request_data.get("reqid", "")    # No longer needed, and same as command
-
-        self.payload      = self.request_data["payload"]
-        self.queue_name   = self.payload.get("queue","N/A")
-        self.value_list   = self.payload.get("values", None)
-        self.files        = self.payload.get("files", None)
-        self.segments     = self.payload.get("urlSegments", None)
-        self.command      = self.payload.get("command", None)
+        if json_request_data:
+            request_data    = json.JSONDecoder().decode(json_request_data)
+            self.request_id = request_data.get("reqid", "")
+            self.payload    = request_data["payload"]
+        else:
+            self.request_id = ""
+            self.payload    = None
+
+        if not self.payload:
+            self.payload = {
+                "queue":       "N/A",
+                "urlSegments": None,
+                "command":     None,
+                "files" :      [ ],
+                "values" :     [ ]
+            }
+
+        self._queue       = self.payload.get("queue", "N/A")
+        self._urlSegments = self.payload.get("urlSegments", None)
+        self._command     = self.payload.get("command", None)
+        self.value_list   = self.payload.get("values", None)
+        self.files        = self.payload.get("files", None)

-    def encode_image(self, image: Image, image_format: str = "PNG") -> str:
+    @staticmethod
+    def clamp(value, min_value, max_value) -> any:
+        """ Clamps a value between min_value and max_value inclusive """
+        return max(min(max_value, value), min_value)
+
+    @staticmethod
+    def restrict(value, values, default_value) -> str:
+        """ Restricts a string to one of a set of values """
+        return value if value in values else default_value
+
+    @staticmethod
+    def encode_image(image: Image, image_format: str = "PNG") -> str:
         """
         Encodes an Image as a base64 encoded string
         """
-
         with BytesIO() as buffered:
             image.save(buffered, format=image_format)
             img_dataB64_bytes : bytes = base64.b64encode(buffered.getvalue())
@@ -44,12 +64,83 @@ def encode_image(self, image: Image, image_format: str = "PNG") -> str:

         return img_dataB64

+    @staticmethod
+    def encode_file_contents(file_name: str) -> str:
+        """
+        Reads the content of a binary file and returns the Base64 encoding of
+        the file contents.
+        On error, returns None.
+        """
+        try:
+            # Open the binary file and read its contents
+            with open(file_name, 'rb') as f:
+                file_contents = f.read()
+
+            # Encode the binary data as a base64 string
+            encoded_file_contents = base64.b64encode(file_contents).decode('ascii')
+
+            return encoded_file_contents
+        except:
+            return None
+
+    @property
+    def queue(self) -> str:
+        """ Gets the name of the queue """
+        return self._queue
+
+    @queue.setter
+    def queue(self, queue_name) -> None:
+        """ Sets the name of the queue """
+        self._queue = queue_name
+        self.payload["queue"] = queue_name
+
+    @property
+    def command(self) -> str:
+        """ Gets the command to be sent to the module """
+        return self._command
+
+    @command.setter
+    def command(self, command_name) -> None:
+        """ Sets the command to be sent to the module """
+        self._command = command_name
+        self.payload["command"] = command_name
+
+    @property
+    def urlSegments(self):
+        """ Gets the segments of the URL that was used to make the API call """
+        return self._urlSegments
+
+    @urlSegments.setter
+    def urlSegments(self, segments) -> None:
+        """ Sets the segments of the URL that was used to make the API call """
+        self._urlSegments = segments
+        self.payload["urlSegments"] = segments
+
+    def json(self) -> JSON:
+        json_request_data = {
+            "reqid":   self.request_id,
+            "payload": self.payload
+        }
+        request_data_str = json.JSONEncoder().encode(json_request_data)
+        return request_data_str
+
+    def add_value(self, key: str, value: any) -> None:
+        if not key:
+            return
+        self.payload["values"].append({key: value})
+
+    def add_file(self, file_name: str) -> None:
+        if not file_name:
+            return
+        self.payload["files"].append({ "data": RequestData.encode_file_contents(file_name) })
+
     def get_image(self, index : int) -> Image:
         """
         Gets an image from the requests 'files' array that was passed in as
         part of a HTTP POST.
         Param: index - the index of the image to return
-        Returns: An image if succesful; None otherwise.
+        Returns: An image if successful; None otherwise.

         NOTE: It's probably worth helping out users by sniffing EXIF data and
         rotating images prior to passing them to modules. This could be done
@@ -90,8 +181,7 @@ def get_image(self, index : int) -> Image:
         """
         return None

-
-    def get_value(self, key : str, defaultValue : str = None) -> any:
+    def get_value(self, key : str, defaultValue : str = None) -> str:
         """
         Gets a value from the HTTP request Form sent by the client
         Param: key - the name of the key holding the data in the form collection
@@ -103,11 +193,9 @@ def get_value(self, key : str, defaultValue : str = None) -> any:

         ** WE ONLY RETURN THE FIRST VALUE HERE **
         """

-        # self.log(LogMethod.Info, {"message": f"Getting request for module {self.module_id}"})
-
         try:
             # value_list is a list.
Note that in a HTML form, each element may - # have multiple values + # have multiple values if self.value_list is None: return defaultValue @@ -121,3 +209,33 @@ def get_value(self, key : str, defaultValue : str = None) -> any: if self._verbose_exceptions: print(f"Error getting get_request_value: {str(ex)}") return defaultValue + + def get_int(self, key : str, defaultValue : int = None) -> int: + + value = self.get_value(key) + if value is None: + return defaultValue + + try: + return int(value) + except: + return defaultValue + + def get_float(self, key : str, defaultValue : float = None) -> float: + + value = self.get_value(key) + if value is None: + return defaultValue + + try: + return float(value) + except: + return defaultValue + + def get_bool(self, key : str, defaultValue : bool = None) -> bool: + + value = self.get_value(key) + if value is None: + return defaultValue + + return value.lower() in [ 'y', 'yes', 't', 'true', 'on', '1' ] \ No newline at end of file diff --git a/src/SDK/Python/requirements.txt b/src/SDK/Python/requirements.txt index f5b6b055..60c36d8d 100644 --- a/src/SDK/Python/requirements.txt +++ b/src/SDK/Python/requirements.txt @@ -2,6 +2,8 @@ ## CodeProject.AI +# pipdeptree # Installing pipdeptree, a PIP dependency tree utility + # aiohttp 3.8.3 requires charset-normalizer<3.0,>=2.0, so force the issue charset-normalizer<3.0 # Installing Charset normalizer @@ -9,4 +11,4 @@ aiohttp # Installing aiohttp, the Async IO HTTP library aiofiles # Installing aiofiles, the Async IO Files library py-cpuinfo # Installing py-cpuinfo to allow us to query CPU info requests # Installing Requests, the HTTP library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library diff --git a/src/SDK/Scripts/clean.bat b/src/SDK/Scripts/clean.bat index bdeef726..fb1fc81e 100644 --- a/src/SDK/Scripts/clean.bat +++ b/src/SDK/Scripts/clean.bat @@ -68,11 +68,16 @@ if /i "%cleanBuild%" == "true" ( call "!pwd!\utils.bat" WriteLine "Cleaning Build " "White" "Blue" call "!pwd!\utils.bat" WriteLine - call :CleanSubDirs "!rootDir!\src" "obj" + call :CleanSubDirs "!rootDir!\src" "obj" "ObjectDetection" + call :CleanSubDirs "!rootDir!\src\API" "bin" + call :CleanSubDirs "!rootDir!\src\SDK" "bin" + call :CleanSubDirs "!rootDir!\src\modules\ObjectDetectionNet" "bin" + call :CleanSubDirs "!rootDir!\src\modules\PortraitFilter" "bin" + call :CleanSubDirs "!rootDir!\src\modules\SentimentAnalysis" "bin" call :CleanSubDirs "!rootDir!\Installers\Windows" "bin" call :CleanSubDirs "!rootDir!\Installers\Windows" "obj" call :CleanSubDirs "!rootDir!\demos" "bin" - call :CleanSubDirs "!rootDir!\demos" "obj" + call :CleanSubDirs "!rootDir!\demos" "obj" "Objects" call :CleanSubDirs "!rootDir!\tests" "bin" call :CleanSubDirs "!rootDir!\tests" "obj" ) @@ -83,7 +88,19 @@ if /i "%cleanInstallCurrentOS%" == "true" ( call "!pwd!\utils.bat" WriteLine "Cleaning Windows Install " "White" "Blue" call "!pwd!\utils.bat" WriteLine - call :CleanSubDirs "!rootDir!\src\runtimes\bin" "windows" + call :CleanSubDirs "!rootDir!\src\runtimes\bin" "windows" + + call :CleanSubDirs "!rootDir!\src\modules\ALPR\bin" "windows" + call :CleanSubDirs "!rootDir!\src\modules\BackgroundRemover\bin" "windows" + call :CleanSubDirs "!rootDir!\src\modules\Cartooniser\bin" "windows" + call :CleanSubDirs "!rootDir!\src\modules\FaceProcessing\bin" "windows" + call :CleanSubDirs "!rootDir!\src\modules\ObjectDetectionTFLite\bin" "windows" + call :CleanSubDirs "!rootDir!\src\modules\ObjectDetectionYolo\bin" 
"windows" + call :CleanSubDirs "!rootDir!\src\modules\OCR\bin" "windows" + call :CleanSubDirs "!rootDir!\src\modules\SceneClassifier\bin" "windows" + call :CleanSubDirs "!rootDir!\src\modules\YOLOv5-3.1\bin" "windows" + + call :CleanSubDirs "!rootDir!\src\modules\FaceProcessing" "datastore" ) if /i "%cleanInstallAll%" == "true" ( @@ -92,6 +109,7 @@ if /i "%cleanInstallAll%" == "true" ( call "!pwd!\utils.bat" WriteLine "Cleaning install for other platforms " "White" "Blue" call "!pwd!\utils.bat" WriteLine + call :CleanSubDirs "!rootDir!\src\modules" "bin" call :CleanSubDirs "!rootDir!\src\runtimes" "bin" ) @@ -105,7 +123,6 @@ if /i "%cleanAssets%" == "true" ( call :CleanSubDirs "!rootDir!\src\modules\BackgroundRemover" "models" call :CleanSubDirs "!rootDir!\src\modules\Cartooniser" "weights" call :CleanSubDirs "!rootDir!\src\modules\FaceProcessing" "assets" - call :CleanSubDirs "!rootDir!\src\modules\FaceProcessing" "datastore" call :CleanSubDirs "!rootDir!\src\modules\ObjectDetectionTFLite" "assets" call :CleanSubDirs "!rootDir!\src\modules\ObjectDetectionNet" "assets" call :CleanSubDirs "!rootDir!\src\modules\ObjectDetectionNet" "custom-models" diff --git a/src/SDK/Scripts/clean.sh b/src/SDK/Scripts/clean.sh index 35b5572d..71e3f482 100644 --- a/src/SDK/Scripts/clean.sh +++ b/src/SDK/Scripts/clean.sh @@ -196,8 +196,12 @@ if [ "$cleanBuild" == "true" ]; then writeLine "Cleaning Build " "White" "Blue" writeLine - cleanSubDirs "${rootDir}/src" "bin" "runtimes/bin" cleanSubDirs "${rootDir}/src" "obj" "ObjectDetection" + cleanSubDirs "${rootDir}/src/API" "bin" + cleanSubDirs "${rootDir}/src/SDK" "bin" + cleanSubDirs "${rootDir}/src/modules/ObjectDetectionNet" "bin" + cleanSubDirs "${rootDir}/src/modules/PortraitFilter" "bin" + cleanSubDirs "${rootDir}/src/modules/SentimentAnalysis" "bin" cleanSubDirs "${rootDir}/Installers/windows" "bin" cleanSubDirs "${rootDir}/Installers/windows" "obj" cleanSubDirs "${rootDir}/demos" "bin" @@ -214,6 +218,16 @@ if [ "$cleanInstallCurrentOS" == "true" ]; then cleanSubDirs "${rootDir}/src/runtimes/bin" "${platform}" + cleanSubDirs "${rootDir}/src/runtimes/ALPR/bin" "$os" + cleanSubDirs "${rootDir}/src/runtimes/BackgroundRemover/bin" "$os" + cleanSubDirs "${rootDir}/src/runtimes/Cartooniser/bin" "$os" + cleanSubDirs "${rootDir}/src/runtimes/FaceProcessing/bin" "$os" + cleanSubDirs "${rootDir}/src/runtimes/ObjectDetectionTFLite/bin" "$os" + cleanSubDirs "${rootDir}/src/runtimes/ObjectDetectionYolo/bin" "$os" + cleanSubDirs "${rootDir}/src/runtimes/OCR/bin" "$os" + cleanSubDirs "${rootDir}/src/runtimes/SceneClassifier/bin" "$os" + cleanSubDirs "${rootDir}/src/runtimes/YOLOv5-3.1/bin" "$os" + cleanSubDirs "${rootDir}/src/modules/FaceProcessing" "datastore" fi @@ -223,6 +237,7 @@ if [ "$cleanInstallAll" == "true" ]; then writeLine "Cleaning install for all platforms " "White" "Blue" writeLine + cleanSubDirs "${rootDir}/src/modules" "bin" cleanSubDirs "${rootDir}/src/runtimes" "bin" fi @@ -232,18 +247,20 @@ if [ "$cleanAssets" == "true" ]; then writeLine "Cleaning assets " "White" "Blue" writeLine - cleanSubDirs "${rootDir}/src/modules/ALPR" "paddleocr" - cleanSubDirs "${rootDir}/src/modules/BackgroundRemover" "models" - cleanSubDirs "${rootDir}/src/modules/Cartooniser" "weights" - cleanSubDirs "${rootDir}/src/modules/FaceProcessing" "assets" - cleanSubDirs "${rootDir}/src/modules/ObjectDetectionNet" "assets" - cleanSubDirs "${rootDir}/src/modules/ObjectDetectionNet" "custom-models" - cleanSubDirs "${rootDir}/src/modules/ObjectDetectionNet" "LocalNugets" - 
cleanSubDirs "${rootDir}/src/modules/ObjectDetectionYolo" "assets" - cleanSubDirs "${rootDir}/src/modules/ObjectDetectionYolo" "custom-models" - cleanSubDirs "${rootDir}/src/modules/OCR" "paddleocr" - cleanSubDirs "${rootDir}/src/modules/YOLOv5-3.1" "assets" - cleanSubDirs "${rootDir}/src/modules/YOLOv5-3.1" "custom-models" + cleanSubDirs "${rootDir}/src/modules/ALPR" "paddleocr" + cleanSubDirs "${rootDir}/src/modules/BackgroundRemover" "models" + cleanSubDirs "${rootDir}/src/modules/Cartooniser" "weights" + cleanSubDirs "${rootDir}/src/modules/FaceProcessing" "assets" + cleanSubDirs "${rootDir}/src/modules/ObjectDetectionTFLite" "assets" + cleanSubDirs "${rootDir}/src/modules/ObjectDetectionNet" "assets" + cleanSubDirs "${rootDir}/src/modules/ObjectDetectionNet" "custom-models" + cleanSubDirs "${rootDir}/src/modules/ObjectDetectionNet" "LocalNugets" + cleanSubDirs "${rootDir}/src/modules/ObjectDetectionYolo" "assets" + cleanSubDirs "${rootDir}/src/modules/ObjectDetectionYolo" "custom-models" + cleanSubDirs "${rootDir}/src/modules/OCR" "paddleocr" + cleanSubDirs "${rootDir}/src/modules/SceneClassifier" "assets" + cleanSubDirs "${rootDir}/src/modules/YOLOv5-3.1" "assets" + cleanSubDirs "${rootDir}/src/modules/YOLOv5-3.1" "custom-models" fi if [ "$cleanDownloadCache" == "true" ]; then diff --git a/src/SDK/Scripts/install_CUDnn.bat b/src/SDK/Scripts/install_CUDnn.bat index f89499dd..7c8414a8 100644 --- a/src/SDK/Scripts/install_CUDnn.bat +++ b/src/SDK/Scripts/install_CUDnn.bat @@ -9,20 +9,20 @@ :: :: What this script does: :: -:: 1. Downloads the cuDNN package (v8.5.0.96 forCUDA 11) +:: 1. Downloads the cuDNN package (v8.9.4.96 for CUDA 11) :: :: 2. Creates a folder "C:\Program Files\NVIDIA\CUDNN\v8.5" and extracts the cuDNN package :: into that folder. There will be bin, lib and include folders, plus a LICENSE file. :: :: 3. Adds this path to the PATH environment variable: -:: setx /M PATH = Path + "%PATH%;C:\Program Files\NVIDIA\CUDNN\v8.5\bin" +:: setx /M PATH = Path + "%PATH%;C:\Program Files\NVIDIA\CUDNN\v8.9\bin" :: :: 4. Downloads ZLib from WinImage (http://www.winimage.com/zLibDll/zlib123dllx64.zip) and extracts :: into a folder. Since it's being used by cuDNN it's easier to just extract into the -:: cuDNN folder: "C:\Program Files\NVIDIA\CUDNN\v8.5\zlib +:: cuDNN folder: "C:\Program Files\NVIDIA\CUDNN\v8.9\zlib :: :: 5. Add this path to the PATH environment variable: -:: setx /M PATH "%PATH%;C:\Program Files\NVIDIA\CUDNN\v8.5\zlib\dll_x64" +:: setx /M PATH "%PATH%;C:\Program Files\NVIDIA\CUDNN\v8.9\zlib\dll_x64" :: :: What you need to do: just double click this bat file in Windows @@ -31,7 +31,8 @@ cls setlocal enabledelayedexpansion set cuDNNLocation=https://developer.nvidia.com/rdp/cudnn-download -set cuDNNArchiveName=cudnn-windows-x86_64-8.5.0.96_cuda11-archive +REM set cuDNNArchiveName=cudnn-windows-x86_64-8.5.0.96_cuda11-archive +set cuDNNArchiveName=cudnn-windows-x86_64-8.9.4.25_cuda11-archive set cuDNNArchiveDownloadUrl=https://codeproject-ai.s3.ca-central-1.amazonaws.com/sense/installer/dev/ set cuDNNPattern=cudnn-windows-x86_64-*.zip set cuDNNRegex=cudnn-windows-x86_64-([0-9]*).([0-9]*).([0-9]*).([0-9]*)_cuda11-archive @@ -58,7 +59,7 @@ set zLibInstalled=false :: but before we start lets ensure we attempt to have at least one version present. IF not exist "!cuDNNPattern!" ( - echo No cuDNN archive found. Downloading... + echo No cuDNN archive found. Downloading !cuDNNArchiveName!.zip... 
powershell -command "Start-BitsTransfer -Source '!cuDNNArchiveDownloadUrl!!cuDNNArchiveName!.zip' -Destination '!cuDNNArchiveName!.zip'" ) diff --git a/src/SDK/Scripts/install_cuDNN.sh b/src/SDK/Scripts/install_cuDNN.sh index 4895005c..36ae993b 100644 --- a/src/SDK/Scripts/install_cuDNN.sh +++ b/src/SDK/Scripts/install_cuDNN.sh @@ -2,38 +2,209 @@ # # Ubuntu / WSL cuDNN install script # https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#wsl +# + +# This script is intended to be called from setup.sh, which includes architecture +# and os vars as well as writeline methods +# +# echo "========================================================================" +# echo "" +# echo " Setting up cuDNN and CUDA for CodeProject.AI Server " +# echo "" +# echo "========================================================================" +# echo "" +# +# if [ $(uname -m) == 'arm64' ] || [ $(uname -m) == 'aarch64' ]; then +# architecture='arm64' +# else +# architecture='x86_64' +# fi +# +# if [ $(uname -n) == "raspberrypi" ]; then +# systemName='Raspberry Pi' +# elif [ $(uname -n) == "nano" ]; then +# systemName='Jetson' +# elif [[ $(uname -a) =~ microsoft-standard-WSL ]]; then +# systemName='WSL' +# elif [[ $OSTYPE == 'darwin'* ]]; then +# systemName="macOS" +# else +# systemName='Linux' +# fi + +writeLine "Setting up cuDNN and CUDA for CodeProject.AI Server" $color_info + +linux_driver="530.30.02" # > 450.80.02 for linux +cudnn_version="8.9.4.*" # latest, works with CUDA 11.8+ +cuda_version="11.8" # 12.1 +cuda_version_dash="11-8" # 12-1 +cuda_version_full="11.8.0" # 12.1.1 + +distribution=$(. /etc/os-release;echo $ID$VERSION_ID) # eg "ubuntu20.04" +OS_name="${distribution//./}" # eg "ubuntu2204" + + +# Updating Signing keys + +write " - Removing old signing key..." $color_mute +apt-key del 7fa2af80 >/dev/null 2>/dev/null +writeLine "Done" $color_success + +write " - Downloading new key..." $color_mute + +keyring="cuda-keyring_1.0-1_all.deb" + +if [ ! -d "${downloadPath}" ]; then mkdir -p "${downloadPath}"; fi +if [ ! -d "${downloadPath}/CUDA" ]; then mkdir -p "${downloadPath}//CUDA"; fi + +pushd "${downloadPath}/CUDA" >/dev/null 2>/dev/null +if [ ! -f "$keyring" ]; then + if [ "${architecture}" == "arm64" ]; then + wget $wgetFlags https://developer.download.nvidia.com/compute/cuda/repos/${OS_name}/sbsa/${keyring} + elif [ "${systemName}" == 'WSL' ]; then + wget $wgetFlags https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/${keyring} + else + wget $wgetFlags https://developer.download.nvidia.com/compute/cuda/repos/${OS_name}/x86_64/${keyring} + fi + status=$? + if [ $status -ne 0 ]; then + writeLine "Unable to download ${keyring}" "$color_error" + fi +fi +writeLine "Done" $color_success + +if [ -f "$keyring" ]; then + write " - Installing key..." $color_mute + dpkg -E -G -i ${keyring} >/dev/null 2>/dev/null # don't install same or older package + writeLine "Done" $color_success +fi +popd >/dev/null 2>/dev/null + + +# Install the CUDA SDK -echo "========================================================================" -echo "" -echo " Setting up cuDNN for CodeProject.AI Server " -echo "" -echo "========================================================================" -echo "" +write " - Installing libgomp1..." $color_mute +sudo apt install libgomp1 -y >/dev/null 2>/dev/null & +spin $! +writeLine "Done" $color_success -# Remove Outdated Signing Key: -sudo apt-key del 7fa2af80 +# The only practical cases here are: Native Linux on x86 or arm64 with CUDA, +# or WSL. 
Docker already contains the libs, macOS doesn't support CUDA. RPi
+# doesn't support CUDA, and Jetson gets CUDA via JetPack

-# Install the newcuda-keyring package
-if grep -qi microsoft /proc/version; then
-    # Ubuntu under WSL
-    wget https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/cuda-keyring_1.0-1_all.deb
+installer_repo="https://developer.download.nvidia.com/compute/cuda/${cuda_version_full}/local_installers/"
+if [ "${architecture}" == "arm64" ]; then
+    pin="cuda-${OS_name}.pin"
+    pin_repo="https://developer.download.nvidia.com/compute/cuda/repos/${OS_name}/sbsa/"
+    installer="cuda-repo-${OS_name}-${cuda_version_dash}-local_${cuda_version_full}-${linux_driver}-1_arm64.deb"
+    installed_ring="/var/cuda-repo-${OS_name}-${cuda_version_dash}-local/cuda-*-keyring.gpg"
+elif [ "${systemName}" == 'WSL' ]; then
+    pin="cuda-wsl-ubuntu.pin"
+    pin_repo="https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/"
+    installer="cuda-repo-wsl-ubuntu-${cuda_version_dash}-local_${cuda_version_full}-1_amd64.deb"
+    installed_ring="/var/cuda-repo-wsl-ubuntu-${cuda_version_dash}-local/cuda-*-keyring.gpg"
 else
-    # Native Ubuntu. Use ubuntu1804, ubuntu2004 or ubuntu2204. Get this from lsb_release
-    version=$(cut -f2 <<< $(lsb_release -r))
-    version="${version//./}"
-    wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${version}/x86_64/cuda-keyring_1.0-1_all.deb
+    pin="cuda-${OS_name}.pin"
+    pin_repo="https://developer.download.nvidia.com/compute/cuda/repos/${OS_name}/x86_64/"
+    installer="cuda-repo-${OS_name}-${cuda_version_dash}-local_${cuda_version_full}-${linux_driver}-1_amd64.deb"
+    installed_ring="/var/cuda-repo-${OS_name}-${cuda_version_dash}-local/cuda-*-keyring.gpg"
+fi
+
+pushd "${downloadPath}/CUDA" >/dev/null 2>/dev/null
+if [ ! -f "${pin}" ]; then
+    write " - Downloading ${pin}..." $color_mute
+    wget $wgetFlags ${pin_repo}${pin} &
+    spin $!
+    if [ -f "$pin" ]; then
+        writeLine "Done" "$color_success"
+    else
+        writeLine "Unable to download ${pin}" "$color_error"
+    fi
+fi
+if [ -f "$pin" ]; then
+    write " - Installing cuda-repository-pin..." $color_mute
+    sudo cp ${pin} /etc/apt/preferences.d/cuda-repository-pin-600 >/dev/null 2>/dev/null
+    writeLine "Done" "$color_success"
+fi
+
+if [ ! -f "${installer}" ]; then
+    write " - Downloading ${installer}..." $color_mute
+    wget $wgetFlags ${installer_repo}${installer} &
+    spin $!
+    if [ -f "$installer" ]; then
+        writeLine "Done" "$color_success"
+    else
+        writeLine "Unable to download ${installer}" "$color_error"
+    fi
+fi
+if [ -f "$installer" ]; then
+    write " - Installing cuda-*-keyring.gpg..." $color_mute
+    sudo dpkg -E -G -i "${installer}" >/dev/null 2>/dev/null
+    status=$?
+    if [ $status -ne 0 ]; then
+        writeLine "Unable to install ${installer}" "$color_error"
+    else
+        sudo cp "${installed_ring}" /usr/share/keyrings/ >/dev/null 2>/dev/null
+        writeLine "Done" "$color_success"
+    fi
 fi
+popd >/dev/null 2>/dev/null
+
+write " - Installing CUDA library..." $color_mute
+sudo apt-get update -y >/dev/null 2>/dev/null &
+spin $!
+sudo apt-get install cuda -y >/dev/null 2>/dev/null &
+spin $!
+writeLine "Done" $color_success
+
+
+# Now install cuDNN
+
+# Ensure zlib is installed
+write " - Installing zlib1g..." $color_mute
+sudo apt-get install zlib1g -y >/dev/null 2>/dev/null &
+spin $!
+writeLine "Done" $color_success
+
+# Enable the repo
+write " - Enabling the CUDA repository..."
"$color_mute" +if [ "${architecture}" == "arm64" ]; then + sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/${OS_name}/arm64/3bf863cc.pub >/dev/null 2>/dev/null & + spin $! + sudo add-apt-repository -y "deb https://developer.download.nvidia.com/compute/cuda/repos/${OS_name}/arm64/ /" >/dev/null 2>/dev/null & + spin $! +else + sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/${OS_name}/x86_64/3bf863cc.pub >/dev/null 2>/dev/null & + spin $! + sudo add-apt-repository -y "deb https://developer.download.nvidia.com/compute/cuda/repos/${OS_name}/x86_64/ /" >/dev/null 2>/dev/null & + spin $! +fi +writeLine "Done" $color_success + +# install the cuDNN library +write " - Installing cuDNN libraries..." $color_mute +sudo apt-get update -y >/dev/null 2>/dev/null & +spin $! +sudo apt-get install libcudnn8=${cudnn_version}-1+cuda${cuda_version} -y >/dev/null 2>/dev/null & +spin $! +sudo apt-get install libcudnn8-dev=${cudnn_version}-1+cuda${cuda_version} -y >/dev/null 2>/dev/null & +spin $! +writeLine "Done" $color_success -sudo dpkg -i cuda-keyring_1.0-1_all.deb -# Update the Apt repository cache: -sudo apt-get update +write " - Exporting PATHs..." $color_mute +export PATH=/usr/local/cuda-${cuda_version}/bin${PATH:+:${PATH}} +export LD_LIBRARY_PATH=/usr/local/cuda-${cuda_version}/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} +writeLine "Done" $color_success -# Install CUDA SDK: -sudo apt-get install cuda -y -# To include all GDS packages: -sudo apt-get install nvidia-gds -y +# And finally, include all GDS packages: +write " - Installing nvidia-gds..." $color_mute +sudo apt-get install nvidia-gds -y >/dev/null 2>/dev/null & +spin $! +writeLine "Done" $color_success -export PATH=/usr/local/cuda-12.0/bin${PATH:+:${PATH}} -export LD_LIBRARY_PATH=/usr/local/cuda-12.0/lib64 ${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} +writeLine "==================================================================" $color_warn +writeLine "A number of packages have been installed and are no longer needed." $color_warn +writeLine "Use 'sudo apt autoremove' to remove them." $color_warn +writeLine "==================================================================" $color_warn diff --git a/src/SDK/Scripts/stop_all.bat b/src/SDK/Scripts/stop_all.bat index 27d95d35..1d7d1970 100644 --- a/src/SDK/Scripts/stop_all.bat +++ b/src/SDK/Scripts/stop_all.bat @@ -18,15 +18,21 @@ if %loops% GEQ 50 goto endModules rem wmic process where "ExecutablePath like '!srcDir!\\modules%%'" get ExecutablePath REM Kill processes - wmic process where "ExecutablePath like '%srcDir%\\modules%%\\venv\\%%'" delete + REM wmic process where "Name like 'python%' and ExecutablePath like '%src\\runtimes%%'" delete + wmic process where "ExecutablePath like '%srcDir%\\runtimes%%'" delete wmic process where "ExecutablePath like '%srcDir%\\modules%%'" delete REM Count how many are left. 
- Set /a Number=0 - For /f %%j in ('wmic process where "ExecutablePath like ^'!srcDir!\\modules%%^'" get ExecutablePath ^| Find /c "modules"') Do Set /a Number=%%j + Set /a NumModules=0 + For /f %%j in ('wmic process where "ExecutablePath like ^'!srcDir!\\modules%%^'" get ExecutablePath ^| Find /c "modules"') Do Set /a NumModules=%%j - if %Number% EQU 0 goto endModules + Set /a NumRuntimes=0 + For /f %%j in ('wmic process where "ExecutablePath like ^'!srcDir!\\runtimes%%^'" get ExecutablePath ^| Find /c "runtimes"') Do Set /a NumRuntimes=%%j + if %NumModules% EQU 0 ( + if %NumRuntimes% EQU 0 goto endModules + ) + set /a loops+=1 echo Running loop again: attempt !loops! diff --git a/src/SDK/Scripts/utils.bat b/src/SDK/Scripts/utils.bat index 400b5522..0b259b8d 100644 --- a/src/SDK/Scripts/utils.bat +++ b/src/SDK/Scripts/utils.bat @@ -316,7 +316,8 @@ shift & goto :%~1 REM Delete all but the zip file from the downloads dir FOR %%I IN ("!downloadPath!\!moduleDir!\*") DO ( IF /i "%%~xI" neq ".zip" ( - DEL "%%I" rem >NUL 2>&1 + rem echo deleting %%I + DEL "%%I" >NUL 2>&1 rem echo cleaning "%%~nxI" ) ) @@ -359,7 +360,10 @@ shift & goto :%~1 call :Write "!message!" "!color_primary!" - call :WriteLine "Checking '!downloadToDir!!dirToSaveTo!\!fileToGet!'" "!color_info!" + if /i "%verbosity%" neq "quiet" ( + call :WriteLine "Checking '!downloadToDir!!dirToSaveTo!\!fileToGet!'" "!color_info!" + ) + if exist "!downloadToDir!!dirToSaveTo!\!fileToGet!" ( call :Write "already exists..." "!color_info!" ) else ( @@ -388,34 +392,101 @@ shift & goto :%~1 call :Write "Expanding..." "!color_info!" + if /i "%verbosity%" neq "quiet" ( + call :WriteLine "Heading to !downloadToDir!!dirToSaveTo!" "!color_info!" + ) + pushd "!downloadToDir!!dirToSaveTo!" + call :ExtractToDirectory "!fileToGet!" + + if errorlevel 1 ( + popd + exit /b 1 + ) + + REM REM Try tar first. If that doesn't work, fall back to powershell (slow) + REM set tarExists=true + REM tar -xf "!fileToGet!" >NUL 2>&1 + REM REM error 9009 means "command not found" + REM if errorlevel 9009 set tarExists=false + REM + REM REM If we don't have tar, use powershell + REM if "!tarExists!" == "false" ( + REM call :Write "(no tar - Using PowerShell)..." "!color_info!" + REM + REM REM Expand-Archive is really, really slow + REM REM powershell -command "Expand-Archive -Path '!fileToGet!' -DestinationPath '.' -Force" + REM powershell -command "Add-Type -assembly System.IO.Compression.Filesystem; [System.IO.Compression.ZipFile]::ExtractToDirectory('!fileToGet!', '.')" + REM + REM if errorlevel 1 ( + REM popd + REM exit /b 1 + REM ) + REM ) + REM + REM REM Remove the downloaded zip + REM REM del /s /f /q "!fileToGet!" >NUL 2>&1 + + popd + + call :WriteLine "Done." "!color_success!" + + exit /b + +:ExtractToDirectory + SetLocal EnableDelayedExpansion + + REM Param 1: The archive to expand. eg packages_for_gpu.zip + set archiveName=%1 + set archiveName=!archiveName:"=! + + REM Param 2: Delete the archive after expansion? only 'true' means true. + set deleteAfter=%2 + set deleteAfter=!deleteAfter:"=! + + set filenameWithoutExtension=%~n1 + + if /i "%verbosity%" neq "quiet" ( + cd + call :WriteLine "Extracting !archiveName!" "!color_info!" + ) + REM Try tar first. If that doesn't work, fall back to powershell (slow) - set tarExists=true + set tarSuccessful=true + tar -xf "!archiveName!" >NUL 2>&1 - tar -xf "!fileToGet!" 
>NUL 2>&1 - if "%errorlevel%" == "9009" set tarExists=false + REM mkdir pretty_name && tar xf ugly_name.tar -C pretty_name --strip-components 1 + + REM error 9009 means "command not found" + if errorlevel 9009 set tarSuccessful=false + if errorlevel 1 set tarSuccessful=false REM If we don't have tar, use powershell - if "!tarExists!" == "false" ( - call :Write "(no tar - Using PowerShell)..." "!color_info!" + if "!tarSuccessful!" == "false" ( + call :Write "Tar failed - moving to PowerShell..." "!color_info!" - powershell -command "Expand-Archive -Path '!fileToGet!' -Force" - if errorlevel 1 ( - popd - exit /b 1 - ) - ) + REM This fails if the tar left debris. We need to force overwrite + rem powershell -command "Add-Type -assembly System.IO.Compression.Filesystem; [System.IO.Compression.ZipFile]::ExtractToDirectory('!archiveName!', '.')" - REM Remove the downloaded zip - REM del /s /f /q "!fileToGet!" >NUL 2>&1 + REM Cannot seem to get the call to the ZipFileExtension method correct + rem powershell -command "[System.IO.Compression.ZipFile]::ExtractToDirectory('!archiveName!', '.', $true)" - popd + REM Expand-Archive is really, really slow, but it's our only hope here + powershell -command "Expand-Archive -Path '!archiveName!' -DestinationPath '.' -Force" - call :WriteLine "Done." "!color_success!" + if errorlevel 1 exit /b 1 + ) + REM Remove the archive + if "!deleteAfter!" == "true" ( + if /i "%verbosity%" neq "quiet" call :WriteLine "Deleting !archiveName!" "!color_info!" + del /s /f /q "!archiveName!" >NUL 2>&1 + ) + exit /b + :SetupDotNet SetLocal EnableDelayedExpansion @@ -500,12 +571,16 @@ shift & goto :%~1 call :WriteLine "Present" "!color_success!" ) else ( set baseDir=!downloadPath!\!platform!\ - if not exist "!baseDir!" mkdir "!baseDir!" - if not exist "!baseDir!\!pythonName!" mkdir "!baseDir!\!pythonName!" + if not exist "!baseDir!" mkdir "!baseDir!" + if not exist "!baseDir!!pythonName!" mkdir "!baseDir!!pythonName!" if not exist "!pythonInstallPath!" ( + + if not exist "!runtimesPath!\bin" mkdir "!runtimesPath!\bin" + if not exist "!runtimesPath!\bin\!os!" mkdir "!runtimesPath!\bin\!os!" + if not exist "!runtimesPath!\bin\!os!\!pythonName!" mkdir "!runtimesPath!\bin\!os!\!pythonName!" - rem Params are: S3 storage bucket | fileToGet | downloadToDir | dirToSaveTo | message + rem Params are: S3 storage bucket | fileToGet | downloadToDir | dirToSaveTo | message call :DownloadAndExtract "%storageUrl%" "!pythonName!.zip" "!baseDir!" "!pythonName!" "Downloading Python !pythonVersion! interpreter..." if errorlevel 1 exit /b 1 @@ -519,7 +594,7 @@ shift & goto :%~1 REM but if you have issues, make sure you delete the venv directory before REM retrying. call :Write "Creating Virtual Environment..." - if exist "!virtualEnvPath!" ( + if exist "!virtualEnvPath!\\pyvenv.cfg" ( call :WriteLine "Python !pythonVersion! Already present" %color_success% ) else ( if /i "%verbosity%" neq "quiet" call :WriteLine "Virtual Environment doesn't exist. Creating at !virtualEnvPath!" @@ -549,7 +624,7 @@ shift & goto :%~1 SetLocal EnableDelayedExpansion if /i "!offlineInstall!" == "true" ( - call :WriteLine "Offline Installation: Unable to download and install Python packages." %color_error% + call :WriteLine "Offline Installation: Skipping download and installation of Python packages." %color_error% exit /b ) @@ -629,6 +704,16 @@ shift & goto :%~1 ) ) + if /i "!hasROCm!" 
== "true" ( + if exist "!requirementsDir!\requirements.windows.!architecture!.rocm.txt" ( + set requirementsFilename=requirements.windows.!architecture!.rocm.txt + ) else if exist "!requirementsDir!\requirements.windows.rocm.txt" ( + set requirementsFilename=requirements.windows.rocm.txt + ) else if exist "!requirementsDir!\requirements.rocm.txt" ( + set requirementsFilename=requirements.rocm.txt + ) + ) + if "!requirementsFilename!" == "" ( if exist "!requirementsDir!\requirements.windows.!architecture!.gpu.txt" ( set requirementsFilename=requirements.windows.!architecture!.gpu.txt diff --git a/src/SDK/Scripts/utils.sh b/src/SDK/Scripts/utils.sh index 992cf8c8..786d3ddd 100644 --- a/src/SDK/Scripts/utils.sh +++ b/src/SDK/Scripts/utils.sh @@ -281,7 +281,7 @@ function checkForTool () { # Ensure Brew is installed if ! command -v brew &> /dev/null; then writeLine "Installing brew..." $color_info - /bin/bash -c '$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)' + /bin/bash -c '$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)' > /dev/null if [ $? -ne 0 ]; then quit 10 # failed to install required tool @@ -289,10 +289,10 @@ function checkForTool () { fi writeLine "Installing ${name}..." $color_info - brew install ${name} + brew install ${name} > /dev/null else writeLine "Installing ${name}..." $color_info - sudo apt install ${name} + sudo apt install ${name} -y > /dev/null fi if [ $? -ne 0 ]; then @@ -405,7 +405,7 @@ function setupDotNet () { currentLinuxDistro=`echo $currentLinuxDistro | tr '[:upper:]' '[:lower:]'` local currentLinuxVersion=$(lsb_release -r | cut -f2) - if [ "${hardware}" == "RaspberryPi" ]; then + if [ "${systemName}" == "Raspberry Pi" ]; then sudo bash "${sdkScriptsPath}/dotnet-install-rpi.sh" ${sdkInstallVersion} if [ $? -ne 0 ]; then @@ -443,7 +443,8 @@ function setupDotNet () { quit 3 # required runtime missing, needs installing else writeLine "Please download and install the .NET SDK. For macOS Intel machines use:" - writeLine "https://dotnet.microsoft.com/en-us/download/dotnet/thank-you/sdk-${requestedNetVersion}-macos-x64-installer" + writeLine "https://dotnet.microsoft.com/en-us/download/dotnet/thank-you/sdk-7.0.400-macos-x64-installer" + # writeLine "https://dotnet.microsoft.com/en-us/download/dotnet/thank-you/sdk-${requestedNetVersion}-macos-x64-installer" quit 3 # required runtime missing, needs installing fi quit 100 # impossible code path @@ -459,7 +460,7 @@ function setupPython () { if [ "$offlineInstall" == "true" ]; then writeLine "Offline Installation: Unable to download and install Python." $color_error - return + return 6 # unable to download required asset fi # M1 macs are trouble for python @@ -521,19 +522,18 @@ function setupPython () { # 1. Install Python. Using deadsnakes for Linux (not macOS), so be aware if # you have concerns about potential late adoption of security patches. - if [ $verbosity == "loud" ]; then + if [ $verbosity == "loud" ]; then writeLine "Python install path is ${installPath}" $color_info - fi + fi - if [ ! -d "${installPath}" ]; then + if [ ! 
-d "${installPath}" ]; then mkdir -p "${installPath}" - fi + fi - pythonCmd="python${pythonVersion}" - if command -v $pythonCmd &> /dev/null; then + globalPythonCmd="python${pythonVersion}" + if command -v $globalPythonCmd &> /dev/null; then writeLine "Python ${pythonVersion} is already installed" $color_success - else - + else # For macOS we'll use brew to install python if [ "$os" == "macos" ]; then @@ -570,12 +570,6 @@ function setupPython () { fi fi - # Note that we only need the specific location of the python - # interpreter to setup the virtual environment. After it's - # setup, all python calls are relative to the same venv no - # matter the location of the original python interpreter - pythonCmd="/usr/local/opt/python@${pythonVersion}/bin/python${pythonVersion}" - else # We have a x64 version of python for macOS (Intel) in our S3 bucket @@ -592,16 +586,18 @@ function setupPython () { brew install python@${pythonVersion} fi - # Brew specific path - pythonCmd="/usr/local/opt/python@${pythonVersion}/bin/python${pythonVersion}" - fi + # Note that we only need the system-wide location of the python + # interpreter to setup the virtual environment. After it's setup, + # all python calls are made using the venv's python + globalPythonCmd="/usr/local/opt/python@${pythonVersion}/bin/python${pythonVersion}" + writeLine "Done" $color_success # macOS: With my M1 chip and Rosetta I make installing Python a real PITA. # Raspberry Pi: Hold my beer - elif [ "${hardware}" == "RaspberryPi" ]; then + elif [ "${systemName}" == "Raspberry Pi" ]; then pushd "${pathToInstallerBase}" > /dev/null @@ -646,6 +642,7 @@ function setupPython () { mkdir --parents downloads/Python cd downloads/Python + if [ ! -f "Python-${pythonPatchVersion}.tar.xz" ]; then # curl https://www.python.org/ftp/python/${pythonPatchVersion}/Python-${pythonPatchVersion}.tar.xz | tar -xf curl https://www.python.org/ftp/python/${pythonPatchVersion}/Python-${pythonPatchVersion}.tar.xz --output Python-${pythonPatchVersion}.tar.xz @@ -673,7 +670,7 @@ function setupPython () { sudo make install cd .. sudo rm -r openssl-1.1.1c - sudo apt-get install libssl-dev + sudo apt-get install libssl-dev -y # Bulld Python cd Python-${pythonPatchVersion} @@ -751,7 +748,7 @@ function setupPython () { fi fi - if ! command -v $pythonCmd &> /dev/null; then + if ! command -v $globalPythonCmd &> /dev/null; then return 2 # failed to install required runtime fi @@ -816,12 +813,12 @@ function setupPython () { fi if [ "$os" == "macos" ]; then - ${pythonCmd} -m venv "${installPath}/venv" + $globalPythonCmd -m venv "${installPath}/venv" else - #echo ${pythonCmd} - #echo ${installPath} + #echo $globalPythonCmd + #echo $installPath - ${pythonCmd} -m venv "${installPath}/venv" & + $globalPythonCmd -m venv "${installPath}/venv" & spin $! # process ID of the python install call fi @@ -879,31 +876,19 @@ function installPythonPackages () { # Version with ".'s removed local pythonName="python${pythonVersion/./}" - pythonCmd="python${pythonVersion}" + pythonCmd="./python${pythonVersion}" - # hasCUDA is actually already set in /src/setup.sh, but no harm in keeping this check here + # hasCUDA is actually already set in /src/setup.sh, but no harm in keeping this check here. 
+ # Note that CUDA is only available on non-macOS systems hasCUDA='false' - - # Brew doesn't set PATH by default (nor do we need it to) which means we - # just have to be careful - if [ "$os" == "macos" ]; then - - # If running "PythonX.Y" doesn't actually work, then let's adjust the - # command to point to where we think the python launcher should be - python${pythonVersion} --version >/dev/null 2>/dev/null - if [ $? -ne 0 ]; then - # writeLine "Did not find python in default location" - pythonCmd="/usr/local/opt/python@${pythonVersion}/bin/python${pythonVersion}" - fi - else - # CUDA is only available on non-macOS systems + if [ "$os" == "linux" ]; then if [ "$supportCUDA" == "true" ]; then write 'Checking for CUDA...' # nvidia=$(lspci | grep -i '.* vga .* nvidia .*') # if [[ ${nvidia,,} == *' nvidia '* ]]; then # force lowercase compare - if [[ -x nvidia-smi ]]; then + if [ -x "$(command -v nvidia-smi)" ]; then nvidia=$(nvidia-smi | grep -i -E 'CUDA Version: [0-9]+.[0-9]+') > /dev/null 2>&1 if [[ ${nvidia} == *'CUDA Version: '* ]]; then hasCUDA='true' @@ -948,6 +933,16 @@ function installPythonPackages () { fi fi + if [ "$hasROCm" == "true" ]; then + if [ -f "${requirementsDir}/requirements.${os}.${architecture}.rocm.txt" ]; then + requirementsFilename="requirements.${os}.${architecture}.rocm.txt" + elif [ -f "${requirementsDir}/requirements.${os}.rocm.txt" ]; then + requirementsFilename="requirements.${os}.rocm.txt" + elif [ -f "${requirementsDir}/requirements.rocm.txt" ]; then + requirementsFilename="requirements.rocm.txt" + fi + fi + if [ "$requirementsFilename" == "" ]; then if [ -f "${requirementsDir}/requirements.${os}.${architecture}.gpu.txt" ]; then requirementsFilename="requirements.${os}.${architecture}.gpu.txt" @@ -993,47 +988,48 @@ function installPythonPackages () { # For speeding up debugging if [ "${skipPipInstall}" == "true" ]; then return; fi + # We'll head into the venv's bin directory which should contain the python interpreter pushd "${virtualEnv}/bin" >/dev/null + if [ "$os" == "macos" ]; then + # Running "PythonX.Y" should work, but may not. Check, and if it doesn't work then set the + # pythonCmd var to point to the absolute path where we think the python launcher should be + $pythonCmd --version >/dev/null 2>/dev/null + if [ $? -ne 0 ]; then + writeLine "Setting python command to point to global install location" + pythonCmd="/usr/local/opt/python@${pythonVersion}/bin/python${pythonVersion}" + fi + fi + # Before installing packages, check to ensure PIP is installed and up to # date. This slows things down a bit, but it's worth it in the end. if [ "${verbosity}" == "quiet" ]; then # Ensure we have pip (no internet access - ensures we have the current - # python compatible version. + # python compatible version). write 'Ensuring PIP is installed...' $color_primary - - if [ "$os" == "macos" ]; then - # sudo $pythonCmd -m ensurepip 2>/dev/null & - $pythonCmd -m ensurepip >/dev/null 2>/dev/null & - else - $pythonCmd -m ensurepip >/dev/null 2>/dev/null & - fi + $pythonCmd -m ensurepip >/dev/null 2>/dev/null & spin $! writeLine 'Done' $color_success write 'Updating PIP...' $color_primary - if [ "$os" == "macos" ]; then - $pythonCmd -m pip install --upgrade pip >/dev/null 2>/dev/null & - else - $pythonCmd -m pip install --upgrade pip >/dev/null 2>/dev/null & - fi + $pythonCmd -m pip install --upgrade pip >/dev/null 2>/dev/null & spin $! writeLine 'Done' $color_success else writeLine 'Ensuring PIP is installed and up to date...'
$color_primary - if [ "$os" == "macos" ]; then - # regarding the warning: See https://github.com/Homebrew/homebrew-core/issues/76621 - if [ $(versionCompare "${pythonVersion}" '3.10.2') == "-1" ]; then - writeLine "Ignore the DEPRECATION warning. See https://github.com/Homebrew/homebrew-core/issues/76621 for details" $color_info - fi - fi + # if [ "$os" == "macos" ]; then + # # regarding the warning: See https://github.com/Homebrew/homebrew-core/issues/76621 + # if [ $(versionCompare "${pythonVersion}" '3.10.2') == "-1" ]; then + # writeLine "Ignore the DEPRECATION warning. See https://github.com/Homebrew/homebrew-core/issues/76621 for details" $color_info + # fi + # fi if [ "$os" == "macos" ]; then - # sudo $globalPythonCmd -m ensurepip $pythonCmd -m ensurepip - $pythonCmd -m pip install pip + $pythonCmd -m pip install --upgrade pip else sudo $pythonCmd -m ensurepip $pythonCmd -m pip install --upgrade pip @@ -1471,20 +1467,41 @@ else architecture='x86_64' fi -if [ $(uname -n) == "raspberrypi" ]; then - hardware='RaspberryPi' -else - hardware='Unknown' -fi - if [[ $OSTYPE == 'darwin'* ]]; then platform='macos' os="macos" + os_name=$(awk '/SOFTWARE LICENSE AGREEMENT FOR macOS/' '/System/Library/CoreServices/Setup Assistant.app/Contents/Resources/en.lproj/OSXSoftwareLicense.rtf' | awk -F 'macOS ' '{print $NF}' | awk '{print substr($0, 0, length($0)-1)}') # eg "Big Sur" + os_vers=$(sw_vers -productVersion) # eg "11.1" for macOS Big Sur + + systemName=$os + if [[ "$architecture" == 'arm64' ]]; then platform='macos-arm64'; fi else os='linux' platform='linux' + os_name=$(. /etc/os-release;echo $ID) # eg "ubuntu" + os_vers=$(. /etc/os-release;echo $VERSION_ID) # eg "22.04" for Ubuntu 22.04 + if [[ "$architecture" == 'arm64' ]]; then platform='linux-arm64'; fi + + modelInfo="" + if [ -f "/sys/firmware/devicetree/base/model" ]; then + modelInfo=$(tr -d '\0' < /sys/firmware/devicetree/base/model) > /dev/null 2>&1 + fi + + if [[ "${modelInfo}" == *"Raspberry Pi"* ]]; then # elif [ $(uname -n) == "raspberrypi" ]; then + systemName='Raspberry Pi' + elif [[ "${modelInfo}" == *"Orange Pi"* ]]; then # elif [ $(uname -n) == "orangepi5" ]; then + systemName='Orange Pi' + elif [ $(uname -n) == "nano" ]; then + systemName='Jetson' + elif [ "$inDocker" == "true" ]; then + systemName='Docker' + elif [[ $(uname -a) =~ microsoft-standard-WSL ]]; then + systemName='WSL' + else + systemName=$os + fi fi # See if we can spot if it's a dark or light background @@ -1514,7 +1531,7 @@ else darkmode='true' terminalBg=$(gsettings get org.gnome.desktop.background primary-color) - if [ "${terminalBg}" != "no schemas installed" ]; then + if [ "${terminalBg}" != "no schemas installed" ] && [ "${terminalBg}" != "" ]; then terminalBg="${terminalBg%\'}" # remove first ' terminalBg="${terminalBg#\'}" # remove last ' terminalBg=`echo $terminalBg | tr '[:lower:]' '[:upper:]'` # uppercase-ing @@ -1538,6 +1555,8 @@ else # echo "terminalBg = ${terminalBg}, darkmode = ${darkmode}, luminosity = ${luma}" fi + else + writeLine "(No schemas means: we can't detect if you're in light or dark mode)" $color_info fi fi diff --git a/src/SDK/install.bat b/src/SDK/install.bat index f3a70654..190a563f 100644 --- a/src/SDK/install.bat +++ b/src/SDK/install.bat @@ -49,6 +49,7 @@ rem if /i "%hasCUDA%"=="true" call ../install_CUDnn.bat :: os - "windows" :: architecture - "x86_64" or "arm64" :: platform - "windows" or "windows-arm64" +:: systemName - "Windows"
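:: (Editorial aside, not part of the patch: on Windows, systemName is simply
:: "Windows". The utils.sh changes above derive the Linux/macOS equivalent by
:: probing the machine — a sketch of that probe order:
::   device-tree model contains "Raspberry Pi" -> systemName='Raspberry Pi'
::   device-tree model contains "Orange Pi"    -> systemName='Orange Pi'
::   hostname is "nano"                        -> systemName='Jetson'
::   inDocker is true                          -> systemName='Docker'
::   kernel string has microsoft-standard-WSL  -> systemName='WSL'
::   otherwise                                 -> systemName=$os
:: )
:: verbosity - quiet, info or loud. Use this to determine the noise level of output.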
:: forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. :: GetFromServer will honour this value. Do it yourself for DownloadAndExtract diff --git a/src/SDK/install.sh b/src/SDK/install.sh index de9ca7a7..d0897ed3 100644 --- a/src/SDK/install.sh +++ b/src/SDK/install.sh @@ -30,36 +30,16 @@ if [ $? -ne 0 ]; then quit 1; fi installPythonPackages 3.9 "${modulePath}/Python" "Shared" if [ $? -ne 0 ]; then quit 1; fi -# Ensure cuDNN is installed. Note this is only for linux since macs no longer support nVidia -hasCUDA="false" # (disabled for now, pending testing) -if [ "$os" == "linux" ] && [ "$hasCUDA" == "true" ]; then +# Ensure CUDA and cuDNN are installed. Note this is only for native Linux since +# macOS no longer supports NVIDIA, WSL (Linux under Windows) uses the Windows +# drivers, and Docker images already contain the necessary SDKs and libraries +if [ "$os" == "linux" ] && [ "$hasCUDA" == "true" ] && [ "${inDocker}" == "false" ] && \ + [ "${systemName}" != "Jetson" ] && [ "${systemName}" != "Raspberry Pi" ] && \ + [ "${systemName}" != "Orange Pi" ] ; then - # Ensure zlib is installed - sudo apt-get install zlib1g - - # Download tar from https://developer.nvidia.com/cudnn - tar -xvf cudnn-linux-x86_64-8.x.x.x_cudaX.Y-archive.tar.xz - sudo cp cudnn-*-archive/include/cudnn*.h /usr/local/cuda/include - sudo cp -P cudnn-*-archive/lib/libcudnn* /usr/local/cuda/lib64 - sudo chmod a+r /usr/local/cuda/include/cudnn*.h /usr/local/cuda/lib64/libcudnn* - - # Ensure nVidia Project Manager is installed - - # Enable the repo - OS="ubuntu2004" # ubuntu1804, ubuntu2004, or ubuntu2204. - wget https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/cuda-${OS}.pin - - sudo mv cuda-${OS}.pin /etc/apt/preferences.d/cuda-repository-pin-600 - sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/3bf863cc.pub - sudo add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/ /" - sudo apt-get update - - # install the cuDNN library - cudnn_version="8.5.0.*" - cuda_version="cuda11.7" # cuda10.2, cuda11.7 or cuda11.8 - - sudo apt-get install libcudnn8=${cudnn_version}-1+${cuda_version} - sudo apt-get install libcudnn8-dev=${cudnn_version}-1+${cuda_version} + # Install CUDA and cuDNN + correctLineEndings "${sdkScriptsPath}/install_cuDNN.sh" + source "${sdkScriptsPath}/install_cuDNN.sh" fi @@ -68,7 +48,7 @@ fi # Variables available: # # absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI) -# sdkScriptsPath - the path to the installation utility scripts ($rootPath/Installers) +# sdkScriptsPath - the path to the installation utility scripts ($rootPath/SDK/Scripts) # downloadPath - the path to where downloads will be stored ($sdkScriptsPath/downloads) # runtimesPath - the path to the installed runtimes ($rootPath/src/runtimes) # modulesPath - the path to all the AI modules ($rootPath/src/modules) @@ -77,6 +57,8 @@ fi # os - "linux" or "macos" # architecture - "x86_64" or "arm64" # platform - "linux", "linux-arm64", "macos" or "macos-arm64" +# systemName - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi" +# "Jetson" or "Docker" # verbosity - quiet, info or loud. Use this to determine the noise level of output. # forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. # getFromServer will honour this value.
Do it yourself for downloadAndExtract diff --git a/src/create_packages.bat b/src/create_packages.bat index 07dd07e6..80e51db8 100644 --- a/src/create_packages.bat +++ b/src/create_packages.bat @@ -16,6 +16,10 @@ set verbosity=quiet :: Show output in wild, crazy colours set useColor=true +:: Set this to false (or call script with --no-dotnet) to exclude .NET packages +:: This saves time by allowing quick packaging of the easier, non-compiled modules +set includeDotNet=true + :: Basic locations :: The path to the directory containing the install scripts. Will end in "\" @@ -39,10 +43,13 @@ set modulesDir=modules set arg_value=%~2 if not "!arg_name!" == "" ( if not "!arg_name:--no-color=!" == "!arg_name!" set useColor=false - REM if not "!arg_name:pathToInstall=!" == "!arg_name!" set installerScriptsPath=!arg_value! + if not "!arg_name:--no-dotnet=!" == "!arg_name!" set includeDotNet=false + if not "!arg_name:pathToInstall=!" == "!arg_name!" ( + set installerScriptsPath=!arg_value! + shift + ) ) shift - shift if not "!arg_name!"=="" goto param_loop :: In Development, this script is in the /src folder. In Production there is no @@ -116,34 +123,43 @@ for /f "delims=" %%D in ('dir /a:d /b "!modulesPath!"') do ( if exist "!packageModulePath!\package.bat" ( - call "!sdkScriptsPath!\utils.bat" Write "Packaging module !packageModuleId!..." "White" + set doPackage=true - pushd "!packageModulePath!" + if "!includeDotNet!" == "false" if "!packageModuleId!" == "ObjectDetectionNet" set doPackage=false + if "!includeDotNet!" == "false" if "!packageModuleId!" == "PortraitFilter" set doPackage=false + if "!includeDotNet!" == "false" if "!packageModuleId!" == "SentimentAnalysis" set doPackage=false - REM Read the version from the modulesettings.json file and then pass this - REM version to the package.bat file. - call "!sdkScriptsPath!\utils.bat" GetVersionFromModuleSettings "modulesettings.json" "Version" - set packageVersion=!jsonValue! - rem echo packageVersion is !packageVersion! + if "!doPackage!" == "false" ( + call "!sdkScriptsPath!\utils.bat" WriteLine "Skipping packaging module !packageModuleId!..." "Red" + ) else ( - rem Create module download package - call package.bat !packageModuleId! !packageVersion! - if errorlevel 1 call "!sdkScriptsPath!\utils.bat" WriteLine "Error in package.bat for !packageModuleDir!" "Red" + call "!sdkScriptsPath!\utils.bat" Write "Packaging module !packageModuleId!..." "White" - popd - - rem Move package into modules download cache - rem echo Moving !packageModulePath!\!packageModuleId!-!version!.zip to !downloadPath!\modules\ - move /Y !packageModulePath!\!packageModuleId!-!packageVersion!.zip !downloadPath!\modules\ >NUL 2>&1 + pushd "!packageModulePath!" - if errorlevel 1 ( - call "!sdkScriptsPath!\utils.bat" WriteLine "Error" "Red" - ) else ( - set success=false - call "!sdkScriptsPath!\utils.bat" WriteLine "Done" "DarkGreen" - ) + REM Read the version from the modulesettings.json file and then pass this + REM version to the package.bat file. + call "!sdkScriptsPath!\utils.bat" GetVersionFromModuleSettings "modulesettings.json" "Version" + set packageVersion=!jsonValue! + rem echo packageVersion is !packageVersion! + + rem Create module download package + call package.bat !packageModuleId! !packageVersion! + if errorlevel 1 call "!sdkScriptsPath!\utils.bat" WriteLine "Error in package.bat for !packageModuleDir!" 
"Red" - REM goto:eof + popd + + rem Move package into modules download cache + rem echo Moving !packageModulePath!\!packageModuleId!-!version!.zip to !downloadPath!\modules\ + move /Y !packageModulePath!\!packageModuleId!-!packageVersion!.zip !downloadPath!\modules\ >NUL 2>&1 + + if errorlevel 1 ( + call "!sdkScriptsPath!\utils.bat" WriteLine "Error" "Red" + ) else ( + set success=false + call "!sdkScriptsPath!\utils.bat" WriteLine "Done" "DarkGreen" + ) + ) ) ) diff --git a/src/modules/ALPR/ALPR.py b/src/modules/ALPR/ALPR.py index e812e7c7..a5f46414 100644 --- a/src/modules/ALPR/ALPR.py +++ b/src/modules/ALPR/ALPR.py @@ -1,7 +1,6 @@ # Import our general libraries import io import math -import re import time from typing import Tuple @@ -19,12 +18,24 @@ from options import Options from paddleocr import PaddleOCR -ocr = None -no_plate_found = 'Characters Not Found' +# Constants +debug_image = False +debug_log = False +no_plate_found = 'Characters Not Found' + +# Globals +ocr = None +previous_label = None +prev_avg_char_height = None +prev_avg_char_width = None +resize_width_factor = None +resize_height_factor = None + def init_detect_platenumber(opts: Options) -> None: - global ocr + global ocr, resize_width_factor, resize_height_factor + ocr = PaddleOCR(lang = opts.language, use_gpu = opts.use_gpu, show_log = opts.log_verbosity == LogVerbosity.Loud, @@ -34,7 +45,11 @@ def init_detect_platenumber(opts: Options) -> None: rec_algorithm = opts.algorithm, cls_model_dir = opts.cls_model_dir, det_model_dir = opts.det_model_dir, - rec_model_dir = opts.rec_model_dir) + rec_model_dir = opts.rec_model_dir, + use_angle_cls = False) + + resize_width_factor = opts.OCR_rescale_factor + resize_height_factor = opts.OCR_rescale_factor async def detect_platenumber(module_runner: ModuleRunner, opts: Options, image: Image) -> JSON: @@ -44,6 +59,9 @@ async def detect_platenumber(module_runner: ModuleRunner, opts: Options, image: Returns a tuple containing the Json description of what was found, along """ + global previous_label, prev_avg_char_width, prev_avg_char_height + global resize_width_factor, resize_height_factor + outputs = [] pillow_image = image @@ -81,10 +99,11 @@ async def detect_platenumber(module_runner: ModuleRunner, opts: Options, image: numpy_image = np.array(pillow_image) # Remember: numpy is left handed when it comes to indexes - orig_image_size: Size = Size(numpy_image.shape[1], numpy_image.shape[0]) + orig_image_size: Size = Size(width = numpy_image.shape[1], height = numpy_image.shape[0]) # Correct the colour space - numpy_image = cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR) + # numpy_image = cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR) + numpy_image = cv2.cvtColor(numpy_image, cv2.COLOR_RGB2GRAY) # If a plate is found we'll pass this onto OCR for plate_detection in detect_plate_response["predictions"]: @@ -93,25 +112,16 @@ async def detect_platenumber(module_runner: ModuleRunner, opts: Options, image: # The coordinates... (relative to the original image) plate_rect = Rect(plate_detection["x_min"], plate_detection["y_min"], plate_detection["x_max"], plate_detection["y_max"]) - # The image itself... (Its coordinates are now relativen to itself) + # The image itself... 
(Its coordinates are now relative to itself) numpy_plate = numpy_image[plate_rect.top:plate_rect.bottom, plate_rect.left:plate_rect.right] - # Pre-processing of the extracted plate to give the OCR a better chance of success - - # Run it through a super-resolution module to improve readability (TBD, but could be slow) - # numpy_plate = super_resolution(numpy_plate) - - # resize image if required - if opts.OCR_rescale_factor != 1: - numpy_plate = tool.resize_image(numpy_plate, opts.OCR_rescale_factor * 100) - # Store the size of the scaled plate before we start rotating it - scaled_plate_size = Size(numpy_plate.shape[1], numpy_plate.shape[0]) + plate_size = Size(numpy_plate.shape[1], numpy_plate.shape[0]) # Work out the angle we need to rotate the image to de-skew it (or use the manual override). if opts.auto_plate_rotate: plate_rotate_deg = tool.compute_skew(numpy_plate) - elif opts.plate_rotate_deg: + else: plate_rotate_deg = opts.plate_rotate_deg # If we need to rotate, then check to ensure that rotating the image won't chop off @@ -120,21 +130,16 @@ async def detect_platenumber(module_runner: ModuleRunner, opts: Options, image: if plate_rotate_deg: # We start with the assumption that we have a plate that is not displayed level. Once - # the plate is deskewed, the bounding box of the rotated plate will be *smaller* than + # the plate is de-skewed, the bounding box of the rotated plate will be *smaller* than # the skewed plate. # Calculate the corrected (smaller) bounding box if we were to rotate the image - rotated_size: Size = tool.largest_rotated_rect(scaled_plate_size, math.radians(plate_rotate_deg)) + rotated_size: Size = tool.largest_rotated_rect(plate_size, math.radians(plate_rotate_deg)) # Calculate the space between the corresponding edges of the (smaller) rotated plate # and the original plate. - # buffer: Size = (scaled_plate_size - rotated_size) / 2.0 - buffer: Size = (scaled_plate_size - rotated_size).__div__(2.0) - - # Scale this back to the original plate dimensions (remember we scaled the plate) - if opts.OCR_rescale_factor != 1: - # buffer = buffer / opts.OCR_rescale_factor - buffer = buffer.__div__(opts.OCR_rescale_factor) + # buffer: Size = (plate_size - rotated_size) / 2.0 + buffer: Size = (plate_size - rotated_size).__div__(2.0) # 'buffer' represents the width/height of an area along the edges of the original image. # @@ -149,7 +154,7 @@ async def detect_platenumber(module_runner: ModuleRunner, opts: Options, image: # ============================= # # If a plate is detected in the original image, and part of that plate extends into - # (or over) the buffer area then it means that, on rotatiom, part of the image could + # (or over) the buffer area then it means that, on rotation, part of the image could # be cut off. We should avoid that since it could cut off characters. # Calculate the region inside the extracted plate that the rotated plate image must fit @@ -167,22 +172,72 @@ async def detect_platenumber(module_runner: ModuleRunner, opts: Options, image: if plate_rotate_deg: numpy_plate = tool.rotate_image(numpy_plate, plate_rotate_deg, rotated_size) - # numpy_plate = cv2.GaussianBlur(numpy_plate, (5,5), 0) - # numpy_plate = cv2.medianBlur(numpy_plate, 3) + if opts.OCR_optimization: + # Based on the previous observation we'll adjust the resize factor so that we get + # closer and closer to an "optimal" factor that produces images whose characters + # match our optimum character size. + # Assumptions: + # 1. Most plates we detect will be more or less the same size. 
Obviously an issue if + # you are scanning plates both near and far + # 2. The aspect ratio of the license plate text (width:height) is around 3:5 + if previous_label and previous_label != no_plate_found: + + if prev_avg_char_width < opts.OCR_optimal_character_width and resize_width_factor < 50: + resize_width_factor += 0.02 + + if prev_avg_char_width > opts.OCR_optimal_character_width and resize_width_factor > 0.03: + resize_width_factor -= 0.02 + + if prev_avg_char_height < opts.OCR_optimal_character_height and resize_height_factor < 50: + resize_height_factor += 0.02 + + if prev_avg_char_height > opts.OCR_optimal_character_height and resize_height_factor > 0.03: + resize_height_factor -= 0.02 + + numpy_plate = cv2.resize(numpy_plate, None, fx = resize_width_factor, + fy = resize_height_factor, interpolation = cv2.INTER_CUBIC) + else: + if opts.OCR_rescale_factor != 1: + numpy_plate = tool.resize_image(numpy_plate, opts.OCR_rescale_factor * 100) + + if debug_log: + with open("log.txt", "a") as text_file: + text_file.write(f"{resize_height_factor}x{resize_width_factor} - {prev_avg_char_height}x{prev_avg_char_width}\n\n") + + """ + dimensions: Size = Size(numpy_plate.shape[1], numpy_plate.shape[0]) + dimensions.integerize() + + # Exaggerate the width to make line detection more prominent + dimensions.width *= 1.5 + """ + + # Pre-processing of the extracted plate to give the OCR a better chance of success + + # Run it through a super-resolution module to improve readability (TBD, but could be slow) + # numpy_plate = super_resolution(numpy_plate) + + # resize image if required + # if opts.OCR_rescale_factor != 1: + # numpy_plate = tool.resize_image(numpy_plate, opts.OCR_rescale_factor * 100) # perform otsu thresh (best to use binary inverse since opencv contours work better with white text) # ret, numpy_plate = cv2.threshold(numpy_plate, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV) # ret, numpy_plate = cv2.threshold(numpy_plate, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) - - # Read the plate. This may require multiple attempts + # numpy_plate = cv2.GaussianBlur(numpy_plate, (5,5), 0) + # numpy_plate = cv2.medianBlur(numpy_plate, 3) + # numpy_plate = cv2.bilateralFilter(numpy_plate,9,75,75) # Read plate - (label, confidence, plateInferenceMs) = await read_plate_chars_PaddleOCR(module_runner, numpy_plate) + (label, confidence, avg_char_width, avg_char_height, plateInferenceMs) = \ + await read_plate_chars_PaddleOCR(module_runner, numpy_plate) inferenceMs += plateInferenceMs
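# (Editorial aside, not part of the patch: the resize-factor adjustment above is
# a simple feedback loop — each plate read nudges the scale factors by 0.02
# toward whatever scaling makes the previous plate's characters hit the optimal
# size. A standalone sketch of the same idea; 'nudge' is a hypothetical helper,
# and 36/60 are the OCR_OPTIMAL_CHARACTER_WIDTH/HEIGHT defaults from
# modulesettings.json:
#
#     def nudge(factor: float, measured: int, optimal: int,
#               step: float = 0.02, lo: float = 0.03, hi: float = 50.0) -> float:
#         """Step 'factor' once toward the value that makes measured == optimal."""
#         if measured < optimal and factor < hi: factor += step
#         if measured > optimal and factor > lo: factor -= step
#         return factor
#
#     resize_width_factor  = nudge(resize_width_factor,  prev_avg_char_width,  36)
#     resize_height_factor = nudge(resize_height_factor, prev_avg_char_height, 60)
# )
+ # Read the plate. 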
This may require multiple attempts # If we had no success reading the original plate, apply some image enhancement # and try again + """ if label == no_plate_found: # If characters are not found try gamma correction and equalize # numpy_plate = cv2.fastNlMeansDenoisingColored(numpy_plate, None, 10, 10, 7, 21) @@ -192,10 +247,22 @@ async def detect_platenumber(module_runner: ModuleRunner, opts: Options, image: # cv2.imwrite("alpr-enhanced.jpg", numpy_plate) # Read plate, 2nd attempt - (label, confidence, plateInferenceMs) = await read_plate_chars_PaddleOCR(module_runner, numpy_plate) + (label, confidence, avg_char_width, avg_char_height, plateInferenceMs) = \ + await read_plate_chars_PaddleOCR(module_runner, numpy_plate) inferenceMs += plateInferenceMs + """ + + if debug_image: + filename = "/Program Files/CodeProject/AI/Server/wwwroot/alpr.jpg" + cv2.imwrite(filename, numpy_plate) if label and confidence: + # Store to help with adjusting for next detection + previous_label = label + prev_avg_char_width = avg_char_width + prev_avg_char_height = avg_char_height + + # return what we found detection = { "confidence": confidence, "label": "Plate: " + label, @@ -203,35 +270,37 @@ async def detect_platenumber(module_runner: ModuleRunner, opts: Options, image: "x_min": plate_rect.left, "y_min": plate_rect.top, "x_max": plate_rect.right, - "y_max": plate_rect.bottom, + "y_max": plate_rect.bottom } outputs.append(detection) + else: + # Next loop around we don't want to needlessly adjust resize factors + previous_label = no_plate_found return { "predictions": outputs, "inferenceMs": inferenceMs } -async def read_plate_chars_PaddleOCR(module_runner: ModuleRunner, image: Image) -> Tuple[str, float, float]: - +async def read_plate_chars_PaddleOCR(module_runner: ModuleRunner, image: Image) -> Tuple[str, float, int, int, float]: + """ This uses PaddleOCR for reading the plates. Note that the image being passed in should be a tightly cropped licence plate, so we're looking for the largest text box and will assume that's the plate number. - Returns (plate label, confidence, inference time (ms)) + Returns (plate label, confidence, avg char width (px), avg char height (px), inference time (ms)) """ - pattern = re.compile('[^a-zA-Z0-9]+') inferenceTimeMs: int = 0 try: start_time = time.perf_counter() - ocr_response = ocr.ocr(image, cls=True) + ocr_response = ocr.ocr(image, cls=False) inferenceTimeMs = int((time.perf_counter() - start_time) * 1000) # Note that ocr_response[0][0][0][0] could be a float with value 0 ('false'), or in some # other universe maybe it's a string. To be really careful we would have a test like # if hasattr(ocr_response[0][0][0][0], '__len__') and (not isinstance(ocr_response[0][0][0][0], str)) if not ocr_response or not ocr_response[0] or not ocr_response[0][0] or not ocr_response[0][0][0]: - return no_plate_found, 0, inferenceTimeMs + return no_plate_found, 0, 0, 0, inferenceTimeMs # Seems that different versions of paddle return different structures, OR # paddle returns different structures depending on its mood.
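# (Editorial aside, not part of the patch: the shape this code handles in its
# "wrapped" branch — per the indexing here and the unpacking in
# merge_text_detections — is one list per image, each entry pairing four corner
# points with a (text, confidence) tuple. Values below are made up for
# illustration:
#
#     ocr_response = [[                                 # one list per image
#         [ [[10.0, 4.0], [118.0, 4.0],                 # top-left, top-right
#            [118.0, 36.0], [10.0, 36.0]],              # bottom-right, bottom-left
#           ("ABC123", 0.98) ],                         # (text, confidence)
#     ]]
#
# In the unwrapped variant, ocr_response[0][0][0][0] is the first corner's x
# coordinate (a float), which is what the isinstance test below keys off.)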
We're expecting @@ -245,35 +314,18 @@ async def read_plate_chars_PaddleOCR(module_runner: ModuleRunner, image: Image) detections = ocr_response if isinstance(ocr_response[0][0][0][0], float) else ocr_response[0] - # Find the biggest textbox and assume that's the plate number - (plate_label, plate_confidence, max_area) = (None, 0.0, 0) - - for detection in detections: - bounding_box = detection[0] # [ topleft, topright, bottom right, bottom left ], each is [x,y] - classification = detection[1] - - label = classification[0] - confidence = classification[1] - - if label and confidence: - # We won't assume the points are in a particular order (though we know they are) - x_min = int(min(point[0] for point in bounding_box)) # = int(bounding_box[0][0]), - y_min = int(min(point[1] for point in bounding_box)) # = int(bounding_box[0][1]), - x_max = int(max(point[0] for point in bounding_box)) # = int(bounding_box[3][0]), - y_max = int(max(point[1] for point in bounding_box)) # = int(bounding_box[3][1]), + if debug_log: + with open("log.txt", "a") as text_file: + text_file.write(str(ocr_response) + "\n" + "\n") - area = math.fabs((y_max - y_min) * (x_max - x_min)) - - if area > max_area: - max_area = area - plate_label = pattern.sub('', label) - plate_confidence = confidence + # Find the biggest textbox and assume that's the plate number + plate_label, plate_confidence, avg_char_height, avg_char_width = tool.merge_text_detections(detections) if not plate_label: - return no_plate_found, 0, inferenceTimeMs + return no_plate_found, 0, 0, 0, inferenceTimeMs - return plate_label, plate_confidence, inferenceTimeMs + return plate_label, plate_confidence, avg_char_width, avg_char_height, inferenceTimeMs except Exception as ex: - module_runner.report_error_aync(ex, __file__) - return None, 0, inferenceTimeMs \ No newline at end of file + module_runner.report_error_async(ex, __file__) + return None, 0, 0, 0, inferenceTimeMs \ No newline at end of file diff --git a/src/modules/ALPR/ALPR.pyproj b/src/modules/ALPR/ALPR.pyproj index 984686b2..5919b6ca 100644 --- a/src/modules/ALPR/ALPR.pyproj +++ b/src/modules/ALPR/ALPR.pyproj @@ -25,9 +25,9 @@ - - - + + + @@ -70,9 +70,9 @@ - - - + + + diff --git a/src/modules/ALPR/install.bat b/src/modules/ALPR/install.bat index 1833d5cc..89520913 100644 --- a/src/modules/ALPR/install.bat +++ b/src/modules/ALPR/install.bat @@ -25,23 +25,11 @@ rem if errorlevel 1 exit /b 1 call "%sdkScriptsPath%\utils.bat" InstallPythonPackages 3.7 "%absoluteAppRootDir%\SDK\Python" "Local" rem if errorlevel 1 exit /b 1 -:: We have a patch to apply! -call "!sdkScriptsPath!\utils.bat" WriteLine "Applying patch for PaddlePaddle" "!color_info!" -if /i "!hasCUDA!" == "true" ( - copy /Y "!modulePath!\patch\paddle2.3.2.post116\image.py" "!modulePath!\bin\%os%\python37\venv\Lib\site-packages\paddle\dataset\" -) else ( - copy /Y "!modulePath!\patch\paddle2.3.2\image.py" "!modulePath!\bin\%os%\python37\venv\Lib\site-packages\paddle\dataset\" -) - :: Download the ALPR models and store in /paddleocr call "%sdkScriptsPath%\utils.bat" GetFromServer "paddleocr-models.zip" "paddleocr" "Downloading ALPR models..." if errorlevel 1 exit /b 1 -:: Cleanup if you wish :: rmdir /S %downloadPath% - - :: -- Install script cheatsheet -- :: :: Variables available: @@ -56,6 +44,7 @@ if errorlevel 1 exit /b 1 :: os - "windows" :: architecture - "x86_64" or "arm64" :: platform - "windows" or "windows-arm64" +:: systemName - "Windows" :: verbosity - quiet, info or loud. Use this to determine the noise level of output. 
:: forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. :: GetFromServer will honour this value. Do it yourself for DownloadAndExtract diff --git a/src/modules/ALPR/install.sh b/src/modules/ALPR/install.sh index c54bd5f6..5a5337f4 100644 --- a/src/modules/ALPR/install.sh +++ b/src/modules/ALPR/install.sh @@ -15,56 +15,21 @@ if [ "$1" != "install" ]; then exit 1 fi -# Work needs to be done to get Paddle to install on the Raspberry Pi -if [ "${hardware}" == "RaspberryPi" ]; then - writeLine 'Unable to install PaddleOCR on RaspberryPi. Quitting.' 'Red' +# Work needs to be done to get Paddle to install on the Raspberry Pi and Orange Pi +if [ "${systemName}" == "Raspberry Pi" ] || [ "${systemName}" == "Orange Pi" ] || [ "${systemName}" == "Jetson" ]; then + writeLine 'Unable to install PaddleOCR on Raspberry Pi, Orange Pi or Jetson. Quitting.' 'Red' else - # *** IF YOU WISH TO USE GPU ON LINUX *** - # Before you do anything you need to ensure CUDA is installed in Ubuntu. - # These steps need to be done outside of our setup scripts - - message=" - *** IF YOU WISH TO USE GPU ON LINUX Please ensure you have CUDA installed *** - # The steps are: (See https://chennima.github.io/cuda-gpu-setup-for-paddle-on-windows-wsl) - - sudo apt install libgomp1 - - # Install CUDA - - sudo apt-key del 7fa2af80 - wget https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/cuda-wsl-ubuntu.pin - sudo mv cuda-wsl-ubuntu.pin /etc/apt/preferences.d/cuda-repository-pin-600 - wget https://developer.download.nvidia.com/compute/cuda/11.7.0/local_installers/cuda-repo-wsl-ubuntu-11-7-local_11.7.0-1_amd64.deb - sudo dpkg -i cuda-repo-wsl-ubuntu-11-7-local_11.7.0-1_amd64.deb - - sudo cp /var/cuda-repo-wsl-ubuntu-11-7-local/cuda-B81839D3-keyring.gpg /usr/share/keyrings/ - - sudo apt-get update - sudo apt-get -y install cuda - - # Now Install cuDNN - - sudo apt-get install zlib1g - - # => Go to https://developer.nvidia.com/cudnn, sign in / sign up, agree to terms - # and download 'Local Installer for Linux x86_64 (Tar)'. This will download a - # file similar to 'cudnn-linux-x86_64-8.4.1.50_cuda11.6-archive.tar.xz' - # - # In the downloads folder do: - - tar -xvf cudnn-linux-x86_64-8.4.1.50_cuda11.6-archive.tar.xz - sudo cp cudnn-*-archive/include/cudnn*.h /usr/local/cuda/include - sudo cp -P cudnn-*-archive/lib/libcudnn* /usr/local/cuda/lib64 - sudo chmod a+r /usr/local/cuda/include/cudnn*.h /usr/local/cuda/lib64/libcudnn* - - # and you'll be good to go - " - # print message + # Ensure CUDA and cuDNN are installed. Note this is only for native Linux since + # macOS no longer supports NVIDIA, WSL (Linux under Windows) uses the Windows + # drivers, and Docker images already contain the necessary SDKs and libraries + if [ "$os" == "linux" ] && [ "$hasCUDA" == "true" ] && [ "${inDocker}" == "false" ]; then + correctLineEndings "${sdkScriptsPath}/install_cuDNN.sh" + source "${sdkScriptsPath}/install_cuDNN.sh" + fi # Install python and the required dependencies. - # Note that PaddlePaddle requires Python3.8 or below setupPython 3.8 "Local" if [ $? -ne 0 ]; then quit 1; fi installPythonPackages 3.8 "${modulePath}" "Local" @@ -76,24 +41,11 @@ else getFromServer "paddleocr-models.zip" "paddleocr" "Downloading OCR models..." if [ $? -ne 0 ]; then quit 1; fi - # We have a patch to apply for linux. 
- if [ "${platform}" = "linux" ]; then - if [ "${hasCUDA}" != "true" ]; then - # writeLine 'Applying PaddlePaddle patch' - # https://www.codeproject.com/Tips/5347636/Getting-PaddleOCR-and-PaddlePaddle-to-work-in-Wind - # NOT Needed for Ubuntu 20.04 WSL under Win10 - # cp ${modulePath}/patch/paddle2.4.0rc0/image.py ${modulePath}/bin/${platform}/python38/venv/lib/python3.8/site-packages/paddle/dataset/. - - writeLine 'Applying PaddleOCR patch' - # IS needed due to a newer version of Numpy deprecating np.int - cp ${modulePath}/patch/paddleocr2.6.0.1/db_postprocess.py ${modulePath}/bin/${os}/python38/venv/lib/python3.8/site-packages/paddleocr/ppocr/postprocess/. - fi - fi - - # We have a patch to apply for macOS-arm64 due to numpy upgrade that deprecates np.int that we can't downgrade - if [ "${os}" == "macos" ]; then + if [ "${systemName}" != "Raspberry Pi" ] && [ "${systemName}" != "Orange Pi" ] && [ "${systemName}" != "Jetson" ]; then + # We have a patch to apply for linux and macOS due to a numpy upgrade that + # deprecates np.int that we can't downgrade writeLine 'Applying PaddleOCR patch' - cp ${modulePath}/patch/paddleocr2.6.0.1/db_postprocess.py ${modulePath}/bin/${os}/python38/venv/lib/python3.8/site-packages/paddleocr/ppocr/postprocess/. + cp ${modulePath}/patch/paddleocr-2.6.0.1/db_postprocess.py ${modulePath}/bin/${os}/python38/venv/lib/python3.8/site-packages/paddleocr/ppocr/postprocess/. fi # To test paddle was setup correctly, open a python prompt and do: @@ -109,7 +61,7 @@ fi # Variables available: # # absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI) -# sdkScriptsPath - the path to the installation utility scripts ($rootPath/Installers) +# sdkScriptsPath - the path to the installation utility scripts ($rootPath/SDK/Scripts) # downloadPath - the path to where downloads will be stored ($sdkScriptsPath/downloads) # runtimesPath - the path to the installed runtimes ($rootPath/src/runtimes) # modulesPath - the path to all the AI modules ($rootPath/src/modules) @@ -118,6 +70,8 @@ fi # os - "linux" or "macos" # architecture - "x86_64" or "arm64" # platform - "linux", "linux-arm64", "macos" or "macos-arm64" +# systemName - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi" +# "Jetson" or "Docker" # verbosity - quiet, info or loud. Use this to determines the noise level of output. # forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. # getFromServer will honour this value. Do it yourself for downloadAndExtract diff --git a/src/modules/ALPR/modulesettings.json b/src/modules/ALPR/modulesettings.json index e87053fd..b54680d3 100644 --- a/src/modules/ALPR/modulesettings.json +++ b/src/modules/ALPR/modulesettings.json @@ -3,19 +3,22 @@ "ALPR": { "Name": "License Plate Reader", - "Version": "2.2", + "Version": "2.5", // Publishing info - "Description": "Detects and readers licence plates using YOLO object detection and the PaddleOCR toolkit", + "Description": "Detects and readers single-line and multi-line licence plates using YOLO object detection and the PaddleOCR toolkit", "Platforms": [ "windows", "linux", "macos", "macos-arm64" ], // Issues installing PaddlePaddle on linux-arm64 "License": "SSPL", "LicenseUrl": "https://www.mongodb.com/licensing/server-side-public-license", // Which server version is compatible with each version of this module. 
- "VersionCompatibililty": [ - { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-11-01" }, + "ModuleReleases": [ + { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-11-01" }, { "ModuleVersion": "2.1", "ServerVersionRange": [ "2.0.9", "2.0.9" ], "ReleaseDate": "2022-12-01" }, - { "ModuleVersion": "2.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-20" } + { "ModuleVersion": "2.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-20" }, + { "ModuleVersion": "2.3", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-04-20", "ReleaseNotes": "Updated module settings", "Importance": "Minor" }, + { "ModuleVersion": "2.4", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-05-10", "ReleaseNotes": "PaddlePaddle install more reliable", "Importance": "Minor" }, + { "ModuleVersion": "2.5", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-06-04", "ReleaseNotes": "Updated PaddlePaddle" } ], // Launch instructions @@ -37,7 +40,10 @@ "PLATE_CONFIDENCE": 0.7, // Confidence required in detecting a plate in the first place "PLATE_ROTATE_DEG": 0, // If non-zero, rotate plate before OCR (+ve = counterclockwise) "AUTO_PLATE_ROTATE": true, - "PLATE_RESCALE_FACTOR": 2 + "PLATE_RESCALE_FACTOR": 2, + "OCR_OPTIMIZATION": true, + "OCR_OPTIMAL_CHARACTER_HEIGHT": 60, + "OCR_OPTIMAL_CHARACTER_WIDTH": 36 }, "RouteMaps": [ diff --git a/src/modules/ALPR/options.py b/src/modules/ALPR/options.py index 2c5c1f20..e0bd7385 100644 --- a/src/modules/ALPR/options.py +++ b/src/modules/ALPR/options.py @@ -16,7 +16,12 @@ def __init__(self): self.auto_plate_rotate = str(ModuleOptions.getEnvVariable("AUTO_PLATE_ROTATE", "True")).lower() == "true" # increase size of plate 2X before attempting OCR - self.OCR_rescale_factor = int(ModuleOptions.getEnvVariable("PLATE_RESCALE_FACTOR", 2)) + self.OCR_rescale_factor = float(ModuleOptions.getEnvVariable("PLATE_RESCALE_FACTOR", 2.0)) + + # OCR optimization + self.OCR_optimization = str(ModuleOptions.getEnvVariable("OCR_OPTIMIZATION", "True")).lower() == "true" + self.OCR_optimal_character_height = int(ModuleOptions.getEnvVariable("OCR_OPTIMAL_CHARACTER_HEIGHT", 60)) + self.OCR_optimal_character_width = int(ModuleOptions.getEnvVariable("OCR_OPTIMAL_CHARACTER_WIDTH", 36)) # PaddleOCR settings self.use_gpu = ModuleOptions.support_GPU # We'll disable this if we can't find GPU libraries @@ -29,19 +34,4 @@ def __init__(self): self.det_model_dir = 'paddleocr/en_PP-OCRv3_det_infer' self.rec_model_dir = 'paddleocr/en_PP-OCRv3_rec_infer' - """ - def cleanDetectedDir(self) -> None: - # make sure the detected directory exists - if not os.path.exists(self.detectedDir): - os.mkdir(self.detectedDir) - - # delete all the files in the output directory - filelist = os.listdir(self.detectedDir) - for filename in filelist: - try: - filepath = os.path.join(self.detectedDir, filename) - os.remove(filepath) - except: - pass - """ diff --git a/src/modules/ALPR/patch/paddle2.3.2.post116/image.py b/src/modules/ALPR/patch/paddle2.3.2.post116/image.py deleted file mode 100644 index bdc254c0..00000000 --- a/src/modules/ALPR/patch/paddle2.3.2.post116/image.py +++ /dev/null @@ -1,419 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -This file contains some common interfaces for image preprocess. -Many users are confused about the image layout. We introduce -the image layout as follows. - -- CHW Layout - - - The abbreviations: C=channel, H=Height, W=Width - - The default layout of image opened by cv2 or PIL is HWC. - PaddlePaddle only supports the CHW layout. And CHW is simply - a transpose of HWC. It must transpose the input image. - -- Color format: RGB or BGR - - OpenCV use BGR color format. PIL use RGB color format. Both - formats can be used for training. Noted that, the format should - be keep consistent between the training and inference period. -""" - -from __future__ import print_function - -import six -import numpy as np -# FIXME(minqiyang): this is an ugly fix for the numpy bug reported here -# https://github.com/numpy/numpy/issues/12497 -if six.PY3: - import subprocess - import sys - import os - interpreter = sys.executable - # Note(zhouwei): if use Python/C 'PyRun_SimpleString', 'sys.executable' - # will be the C++ executable on Windows - #if sys.platform == 'win32' and 'python.exe' not in interpreter: - # interpreter = sys.exec_prefix + os.sep + 'python.exe' - import_cv2_proc = subprocess.Popen( - [interpreter, "-c", "import cv2"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True) - out, err = import_cv2_proc.communicate() - retcode = import_cv2_proc.poll() - if retcode != 0: - cv2 = None - else: - import cv2 -else: - try: - import cv2 - except ImportError: - cv2 = None -import os -import tarfile -import six.moves.cPickle as pickle - -__all__ = [] - - -def _check_cv2(): - if cv2 is None: - import sys - sys.stderr.write( - '''Warning with paddle image module: opencv-python should be imported, - or paddle image module could NOT work; please install opencv-python first.''' - ) - return False - else: - return True - - -def batch_images_from_tar(data_file, - dataset_name, - img2label, - num_per_batch=1024): - """ - Read images from tar file and batch them into batch file. 
- - :param data_file: path of image tar file - :type data_file: string - :param dataset_name: 'train','test' or 'valid' - :type dataset_name: string - :param img2label: a dic with image file name as key - and image's label as value - :type img2label: dic - :param num_per_batch: image number per batch file - :type num_per_batch: int - :return: path of list file containing paths of batch file - :rtype: string - """ - batch_dir = data_file + "_batch" - out_path = "%s/%s_%s" % (batch_dir, dataset_name, os.getpid()) - meta_file = "%s/%s_%s.txt" % (batch_dir, dataset_name, os.getpid()) - - if os.path.exists(out_path): - return meta_file - else: - os.makedirs(out_path) - - tf = tarfile.open(data_file) - mems = tf.getmembers() - data = [] - labels = [] - file_id = 0 - for mem in mems: - if mem.name in img2label: - data.append(tf.extractfile(mem).read()) - labels.append(img2label[mem.name]) - if len(data) == num_per_batch: - output = {} - output['label'] = labels - output['data'] = data - pickle.dump( - output, - open('%s/batch_%d' % (out_path, file_id), 'wb'), - protocol=2) - file_id += 1 - data = [] - labels = [] - if len(data) > 0: - output = {} - output['label'] = labels - output['data'] = data - pickle.dump( - output, open('%s/batch_%d' % (out_path, file_id), 'wb'), protocol=2) - - with open(meta_file, 'a') as meta: - for file in os.listdir(out_path): - meta.write(os.path.abspath("%s/%s" % (out_path, file)) + "\n") - return meta_file - - -def load_image_bytes(bytes, is_color=True): - """ - Load an color or gray image from bytes array. - - Example usage: - - .. code-block:: python - - with open('cat.jpg') as f: - im = load_image_bytes(f.read()) - - :param bytes: the input image bytes array. - :type bytes: str - :param is_color: If set is_color True, it will load and - return a color image. Otherwise, it will - load and return a gray image. - :type is_color: bool - """ - assert _check_cv2() is True - - flag = 1 if is_color else 0 - file_bytes = np.asarray(bytearray(bytes), dtype=np.uint8) - img = cv2.imdecode(file_bytes, flag) - return img - - -def load_image(file, is_color=True): - """ - Load an color or gray image from the file path. - - Example usage: - - .. code-block:: python - - im = load_image('cat.jpg') - - :param file: the input image path. - :type file: string - :param is_color: If set is_color True, it will load and - return a color image. Otherwise, it will - load and return a gray image. - :type is_color: bool - """ - assert _check_cv2() is True - - # cv2.IMAGE_COLOR for OpenCV3 - # cv2.CV_LOAD_IMAGE_COLOR for older OpenCV Version - # cv2.IMAGE_GRAYSCALE for OpenCV3 - # cv2.CV_LOAD_IMAGE_GRAYSCALE for older OpenCV Version - # Here, use constant 1 and 0 - # 1: COLOR, 0: GRAYSCALE - flag = 1 if is_color else 0 - im = cv2.imread(file, flag) - return im - - -def resize_short(im, size): - """ - Resize an image so that the length of shorter edge is size. - - Example usage: - - .. code-block:: python - - im = load_image('cat.jpg') - im = resize_short(im, 256) - - :param im: the input image with HWC layout. - :type im: ndarray - :param size: the shorter edge size of image after resizing. - :type size: int - """ - assert _check_cv2() is True - - h, w = im.shape[:2] - h_new, w_new = size, size - if h > w: - h_new = size * h // w - else: - w_new = size * w // h - im = cv2.resize(im, (w_new, h_new), interpolation=cv2.INTER_CUBIC) - return im - - -def to_chw(im, order=(2, 0, 1)): - """ - Transpose the input image order. The image layout is HWC format - opened by cv2 or PIL. 
Transpose the input image to CHW layout - according the order (2,0,1). - - Example usage: - - .. code-block:: python - - im = load_image('cat.jpg') - im = resize_short(im, 256) - im = to_chw(im) - - :param im: the input image with HWC layout. - :type im: ndarray - :param order: the transposed order. - :type order: tuple|list - """ - assert len(im.shape) == len(order) - im = im.transpose(order) - return im - - -def center_crop(im, size, is_color=True): - """ - Crop the center of image with size. - - Example usage: - - .. code-block:: python - - im = center_crop(im, 224) - - :param im: the input image with HWC layout. - :type im: ndarray - :param size: the cropping size. - :type size: int - :param is_color: whether the image is color or not. - :type is_color: bool - """ - h, w = im.shape[:2] - h_start = (h - size) // 2 - w_start = (w - size) // 2 - h_end, w_end = h_start + size, w_start + size - if is_color: - im = im[h_start:h_end, w_start:w_end, :] - else: - im = im[h_start:h_end, w_start:w_end] - return im - - -def random_crop(im, size, is_color=True): - """ - Randomly crop input image with size. - - Example usage: - - .. code-block:: python - - im = random_crop(im, 224) - - :param im: the input image with HWC layout. - :type im: ndarray - :param size: the cropping size. - :type size: int - :param is_color: whether the image is color or not. - :type is_color: bool - """ - h, w = im.shape[:2] - h_start = np.random.randint(0, h - size + 1) - w_start = np.random.randint(0, w - size + 1) - h_end, w_end = h_start + size, w_start + size - if is_color: - im = im[h_start:h_end, w_start:w_end, :] - else: - im = im[h_start:h_end, w_start:w_end] - return im - - -def left_right_flip(im, is_color=True): - """ - Flip an image along the horizontal direction. - Return the flipped image. - - Example usage: - - .. code-block:: python - - im = left_right_flip(im) - - :param im: input image with HWC layout or HW layout for gray image - :type im: ndarray - :param is_color: whether input image is color or not - :type is_color: bool - """ - if len(im.shape) == 3 and is_color: - return im[:, ::-1, :] - else: - return im[:, ::-1] - - -def simple_transform(im, - resize_size, - crop_size, - is_train, - is_color=True, - mean=None): - """ - Simply data argumentation for training. These operations include - resizing, croping and flipping. - - Example usage: - - .. code-block:: python - - im = simple_transform(im, 256, 224, True) - - :param im: The input image with HWC layout. - :type im: ndarray - :param resize_size: The shorter edge length of the resized image. - :type resize_size: int - :param crop_size: The cropping size. - :type crop_size: int - :param is_train: Whether it is training or not. - :type is_train: bool - :param is_color: whether the image is color or not. - :type is_color: bool - :param mean: the mean values, which can be element-wise mean values or - mean values per channel. 
- :type mean: numpy array | list - """ - im = resize_short(im, resize_size) - if is_train: - im = random_crop(im, crop_size, is_color=is_color) - if np.random.randint(2) == 0: - im = left_right_flip(im, is_color) - else: - im = center_crop(im, crop_size, is_color=is_color) - if len(im.shape) == 3: - im = to_chw(im) - - im = im.astype('float32') - if mean is not None: - mean = np.array(mean, dtype=np.float32) - # mean value, may be one value per channel - if mean.ndim == 1 and is_color: - mean = mean[:, np.newaxis, np.newaxis] - elif mean.ndim == 1: - mean = mean - else: - # elementwise mean - assert len(mean.shape) == len(im) - im -= mean - - return im - - -def load_and_transform(filename, - resize_size, - crop_size, - is_train, - is_color=True, - mean=None): - """ - Load image from the input file `filename` and transform image for - data argumentation. Please refer to the `simple_transform` interface - for the transform operations. - - Example usage: - - .. code-block:: python - - im = load_and_transform('cat.jpg', 256, 224, True) - - :param filename: The file name of input image. - :type filename: string - :param resize_size: The shorter edge length of the resized image. - :type resize_size: int - :param crop_size: The cropping size. - :type crop_size: int - :param is_train: Whether it is training or not. - :type is_train: bool - :param is_color: whether the image is color or not. - :type is_color: bool - :param mean: the mean values, which can be element-wise mean values or - mean values per channel. - :type mean: numpy array | list - """ - im = load_image(filename, is_color) - im = simple_transform(im, resize_size, crop_size, is_train, is_color, mean) - return im diff --git a/src/modules/ALPR/patch/paddleocr2.6.0.1/db_postprocess.py b/src/modules/ALPR/patch/paddleocr-2.6.0.1/db_postprocess.py similarity index 100% rename from src/modules/ALPR/patch/paddleocr2.6.0.1/db_postprocess.py rename to src/modules/ALPR/patch/paddleocr-2.6.0.1/db_postprocess.py diff --git a/src/modules/ALPR/patch/paddle2.4.0rc0/image.py b/src/modules/ALPR/patch/paddlepaddle-2.4.0rc0/image.py similarity index 100% rename from src/modules/ALPR/patch/paddle2.4.0rc0/image.py rename to src/modules/ALPR/patch/paddlepaddle-2.4.0rc0/image.py diff --git a/src/modules/ALPR/patch/paddle2.3.2/image.py b/src/modules/ALPR/patch/paddlepaddle-2.4.2/image.py similarity index 94% rename from src/modules/ALPR/patch/paddle2.3.2/image.py rename to src/modules/ALPR/patch/paddlepaddle-2.4.2/image.py index 51aea8b7..e941dfe8 100644 --- a/src/modules/ALPR/patch/paddle2.3.2/image.py +++ b/src/modules/ALPR/patch/paddlepaddle-2.4.2/image.py @@ -43,19 +43,21 @@ interpreter = sys.executable # Note(zhouwei): if use Python/C 'PyRun_SimpleString', 'sys.executable' # will be the C++ execubable on Windows - #if sys.platform == 'win32' and 'python.exe' not in interpreter: - # interpreter = sys.exec_prefix + os.sep + 'python.exe' - import_cv2_proc = subprocess.Popen( - [interpreter, "-c", "import cv2"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True) + if sys.platform == 'win32' and 'python.exe' not in interpreter: + interpreter = sys.exec_prefix + os.sep + 'python.exe' + import_cv2_proc = subprocess.Popen([interpreter, "-c", "import cv2"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True) out, err = import_cv2_proc.communicate() retcode = import_cv2_proc.poll() if retcode != 0: cv2 = None else: - import cv2 + try: + import cv2 + except ImportError: + cv2 = None else: try: import cv2 @@ -121,10 +123,9 @@ def 
batch_images_from_tar(data_file, output = {} output['label'] = labels output['data'] = data - pickle.dump( - output, - open('%s/batch_%d' % (out_path, file_id), 'wb'), - protocol=2) + pickle.dump(output, + open('%s/batch_%d' % (out_path, file_id), 'wb'), + protocol=2) file_id += 1 data = [] labels = [] @@ -132,8 +133,9 @@ def batch_images_from_tar(data_file, output = {} output['label'] = labels output['data'] = data - pickle.dump( - output, open('%s/batch_%d' % (out_path, file_id), 'wb'), protocol=2) + pickle.dump(output, + open('%s/batch_%d' % (out_path, file_id), 'wb'), + protocol=2) with open(meta_file, 'a') as meta: for file in os.listdir(out_path): diff --git a/src/modules/ALPR/requirements.linux.arm64.txt b/src/modules/ALPR/requirements.linux.arm64.txt index 0cf62237..04c98d06 100644 --- a/src/modules/ALPR/requirements.linux.arm64.txt +++ b/src/modules/ALPR/requirements.linux.arm64.txt @@ -10,7 +10,7 @@ paddleocr==2.6.0.1 # Installing PaddleOCR, the OCR toolkit based on PaddlePaddl # Do these after paddlepaddle because paddlepaddle requires specific versions imutils # Installing imutils, the image utilities library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library opencv-python # Installing OpenCV, the Computer Vision library for Python numpy==1.23.3 # Installing NumPy, a package for scientific computing diff --git a/src/modules/ALPR/requirements.linux.cuda-no-luck.txt b/src/modules/ALPR/requirements.linux.cuda-no-luck.txt index 94c27c27..e628e3e1 100644 --- a/src/modules/ALPR/requirements.linux.cuda-no-luck.txt +++ b/src/modules/ALPR/requirements.linux.cuda-no-luck.txt @@ -30,7 +30,7 @@ paddleocr==2.6.0.1 # Installing PaddleOCR, the OCR toolkit based on # Do these after paddlepaddle because paddlepaddle requires specific versions imutils # Installing imutils, the image utilities library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library opencv-python # Installing OpenCV, the Computer Vision library for Python numpy==1.23.3 # Installing NumPy, a package for scientific computing diff --git a/src/modules/ALPR/requirements.linux.txt b/src/modules/ALPR/requirements.linux.txt index 9413babe..a8092d9f 100644 --- a/src/modules/ALPR/requirements.linux.txt +++ b/src/modules/ALPR/requirements.linux.txt @@ -10,7 +10,7 @@ paddleocr==2.6.1.3 # Installing PaddleOCR, the OCR toolkit based on PaddlePaddl # Do these after paddlepaddle because paddlepaddle requires specific versions imutils # Installing imutils, the image utilities library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library opencv-python # Installing OpenCV, the Computer Vision library for Python numpy>=1.23.3 # Installing NumPy, a package for scientific computing diff --git a/src/modules/ALPR/requirements.macos.arm64.txt b/src/modules/ALPR/requirements.macos.arm64.txt index c4009604..2657639c 100644 --- a/src/modules/ALPR/requirements.macos.arm64.txt +++ b/src/modules/ALPR/requirements.macos.arm64.txt @@ -12,6 +12,6 @@ numpy # Installing NumPy, a package for scientific computing # Do these after paddlepaddle because paddlepaddle requires specific versions imutils # Installing imutils, the image utilities library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library # end of file \ No newline at end of file diff --git a/src/modules/ALPR/requirements.macos.txt b/src/modules/ALPR/requirements.macos.txt 
index 7ac3680e..7b1d3152 100644 --- a/src/modules/ALPR/requirements.macos.txt +++ b/src/modules/ALPR/requirements.macos.txt @@ -6,7 +6,7 @@ paddleocr==2.6.0.1 # Installing PaddleOCR, the OCR toolkit based on PaddlePaddl # Do these after paddlepaddle because paddlepaddle requires specific versions imutils # Installing imutils, the image utilities library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library opencv-python # Installing OpenCV, the Computer Vision library for Python numpy # Installing NumPy, a package for scientific computing diff --git a/src/modules/ALPR/requirements.txt b/src/modules/ALPR/requirements.txt index ddc70a22..b8fd3309 100644 --- a/src/modules/ALPR/requirements.txt +++ b/src/modules/ALPR/requirements.txt @@ -1,15 +1,14 @@ -#! Python3.9 +#! Python3.7 # We install a specific version of PaddlePaddle because we have a patch to apply -paddlepaddle==2.3.2 # Installing PaddelPaddle, the Deep Learning platform +paddlepaddle==2.5.0 # Installing PaddlePaddle, the Deep Learning platform # PaddleOCR is famously painful to install. This works well for Windows, no GPU, -# using paddle2.3.2 paddleocr==2.6.1.3 # Installing PaddleOCR, the OCR toolkit based on PaddlePaddle # Do these after paddlepaddle because paddlepaddle requires specific versions imutils # Installing imutils, the image utilities library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library opencv-python # Installing OpenCV, the Computer Vision library for Python numpy # Installing NumPy, a package for scientific computing diff --git a/src/modules/ALPR/requirements.windows.cuda.txt b/src/modules/ALPR/requirements.windows.cuda.txt index a786ddc2..b3ec58aa 100644 --- a/src/modules/ALPR/requirements.windows.cuda.txt +++ b/src/modules/ALPR/requirements.windows.cuda.txt @@ -1,14 +1,12 @@ #! Python3.9
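# (Editorial aside, not part of the patch: the setup scripts pick whichever of
# these per-platform requirements variants exists, most specific first. A sketch
# of the fallback order, per the rocm/gpu cascades in utils.bat and utils.sh
# above, using "rocm" as the example device suffix:
#
#     requirements.{os}.{architecture}.rocm.txt
#     requirements.{os}.rocm.txt
#     requirements.rocm.txt
#     ...and presumably the generic requirements.txt as the last resort
# )
# PaddlePaddle is painful to install. 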
This works on Windows / CUDA ---find-links https://www.paddlepaddle.org.cn/whl/windows/mkl/avx/stable.html -paddlepaddle-gpu==2.3.2.post116 # Installing PaddelPaddle, the R and D deep learning platform - -paddleocr==2.6.1.3 # Installing PaddleOCR, the OCR toolkit based on PaddlePaddle +paddlepaddle-gpu==2.5.0 # Installing PaddlePaddle, the Deep Learning platform +paddleocr==2.6.1.3 # Installing PaddleOCR, the OCR toolkit based on PaddlePaddle # Do these after paddlepaddle because paddlepaddle requires specific versions imutils # Installing imutils, the image utilities library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library opencv-python # Installing OpenCV, the Computer Vision library for Python numpy # Installing NumPy, a package for scientific computing diff --git a/src/modules/ALPR/utils/tools.py b/src/modules/ALPR/utils/tools.py index 01b606f0..4d02c428 100644 --- a/src/modules/ALPR/utils/tools.py +++ b/src/modules/ALPR/utils/tools.py @@ -1,6 +1,7 @@ import cv2 import math +import re import numpy as np from PIL import Image @@ -12,6 +13,7 @@ # Could switch to numpy, so keep this abstracted for now ImageType = Image + def resize_image(image: ImageType, scale_percent: int) -> ImageType: """ Resize an image by the given percent @@ -139,11 +141,12 @@ def compute_skew(src_img: ImageType) -> float: dimensions: Size = Size(src_img.shape[1], src_img.shape[0]) dimensions.integerize() - # Exxagerate the width to make line detection more prominent - dimensions.width *= 4 + # Exaggerate the width to make line detection more prominent + dimensions.width *= 8 + dimensions.height *= 2 - src_img = cv2.resize(src_img, dimensions.as_tuple(), interpolation = cv2.INTER_CUBIC) - gray_img = cv2.cvtColor(src_img,cv2.COLOR_BGR2GRAY) + gray_img = cv2.resize(src_img, dimensions.as_tuple(), interpolation = cv2.INTER_CUBIC) + # gray_img = cv2.cvtColor(src_img,cv2.COLOR_BGR2GRAY) median_img = cv2.medianBlur(gray_img, 5) edges = cv2.Canny(median_img, threshold1 = 60, threshold2 = 90, apertureSize = 3, L2gradient = True) @@ -221,3 +224,57 @@ def equalize(image: ImageType) -> ImageType: image_eq = cv2.merge((b_image_eq, g_image_eq, r_image_eq)) return image_eq + +def merge_text_detections(bounding_boxes) -> Tuple[str, float, int, int]: + + pattern = re.compile('[^a-zA-Z0-9]+') + tallest_box = None + tallest_box_height = 0 + large_boxes = [] + confidence_sum = 0 + count = 0 + large_boxes_count = 0 + avg_char_width = 0 + avg_char_height = 0 + + # Find the tallest bounding box + for box, (text, confidence) in bounding_boxes: + box_height = int(box[3][1] - box[0][1]) + + if text and (tallest_box is None or box_height > tallest_box_height): + tallest_box = box + tallest_box_text = text + tallest_box_height = box_height + + box_width = int(tallest_box[2][0] - tallest_box[3][0]) + avg_char_width = int(box_width / len(text)) + avg_char_height = box_height + + # Find large boxes and calculate the average confidence. 
A large box is anything
+    # that is at least 80% the height of the tallest box
+    for box, (_, confidence) in bounding_boxes:
+        box_height = int(box[3][1] - box[0][1])
+
+        if box_height >= tallest_box_height * 0.8:
+            large_boxes.append(box)
+            confidence_sum += confidence
+            count += 1
+
+    average_confidence = confidence_sum / count if count > 0 else 0
+
+    # Merge all text from large boxes
+    merged_text = ''
+    for box, (text, _) in bounding_boxes:
+        if box in large_boxes:
+            large_boxes_count += 1
+            if count > 1 and large_boxes_count < count:
+                text = text + ' '
+            merged_text += text
+
+    merged_text = pattern.sub(' ', merged_text)
+
+    if debug_log:
+        with open("logbox.txt", "a") as text_file:
+            text_file.write(f"Avg char (h, w): {avg_char_height} x {avg_char_width} - {tallest_box_text}\n\n")
+
+    return merged_text, average_confidence, avg_char_height, avg_char_width
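The merge logic above is easier to verify with a concrete call. A minimal sketch (the detection values are invented; the input follows PaddleOCR's result shape of (box, (text, confidence)) pairs, with box listing the four corner points from top-left clockwise):

    # Two tall boxes holding the plate characters, plus one short label box
    detections = [
        ([[0,   0], [120,  0], [120, 40], [0,   40]], ("ABC",   0.98)),
        ([[130, 2], [220,  2], [220, 38], [130, 38]], ("123",   0.95)),
        ([[0,  50], [60,  50], [60,  62], [0,   62]], ("texas", 0.70)),  # under 80% of tallest height
    ]

    text, avg_conf, char_h, char_w = merge_text_detections(detections)
    # text     == "ABC 123"        -- only the two large boxes are merged
    # avg_conf == 0.965            -- confidence averaged over the large boxes only
    # char_h   == 40, char_w == 40 -- from the tallest box (height 40, width 120 / 3 chars)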
diff --git a/src/modules/BackgroundRemover/install.bat b/src/modules/BackgroundRemover/install.bat
index f909fc54..333d7969 100644
--- a/src/modules/BackgroundRemover/install.bat
+++ b/src/modules/BackgroundRemover/install.bat
@@ -43,6 +43,7 @@ if errorlevel 1 exit /b 1
:: moduleDir - the name of the directory containing this module
:: modulePath - the path to this module (%modulesPath%\%moduleDir%)
:: platform - "windows" for this script
+:: systemName - "Windows"
:: verbosity - quiet, info or loud. Use this to determines the noise level of output.
:: forceOverwrite - if true then ensure you force a re-download and re-copy of downloads.
::                  GetFromServer will honour this value. Do it yourself for DownloadAndExtract

diff --git a/src/modules/BackgroundRemover/install.sh b/src/modules/BackgroundRemover/install.sh
index ca7fad14..408569fe 100644
--- a/src/modules/BackgroundRemover/install.sh
+++ b/src/modules/BackgroundRemover/install.sh
@@ -39,7 +39,7 @@ if [ $? -ne 0 ]; then quit 1; fi
# Variables available:
#
# absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI)
-# sdkScriptsPath - the path to the installation utility scripts ($rootPath/Installers)
+# sdkScriptsPath - the path to the installation utility scripts ($rootPath/SDK/Scripts)
# downloadPath - the path to where downloads will be stored ($sdkScriptsPath/downloads)
# runtimesPath - the path to the installed runtimes ($rootPath/src/runtimes)
# modulesPath - the path to all the AI modules ($rootPath/src/modules)
@@ -48,6 +48,8 @@ if [ $? -ne 0 ]; then quit 1; fi
# os - "linux" or "macos"
# architecture - "x86_64" or "arm64"
# platform - "linux", "linux-arm64", "macos" or "macos-arm64"
+# systemName - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi"
+#              "Jetson" or "Docker"
# verbosity - quiet, info or loud. Use this to determines the noise level of output.
# forceOverwrite - if true then ensure you force a re-download and re-copy of downloads.
#                  getFromServer will honour this value. Do it yourself for downloadAndExtract

diff --git a/src/modules/BackgroundRemover/modulesettings.json b/src/modules/BackgroundRemover/modulesettings.json
index ec910a9e..4400383b 100644
--- a/src/modules/BackgroundRemover/modulesettings.json
+++ b/src/modules/BackgroundRemover/modulesettings.json
@@ -3,7 +3,7 @@
"BackgroundRemover": {
"Name": "Background Remover",
- "Version": "1.2",
+ "Version": "1.4",
// Publishing info
"Description": "Automatically removes the background from a picture",
@@ -12,10 +12,12 @@
"LicenseUrl": "https://www.mongodb.com/licensing/server-side-public-license",
// Which server version is compatible with each version of this module.
- "VersionCompatibililty": [
- { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-11-01" },
+ "ModuleReleases": [
+ { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-11-01" },
{ "ModuleVersion": "1.1", "ServerVersionRange": [ "1.6.9", "2.0.8" ], "ReleaseDate": "2022-11-01" },
- { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-20" }
+ { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1.0", "2.1.6" ], "ReleaseDate": "2023-03-20", "ReleaseNotes": "Updated for CodeProject.AI Server 2.1" },
+ { "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1.0", "2.1.6" ], "ReleaseDate": "2023-04-20", "ReleaseNotes": "Install improved for GPU enabled systems" },
+ { "ModuleVersion": "1.4", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-08-05", "ReleaseNotes": "Bugs in error reporting corrected", "Importance": "Minor" }
],
// Launch instructions
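Taken together, these modulesettings.json diffs replace the old (misspelled) "VersionCompatibililty" array with "ModuleReleases", adding optional "ReleaseNotes" and "Importance" fields. Judging from the entries, an empty upper bound in "ServerVersionRange" appears to mean "no upper limit". A composite sketch with placeholder values:

    "ModuleReleases": [
        { "ModuleVersion": "1.0", "ServerVersionRange": [ "2.1.0", "2.1.6" ], "ReleaseDate": "2023-01-01" },
        { "ModuleVersion": "1.1", "ServerVersionRange": [ "2.1.7", "" ], "ReleaseDate": "2023-06-01",
          "ReleaseNotes": "What changed and why", "Importance": "Minor" }
    ]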
diff --git a/src/modules/BackgroundRemover/rembg_adapter.py b/src/modules/BackgroundRemover/rembg_adapter.py
index 2016766c..518cff39 100644
--- a/src/modules/BackgroundRemover/rembg_adapter.py
+++ b/src/modules/BackgroundRemover/rembg_adapter.py
@@ -21,12 +21,14 @@ def remove(data: Union[PILImage # the image
import sys
import time
-# Import the CodeProject.AI SDK. This will add to the PATH var for future imports
+# Import the CodeProject.AI SDK. This will add to the PATH var for
+# future imports
sys.path.append("../../SDK/Python")
from request_data import RequestData
from module_runner import ModuleRunner
from common import JSON
+# Import packages we've installed into our VENV
from PIL import Image
# Import the method of the module we're wrapping
@@ -35,22 +37,26 @@ def remove(data: Union[PILImage # the image
class rembg_adapter(ModuleRunner):
    def initialise(self) -> None:
+        """ Initialises the module """
        if self.support_GPU:
            if self.hasONNXRuntimeGPU:
                self.execution_provider = "ONNX"
    def process(self, data: RequestData) -> JSON:
+        """ Processes a request from the client and returns the results """
        try:
            img: Image = data.get_image(0)
            use_alphamatting: bool = data.get_value("use_alphamatting", "false") == "true"
+            # Make the call to the AI code we're wrapping, and time it
            start_time = time.perf_counter()
            (processed_img, inferenceTime) = remove(img, use_alphamatting)
+            processMs = int((time.perf_counter() - start_time) * 1000)
            return {
-                "success": True,
-                "imageBase64": data.encode_image(processed_img),
-                "processMs" : int((time.perf_counter() - start_time) * 1000),
+                "success": True,
+                "imageBase64": RequestData.encode_image(processed_img),
+                "processMs" : processMs,
                "inferenceMs" : inferenceTime
            }

diff --git a/src/modules/BackgroundRemover/requirements.linux.txt b/src/modules/BackgroundRemover/requirements.linux.txt
index 66430264..d5ab3e9c 100644
--- a/src/modules/BackgroundRemover/requirements.linux.txt
+++ b/src/modules/BackgroundRemover/requirements.linux.txt
@@ -12,7 +12,7 @@ filetype # Installing FileType, a package to infer file types and MIME types
gdown # Installing gdown, a package for downloading from Google Drive folders
numpy # Installing NumPy, a package for scientific computing
pymatting # Installing pymatting, a Python Library for Alpha Matting
-Pillow # Installing Pillow, a Python Image Library
+Pillow<10.0.0 # Installing Pillow, a Python Image Library
flatbuffers # Installing FlatBuffers serialization format support

diff --git a/src/modules/BackgroundRemover/requirements.macos.arm64.txt b/src/modules/BackgroundRemover/requirements.macos.arm64.txt
index 9d7b1ad5..43b6ee41 100644
--- a/src/modules/BackgroundRemover/requirements.macos.arm64.txt
+++ b/src/modules/BackgroundRemover/requirements.macos.arm64.txt
@@ -7,7 +7,7 @@ gdown # Installing gdown, a package for downloading from Google Drive folde
numpy==1.23.5 # Installing NumPy, a package for scientific computing
pymatting # Installing pymatting, a Python Library for Alpha Matting
-Pillow # Installing Pillow, a Python Image Library
+Pillow<10.0.0 # Installing Pillow, a Python Image Library
flatbuffers # Installing FlatBuffers serialization format support
onnxruntime # Installing the ONNX runtime.
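A note on the Pillow<10.0.0 pin that recurs through these requirements files: the diffs never state the reason, but the pin almost certainly guards against Pillow 10.0's removal of long-deprecated resampling aliases, which several of the wrapped CV libraries still reference. A sketch of the failure mode (standard Pillow API only):

    from PIL import Image

    img = Image.new("RGB", (640, 480))

    # Pillow < 10.0: Image.ANTIALIAS is an alias for the Lanczos filter.
    # Pillow >= 10.0: the alias is removed, so dependencies still using the
    # old name die with an AttributeError at runtime.
    thumb = img.resize((320, 240), Image.ANTIALIAS)

    # The replacement spelling, available since Pillow 9.1:
    thumb = img.resize((320, 240), Image.Resampling.LANCZOS)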
diff --git a/src/modules/BackgroundRemover/requirements.macos.txt b/src/modules/BackgroundRemover/requirements.macos.txt index 9e41303b..f8528d04 100644 --- a/src/modules/BackgroundRemover/requirements.macos.txt +++ b/src/modules/BackgroundRemover/requirements.macos.txt @@ -5,7 +5,7 @@ gdown # Installing gdown, a package for downloading from Google Drive folde # We need this numpy version due to a "initialization of _internal failed without raising an exception" issue numpy==1.23.1 # Installing NumPy, a package for scientific computing pymatting # Installing pymatting, a Python Library for Alpha Matting -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library onnxruntime # Installing the ONNX runtime diff --git a/src/modules/BackgroundRemover/requirements.txt b/src/modules/BackgroundRemover/requirements.txt index f1cd8f3e..93c8a325 100644 --- a/src/modules/BackgroundRemover/requirements.txt +++ b/src/modules/BackgroundRemover/requirements.txt @@ -3,7 +3,7 @@ ## Using the latest filetype # Installing FileType, a package to infer file types and MIME types gdown # Installing gdown, a package for downloading from Google Drive folders -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library ## IF there is an error regarding numba needing a higher version of numpy then use this numpy==1.22.4 # Installing NumPy, a package for scientific computing diff --git a/src/modules/BackgroundRemover/requirements.windows.gpu.txt b/src/modules/BackgroundRemover/requirements.windows.gpu.txt index 93f0b2ee..5bd22d83 100644 --- a/src/modules/BackgroundRemover/requirements.windows.gpu.txt +++ b/src/modules/BackgroundRemover/requirements.windows.gpu.txt @@ -1,28 +1,27 @@ #! Python3.9 +## Using the latest filetype # Installing FileType, a package to infer file types and MIME types gdown # Installing gdown, a package for downloading from Google Drive folders -Pillow # Installing Pillow, a Python Image Library - -## If there is an error regarding numba needing a higher version of numpy then use this -# numpy==1.22.4 # Installing NumPy, a package for scientific computing -# pymatting==1.1.8 # Installing pymatting, a Python Library for Alpha Matting - -numpy # Installing NumPy, a package for scientific computing -pymatting # Installing pymatting, a Python Library for Alpha Matting - - -## Using CPU version. GPU hasn't proven to be stable -onnxruntime # Installing the ONNX runtime - -## General version for GPU. -# onnxruntime-gpu # Installing the ONNX runtime with GPU support - -## Specific version. May not actually work -## onnxruntime-gpu==1.10.0 # Installing the ONNX runtime with GPU support - -## For nightly test build -## -i https://test.pypi.org/simple/ -## ort-nightly-gpu - -# last line blank \ No newline at end of file +Pillow<10.0.0 # Installing Pillow, a Python Image Library + +onnxruntime # Installing the ONNX runtime + +## IF there is an error regarding numba needing a higher version of numpy then use this +numpy # Installing NumPy, a package for scientific computing +pymatting # Installing pymatting, a Python Library for Alpha Matting + +## Not needed for our usage +## numpy # Installing NumPy, a package for scientific computing +## pymatting # Installing pymatting, a Python Library for Alpha Matting +## watchdog==2.1.7 # Installing watchdog, a set of utilities to monitor file system events +## asyncer==0.0.1 # Installing Asyncer, a library to make our networking/concurrency calls easier. 
+## pillow==9.0.1 # Pillow, the image library +## uvicorn==0.17.0 # Uvicorn, an ASGI web server implementation for Python +## aiohttp==3.8.1 # AIOHTTP, an Asynchronous HTTP Client/Server +## click==8.0.3 # Click, the Command Line Interface Creation Kit +## fastapi==0.72.0 # FastAPI, a web framework for building APIs +## python-multipart==0.0.5 +## scikit-image==0.19.1 +## scipy==1.8.0 +## tqdm==4.62.3 diff --git a/src/modules/Cartooniser/cartooniser.py b/src/modules/Cartooniser/cartooniser.py index 9170c68e..2fcfbd50 100644 --- a/src/modules/Cartooniser/cartooniser.py +++ b/src/modules/Cartooniser/cartooniser.py @@ -37,7 +37,7 @@ def get_model(weights_dir, model_name, device_type="cpu"): # Store path and loaded model for later models[model_name] = (model_path, loaded_model) else: - print(f"Using cached model {model_name}") + print(f"Debug: Using cached model {model_name}") return loaded_model diff --git a/src/modules/Cartooniser/cartooniser_adapter.py b/src/modules/Cartooniser/cartooniser_adapter.py index e4c09d1e..3ce992b3 100644 --- a/src/modules/Cartooniser/cartooniser_adapter.py +++ b/src/modules/Cartooniser/cartooniser_adapter.py @@ -7,6 +7,7 @@ from request_data import RequestData from module_runner import ModuleRunner from common import JSON +from threading import Lock # Import packages we've installed into our VENV from PIL import Image @@ -21,7 +22,6 @@ class cartooniser_adapter(ModuleRunner): def __init__(self): super().__init__() self.opts = Options() - async def initialise(self) -> None: # GPU support not fully working in Linux # if self.opts.use_gpu and not self.hasTorchCuda: @@ -41,12 +41,13 @@ async def process(self, data: RequestData) -> JSON: start_time = time.perf_counter() (cartoon, inferenceMs) = inference(img, self.opts.weights_dir, - model_name, device_type) + model_name, device_type) + processMs = int((time.perf_counter() - start_time) * 1000) return { "success": True, - "imageBase64": data.encode_image(cartoon), + "imageBase64": RequestData.encode_image(cartoon), "processMs": processMs, "inferenceMs": inferenceMs } diff --git a/src/modules/Cartooniser/install.bat b/src/modules/Cartooniser/install.bat index 5f8718e7..8d3e6e84 100644 --- a/src/modules/Cartooniser/install.bat +++ b/src/modules/Cartooniser/install.bat @@ -37,6 +37,7 @@ if errorlevel 1 exit /b 1 :: moduleDir - the name of the directory containing this module :: modulePath - the path to this module (%modulesPath%\%moduleDir%) :: platform - "windows" for this script +:: systemName - "Windows" :: verbosity - quiet, info or loud. Use this to determines the noise level of output. :: forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. :: GetFromServer will honour this value. Do it yourself for DownloadAndExtract diff --git a/src/modules/Cartooniser/install.sh b/src/modules/Cartooniser/install.sh index e90d2e12..6fa6d950 100644 --- a/src/modules/Cartooniser/install.sh +++ b/src/modules/Cartooniser/install.sh @@ -33,7 +33,7 @@ if [ $? 
-ne 0 ]; then quit 1; fi # Variables available: # # absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI) -# sdkScriptsPath - the path to the installation utility scripts ($rootPath/Installers) +# sdkScriptsPath - the path to the installation utility scripts ($rootPath/SDK/Scripts) # downloadPath - the path to where downloads will be stored ($sdkScriptsPath/downloads) # runtimesPath - the path to the installed runtimes ($rootPath/src/runtimes) # modulesPath - the path to all the AI modules ($rootPath/src/modules) @@ -42,6 +42,8 @@ if [ $? -ne 0 ]; then quit 1; fi # os - "linux" or "macos" # architecture - "x86_64" or "arm64" # platform - "linux", "linux-arm64", "macos" or "macos-arm64" +# systemName - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi" +# "Jetson" or "Docker" # verbosity - quiet, info or loud. Use this to determines the noise level of output. # forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. # getFromServer will honour this value. Do it yourself for downloadAndExtract diff --git a/src/modules/Cartooniser/modulesettings.json b/src/modules/Cartooniser/modulesettings.json index 7193eea5..8e0e5d5b 100644 --- a/src/modules/Cartooniser/modulesettings.json +++ b/src/modules/Cartooniser/modulesettings.json @@ -3,7 +3,7 @@ "Cartooniser": { "Name": "Cartooniser", - "Version": "1.0", + "Version": "1.1", // Publishing info "Description": "Convert a photo into an anime style cartoon", @@ -12,9 +12,10 @@ "LicenseUrl": "https://github.com/bryandlee/animegan2-pytorch/blob/main/LICENSE", // Which server version is compatible with each version of this module. - "VersionCompatibililty": [ + "ModuleReleases": [ { "ModuleVersion": "1.0", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-28" }, - ], + { "ModuleVersion": "1.1", "ServerVersionRange": [ "2.1.7", "" ], "ReleaseDate": "2023-04-29", "ReleaseNotes": "Updated module settings", "Importance": "Minor" } + ], // Launch instructions "AutoStart": true, diff --git a/src/modules/Cartooniser/requirements.gpu.txt b/src/modules/Cartooniser/requirements.gpu.txt index 46a6f892..412adeb4 100644 --- a/src/modules/Cartooniser/requirements.gpu.txt +++ b/src/modules/Cartooniser/requirements.gpu.txt @@ -2,5 +2,5 @@ torch # Installing PyTorch, for Tensor computation and Deep neural networks torchvision # Installing TorchVision, for Computer Vision based AI -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library diff --git a/src/modules/Cartooniser/requirements.txt b/src/modules/Cartooniser/requirements.txt index c06be2ca..28f5d8df 100644 --- a/src/modules/Cartooniser/requirements.txt +++ b/src/modules/Cartooniser/requirements.txt @@ -2,4 +2,4 @@ torch >= 1.7.1 # Installing PyTorch, for Tensor computation and Deep neural networks torchvision # Installing TorchVision, for Computer Vision based AI -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library diff --git a/src/modules/FaceProcessing/install.bat b/src/modules/FaceProcessing/install.bat index 029b5fda..f39e5035 100644 --- a/src/modules/FaceProcessing/install.bat +++ b/src/modules/FaceProcessing/install.bat @@ -43,6 +43,7 @@ if not exist "%modulePath%\datastore\" mkdir "%modulePath%\datastore" :: os - "windows" :: architecture - "x86_64" or "arm64" :: platform - "windows" or "windows-arm64" +:: systemName - "Windows" :: verbosity - quiet, info or loud. Use this to determines the noise level of output. 
:: forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. :: GetFromServer will honour this value. Do it yourself for DownloadAndExtract diff --git a/src/modules/FaceProcessing/install.sh b/src/modules/FaceProcessing/install.sh index 91b602a6..408bf12a 100644 --- a/src/modules/FaceProcessing/install.sh +++ b/src/modules/FaceProcessing/install.sh @@ -61,18 +61,18 @@ if [ ! -d "${commonDataDir}" ]; then fi # ... also needs SQLite -if [ "$hardware" == "RaspberryPi" ]; then +if [ "${systemName}" == "Raspberry Pi" ] || [ "${systemName}" == "Orange Pi" ] || \ + [ "${systemName}" == "Jetson" ]; then sudo apt-get install sqlite3 fi - # -- Install script cheatsheet -- # # Variables available: # # absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI) -# sdkScriptsPath - the path to the installation utility scripts ($rootPath/Installers) +# sdkScriptsPath - the path to the installation utility scripts ($rootPath/SDK/Scripts) # downloadPath - the path to where downloads will be stored ($sdkScriptsPath/downloads) # runtimesPath - the path to the installed runtimes ($rootPath/src/runtimes) # modulesPath - the path to all the AI modules ($rootPath/src/modules) @@ -81,6 +81,8 @@ fi # os - "linux" or "macos" # architecture - "x86_64" or "arm64" # platform - "linux", "linux-arm64", "macos" or "macos-arm64" +# systemName - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi" +# "Jetson" or "Docker" # verbosity - quiet, info or loud. Use this to determines the noise level of output. # forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. # getFromServer will honour this value. Do it yourself for downloadAndExtract diff --git a/src/modules/FaceProcessing/intelligencelayer/face.py b/src/modules/FaceProcessing/intelligencelayer/face.py index e98e2084..8d5b5adf 100644 --- a/src/modules/FaceProcessing/intelligencelayer/face.py +++ b/src/modules/FaceProcessing/intelligencelayer/face.py @@ -58,7 +58,7 @@ def __init__(self): self.facemap = {} self.models_lock = Lock() - self.face_lock = Lock() + self.face_lock = Lock() # Will be lazy initialised self.faceclassifier = None @@ -100,7 +100,9 @@ def initialise(self) -> None: self.processor_type = "GPU" self.execution_provider = "MPS" - self.init_models(); + if SharedOptions.USE_CUDA and self.half_precision == 'enable' and not self.hasTorchHalfPrecision: + self.half_precision = 'disable' + self.init_db() self.load_faces() @@ -150,7 +152,7 @@ def process(self, data: RequestData) -> JSON: def init_models(self, re_entered: bool = False) -> None: if self.faceclassifier is not None and self.detector is not None: - return + return True try: with self.models_lock: @@ -166,8 +168,11 @@ def init_models(self, re_entered: bool = False) -> None: self.detector = YOLODetector(model_path, self.resolution, cuda=SharedOptions.USE_CUDA, mps=SharedOptions.USE_MPS, - half_precision=SharedOptions.HALF_PRECISION) - + half_precision=self.half_precision) + + if self.faceclassifier is not None and self.detector is not None: + return True + except Exception as ex: if not re_entered and SharedOptions.USE_CUDA and str(ex).startswith('CUDA out of memory'): @@ -184,9 +189,10 @@ def init_models(self, re_entered: bool = False) -> None: "loglevel": "information", }) - self.init_models(re_entered = True) + return self.init_models(re_entered = True) else: self.report_error(ex, __file__) + return False # make sure the sqlLite database exists @@ -265,13 +271,25 @@ def update_faces(self, delay: int) -> 
None: def detect_face(self, data: RequestData) -> JSON: - + + if not self.init_models(): + return { + "success": False, + "predictions": [], + "count": 0, + "message": "Unable to load the face detector", + "error": "Unable to load the face detector", + "inferenceMs": 0 + } + try: threshold: float = float(data.get_value("min_confidence", "0.67")) img: Image = data.get_image(0) start_time = time.perf_counter() - det = self.detector.predictFromImage(img, threshold) + + det = self.detector.predictFromImage(img, threshold) + inferenceMs = int((time.perf_counter() - start_time) * 1000) outputs = [] @@ -313,7 +331,7 @@ def detect_face(self, data: RequestData) -> JSON: trace = "".join(traceback.TracebackException.from_exception(ex).format()) output = { "success": False, - "error": "An Error occured during processing", + "error": "An Error occurred during processing", "err_trace": trace } @@ -322,6 +340,14 @@ def detect_face(self, data: RequestData) -> JSON: def register_face(self, data: RequestData) -> Tuple[JSON, int]: + if not self.init_models(): + return { + "success": False, + "message": "Unable to load the face detector", + "error": "Unable to load the face detector", + "inferenceMs": 0 + } + try: user_id = data.get_value("userid") @@ -335,7 +361,9 @@ def register_face(self, data: RequestData) -> Tuple[JSON, int]: pil_image = data.get_image(i) start_time = time.perf_counter() - det = self.detector.predictFromImage(pil_image, 0.55) + + det = self.detector.predictFromImage(pil_image, 0.55) + inferenceMs += int((time.perf_counter() - start_time) * 1000) new_img = None @@ -361,7 +389,9 @@ def register_face(self, data: RequestData) -> Tuple[JSON, int]: if batch is not None: start_time = time.perf_counter() + img_embeddings = self.faceclassifier.predict(batch).cpu() + inferenceMs += int((time.perf_counter() - start_time) * 1000) img_embeddings = torch.mean(img_embeddings, 0) @@ -415,7 +445,7 @@ def register_face(self, data: RequestData) -> Tuple[JSON, int]: trace = "".join(traceback.TracebackException.from_exception(ex).format()) output = { "success": False, - "error": "An Error occured during processing", + "error": "An Error occurred during processing", "err_trace": trace } @@ -444,7 +474,7 @@ def list_faces(self, data: RequestData) -> JSON: trace = "".join(traceback.TracebackException.from_exception(ex).format()) output = { "success": False, - "error": "An Error occured during processing", + "error": "An Error occurred during processing", "err_trace": trace } @@ -472,7 +502,7 @@ def delete_user_faces(self, data: RequestData) -> JSON: trace = "".join(traceback.TracebackException.from_exception(ex).format()) output = { "success": False, - "error": "An Error occured during processing", + "error": "An Error occurred during processing", "err_trace": trace } @@ -481,6 +511,16 @@ def delete_user_faces(self, data: RequestData) -> JSON: def recognise_face(self, data: RequestData) -> JSON: + if not self.init_models(): + return { + "success": False, + "predictions": [], + "count": 0, + "message": "Unable to load the face detector", + "error": "Unable to load the face detector", + "inferenceMs": 0 + } + try: threshold = float(data.get_value("min_confidence", "0.67")) pil_image = data.get_image(0) @@ -497,7 +537,9 @@ def recognise_face(self, data: RequestData) -> JSON: face_tensors = face_tensors.cuda() start_time = time.perf_counter() - det = self.detector.predictFromImage(pil_image, threshold) + + det = self.detector.predictFromImage(pil_image, threshold) + inferenceMs = int((time.perf_counter() - 
start_time) * 1000) faces = [[]] @@ -523,9 +565,13 @@ def recognise_face(self, data: RequestData) -> JSON: detections.append((x_min, y_min, x_max, y_max)) - if found_face == False: + if not found_face: - output = {"success": False, "error": "No face found in image", "inferenceMs": inferenceMs} + output = { + "success": False, + "error": "No face found in image", + "inferenceMs": inferenceMs + } elif len(facemap) == 0: @@ -571,7 +617,10 @@ def recognise_face(self, data: RequestData) -> JSON: for face_list in faces: start_time = time.perf_counter() + + embedding = self.faceclassifier.predict(torch.cat(face_list)) + inferenceMs += int((time.perf_counter() - start_time) * 1000) embeddings.append(embedding) @@ -638,7 +687,7 @@ def recognise_face(self, data: RequestData) -> JSON: trace = "".join(traceback.TracebackException.from_exception(ex).format()) output = { "success": False, - "error": "An Error occured during processing", + "error": "An Error occurred during processing", "err_trace": trace } @@ -647,13 +696,23 @@ def recognise_face(self, data: RequestData) -> JSON: def match_faces(self, data: RequestData) -> JSON: + if not self.init_models(): + return { + "success": False, + "message": "Unable to load the face detector", + "error": "Unable to load the face detector", + "inferenceMs": 0 + } + try: image1 = data.get_image(0) image2 = data.get_image(1) start_time = time.perf_counter() - det1 = self.detector.predictFromImage(image1, 0.8) - det2 = self.detector.predictFromImage(image2, 0.8) + + det1 = self.detector.predictFromImage(image1, 0.8) + det2 = self.detector.predictFromImage(image2, 0.8) + inferenceMs = int((time.perf_counter() - start_time) * 1000) if len(det1) > 0 and len(det2) > 0: @@ -687,7 +746,9 @@ def match_faces(self, data: RequestData) -> JSON: faces = torch.cat([face1, face2], dim=0) start_time = time.perf_counter() + embeddings = self.faceclassifier.predict(faces) + inferenceMs += int((time.perf_counter() - start_time) * 1000) embed1 = embeddings[0, :].unsqueeze(0) @@ -706,7 +767,7 @@ def match_faces(self, data: RequestData) -> JSON: trace = "".join(traceback.TracebackException.from_exception(ex).format()) output = { "success": False, - "error": "An Error occured during processing.", + "error": "An Error occurred during processing.", "err_trace": trace } diff --git a/src/modules/FaceProcessing/intelligencelayer/process.py b/src/modules/FaceProcessing/intelligencelayer/process.py index c2bbd4de..465f695f 100644 --- a/src/modules/FaceProcessing/intelligencelayer/process.py +++ b/src/modules/FaceProcessing/intelligencelayer/process.py @@ -5,28 +5,20 @@ from models.experimental import attempt_load from PIL import Image from utils.augmentations import letterbox -from utils.general import ( - non_max_suppression, - scale_coords, -) +from utils.general import non_max_suppression, scale_coords from module_runner import ModuleRunner class YOLODetector(object): - def __init__(self, model_path: str, reso: int = 640, cuda: bool = False, mps: bool = False, - half_precision: str = 'enable'): + def __init__(self, model_path: str, reso: int = 640, cuda: bool = False, + mps: bool = False, half_precision: str = 'enable'): # Use half-precision if possible. 
There's a bunch of Nvidia cards where # this won't work if cuda: - device_type = "cuda" - self.device = torch.device(device_type) - + device_type = "cuda" + self.device = torch.device(device_type) self.device_name = torch.cuda.get_device_name() - - if half_precision == 'disable': - self.half = False - else: - self.half = half_precision == 'force' or torch.cuda.get_device_capability()[0] >= 6 + self.half = half_precision != 'disable' if self.half: print(f"Using half-precision for the device '{self.device_name}'") @@ -45,9 +37,9 @@ def __init__(self, model_path: str, reso: int = 640, cuda: bool = False, mps: bo self.device_name = "CPU" self.half = False - self.reso = (reso, reso) - self.cuda = cuda - self.mps = mps + self.reso = (reso, reso) + self.cuda = cuda + self.mps = mps self.model = attempt_load(model_path, device=self.device) self.names = ( self.model.module.names diff --git a/src/modules/FaceProcessing/intelligencelayer/requirements.linux.cuda.txt b/src/modules/FaceProcessing/intelligencelayer/requirements.linux.cuda.txt index 6fa8f602..8c130a35 100644 --- a/src/modules/FaceProcessing/intelligencelayer/requirements.linux.cuda.txt +++ b/src/modules/FaceProcessing/intelligencelayer/requirements.linux.cuda.txt @@ -3,7 +3,7 @@ Pandas # Installing Pandas, a data analysis / data manipulation tool CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library SciPy # Installing SciPy, a library for mathematics, science, and engineering PyYAML # Installing PyYAML, a library for reading configuration files diff --git a/src/modules/FaceProcessing/intelligencelayer/shared.py b/src/modules/FaceProcessing/intelligencelayer/shared.py index ad368187..7a83f1eb 100644 --- a/src/modules/FaceProcessing/intelligencelayer/shared.py +++ b/src/modules/FaceProcessing/intelligencelayer/shared.py @@ -83,7 +83,7 @@ class SharedOptions: SUPPORT_GPU = ModuleOptions.support_GPU PORT = ModuleOptions.port - print(f"Vision AI services setup: Retrieving environment variables...") + print(f"Trace: Vision AI services setup: Retrieving environment variables...") default_app_dir = os.getcwd() if default_app_dir.endswith("intelligencelayer"): @@ -97,7 +97,6 @@ class SharedOptions: USE_CUDA = ModuleOptions.getEnvVariable("USE_CUDA", "True") USE_MPS = ModuleOptions.getEnvVariable("USE_MPS", "True") - HALF_PRECISION = ModuleOptions.half_precision DATA_DIR = os.path.normpath(ModuleOptions.getEnvVariable("DATA_DIR", f"{APPDIR}/datastore")) MODELS_DIR = os.path.normpath(ModuleOptions.getEnvVariable("MODELS_DIR", f"{APPDIR}/assets")) @@ -130,9 +129,9 @@ class SharedOptions: # dump the important variables if showEnvVariables: - print(f"APPDIR: {APPDIR}") - print(f"PROFILE: {PROFILE}") - print(f"USE_CUDA: {USE_CUDA}") - print(f"DATA_DIR: {DATA_DIR}") - print(f"MODELS_DIR: {MODELS_DIR}") - print(f"MODE: {MODE}") \ No newline at end of file + print(f"debug: APPDIR: {APPDIR}") + print(f"debug: PROFILE: {PROFILE}") + print(f"debug: USE_CUDA: {USE_CUDA}") + print(f"debug: DATA_DIR: {DATA_DIR}") + print(f"debug: MODELS_DIR: {MODELS_DIR}") + print(f"debug: MODE: {MODE}") \ No newline at end of file diff --git a/src/modules/FaceProcessing/modulesettings.json b/src/modules/FaceProcessing/modulesettings.json index 7a117581..34997efd 100644 --- a/src/modules/FaceProcessing/modulesettings.json +++ 
b/src/modules/FaceProcessing/modulesettings.json @@ -3,7 +3,7 @@ "FaceProcessing": { "Name": "Face Processing", - "Version": "1.2", + "Version": "1.5", // Publishing info "Description": "A number of Face image APIs including detect, recognize, and compare.", @@ -12,9 +12,12 @@ "LicenseUrl": "https://opensource.org/licenses/GPL-3.0", // Which server version is compatible with each version of this module. - "VersionCompatibililty": [ + "ModuleReleases": [ { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-03-01" }, - { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-20" } + { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-20" }, + { "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-05-17" }, + { "ModuleVersion": "1.4", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-08-05", "ReleaseNotes": "Bugs in error reporting corrected", "Importance": "Minor" }, + { "ModuleVersion": "1.5", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-08-12", "ReleaseNotes": "PyTorch version downgrade" } ], // Launch instructions diff --git a/src/modules/FaceProcessing/requirements.linux.arm64.txt b/src/modules/FaceProcessing/requirements.linux.arm64.txt index 50624123..e4515485 100644 --- a/src/modules/FaceProcessing/requirements.linux.arm64.txt +++ b/src/modules/FaceProcessing/requirements.linux.arm64.txt @@ -1,24 +1,23 @@ #! Python3.7 -Pandas # Installing Pandas, a data analysis / data manipulation tool -CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models -OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library -SciPy # Installing SciPy, a library for mathematics, science, and engineering -PyYAML # Installing PyYAML, a library for reading configuration files +Pandas # Installing Pandas, a data analysis / data manipulation tool +CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models +OpenCV-Python # Installing OpenCV, the Open source Computer Vision library +Pillow<10.0.0 # Installing Pillow, a Python Image Library +SciPy # Installing SciPy, a library for mathematics, science, and engineering +PyYAML # Installing PyYAML, a library for reading configuration files # Specific versions that match the models we're using. This requires <= Python 3.9. Any # version higher can use Python 3.10 -# Torch==1.10.2 # Installing Torch, for Tensor computation and Deep neural networks -# TorchVision==0.11.3 # Installing TorchVision, for Computer Vision based AI +# Torch==1.10.2 # Installing Torch, for Tensor computation and Deep neural networks +# TorchVision==0.11.3 # Installing TorchVision, for Computer Vision based AI -# This is annoying. +# Annoyingly, we need to drop down a version. # https://discuss.pytorch.org/t/failed-to-load-image-python-extension-could-not-find-module/140278/15 torch==1.9.0 # Installing Torch, for Tensor computation and Deep neural networks torchvision==0.10.0 # Installing TorchVision, for Computer Vision based AI -## These to be removed (not needed for inference) -# matlabplotlib +# We need this, but we don't need this. 
Seaborn # Installing Seaborn, a data visualization library based on matplotlib # last line left blank \ No newline at end of file diff --git a/src/modules/FaceProcessing/requirements.linux.cuda.txt b/src/modules/FaceProcessing/requirements.linux.cuda.txt index 6dee88e0..f3052285 100644 --- a/src/modules/FaceProcessing/requirements.linux.cuda.txt +++ b/src/modules/FaceProcessing/requirements.linux.cuda.txt @@ -1,21 +1,19 @@ #! Python3.7 -Pandas # Installing Pandas, a data analysis / data manipulation tool -CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models -OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library -SciPy # Installing SciPy, a library for mathematics, science, and engineering -PyYAML # Installing PyYAML, a library for reading configuration files +Pandas # Installing Pandas, a data analysis / data manipulation tool +CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models +OpenCV-Python # Installing OpenCV, the Open source Computer Vision library +Pillow<10.0.0 # Installing Pillow, a Python Image Library +SciPy # Installing SciPy, a library for mathematics, science, and engineering +PyYAML # Installing PyYAML, a library for reading configuration files -# the Ultralytics Yolov5 package -yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images - -# This will automatically grab the CUDA enabled Torch if possible ---extra-index-url https://download.pytorch.org/whl/cu116 -torch # Installing PyTorch, an open source machine learning framework -torchvision # Installing TorchVision, for working with computer vision models +## For CUDA 11.7 (NOT torch 2.0+) +--find-links https://download.pytorch.org/whl/torch_stable.html +torch==1.13.0+cu117 # Installing PyTorch, an open source machine learning framework +--find-links https://download.pytorch.org/whl/torch_stable.html +torchvision==0.14.0+cu117 # Installing TorchVision, for working with computer vision models # We need this, but we don't need this. -Seaborn # Installing Seaborn, a data visualization library based on matplotlib +Seaborn # Installing Seaborn, a data visualization library based on matplotlib # last line empty. \ No newline at end of file diff --git a/src/modules/FaceProcessing/requirements.linux.txt b/src/modules/FaceProcessing/requirements.linux.txt index 3a794a27..5519b63e 100644 --- a/src/modules/FaceProcessing/requirements.linux.txt +++ b/src/modules/FaceProcessing/requirements.linux.txt @@ -1,20 +1,29 @@ #! Python3.7 -Pandas # Installing Pandas, a data analysis / data manipulation tool -CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models -OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library -SciPy # Installing SciPy, a library for mathematics, science, and engineering -PyYAML # Installing PyYAML, a library for reading configuration files +Pandas # Installing Pandas, a data analysis / data manipulation tool +CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models +OpenCV-Python # Installing OpenCV, the Open source Computer Vision library +Pillow<10.0.0 # Installing Pillow, a Python Image Library +SciPy # Installing SciPy, a library for mathematics, science, and engineering +PyYAML # Installing PyYAML, a library for reading configuration files -## Specific versions that match the models we're using. 
+# PyTorch-DirectML not working for this module +# torch-directml # Installing the PyTorch DirectML plugin + +# Specific versions that match the models we're using. Size is ~830Mb +# ** Don't do this if we're installing Torch-DirectML ** --extra-index-url https://download.pytorch.org/whl/cpu -Torch==1.10.2+cpu # Installing Torch, for Tensor computation and Deep neural networks +Torch==1.10.2+cpu # Installing Torch, for Tensor computation and Deep neural networks --extra-index-url https://download.pytorch.org/whl/cpu -TorchVision==0.11.3+cpu # Installing TorchVision, for Computer Vision based AI +TorchVision==0.11.3+cpu # Installing TorchVision, for Computer Vision based AI + +# CPU specific Torch for Linux. This is Torch 2.0, though, which seems to be...troublesome. +# --index-url https://download.pytorch.org/whl/cpu +# Torch # Installing Torch, for Tensor computation and Deep neural networks +# --index-url https://download.pytorch.org/whl/cpu +# TorchVision # Installing TorchVision, for Computer Vision based AI -## These to be removed (not needed for inference) -# matlabplotlib +# We need this, but we don't need this. Seaborn # Installing Seaborn, a data visualization library based on matplotlib ## last line empty. \ No newline at end of file diff --git a/src/modules/FaceProcessing/requirements.macos.arm64.txt b/src/modules/FaceProcessing/requirements.macos.arm64.txt index 07ac6980..26ee930d 100644 --- a/src/modules/FaceProcessing/requirements.macos.arm64.txt +++ b/src/modules/FaceProcessing/requirements.macos.arm64.txt @@ -1,18 +1,15 @@ #! Python3.7 -Pandas # Installing Pandas, a data analysis / data manipulation tool -CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models -OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library -SciPy # Installing SciPy, a library for mathematics, science, and engineering -PyYAML # Installing PyYAML, a library for reading configuration files +# Looking for more info on M1 chips? https://developer.apple.com/forums/thread/695963 -## Bleeding edge versions of torch for Apple Silicon. -#--pre -#--extra-index-url https://download.pytorch.org/whl/nightly/cpu -torch # Installing PyTorch, for Tensor computation and Deep neural networks -#--pre -#--extra-index-url https://download.pytorch.org/whl/nightly/cpu -torchvision # Installing TorchVision, for Computer Vision based AI +Pandas # Installing Pandas, a data analysis / data manipulation tool +CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models +OpenCV-Python # Installing OpenCV, the Open source Computer Vision library +Pillow<10.0.0 # Installing Pillow, a Python Image Library +SciPy # Installing SciPy, a library for mathematics, science, and engineering +PyYAML # Installing PyYAML, a library for reading configuration files + +torch # Installing PyTorch, for Tensor computation and Deep neural networks +torchvision # Installing TorchVision, for Computer Vision based AI # last line empty. \ No newline at end of file diff --git a/src/modules/FaceProcessing/requirements.macos.txt b/src/modules/FaceProcessing/requirements.macos.txt index d27888cc..886aa7ff 100644 --- a/src/modules/FaceProcessing/requirements.macos.txt +++ b/src/modules/FaceProcessing/requirements.macos.txt @@ -1,18 +1,19 @@ #! 
Python3.7 -Pandas # Installing Pandas, a data analysis / data manipulation tool -CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models -OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library -SciPy # Installing SciPy, a library for mathematics, science, and engineering -PyYAML # Installing PyYAML, a library for reading configuration files +Pandas # Installing Pandas, a data analysis / data manipulation tool +CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models +OpenCV-Python # Installing OpenCV, the Open source Computer Vision library +Pillow<10.0.0 # Installing Pillow, a Python Image Library +SciPy # Installing SciPy, a library for mathematics, science, and engineering +PyYAML # Installing PyYAML, a library for reading configuration files -## Specific versions that match the models we're using. -Torch==1.10.2 # Installing Torch, for Tensor computation and Deep neural networks -TorchVision==0.11.3 # Installing TorchVision, for Computer Vision based AI +# Torch==1.10.2 # Installing Torch, for Tensor computation and Deep neural networks +# TorchVision==0.11.3 # Installing TorchVision, for Computer Vision based AI -## These to be removed (not needed for inference) -# matlabplotlib -Seaborn # Installing Seaborn, a data visualization library based on matplotlib +Torch # Installing Torch, for Tensor computation and Deep neural networks +TorchVision # Installing TorchVision, for Computer Vision based AI + +# We need this, but we don't need this. +Seaborn # Installing Seaborn, a data visualization library based on matplotlib # last line left blank \ No newline at end of file diff --git a/src/modules/FaceProcessing/requirements.txt b/src/modules/FaceProcessing/requirements.txt index f08380dc..0336dd0b 100644 --- a/src/modules/FaceProcessing/requirements.txt +++ b/src/modules/FaceProcessing/requirements.txt @@ -1,18 +1,29 @@ #! Python3.7 -Pandas # Installing Pandas, a data analysis / data manipulation tool -CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models -OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library -SciPy # Installing SciPy, a library for mathematics, science, and engineering -PyYAML # Installing PyYAML, a library for reading configuration files +urllib3<1.27,>=1.25.4 # Installing urllib3, the HTTP client for Python ---find-links https://download.pytorch.org/whl/torch_stable.html -torch==1.10.1+cpu # Installing PyTorch, for Tensor computation and Deep neural networks ---find-links https://download.pytorch.org/whl/torch_stable.html -torchvision==0.11.2+cpu # Installing TorchVision, for Computer Vision based AI +Pandas # Installing Pandas, a data analysis / data manipulation tool +CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models +OpenCV-Python # Installing OpenCV, the Open source Computer Vision library +Pillow<10.0.0 # Installing Pillow, a Python Image Library +SciPy # Installing SciPy, a library for mathematics, science, and engineering +PyYAML # Installing PyYAML, a library for reading configuration files -## These to be removed (not needed for inference) +# PyTorch-DirectML not working for this module +# torch-directml # Installing the PyTorch DirectML plugin + +# CPU specific Torch 1.13.0. 
Size ~830Mb +--extra-index-url https://download.pytorch.org/whl/cpu +torch==1.13.0+cpu # Installing PyTorch, for Tensor computation and Deep neural networks +--extra-index-url https://download.pytorch.org/whl/cpu +torchvision==0.14.0+cpu # Installing TorchVision, for Computer Vision based AI + +# as per https://pytorch.org/get-started/locally/, this should install CPU versions +# (currently 2.0.1 on python 3.8+, 1.13.1 on Python 3.7). ~1.3GB for CPU, ~3.9GB for GPU +# torch # Installing PyTorch, for Tensor computation and Deep neural networks +# torchvision # Installing TorchVision, for Computer Vision based AI + +# We need this, but we don't need this. Seaborn # Installing Seaborn, a data visualization library based on matplotlib ## last line empty. \ No newline at end of file diff --git a/src/modules/FaceProcessing/requirements.windows.cuda.txt b/src/modules/FaceProcessing/requirements.windows.cuda.txt index 6fa8f602..dd7da47c 100644 --- a/src/modules/FaceProcessing/requirements.windows.cuda.txt +++ b/src/modules/FaceProcessing/requirements.windows.cuda.txt @@ -1,20 +1,32 @@ #! Python3.7 -Pandas # Installing Pandas, a data analysis / data manipulation tool -CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models -OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library -SciPy # Installing SciPy, a library for mathematics, science, and engineering -PyYAML # Installing PyYAML, a library for reading configuration files +Pandas # Installing Pandas, a data analysis / data manipulation tool +CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models +OpenCV-Python # Installing OpenCV, the Open source Computer Vision library +Pillow<10.0.0 # Installing Pillow, a Python Image Library +SciPy # Installing SciPy, a library for mathematics, science, and engineering +PyYAML # Installing PyYAML, a library for reading configuration files -## For CUDA: (trying v1.10 since 1.11 has an issue with UpSample Module Layer) +# https://pytorch.org/get-started/locally/ says to do this, but if you do, (a) Pandas used to get +# upset, and (b) this installs Torch 2.0 or 1.3 depending on the python version, and 2.0 has issues +# --index-url https://download.pytorch.org/whl/cu117 +# torch # Installing PyTorch, an open source machine learning framework +# --index-url https://download.pytorch.org/whl/cu117 +# torchvision # Installing TorchVision, for working with computer vision models + +## For CUDA 11.3: (trying v1.10 since 1.11 has an issue with UpSample Module Layer) +#--find-links https://download.pytorch.org/whl/torch_stable.html +#torch==1.10.2+cu113 # Installing PyTorch, an open source machine learning framework +#--find-links https://download.pytorch.org/whl/torch_stable.html +#torchvision==0.11.3+cu113 # Installing TorchVision, for working with computer vision models + +## For CUDA 11.7 --find-links https://download.pytorch.org/whl/torch_stable.html -torch==1.10.2+cu113 # Installing PyTorch, an open source machine learning framework +torch==1.13.0+cu117 # Installing PyTorch, an open source machine learning framework --find-links https://download.pytorch.org/whl/torch_stable.html -torchvision==0.11.3+cu113 # Installing TorchVision, for working with computer vision models +torchvision==0.14.0+cu117 # Installing TorchVision, for working with computer vision models -## These to be removed (not needed fro inference) -# matlabplotlib +# We need this, but we don't need this. 
Seaborn # Installing Seaborn, a data visualization library based on matplotlib ## last line empty. \ No newline at end of file diff --git a/src/modules/OCR/OCR.pyproj b/src/modules/OCR/OCR.pyproj index 6de3aa83..1dd94b7c 100644 --- a/src/modules/OCR/OCR.pyproj +++ b/src/modules/OCR/OCR.pyproj @@ -25,10 +25,9 @@ - - - - + + + @@ -89,10 +88,9 @@ - - - - + + + diff --git a/src/modules/OCR/install.bat b/src/modules/OCR/install.bat index 7997b452..de793d9b 100644 --- a/src/modules/OCR/install.bat +++ b/src/modules/OCR/install.bat @@ -25,14 +25,6 @@ rem if errorlevel 1 exit /b 1 call "%sdkScriptsPath%\utils.bat" InstallPythonPackages 3.7 "%absoluteAppRootDir%\SDK\Python" "Local" rem if errorlevel 1 exit /b 1 -:: We have a patch to apply! -call "!sdkScriptsPath!\utils.bat" WriteLine "Applying patch for PaddlePaddle" "!color_info!" -if /i "!hasCUDA!" == "true" ( - copy /Y "!modulePath!\patch\paddle2.3.2.post116\image.py" "!modulePath!\bin\%os%\python37\venv\Lib\site-packages\paddle\dataset\" -) else ( - copy /Y "!modulePath!\patch\paddle2.3.2\image.py" "!modulePath!\bin\%os%\python37\venv\Lib\site-packages\paddle\dataset\" -) - :: Download the OCR models and store in /paddleocr call "%sdkScriptsPath%\utils.bat" GetFromServer "paddleocr-models.zip" "paddleocr" "Downloading OCR models..." rem if errorlevel 1 exit /b 1 @@ -55,6 +47,7 @@ rem if errorlevel 1 exit /b 1 :: os - "windows" :: architecture - "x86_64" or "arm64" :: platform - "windows" or "windows-arm64" +:: systemName - "Windows" :: verbosity - quiet, info or loud. Use this to determines the noise level of output. :: forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. :: GetFromServer will honour this value. Do it yourself for DownloadAndExtract diff --git a/src/modules/OCR/install.sh b/src/modules/OCR/install.sh index 88db1d29..0450f31f 100644 --- a/src/modules/OCR/install.sh +++ b/src/modules/OCR/install.sh @@ -15,53 +15,22 @@ if [ "$1" != "install" ]; then fi # Work needs to be done to get Paddle to install on the Raspberry Pi -if [ "${hardware}" == "RaspberryPi" ]; then - writeLine 'Unable to install PaddleOCR on RaspberryPi. Quitting.' 'Red' +if [ "${systemName}" == "Raspberry Pi" ] || [ "${systemName}" == "Orange Pi" ] || [ "${systemName}" == "Jetson" ]; then + writeLine 'Unable to install PaddleOCR on Raspberry Pi, Orange Pi or Jetson. Quitting.' 'Red' else - message=" - *** IF YOU WISH TO USE GPU ON LINUX Please ensure you have CUDA installed *** - # The steps are: (See https://chennima.github.io/cuda-gpu-setup-for-paddle-on-windows-wsl) - - sudo apt install libgomp1 - - # Install CUDA - - sudo apt-key del 7fa2af80 - wget https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/cuda-wsl-ubuntu.pin - sudo mv cuda-wsl-ubuntu.pin /etc/apt/preferences.d/cuda-repository-pin-600 - wget https://developer.download.nvidia.com/compute/cuda/11.7.0/local_installers/cuda-repo-wsl-ubuntu-11-7-local_11.7.0-1_amd64.deb - sudo dpkg -i cuda-repo-wsl-ubuntu-11-7-local_11.7.0-1_amd64.deb - - sudo cp /var/cuda-repo-wsl-ubuntu-11-7-local/cuda-B81839D3-keyring.gpg /usr/share/keyrings/ - - sudo apt-get update - sudo apt-get -y install cuda - - # Now Install cuDNN - - sudo apt-get install zlib1g - - # => Go to https://developer.nvidia.com/cudnn, sign in / sign up, agree to terms - # and download 'Local Installer for Linux x86_64 (Tar)'. 
This will download a - # file similar to 'cudnn-linux-x86_64-8.4.1.50_cuda11.6-archive.tar.xz' - # - # In the downloads folder do: - - tar -xvf cudnn-linux-x86_64-8.4.1.50_cuda11.6-archive.tar.xz - sudo cp cudnn-*-archive/include/cudnn*.h /usr/local/cuda/include - sudo cp -P cudnn-*-archive/lib/libcudnn* /usr/local/cuda/lib64 - sudo chmod a+r /usr/local/cuda/include/cudnn*.h /usr/local/cuda/lib64/libcudnn* - - # and you'll be good to go - " - # print message - + # Ensure CUDA and cuDNN is installed. Note this is only for native linux since + # macOS no longer supports NVIDIA, WSL (Linux under Windows) uses the Windows + # drivers, and docker images already contain the necessary SDKs and libraries + if [ "$os" == "linux" ] && [ "$hasCUDA" == "true" ] && [ "${inDocker}" == "false" ]; then + correctLineEndings "${sdkScriptsPath}/install_cuDNN.sh" + source "${sdkScriptsPath}/install_cuDNN.sh" + fi # Install python and the required dependencies. - # Note that PaddlePaddle requires Python3.8 or below. Except on RPi? TODO: check 3.9 on all. - if [ ! "${hardware}" == "RaspberryPi" ]; then + # Note that PaddlePaddle requires Python3.8 or below. Except on RPi and Jetson? TODO: check 3.9 on all. + if [ "${systemName}" != "Raspberry Pi" ] && [ "${systemName}" != "Orange Pi" ] && [ "${systemName}" != "Jetson" ]; then setupPython 3.8 "Local" if [ $? -ne 0 ]; then quit 1; fi installPythonPackages 3.8 "${modulePath}" "Local" @@ -74,30 +43,17 @@ else getFromServer "paddleocr-models.zip" "paddleocr" "Downloading OCR models..." if [ $? -ne 0 ]; then quit 1; fi - # We have a patch to apply for linux. - if [ "${platform}" = "linux" ]; then - if [ "${hasCUDA}" != "true" ]; then - # writeLine 'Applying PaddlePaddle patch' - # https://www.codeproject.com/Tips/5347636/Getting-PaddleOCR-and-PaddlePaddle-to-work-in-Wind - # NOT Needed for Ubuntu 20.04 WSL under Win10 - # cp ${modulePath}/patch/paddle2.4.0rc0/image.py ${modulePath}/bin/${platform}/python38/venv/lib/python3.8/site-packages/paddle/dataset/. - - writeLine 'Applying PaddleOCR patch' - # IS needed due to a newer version of Numpy deprecating np.int - cp ${modulePath}/patch/paddleocr2.6.0.1/db_postprocess.py ${modulePath}/bin/${platform}/python38/venv/lib/python3.8/site-packages/paddleocr/ppocr/postprocess/. - fi - fi - - # We have a patch to apply for macOS-arm64 due to numpy upgrade that deprecates np.int that we can't downgrade - if [ "${os}" == "macos" ]; then + if [ "${systemName}" != "Raspberry Pi" ] && [ "${systemName}" != "Orange Pi" ] && [ "${systemName}" != "Jetson" ]; then + # We have a patch to apply for linux and macOS due to a numpy upgrade that + # deprecates np.int that we can't downgrade writeLine 'Applying PaddleOCR patch' - cp ${modulePath}/patch/paddleocr2.6.0.1/db_postprocess.py ${modulePath}/bin/${os}/python38/venv/lib/python3.8/site-packages/paddleocr/ppocr/postprocess/. + cp ${modulePath}/patch/paddleocr-2.6.0.1/db_postprocess.py ${modulePath}/bin/${os}/python38/venv/lib/python3.8/site-packages/paddleocr/ppocr/postprocess/. fi # Installing PaddlePaddle: Gotta do this the hard way for RPi. # Thanks to https://qengineering.eu/install-paddlepaddle-on-raspberry-pi-4.html # NOTE: This, so far, hasn't been working. Sorry. - if [ "${hardware}" == "RaspberryPi" ]; then + if [ "${systemName}" == "Raspberry Pi" ] || [ "${systemName}" == "Orange Pi" ] || [ "${systemName}" == "Jetson" ]; then setupPython 3.9 "Local" if [ $? 
-ne 0 ]; then quit 1; fi @@ -106,8 +62,6 @@ else installPythonPackages 3.9 "${absoluteAppRootDir}/SDK/Python" "Local" if [ $? -ne 0 ]; then quit 1; fi - popd "${modulePath}" - # a fresh start sudo apt-get update -y sudo apt-get upgrade -y @@ -152,7 +106,7 @@ fi # Variables available: # # absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI) -# sdkScriptsPath - the path to the installation utility scripts ($rootPath/Installers) +# sdkScriptsPath - the path to the installation utility scripts ($rootPath/SDK/Scripts) # downloadPath - the path to where downloads will be stored ($sdkScriptsPath/downloads) # runtimesPath - the path to the installed runtimes ($rootPath/src/runtimes) # modulesPath - the path to all the AI modules ($rootPath/src/modules) @@ -161,6 +115,7 @@ fi # os - "linux" or "macos" # architecture - "x86_64" or "arm64" # platform - "linux", "linux-arm64", "macos" or "macos-arm64" +# systemName - General name for the system. Linux, macOS, or "Raspberry Pi", Jetson or Docker # verbosity - quiet, info or loud. Use this to determines the noise level of output. # forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. # getFromServer will honour this value. Do it yourself for downloadAndExtract diff --git a/src/modules/OCR/modulesettings.json b/src/modules/OCR/modulesettings.json index 20c937bf..12aedf34 100644 --- a/src/modules/OCR/modulesettings.json +++ b/src/modules/OCR/modulesettings.json @@ -3,7 +3,7 @@ "OCR": { "Name": "Optical Character Recognition", - "Version": "1.2", + "Version": "1.4", // Publishing info "Description": "Provides OCR support using the PaddleOCR toolkit", @@ -12,9 +12,11 @@ "LicenseUrl": "http://www.apache.org/licenses/", // Which server version is compatible with each version of this module. - "VersionCompatibililty": [ + "ModuleReleases": [ { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-11-01" }, - { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-20" } + { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-20", "ReleaseNotes": "Updated for CodeProject.AI Server 2.1" }, + { "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-05-15", "ReleaseNotes": "Updated module settings", "Importance": "Minor" }, + { "ModuleVersion": "1.4", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-05-10", "ReleaseNotes": "PaddlePaddle install more reliable", "Importance": "Minor" } ], // Launch instructions diff --git a/src/modules/OCR/patch/paddle2.3.2.post116/image.py b/src/modules/OCR/patch/paddle2.3.2.post116/image.py deleted file mode 100644 index bdc254c0..00000000 --- a/src/modules/OCR/patch/paddle2.3.2.post116/image.py +++ /dev/null @@ -1,419 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -This file contains some common interfaces for image preprocess. 
-Many users are confused about the image layout. We introduce -the image layout as follows. - -- CHW Layout - - - The abbreviations: C=channel, H=Height, W=Width - - The default layout of image opened by cv2 or PIL is HWC. - PaddlePaddle only supports the CHW layout. And CHW is simply - a transpose of HWC. It must transpose the input image. - -- Color format: RGB or BGR - - OpenCV use BGR color format. PIL use RGB color format. Both - formats can be used for training. Noted that, the format should - be keep consistent between the training and inference period. -""" - -from __future__ import print_function - -import six -import numpy as np -# FIXME(minqiyang): this is an ugly fix for the numpy bug reported here -# https://github.com/numpy/numpy/issues/12497 -if six.PY3: - import subprocess - import sys - import os - interpreter = sys.executable - # Note(zhouwei): if use Python/C 'PyRun_SimpleString', 'sys.executable' - # will be the C++ executable on Windows - #if sys.platform == 'win32' and 'python.exe' not in interpreter: - # interpreter = sys.exec_prefix + os.sep + 'python.exe' - import_cv2_proc = subprocess.Popen( - [interpreter, "-c", "import cv2"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True) - out, err = import_cv2_proc.communicate() - retcode = import_cv2_proc.poll() - if retcode != 0: - cv2 = None - else: - import cv2 -else: - try: - import cv2 - except ImportError: - cv2 = None -import os -import tarfile -import six.moves.cPickle as pickle - -__all__ = [] - - -def _check_cv2(): - if cv2 is None: - import sys - sys.stderr.write( - '''Warning with paddle image module: opencv-python should be imported, - or paddle image module could NOT work; please install opencv-python first.''' - ) - return False - else: - return True - - -def batch_images_from_tar(data_file, - dataset_name, - img2label, - num_per_batch=1024): - """ - Read images from tar file and batch them into batch file. - - :param data_file: path of image tar file - :type data_file: string - :param dataset_name: 'train','test' or 'valid' - :type dataset_name: string - :param img2label: a dic with image file name as key - and image's label as value - :type img2label: dic - :param num_per_batch: image number per batch file - :type num_per_batch: int - :return: path of list file containing paths of batch file - :rtype: string - """ - batch_dir = data_file + "_batch" - out_path = "%s/%s_%s" % (batch_dir, dataset_name, os.getpid()) - meta_file = "%s/%s_%s.txt" % (batch_dir, dataset_name, os.getpid()) - - if os.path.exists(out_path): - return meta_file - else: - os.makedirs(out_path) - - tf = tarfile.open(data_file) - mems = tf.getmembers() - data = [] - labels = [] - file_id = 0 - for mem in mems: - if mem.name in img2label: - data.append(tf.extractfile(mem).read()) - labels.append(img2label[mem.name]) - if len(data) == num_per_batch: - output = {} - output['label'] = labels - output['data'] = data - pickle.dump( - output, - open('%s/batch_%d' % (out_path, file_id), 'wb'), - protocol=2) - file_id += 1 - data = [] - labels = [] - if len(data) > 0: - output = {} - output['label'] = labels - output['data'] = data - pickle.dump( - output, open('%s/batch_%d' % (out_path, file_id), 'wb'), protocol=2) - - with open(meta_file, 'a') as meta: - for file in os.listdir(out_path): - meta.write(os.path.abspath("%s/%s" % (out_path, file)) + "\n") - return meta_file - - -def load_image_bytes(bytes, is_color=True): - """ - Load an color or gray image from bytes array. - - Example usage: - - .. 
code-block:: python - - with open('cat.jpg') as f: - im = load_image_bytes(f.read()) - - :param bytes: the input image bytes array. - :type bytes: str - :param is_color: If set is_color True, it will load and - return a color image. Otherwise, it will - load and return a gray image. - :type is_color: bool - """ - assert _check_cv2() is True - - flag = 1 if is_color else 0 - file_bytes = np.asarray(bytearray(bytes), dtype=np.uint8) - img = cv2.imdecode(file_bytes, flag) - return img - - -def load_image(file, is_color=True): - """ - Load an color or gray image from the file path. - - Example usage: - - .. code-block:: python - - im = load_image('cat.jpg') - - :param file: the input image path. - :type file: string - :param is_color: If set is_color True, it will load and - return a color image. Otherwise, it will - load and return a gray image. - :type is_color: bool - """ - assert _check_cv2() is True - - # cv2.IMAGE_COLOR for OpenCV3 - # cv2.CV_LOAD_IMAGE_COLOR for older OpenCV Version - # cv2.IMAGE_GRAYSCALE for OpenCV3 - # cv2.CV_LOAD_IMAGE_GRAYSCALE for older OpenCV Version - # Here, use constant 1 and 0 - # 1: COLOR, 0: GRAYSCALE - flag = 1 if is_color else 0 - im = cv2.imread(file, flag) - return im - - -def resize_short(im, size): - """ - Resize an image so that the length of shorter edge is size. - - Example usage: - - .. code-block:: python - - im = load_image('cat.jpg') - im = resize_short(im, 256) - - :param im: the input image with HWC layout. - :type im: ndarray - :param size: the shorter edge size of image after resizing. - :type size: int - """ - assert _check_cv2() is True - - h, w = im.shape[:2] - h_new, w_new = size, size - if h > w: - h_new = size * h // w - else: - w_new = size * w // h - im = cv2.resize(im, (w_new, h_new), interpolation=cv2.INTER_CUBIC) - return im - - -def to_chw(im, order=(2, 0, 1)): - """ - Transpose the input image order. The image layout is HWC format - opened by cv2 or PIL. Transpose the input image to CHW layout - according the order (2,0,1). - - Example usage: - - .. code-block:: python - - im = load_image('cat.jpg') - im = resize_short(im, 256) - im = to_chw(im) - - :param im: the input image with HWC layout. - :type im: ndarray - :param order: the transposed order. - :type order: tuple|list - """ - assert len(im.shape) == len(order) - im = im.transpose(order) - return im - - -def center_crop(im, size, is_color=True): - """ - Crop the center of image with size. - - Example usage: - - .. code-block:: python - - im = center_crop(im, 224) - - :param im: the input image with HWC layout. - :type im: ndarray - :param size: the cropping size. - :type size: int - :param is_color: whether the image is color or not. - :type is_color: bool - """ - h, w = im.shape[:2] - h_start = (h - size) // 2 - w_start = (w - size) // 2 - h_end, w_end = h_start + size, w_start + size - if is_color: - im = im[h_start:h_end, w_start:w_end, :] - else: - im = im[h_start:h_end, w_start:w_end] - return im - - -def random_crop(im, size, is_color=True): - """ - Randomly crop input image with size. - - Example usage: - - .. code-block:: python - - im = random_crop(im, 224) - - :param im: the input image with HWC layout. - :type im: ndarray - :param size: the cropping size. - :type size: int - :param is_color: whether the image is color or not. 
- :type is_color: bool - """ - h, w = im.shape[:2] - h_start = np.random.randint(0, h - size + 1) - w_start = np.random.randint(0, w - size + 1) - h_end, w_end = h_start + size, w_start + size - if is_color: - im = im[h_start:h_end, w_start:w_end, :] - else: - im = im[h_start:h_end, w_start:w_end] - return im - - -def left_right_flip(im, is_color=True): - """ - Flip an image along the horizontal direction. - Return the flipped image. - - Example usage: - - .. code-block:: python - - im = left_right_flip(im) - - :param im: input image with HWC layout or HW layout for gray image - :type im: ndarray - :param is_color: whether input image is color or not - :type is_color: bool - """ - if len(im.shape) == 3 and is_color: - return im[:, ::-1, :] - else: - return im[:, ::-1] - - -def simple_transform(im, - resize_size, - crop_size, - is_train, - is_color=True, - mean=None): - """ - Simply data argumentation for training. These operations include - resizing, croping and flipping. - - Example usage: - - .. code-block:: python - - im = simple_transform(im, 256, 224, True) - - :param im: The input image with HWC layout. - :type im: ndarray - :param resize_size: The shorter edge length of the resized image. - :type resize_size: int - :param crop_size: The cropping size. - :type crop_size: int - :param is_train: Whether it is training or not. - :type is_train: bool - :param is_color: whether the image is color or not. - :type is_color: bool - :param mean: the mean values, which can be element-wise mean values or - mean values per channel. - :type mean: numpy array | list - """ - im = resize_short(im, resize_size) - if is_train: - im = random_crop(im, crop_size, is_color=is_color) - if np.random.randint(2) == 0: - im = left_right_flip(im, is_color) - else: - im = center_crop(im, crop_size, is_color=is_color) - if len(im.shape) == 3: - im = to_chw(im) - - im = im.astype('float32') - if mean is not None: - mean = np.array(mean, dtype=np.float32) - # mean value, may be one value per channel - if mean.ndim == 1 and is_color: - mean = mean[:, np.newaxis, np.newaxis] - elif mean.ndim == 1: - mean = mean - else: - # elementwise mean - assert len(mean.shape) == len(im) - im -= mean - - return im - - -def load_and_transform(filename, - resize_size, - crop_size, - is_train, - is_color=True, - mean=None): - """ - Load image from the input file `filename` and transform image for - data argumentation. Please refer to the `simple_transform` interface - for the transform operations. - - Example usage: - - .. code-block:: python - - im = load_and_transform('cat.jpg', 256, 224, True) - - :param filename: The file name of input image. - :type filename: string - :param resize_size: The shorter edge length of the resized image. - :type resize_size: int - :param crop_size: The cropping size. - :type crop_size: int - :param is_train: Whether it is training or not. - :type is_train: bool - :param is_color: whether the image is color or not. - :type is_color: bool - :param mean: the mean values, which can be element-wise mean values or - mean values per channel. 
- :type mean: numpy array | list - """ - im = load_image(filename, is_color) - im = simple_transform(im, resize_size, crop_size, is_train, is_color, mean) - return im diff --git a/src/modules/OCR/patch/paddleocr2.6.0.1/db_postprocess.py b/src/modules/OCR/patch/paddleocr-2.6.0.1/db_postprocess.py similarity index 100% rename from src/modules/OCR/patch/paddleocr2.6.0.1/db_postprocess.py rename to src/modules/OCR/patch/paddleocr-2.6.0.1/db_postprocess.py diff --git a/src/modules/OCR/patch/paddle2.4.0rc0/image.py b/src/modules/OCR/patch/paddlepaddle-2.4.0rc0/image.py similarity index 100% rename from src/modules/OCR/patch/paddle2.4.0rc0/image.py rename to src/modules/OCR/patch/paddlepaddle-2.4.0rc0/image.py diff --git a/src/modules/OCR/patch/paddle2.3.2/image.py b/src/modules/OCR/patch/paddlepaddle-2.4.2/image.py similarity index 94% rename from src/modules/OCR/patch/paddle2.3.2/image.py rename to src/modules/OCR/patch/paddlepaddle-2.4.2/image.py index 51aea8b7..e941dfe8 100644 --- a/src/modules/OCR/patch/paddle2.3.2/image.py +++ b/src/modules/OCR/patch/paddlepaddle-2.4.2/image.py @@ -43,19 +43,21 @@ interpreter = sys.executable # Note(zhouwei): if use Python/C 'PyRun_SimpleString', 'sys.executable' # will be the C++ execubable on Windows - #if sys.platform == 'win32' and 'python.exe' not in interpreter: - # interpreter = sys.exec_prefix + os.sep + 'python.exe' - import_cv2_proc = subprocess.Popen( - [interpreter, "-c", "import cv2"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True) + if sys.platform == 'win32' and 'python.exe' not in interpreter: + interpreter = sys.exec_prefix + os.sep + 'python.exe' + import_cv2_proc = subprocess.Popen([interpreter, "-c", "import cv2"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True) out, err = import_cv2_proc.communicate() retcode = import_cv2_proc.poll() if retcode != 0: cv2 = None else: - import cv2 + try: + import cv2 + except ImportError: + cv2 = None else: try: import cv2 @@ -121,10 +123,9 @@ def batch_images_from_tar(data_file, output = {} output['label'] = labels output['data'] = data - pickle.dump( - output, - open('%s/batch_%d' % (out_path, file_id), 'wb'), - protocol=2) + pickle.dump(output, + open('%s/batch_%d' % (out_path, file_id), 'wb'), + protocol=2) file_id += 1 data = [] labels = [] @@ -132,8 +133,9 @@ def batch_images_from_tar(data_file, output = {} output['label'] = labels output['data'] = data - pickle.dump( - output, open('%s/batch_%d' % (out_path, file_id), 'wb'), protocol=2) + pickle.dump(output, + open('%s/batch_%d' % (out_path, file_id), 'wb'), + protocol=2) with open(meta_file, 'a') as meta: for file in os.listdir(out_path): diff --git a/src/modules/OCR/requirements.linux.arm64.txt b/src/modules/OCR/requirements.linux.arm64.txt index 22cf602c..277a4886 100644 --- a/src/modules/OCR/requirements.linux.arm64.txt +++ b/src/modules/OCR/requirements.linux.arm64.txt @@ -10,7 +10,7 @@ paddleocr==2.6.0.1 # Installing PaddleOCR, the OCR toolkit based on PaddlePaddl # Do these after paddlepaddle because paddlepaddle requires specific versions imutils # Installing imutils, the image utilities library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library opencv-python>=4.6.0 # Installing OpenCV, the Computer Vision library for Python numpy==1.23.3 # Installing NumPy, a package for scientific computing diff --git a/src/modules/OCR/requirements.linux.cuda-no-luck.txt b/src/modules/OCR/requirements.linux.cuda-no-luck.txt index 94c27c27..a3234480 100644 --- 
a/src/modules/OCR/requirements.linux.cuda-no-luck.txt +++ b/src/modules/OCR/requirements.linux.cuda-no-luck.txt @@ -30,7 +30,7 @@ paddleocr==2.6.0.1 # Installing PaddleOCR, the OCR toolkit based on # Do these after paddlepaddle because paddlepaddle requires specific versions imutils # Installing imutils, the image utilities library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library opencv-python # Installing OpenCV, the Computer Vision library for Python numpy==1.23.3 # Installing NumPy, a package for scientific computing diff --git a/src/modules/OCR/requirements.linux.txt b/src/modules/OCR/requirements.linux.txt index 9413babe..a8092d9f 100644 --- a/src/modules/OCR/requirements.linux.txt +++ b/src/modules/OCR/requirements.linux.txt @@ -10,7 +10,7 @@ paddleocr==2.6.1.3 # Installing PaddleOCR, the OCR toolkit based on PaddlePaddl # Do these after paddlepaddle because paddlepaddle requires specific versions imutils # Installing imutils, the image utilities library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library opencv-python # Installing OpenCV, the Computer Vision library for Python numpy>=1.23.3 # Installing NumPy, a package for scientific computing diff --git a/src/modules/OCR/requirements.macos.arm64.txt b/src/modules/OCR/requirements.macos.arm64.txt index c4009604..2657639c 100644 --- a/src/modules/OCR/requirements.macos.arm64.txt +++ b/src/modules/OCR/requirements.macos.arm64.txt @@ -12,6 +12,6 @@ numpy # Installing NumPy, a package for scientific computing # Do these after paddlepaddle because paddlepaddle requires specific versions imutils # Installing imutils, the image utilities library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library # end of file \ No newline at end of file diff --git a/src/modules/OCR/requirements.macos.txt b/src/modules/OCR/requirements.macos.txt index 7ac3680e..7b1d3152 100644 --- a/src/modules/OCR/requirements.macos.txt +++ b/src/modules/OCR/requirements.macos.txt @@ -6,7 +6,7 @@ paddleocr==2.6.0.1 # Installing PaddleOCR, the OCR toolkit based on PaddlePaddl # Do these after paddlepaddle because paddlepaddle requires specific versions imutils # Installing imutils, the image utilities library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library opencv-python # Installing OpenCV, the Computer Vision library for Python numpy # Installing NumPy, a package for scientific computing diff --git a/src/modules/OCR/requirements.txt b/src/modules/OCR/requirements.txt index aa87a5d4..3d8680b0 100644 --- a/src/modules/OCR/requirements.txt +++ b/src/modules/OCR/requirements.txt @@ -1,15 +1,11 @@ -#! Python3.9 +#! Python3.7 -# We install a specific version of PaddlePaddle because we have a patch to apply -paddlepaddle==2.3.2 # Installing PaddelPaddle, the Deep Learning platform - -# PaddleOCR is famously painful to install. 
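Each of these requirements files now pins Pillow below 10.0.0. The likely reason (an inference, not stated in the patch) is that Pillow 10 removed long-deprecated constants such as Image.ANTIALIAS, which PaddleOCR 2.6 and the Coral sample code later in this patch still reference, so an unpinned install would fail at runtime. A version-tolerant resize would look like this sketch:

    # Sketch: resize an image on both Pillow 9.x and 10.x.
    from PIL import Image

    def resize_rgb(img: Image.Image, size: tuple) -> Image.Image:
        resample = getattr(Image, "ANTIALIAS", None)   # removed in Pillow 10
        if resample is None:
            resample = Image.Resampling.LANCZOS        # ANTIALIAS was an alias for LANCZOS
        return img.convert("RGB").resize(size, resample)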
This works well for Windows, no GPU, -# using paddle2.3.2 -paddleocr>=2.6.1.3 # Installing PaddleOCR, the OCR toolkit based on PaddlePaddle +paddlepaddle==2.5.0 # Installing PaddelPaddle, the Deep Learning platform +paddleocr==2.6.1.3 # Installing PaddleOCR, the OCR toolkit based on PaddlePaddle # Do these after paddlepaddle because paddlepaddle requires specific versions imutils # Installing imutils, the image utilities library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library opencv-python # Installing OpenCV, the Computer Vision library for Python numpy # Installing NumPy, a package for scientific computing diff --git a/src/modules/OCR/requirements.windows.cuda.txt b/src/modules/OCR/requirements.windows.cuda.txt index 0e8fa76c..00920008 100644 --- a/src/modules/OCR/requirements.windows.cuda.txt +++ b/src/modules/OCR/requirements.windows.cuda.txt @@ -1,14 +1,11 @@ -#! Python3.9 +#! Python3.7 -# PaddlePaddle is painful to install. This works on Windows / CUDA ---find-links https://www.paddlepaddle.org.cn/whl/windows/mkl/avx/stable.html -paddlepaddle-gpu==2.3.2.post116 # Installing PaddelPaddle, the R and D deep learning platform - -paddleocr==2.6.1.3 # Installing PaddleOCR, the OCR toolkit based on PaddlePaddle +paddlepaddle-gpu==2.5.0 # Installing PaddlePaddle, Parallel Distributed Deep Learning +paddleocr==2.6.1.3 # Installing PaddleOCR, the OCR toolkit based on PaddlePaddle # Do these after paddlepaddle because paddlepaddle requires specific versions imutils # Installing imutils, the image utilities library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library opencv-python # Installing OpenCV, the Computer Vision library for Python numpy # Installing NumPy, a package for scientific computing diff --git a/src/modules/ObjectDetectionCoral/ObjectDetectionCoral.pyproj b/src/modules/ObjectDetectionCoral/ObjectDetectionCoral.pyproj new file mode 100644 index 00000000..62b5aeb9 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/ObjectDetectionCoral.pyproj @@ -0,0 +1,74 @@ + + + + Debug + ObjectDetectionCoral + 2.0 + {470d3417-36a4-49a4-b719-496477fa92fb} + + objectdetection_coral_adapter.py + + . + . + {888888a0-9f3d-457c-b088-3a5042f75d52} + Standard Python launcher + MSBuild|env|$(MSBuildProjectFullPath) + False + False + True + + + + + + 10.0 + + + + + + + + + + + + + + + + + + + + + modulesettings.json + + + modulesettings.json + + + + + + + requirements.txt + + + requirements.txt + + + + + + env + 3.7 + env (Python 3.7 (64-bit)) + Scripts\python.exe + Scripts\pythonw.exe + PYTHONPATH + X64 + + + + \ No newline at end of file diff --git a/src/modules/ObjectDetectionCoral/__init__.py b/src/modules/ObjectDetectionCoral/__init__.py new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/__init__.py @@ -0,0 +1 @@ + diff --git a/src/modules/ObjectDetectionCoral/coral_util.py b/src/modules/ObjectDetectionCoral/coral_util.py new file mode 100644 index 00000000..158949b7 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/coral_util.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +""" + TensorFlow Lite Utils. + + Copyright (c) 2020 Nobuo Tsukamoto + Modified 2023 Chris Maunder @ CodeProject + + This software is released under the MIT License. + See the LICENSE file in the project root for more information. 
+""" + +from ctypes import * +from typing import Tuple +import numpy as np + +def make_interpreter(tpu_model_file: str, cpu_model_file: str = None, + num_of_threads: int = 1) -> Tuple[any, bool]: + """ make tf-lite interpreter. + + If tpu_model_file is provided, but no cpu_model_file, then we assume the + caller has determined the libraries and hardware that is available and has + supplied a suitable file. Otherwise, this method will assume the model file + is an edgetpu model but will sniff libraries and hardware and fallback to + cpu_model_file if edge TPU support isn't available. + + Args: + tpu_model_file: Model file path for TPUs. + cpu_model_file: Model file path for CPUs. + num_of_threads: Num of threads. + + Return: + tf-lite interpreter. + """ + + # First determine if we have TensorFlow-Lite runtime installed, or the whole Tensorflow + # In either case we're looking to load TFLite models + try: + from tflite_runtime.interpreter import Interpreter, load_delegate + except ImportError: + import tensorflow as tf + Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate + + # Initially try loading EdgeTPU delegates for the Coral TPU. If this fails fallback. + # For Coral edge TPU you load up a delegate that will handle the TPU computations, and + # pass that to the Interpreter constructor. Everything else is vanilla TFLite. + # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu + delegates = None + + # Only try and load delegates if we're trying to use a TPU + if tpu_model_file: + try: + import platform + delegate = { + 'Linux': 'libedgetpu.so.1', + 'Darwin': 'libedgetpu.1.dylib', + 'Windows': 'edgetpu.dll'}[platform.system()] + delegates = [load_delegate(delegate)] + except Exception as ex: + pass + + interpreter = None + edge_tpu = False + + if delegates and tpu_model_file: + try: + # TensorFlow-Lite loading a TF-Lite TPU model + # CRASH: On Windows, the interpreter.__init__ method accepts experimental + # delegates. These are used in self._interpreter.ModifyGraphWithDelegate, + # which fails on Windows + interpreter = Interpreter(model_path=tpu_model_file, experimental_delegates=delegates) + edge_tpu = True + except Exception as ex: + # Fall back + if cpu_model_file: + interpreter = Interpreter(model_path=cpu_model_file) + else: + # TensorFlow loading a TF-Lite CPU model + if cpu_model_file: + interpreter = Interpreter(model_path=cpu_model_file) + + return (interpreter, edge_tpu) + + """ + if "edgetpu.tflite" in model_file and EDGETPU_SHARED_LIB: + print("EdgeTpu delegate") + return tflite.Interpreter( + model_path=model_file, + experimental_delegates=[tflite.load_delegate(EDGETPU_SHARED_LIB)], + ) + elif delegate_library is not None: + print("{} delegate".format(os.path.splitext(os.path.basename(delegate_library))[0])) + option = {"backends": "CpuAcc", + "logging-severity": "info", + "number-of-threads": str(num_of_threads), + "enable-fast-math":"true"} + print(option) + return tflite.Interpreter( + model_path=model_file, + experimental_delegates=[ + tflite.load_delegate(delegate_library, options=option) + ], + ) + else: + return tflite.Interpreter(model_path=model_file, num_threads=num_of_threads) + """ + +def set_input_tensor(interpreter, image): + """ Sets the input tensor. + + Args: + interpreter: Interpreter object. + image: a function that takes a (width, height) tuple, + and returns an RGB image resized to those dimensions. 
+ """ + tensor_index = interpreter.get_input_details()[0]["index"] + input_tensor = interpreter.tensor(tensor_index)()[0] + input_tensor[:, :] = image.copy() + +def get_output_tensor(interpreter, index): + """ Returns the output tensor at the given index. + + Args: + interpreter + index + + Returns: + tensor + """ + output_details = interpreter.get_output_details()[index] + tensor = np.squeeze(interpreter.get_tensor(output_details["index"])) + return tensor + +def get_output_results(interpreter, field: str): + """ Returns the output tensor at the given index. + + Args: + interpreter + index + + Returns: + tensor + """ + tensor = None + + for index in range(4): + output_details = interpreter.get_output_details()[index] + tensor = interpreter.get_tensor(output_details["index"]) + dimensions = np.ndim(tensor) + + if dimensions == 3 and field == "boxes": + break + + if dimensions == 1 and field == "count": + break + + if dimensions == 2: + if tensor.max() > 1.0 and field == "classes": + break + if tensor.max() <= 1.0 and field == "scores": + break + + return np.squeeze(tensor) + diff --git a/src/modules/ObjectDetectionCoral/imageclassification_coral.py b/src/modules/ObjectDetectionCoral/imageclassification_coral.py new file mode 100644 index 00000000..0137c16a --- /dev/null +++ b/src/modules/ObjectDetectionCoral/imageclassification_coral.py @@ -0,0 +1,272 @@ +# Lint as: python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +r"""Example using PyCoral to classify a given image using an Edge TPU. + +To run this code, you must attach an Edge TPU attached to the host and +install the Edge TPU runtime (`libedgetpu.so`) and `tflite_runtime`. For +device setup instructions, see coral.ai/docs/setup. + +Example usage: +``` +bash examples/install_requirements.sh classify_image.py + +python3 examples/classify_image.py \ + --model test_data/mobilenet_v2_1.0_224_inat_bird_quant_edgetpu.tflite \ + --labels test_data/inat_bird_labels.txt \ + --input test_data/parrot.jpg +``` + +Running this directly in Windows from src\runtimes\bin\windows\python37: + + cd \src\runtimes\bin\windows\python37 + python.exe coral\pycoral\examples\classify_image.py --model coral\pycoral\test_data\mobilenet_v2_1.0_224_inat_bird_quant.tflite --labels coral\pycoral\test_data\inat_bird_labels.txt --input coral\pycoral\test_data\parrot.jpg + +""" + +import argparse +from datetime import datetime +import time + +import numpy as np +from PIL import Image + +from pycoral.adapters import common +from pycoral.adapters import classify +from pycoral.utils.dataset import read_label_file +from pycoral.utils.edgetpu import make_interpreter + +interpreter_lifespan_secs = 3600 # Refresh the interpreter once an hour + +interpreter = None # The model interpreter +interpreter_created = None # When was the interpreter created? 
+labels              = None  # set of labels for this model
+
+
+from options import Options
+
+def init_classify(options: Options):
+
+    global interpreter
+    global interpreter_created
+    global labels
+
+    # edge_tpu   = options.support_GPU # Assuming this correctly tests for Coral TPU
+    # model_file = options.model_tpu_file if edge_tpu else options.model_cpu_file
+
+    # Read labels
+    labels = read_label_file(options.label_file) if options.label_file else {}
+
+    # Initialize TF-Lite interpreter.
+    try:
+        interpreter = make_interpreter(options.model_tpu_file, device=None, delegate=None)
+    except Exception as ex:
+        print("Error creating interpreter: " + str(ex))
+        interpreter = None
+        return
+
+    interpreter.allocate_tensors()
+
+    interpreter_created = datetime.now()
+
+    # Model must be uint8 quantized
+    if common.input_details(interpreter, 'dtype') != np.uint8:
+        raise ValueError('Only support uint8 input type.')
+
+    # Get input and output tensors.
+    input_details  = interpreter.get_input_details()
+    output_details = interpreter.get_output_details()
+
+    print(f"Debug: Input details: {input_details[0]}\n")
+    print(f"Debug: Output details: {output_details[0]}\n")
+
+
+def do_classify(options: Options, img: Image, score_threshold: float = 0.5):
+
+    global interpreter
+    global interpreter_created
+
+    mean  = 128 # args.input_mean
+    std   = 128 # args.input_std
+    top_k = 1
+
+    # Once an hour, refresh the interpreter
+    if interpreter is not None:
+        seconds_since_created = (datetime.now() - interpreter_created).total_seconds()
+        if seconds_since_created > interpreter_lifespan_secs:
+            print("Info: Refreshing the TensorFlow Interpreter")
+            interpreter = None
+
+    if interpreter is None:
+        init_classify(options)
+
+    if interpreter is None:
+        return {
+            "success"     : False,
+            "error"       : "Unable to create interpreter",
+            "count"       : 0,
+            "predictions" : [],
+            "inferenceMs" : 0
+        }
+
+    w,h = img.size
+    print("Debug: Input(height, width): ", h, w)
+
+    size      = common.input_size(interpreter)
+    resize_im = img.convert('RGB').resize(size, Image.ANTIALIAS)
+
+    # numpy_image = np.array(img)
+    # input_im    = cv2.cvtColor(numpy_image, cv2.COLOR_BGR2RGB)
+    # resize_im   = cv2.resize(input_im, size)
+
+    # Image data must go through two transforms before running inference:
+    #   1. normalization: f = (input - mean) / std
+    #   2. quantization:  q = f / scale + zero_point
+    # The following code combines the two steps as such:
+    #   q = (input - mean) / (std * scale) + zero_point
+    # However, if std * scale equals 1, and mean - zero_point equals 0, the input
+    # does not need any preprocessing (but in practice, even if the results are
+    # very close to 1 and 0, it is probably okay to skip preprocessing for better
+    # efficiency; we use 1e-5 below instead of absolute zero).
+
+    params     = common.input_details(interpreter, 'quantization_parameters')
+    scale      = params['scales']
+    zero_point = params['zero_points']
+
+    if abs(scale * std - 1) < 1e-5 and abs(mean - zero_point) < 1e-5:
+        # Input data does not require preprocessing.
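+        # Worked example: a typical uint8-quantized MobileNet exports roughly
+        # scale = 1/128 and zero_point = 128. With the mean = std = 128
+        # defaults above:
+        #     q = (input - 128) / (128 * (1/128)) + 128 = input
+        # i.e. the quantized value equals the raw pixel, so the resized image
+        # can be fed in unchanged. That is the fast path taken here.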
+ common.set_input(interpreter, resize_im) + else: + # Input data requires preprocessing + normalized_input = (np.asarray(resize_im) - mean) / (std * scale) + zero_point + np.clip(normalized_input, 0, 255, out=normalized_input) + common.set_input(interpreter, normalized_input.astype(np.uint8)) + + # Run inference + start_inference_time = time.perf_counter() + interpreter.invoke() + inferenceMs = int((time.perf_counter() - start_inference_time) * 1000) + + # Get output + classes = classify.get_classes(interpreter, top_k, score_threshold) + objs = [] + for c in classes: + detection = { + "class_id": c.id, + "score": c.score, + "bounding_box": (0,0,0,0) + } + objs.append(detection) + + # Generate results + outputs = [] + for i, obj in enumerate(objs): + class_id = int(obj["class_id"]) + caption = labels.get(class_id, class_id) if class_id in labels else class_id + score = float(obj["score"]) + + if score >= score_threshold: + detection = { + "confidence": score, + "label": caption + } + + outputs.append(detection) + + return { + "success" : True, + "count" : len(outputs), + "predictions" : outputs, + "inferenceMs" : inferenceMs + } + + +def main(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + '-m', '--model', required=True, help='File path of .tflite file.') + parser.add_argument( + '-i', '--input', required=True, help='Image to be classified.') + parser.add_argument( + '-l', '--labels', help='File path of labels file.') + parser.add_argument( + '-k', '--top_k', type=int, default=1, + help='Max number of classification results') + parser.add_argument( + '-t', '--threshold', type=float, default=0.0, + help='Classification score threshold') + parser.add_argument( + '-c', '--count', type=int, default=5, + help='Number of times to run inference') + parser.add_argument( + '-a', '--input_mean', type=float, default=128.0, + help='Mean value for input normalization') + parser.add_argument( + '-s', '--input_std', type=float, default=128.0, + help='STD value for input normalization') + args = parser.parse_args() + + labels = read_label_file(args.labels) if args.labels else {} + + interpreter = make_interpreter(*args.model.split('@')) + interpreter.allocate_tensors() + + # Model must be uint8 quantized + if common.input_details(interpreter, 'dtype') != np.uint8: + raise ValueError('Only support uint8 input type.') + + size = common.input_size(interpreter) + image = Image.open(args.input).convert('RGB').resize(size, Image.ANTIALIAS) + + # Image data must go through two transforms before running inference: + # 1. normalization: f = (input - mean) / std + # 2. quantization: q = f / scale + zero_point + # The following code combines the two steps as such: + # q = (input - mean) / (std * scale) + zero_point + # However, if std * scale equals 1, and mean - zero_point equals 0, the input + # does not need any preprocessing (but in practice, even if the results are + # very close to 1 and 0, it is probably okay to skip preprocessing for better + # efficiency; we use 1e-5 below instead of absolute zero). + params = common.input_details(interpreter, 'quantization_parameters') + scale = params['scales'] + zero_point = params['zero_points'] + mean = args.input_mean + std = args.input_std + if abs(scale * std - 1) < 1e-5 and abs(mean - zero_point) < 1e-5: + # Input data does not require preprocessing. 
+ common.set_input(interpreter, image) + else: + # Input data requires preprocessing + normalized_input = (np.asarray(image) - mean) / (std * scale) + zero_point + np.clip(normalized_input, 0, 255, out=normalized_input) + common.set_input(interpreter, normalized_input.astype(np.uint8)) + + # Run inference + print('----INFERENCE TIME----') + print('Note: The first inference on Edge TPU is slow because it includes', + 'loading the model into Edge TPU memory.') + for _ in range(args.count): + start = time.perf_counter() + interpreter.invoke() + inference_time = time.perf_counter() - start + classes = classify.get_classes(interpreter, args.top_k, args.threshold) + print('%.1fms' % (inference_time * 1000)) + + print('-------RESULTS--------') + for c in classes: + print('%s: %.5f' % (labels.get(c.id, c.id), c.score)) + +if __name__ == '__main__': + main() diff --git a/src/modules/ObjectDetectionCoral/imageclassification_coral_adapter.py b/src/modules/ObjectDetectionCoral/imageclassification_coral_adapter.py new file mode 100644 index 00000000..bdedc2c4 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/imageclassification_coral_adapter.py @@ -0,0 +1,128 @@ +# Import our general libraries +import sys +import time +import threading + +# Import the CodeProject.AI SDK. This will add to the PATH var for future imports +sys.path.append("../../SDK/Python") +from common import JSON +from request_data import RequestData +from module_runner import ModuleRunner + +# Import the method of the module we're wrapping +from options import Options +from PIL import UnidentifiedImageError, Image + +# Import the method of the module we're wrapping +from imageclassification_coral import init_classify, do_classify + +opts = Options() +sem = threading.Semaphore() + +class CoralClassifier_adapter(ModuleRunner): + + # async + def initialise(self) -> None: + + if self.support_GPU: + self.support_GPU = self.hasCoralTPU + + if self.support_GPU: + print("Edge TPU detected") + self.execution_provider = "TPU" + # else: + # opts.model_tpu_file = None # disable TPU + + init_classify(opts) + + #async + def process(self, data: RequestData) -> JSON: + + # The route to here is /v1/vision/classify + + if data.command == "list-custom": # list all models available + return { "success": True, "models": [ 'MobileNet SSD'] } + + if data.command == "classify" or data.command == "custom": + threshold: float = float(data.get_value("min_confidence", opts.min_confidence)) + img: Image = data.get_image(0) + + # response = await self.do_classification(img, threshold) + response = self.do_classification(img, threshold) + else: + # await self.report_error_async(None, __file__, f"Unknown command {data.command}") + self.report_error(None, __file__, f"Unknown command {data.command}") + response = { "success": False, "error": "unsupported command" } + + return response + + + # async + def do_classification(self, img: any, score_threshold: float): + + start_process_time = time.perf_counter() + + try: + + # An attempt to fix "RuntimeError: There is at least 1 reference to + # internal data in the interpreter in the form of a numpy array or + # slice. Be sure to only hold the function returned from tensor() if + # you are using raw data access. + if not sem.acquire(timeout=1): + return { + "success" : False, + "predictions" : [], + "message" : "The interpreter is in use. 
Please try again later", + "count" : 0, + "processMs" : int((time.perf_counter() - start_process_time) * 1000), + "inferenceMs" : 0 + } + + result = do_classify(opts, img, score_threshold) + sem.release() + + if not result['success']: + return { + "success" : False, + "predictions" : [], + "message" : '', + "error" : result["error"] if "error" in result else "Unable to perform classification", + "count" : 0, + "processMs" : int((time.perf_counter() - start_process_time) * 1000), + "inferenceMs" : result['inferenceMs'] + } + + predictions = result["predictions"] + if len(predictions) > 3: + message = 'Found ' + (', '.join(det["label"] for det in predictions[0:3])) + "..." + elif len(predictions) > 0: + message = 'Found ' + (', '.join(det["label"] for det in predictions)) + elif "error" in result: + message = result["error"] + else: + message = "No objects found" + + # print(message) + + return { + "success" : result['success'], + "predictions" : result['predictions'], + "message" : message, + "count" : result["count"], + "processMs" : int((time.perf_counter() - start_process_time) * 1000), + "inferenceMs" : result['inferenceMs'] + } + + except UnidentifiedImageError as img_ex: + # await self.report_error_async(img_ex, __file__, "The image provided was of an unknown type") + self.report_error(img_ex, __file__, "The image provided was of an unknown type") + return { "success": False, "error": "invalid image file" } + + except Exception as ex: + # await self.report_error_async(ex, __file__) + self.report_error(ex, __file__) + return { "success": False, "error": "Error occurred on the server"} + + +if __name__ == "__main__": + CoralClassifier_adapter().start_loop() diff --git a/src/modules/ObjectDetectionCoral/install.bat b/src/modules/ObjectDetectionCoral/install.bat new file mode 100644 index 00000000..0e6be55c --- /dev/null +++ b/src/modules/ObjectDetectionCoral/install.bat @@ -0,0 +1,94 @@ +:: Development mode setup script :::::::::::::::::::::::::::::::::::::::::::::: +:: +:: ObjectDetection (Coral) +:: +:: This script is only called from ..\..\src\setup.bat + +@if "%1" NEQ "install" ( + echo This script is only called from ..\..\src\setup.bat + @pause + @goto:eof +) + +rem set verbosity=loud + +REM Python setup +call "%sdkScriptsPath%\utils.bat" SetupPython 3.7 "Local" +if errorlevel 1 exit /b 1 + +REM Do SDK first, since it's a little fussy +call "%sdkScriptsPath%\utils.bat" InstallPythonPackages 3.7 "%absoluteAppRootDir%\SDK\Python" "Local" +if errorlevel 1 exit /b 1 + +call "%sdkScriptsPath%\utils.bat" InstallPythonPackages 3.7 "%modulePath%" "Local" +if errorlevel 1 exit /b 1 + +if not exist edgetpu_runtime ( + call "%sdkScriptsPath%\utils.bat" GetFromServer "edgetpu_runtime_20221024.zip" "." "Downloading edge TPU runtime..." + + rem call "%sdkScriptsPath%\utils.bat" ExtractToDirectory "edgetpu_runtime_20221024.zip" + rem move edgetpu_runtime_20221024\edgetpu_runtime edgetpu_runtime + rem rmdir edgetpu_runtime_20221024 +) + +if exist edgetpu_runtime ( + call "!sdkScriptsPath!\utils.bat" WriteLine "*** You may need to run !modulePath!\edgetpu_runtime\install.bat to complete this process. Attempting to run this script now." "!color_info!" + + pushd edgetpu_runtime + call install.bat + popd +) + +:: Download the MobileNet TFLite models and store in /assets +call "%sdkScriptsPath%\utils.bat" GetFromServer "objectdetect-coral-models.zip" "assets" "Downloading MobileNet models..." 
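Stepping back to the adapter above: it wraps every inference in a module-level semaphore because a single TFLite interpreter is not thread-safe, and concurrent invoke() calls raise the "reference to internal data" RuntimeError that the comment quotes. The same guard in isolation, as a sketch (note the try/finally, which also releases the semaphore if inference raises, something the adapter's success-path-only release does not guarantee):

    # Sketch: serialize access to one shared TFLite interpreter across threads.
    import threading

    _sem = threading.Semaphore()

    def guarded_invoke(interpreter) -> bool:
        if not _sem.acquire(timeout=1):   # busy: fail fast instead of queueing
            return False
        try:
            interpreter.invoke()          # only one thread at a time gets here
            return True
        finally:
            _sem.release()                # released even if invoke() raises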
+if errorlevel 1 exit /b 1
+
+
+:: -- Install script cheatsheet --
+::
+:: Variables available:
+::
+::  absoluteAppRootDir - the root path of the app (eg: C:\Program Files\CodeProject\AI\)
+::  sdkScriptsPath     - the path to the installation utility scripts (%rootPath%\src\SDK\Scripts)
+::  downloadPath       - the path to where downloads will be stored (%rootPath%\src\downloads)
+::  runtimesPath       - the path to the installed runtimes (%rootPath%\src\runtimes)
+::  modulesPath        - the path to all the AI modules (%rootPath%\src\modules)
+::  moduleDir          - the name of the directory containing this module
+::  modulePath         - the path to this module (%modulesPath%\%moduleDir%)
+::  os                 - "windows"
+::  architecture       - "x86_64" or "arm64"
+::  platform           - "windows" or "windows-arm64"
+::  systemName         - "Windows"
+::  verbosity          - quiet, info or loud. Use this to determine the noise level of output.
+::  forceOverwrite     - if true then ensure you force a re-download and re-copy of downloads.
+::                       GetFromServer will honour this value. Do it yourself for DownloadAndExtract
+::
+:: Methods available (call by 'call %sdkScriptsPath%\utils.bat <method>')
+::
+:: Write     text [foreground [background]] (eg call %sdkScriptsPath%\utils.bat WriteLine "Hi" "green")
+:: WriteLine text [foreground [background]]
+::
+:: GetFromServer filename moduleAssetDir message
+::      filename       - Name of the compressed archive to be downloaded
+::      moduleAssetDir - Name of folder in the module's directory where archive will be extracted
+::      message        - Message to display during download
+::
+:: DownloadAndExtract storageUrl filename downloadPath dirNameToSave message
+::      storageUrl    - Url that holds the compressed archive to Download
+::      filename      - Name of the compressed archive to be downloaded
+::      downloadPath  - Path to where the downloaded compressed archive should be downloaded
+::      dirNameToSave - name of directory, relative to downloadPath, where contents of archive
+::                      will be extracted and saved
+::      message       - Message to display during download
+::
+:: SetupPython Version [install-location]
+::      Version          - version number of python to setup. 3.7 and 3.9 currently supported. A virtual
+::                         environment will be created in the module's local folder if install-location is
+::                         "Local", otherwise in %runtimesPath%/bin/windows/python/venv.
+::      install-location - [optional] "Local" or "Shared" (see above)
+::
+:: InstallPythonPackages Version requirements-file-directory [install-location]
+::      Version          - version number, as per SetupPython
+::      requirements-file-directory - directory containing the requirements.txt file
+::      install-location - [optional] "Local" (installed in the module's local folder) or
+::                         "Shared" (installed in the shared runtimes/bin directory)
diff --git a/src/modules/ObjectDetectionCoral/install.sh b/src/modules/ObjectDetectionCoral/install.sh
new file mode 100644
index 00000000..b38e30dc
--- /dev/null
+++ b/src/modules/ObjectDetectionCoral/install.sh
@@ -0,0 +1,178 @@
+# Development mode setup script ::::::::::::::::::::::::::::::::::::::::::::::
+#
+#                           ObjectDetection (Coral)
+#
+# This script is called from the ObjectDetectionCoral directory using:
+#
+#    bash ../../setup.sh
+#
+# The setup.sh script will find this install.sh file and execute it.
+ +if [ "$1" != "install" ]; then + read -t 3 -p "This script is only called from: bash ../../setup.sh" + echo + exit 1 +fi + +# verbosity="loud" + +# Python setup first + +setupPython 3.9 "Local" +installPythonPackages 3.9 "${modulePath}" "Local" +installPythonPackages 3.9 "${absoluteAppRootDir}/SDK/Python" "Local" + +# Now the supporting libraries + +if [ "${systemName}" == "Raspberry Pi" ] || [ "${systemName}" == "Orange Pi" ] || \ + [ "${systemName}" == "Jetson" ]; then + + if [[ $EUID -ne 0 ]]; then + writeLine "=================================================================================" $color_error + writeLine "Please run: sudo apt install libopenblas-dev libblas-dev m4 cmake cython python3-dev python3-yaml python3-setuptools " $color_info + writeLine "to complete the setup for ObjectDetectionCoral" $color_info + writeLine "=================================================================================" $color_error + else + sudo apt install libopenblas-dev libblas-dev m4 cmake cython python3-dev python3-yaml python3-setuptools -y + fi +fi + +if [ "$os" == "linux" ]; then + + write "Ensuring curl is installed (just in case)..." $color_mute + apt-get install curl -y >/dev/null 2>/dev/null & + spin $! + writeLine "Done" "$color_success" + + # Add the Debian package repository to your system + echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list + + if [ ! -d "${downloadPath}" ]; then mkdir -p "${downloadPath}"; fi + if [ ! -d "${downloadPath}/Coral" ]; then mkdir -p "${downloadPath}/Coral"; fi + pushd "${downloadPath}/Coral" >/dev/null 2>/dev/null + + write "Downloading signing keys..." $color_mute + curl https://packages.cloud.google.com/apt/doc/apt-key.gpg -s --output apt-key.gpg >/dev/null 2>/dev/null & + spin $! + writeLine "Done" "$color_success" + + write "Installing signing keys..." $color_mute + # NOTE: 'add' is deprecated. We should, instead, name apt-key.gpg as coral.ai-apt-key.gpg and + # place it directly in the /etc/apt/trusted.gpg.d/ directory + sudo apt-key add apt-key.gpg >/dev/null 2>/dev/null & + spin $! + writeLine "Done" "$color_success" + + popd "${downloadPath}/Coral" >/dev/null 2>/dev/null + + if [[ $EUID -ne 0 ]]; then + writeLine "=================================================================================" $color_error + writeLine "Please run the following commands to complete the setup for ObjectDetectionCoral:" $color_info + writeLine "sudo apt-get update && sudo apt-get install libedgetpu1-std" $color_info + writeLine "=================================================================================" $color_error + else + # Install the Edge TPU runtime (standard, meaning half speed, or max, meaning full speed) + write "Installing libedgetpu1-std (the non-desk-melting version of libedgetpu1)..." $color_mute + sudo apt-get update -y >/dev/null 2>/dev/null & + spin $! + sudo apt-get install libedgetpu1-std -y >/dev/null 2>/dev/null & + spin $! + writeLine "Done" "$color_success" + + # BE CAREFUL. If you want your TPU to go to 11 and choose 'max' you may burn a hole in your desk + # sudo apt-get update && apt-get install libedgetpu1-max + fi + +elif [ "$os" == "macos" ]; then + + # brew install doesn't seem to be enough. Macports gets it right + if ! 
command -v /opt/local/bin/port >/dev/null; then + writeLine "Please install Macports from https://www.macports.org/install.php before you run this script" "$color_success" + quit 1 + fi + + # curl -LO https://github.com/google-coral/libedgetpu/releases/download/release-grouper/edgetpu_runtime_20221024.zip + # We have modified the install.sh script in this zip so it forces the install of the throttled version + getFromServer "edgetpu_runtime_20221024.zip" "" "Downloading edge TPU runtime..." + + unzip edgetpu_runtime_20221024.zip + pushd edgetpu_runtime + bash install.sh + + # For whatever reason the libs don't seem to be getting put in place, so do this manually + sudo cp edgetpu_runtime/libedgetpu/throttled/darwin_x86_64/libedgetpu.1.0.dylib . + # sudo cp edgetpu_runtime/libedgetpu/throttled/darwin_x86_64/libedgetpu.1.dylib . + popd + + venvPath="${modulePath}/bin/${os}/python39/venv" + packagesPath="${venvPath}/lib/python3.9/site-packages/" + + if [ "$os_name" == "Big Sur" ] && [ "$platform" == "macos" ]; then # macOS 11.x on Intel + "${venvPath}/bin/python" -m pip install tflite-runtime==2.5.0.post1 pycoral --extra-index-url https://google-coral.github.io/py-repo/ + elif [ "$os_name" == "Monterey" ] && [ "$platform" == "macos-arm64" ]; then # macOS 12.x on Apple Silicon + "${venvPath}/bin/python" -m pip install tflite-runtime==2.5.0.post1 pycoral --extra-index-url https://google-coral.github.io/py-repo/ + else + "${venvPath}/bin/python" -m pip install tflite-runtime==2.5.0.post1 pycoral --extra-index-url https://google-coral.github.io/py-repo/ + fi +fi + +# Download the MobileNet TFLite models and store in /assets +getFromServer "objectdetect-coral-models.zip" "assets" "Downloading MobileNet models..." +if [ $? -ne 0 ]; then quit 1; fi + + +# -- Install script cheatsheet -- +# +# Variables available: +# +# absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI) +# sdkScriptsPath - the path to the installation utility scripts ($rootPath/SDK/Scripts) +# downloadPath - the path to where downloads will be stored ($sdkScriptsPath/downloads) +# runtimesPath - the path to the installed runtimes ($rootPath/src/runtimes) +# modulesPath - the path to all the AI modules ($rootPath/src/modules) +# moduleDir - the name of the directory containing this module +# modulePath - the path to this module ($modulesPath/$moduleDir) +# os - "linux" or "macos" +# architecture - "x86_64" or "arm64" +# platform - "linux", "linux-arm64", "macos" or "macos-arm64" +# systemName - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi" +# "Jetson" or "Docker" +# verbosity - quiet, info or loud. Use this to determines the noise level of output. +# forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. +# getFromServer will honour this value. 
Do it yourself for downloadAndExtract +# +# Methods available +# +# write text [foreground [background]] (eg write "Hi" "green") +# writeLine text [foreground [background]] +# Download storageUrl downloadPath filename moduleDir message +# storageUrl - Url that holds the compressed archive to Download +# downloadPath - Path to where the downloaded compressed archive should be downloaded +# filename - Name of the compressed archive to be downloaded +# dirNameToSave - name of directory, relative to downloadPath, where contents of archive +# will be extracted and saved +# +# getFromServer filename moduleAssetDir message +# filename - Name of the compressed archive to be downloaded +# moduleAssetDir - Name of folder in module's directory where archive will be extracted +# message - Message to display during download +# +# downloadAndExtract storageUrl filename downloadPath dirNameToSave message +# storageUrl - Url that holds the compressed archive to Download +# filename - Name of the compressed archive to be downloaded +# downloadPath - Path to where the downloaded compressed archive should be downloaded +# dirNameToSave - name of directory, relative to downloadPath, where contents of archive +# will be extracted and saved +# message - Message to display during download +# +# setupPython Version [install-location] +# Version - version number of python to setup. 3.8 and 3.9 currently supported. A virtual +# environment will be created in the module's local folder if install-location is +# "Local", otherwise in $runtimesPath/bin/$platform/python/venv. +# install-location - [optional] "Local" or "Shared" (see above) +# +# installPythonPackages Version requirements-file-directory +# Version - version number, as per SetupPython +# requirements-file-directory - directory containing the requirements.txt file +# install-location - [optional] "Local" (installed in the module's local venv) or +# "Shared" (installed in the shared $runtimesPath/bin venv folder) \ No newline at end of file diff --git a/src/modules/ObjectDetectionCoral/label_util.py b/src/modules/ObjectDetectionCoral/label_util.py new file mode 100644 index 00000000..64d9aa24 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/label_util.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +""" + Label util function. + + Copyright (c) 2019 Nobuo Tsukamoto + + This software is released under the MIT License. + See the LICENSE file in the project root for more information. +""" + +import numpy as np +import re + +def create_pascal_label_colormap(): + """ Creates a label colormap used in PASCAL VOC segmentation benchmark. + + Returns: + A Colormap for visualizing segmentation results. + """ + colormap = np.zeros((256, 3), dtype=np.uint8) + ind = np.arange(256, dtype=np.uint8) + + for shift in reversed(range(8)): + for channel in range(3): + colormap[:, channel] |= ((ind >> channel) & 1) << shift + ind >>= 3 + return colormap + + +def label_to_color_image(colormap, label): + """ Adds color defined by the dataset colormap to the label. + + Args: + colormap: A Colormap for visualizing segmentation results. + label: A 2D array with integer type, storing the segmentation label. + + Returns: + result: A 2D array with floating type. The element of the array + is the color indexed by the corresponding element in the input label + to the PASCAL color map. + + Raises: + ValueError: If label is not of rank 2 or its value is larger than color + map maximum entry. 
+ """ + if label.ndim != 2: + raise ValueError("Expect 2-D input label") + + if np.max(label) >= len(colormap): + raise ValueError("label value too large.") + + return colormap[label] + + +def read_label_file(file_path): + """ Function to read labels from text files. + + Args: + file_path: File path to labels. + """ + with open(file_path, 'r', encoding='utf-8') as f: + lines = f.readlines() + ret = {} + for row_number, content in enumerate(lines): + pair = re.split(r'[:\s]+', content.strip(), maxsplit=1) + if len(pair) == 2 and pair[0].strip().isdigit(): + ret[int(pair[0])] = pair[1].strip() + else: + ret[row_number] = content.strip() + return ret \ No newline at end of file diff --git a/src/modules/ObjectDetectionCoral/modulesettings.docker.build.json b/src/modules/ObjectDetectionCoral/modulesettings.docker.build.json new file mode 100644 index 00000000..dbb422a8 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/modulesettings.docker.build.json @@ -0,0 +1,22 @@ +{ + "Modules": { + "ObjectDetectionCoral": { + /* In Docker, when building the image, we copy over the code and config files, which includes + this special modulesettings.docker.build.json file that enables us to point the Python + interpreter to the shared, pre-installed python venv in the Docker image. This file will be + renamed to modulesettings.docker.json during the Docker image build process. + + If this module were downloaded and installed during runtime, then the usual + modulesettings.linux.json would be loaded, followed by the modulesettings.docker.json file. + The modulesettings.docker.build.json file would be ignored. Downloaded modules would have + their Python interpreter point to a Local install of Python, not the shared, so that Python + packages can be installed and persisted. + */ + + // This NEEDS to be 'shared' for docker pre-installed + "RuntimeLocation": "Shared", // Can be Local or Shared + + "PreInstalled": "true" + } + } +} \ No newline at end of file diff --git a/src/modules/ObjectDetectionCoral/modulesettings.docker.build.rpi64.json b/src/modules/ObjectDetectionCoral/modulesettings.docker.build.rpi64.json new file mode 100644 index 00000000..210a69c2 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/modulesettings.docker.build.rpi64.json @@ -0,0 +1,29 @@ +{ + "Modules": { + "ObjectDetectionCoral": { + /* In Docker, when building the image, we copy over the code and config files, which includes + this special modulesettings.docker.build.json file that enables us to point the Python + interpreter to the shared, pre-installed python venv in the Docker image. This file will be + renamed to modulesettings.docker.json during the Docker image build process. + + If this module were downloaded and installed during runtime, then the usual + modulesettings.linux.json would be loaded, followed by the modulesettings.docker.json file. + The modulesettings.docker.build.json file would be ignored. Downloaded modules would have + their Python interpreter point to a Local install of Python, not the shared, so that Python + packages can be installed and persisted. 
+      */
+
+      // This NEEDS to be 'shared' for docker pre-installed
+      "RuntimeLocation": "Shared", // Can be Local or Shared
+
+      // This is usable on an RPi, but it needs better modules and, hopefully, a
+      // Coral.AI device to be truly fast
+      "AutoStart": true,
+      "PreInstalled": "true",
+
+      "EnvironmentVariables": {
+        "MODEL_SIZE": "Tiny"
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/src/modules/ObjectDetectionCoral/modulesettings.json b/src/modules/ObjectDetectionCoral/modulesettings.json
new file mode 100644
index 00000000..dc4a84ed
--- /dev/null
+++ b/src/modules/ObjectDetectionCoral/modulesettings.json
@@ -0,0 +1,95 @@
+{
+  "Modules": {
+    "ObjectDetectionCoral": {
+      "Name": "ObjectDetection (Coral)",
+      "Version": "1.3",
+
+      // Publishing info
+      "Description": "The object detection module uses the Coral TPU to locate and classify the objects the models have been trained on.",
+      "Platforms": [ "windows", "linux", "linux-arm64", "macos", "macos-arm64" ],
+      "License": "Apache-2.0",
+      "LicenseUrl": "https://opensource.org/licenses/Apache-2.0",
+
+      // Which server version is compatible with each version of this module.
+      "ModuleReleases": [
+        { "ModuleVersion": "1.0", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-07-11" },
+        { "ModuleVersion": "1.1", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-07-12" },
+        { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-07-12" },
+        { "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-08-11", "ReleaseNotes": "installer corrections, macOS/Ubuntu support improved" }
+      ],
+
+      // Launch instructions
+      "AutoStart": false,
+      "FilePath": "objectdetection_coral_adapter.py",
+      "Runtime": "python39",
+      "RuntimeLocation": "Local", // Can be Local or Shared
+
+      // These are all optional. Defaults are usually fine
+      "SupportGPU": true,
+      "AcceleratorDeviceName": null,   // = default
+      "Parallelism": 1,                // 0 = Default (number of CPUs - 1)
+      "HalfPrecision": "enable",       // "Force", "Enable", "Disable": whether to force on, allow, or disable half-precision ops
+      "PostStartPauseSecs": 1,         // 1 if using GPU, 0 for CPU
+
+      // Deliberately not using the default queue: we make all object detectors use the same queue.
+      "Queue": "objectdetection_queue", // default is lower(modulename) + "_queue"
+
+      "EnvironmentVariables": {
+        "MODELS_DIR": "%CURRENT_MODULE_PATH%/assets",
+        "MODEL_SIZE": "Medium"
+      },
+
+      "RouteMaps": [
+        {
+          "Name": "Object Detector",
+          "Path": "vision/detection",
+          "Method": "POST",
+          "Command": "detect",
+          "Description": "Detects multiple objects in an image.",
+          "Inputs": [
+            {
+              "Name": "image",
+              "Type": "File",
+              "Description": "The HTTP file object (image) to be analyzed."
+            },
+            {
+              "Name": "min_confidence",
+              "Type": "Float",
+              "Description": "The minimum confidence level required for an object to be detected. In the range 0.0 to 1.0. Default 0.4.",
+              "DefaultValue": 0.4,
+              "MinValue": 0.0,
+              "MaxValue": 1.0
+            }
+          ],
+          "Outputs": [
+            {
+              "Name": "success",
+              "Type": "Boolean",
+              "Description": "True if successful."
+            },
+            {
+              "Name": "predictions",
+              "Type": "Object",
+              "Description": "An array of objects with the x_min, y_min, x_max, y_max, label and confidence."
+            },
+            {
+              "Name": "inferenceMs",
+              "Type": "Integer",
+              "Description": "The time (ms) to perform the AI inference."
+            },
+            {
+              "Name": "processMs",
+              "Type": "Integer",
+              "Description": "The time (ms) to process the image (includes inference and image manipulation operations)."
+            },
+            {
+              "Name": "analysisRoundTripMs",
+              "Type": "Integer",
+              "Description": "The time (ms) for the round trip to the analysis module and back."
+            }
+          ]
+        }
+      ]
+    }
+  }
+}
diff --git a/src/modules/ObjectDetectionCoral/modulesettings.linux.arm64.json b/src/modules/ObjectDetectionCoral/modulesettings.linux.arm64.json
new file mode 100644
index 00000000..2e18f021
--- /dev/null
+++ b/src/modules/ObjectDetectionCoral/modulesettings.linux.arm64.json
@@ -0,0 +1,7 @@
+{
+  "Modules": {
+    "ObjectDetectionCoral": {
+      "Runtime": "python39"
+    }
+  }
+}
diff --git a/src/modules/ObjectDetectionCoral/modulesettings.windows.json b/src/modules/ObjectDetectionCoral/modulesettings.windows.json
new file mode 100644
index 00000000..9db738d9
--- /dev/null
+++ b/src/modules/ObjectDetectionCoral/modulesettings.windows.json
@@ -0,0 +1,7 @@
+{
+  "Modules": {
+    "ObjectDetectionCoral": {
+      "Runtime": "python37"
+    }
+  }
+}
diff --git a/src/modules/ObjectDetectionCoral/objectdetection_coral.py b/src/modules/ObjectDetectionCoral/objectdetection_coral.py
new file mode 100644
index 00000000..9a8331d8
--- /dev/null
+++ b/src/modules/ObjectDetectionCoral/objectdetection_coral.py
@@ -0,0 +1,285 @@
+# Lint as: python3
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+r"""Example using PyCoral to classify a given image using an Edge TPU.
+
+To run this code, you must have an Edge TPU attached to the host and
+install the Edge TPU runtime (`libedgetpu.so`) and `tflite_runtime`. For
+device setup instructions, see coral.ai/docs/setup.
+
+Example usage:
+```
+bash examples/install_requirements.sh classify_image.py
+
+python3 examples/classify_image.py \
+  --model test_data/mobilenet_v2_1.0_224_inat_bird_quant_edgetpu.tflite \
+  --labels test_data/inat_bird_labels.txt \
+  --input test_data/parrot.jpg
+```
+
+Running this directly from src\runtimes\bin\windows\python37:
+
+cd \src\runtimes\bin\windows\python37
+python.exe coral\pycoral\examples\classify_image.py --model coral\pycoral\test_data\mobilenet_v2_1.0_224_inat_bird_quant.tflite --labels coral\pycoral\test_data\inat_bird_labels.txt --input coral\pycoral\test_data\parrot.jpg
+
+"""
+import os
+import platform
+import sys
+
+import argparse
+from datetime import datetime
+import time
+
+import numpy as np
+from PIL import Image
+
+# For Linux we have installed the pycoral libs via apt-get, not pip in the venv,
+# so make sure the interpreter can find the Coral libraries
+if platform.system() == "Linux":
+    sys.path.insert(0, "/usr/lib/python3.9/site-packages/")
+
+from pycoral.adapters import common
+from pycoral.adapters import detect
+from pycoral.utils.dataset import read_label_file
+from pycoral.utils.edgetpu import make_interpreter
+
+interpreter_lifespan_secs = 3600  # Refresh the interpreter once an hour
+
+interpreter         = None  # The model interpreter
+interpreter_created = None  # When was the interpreter created?
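+# The interpreter is created once and cached at module level so that each
+# request doesn't pay the cost of loading the model (and, on the TPU, of
+# transferring it to the device). It is torn down and recreated after
+# interpreter_lifespan_secs; see do_detect below.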
+labels = None # set of labels for this model + + +from options import Options + +def init_detect(options: Options) -> str: + + global interpreter + global interpreter_created + global labels + + + # edge_tpu = options.support_GPU # Assuming this correctly tests for Coral TPU + # model_file = options.model_tpu_file if edge_tpu else options.model_cpu_file + + # Read labels + labels = read_label_file(options.label_file) if options.label_file else {} + + # Initialize TF-Lite interpreter. + device = "" + try: + device = "tpu" + interpreter = make_interpreter(options.model_tpu_file, device=None, delegate=None) + + if interpreter == None: + device = "cpu" + interpreter = make_interpreter(options.model_cpu_file, device="cpu", delegate=None) + + except Exception as ex: + try: + device = "cpu" + interpreter = make_interpreter(options.model_cpu_file, device="cpu", delegate=None) + except Exception as ex: + print("Error creating interpreter: " + str(ex)) + interpreter = None + + if interpreter == None: + device = "" + else: + interpreter.allocate_tensors() + interpreter_created = datetime.now() + + """ + # Model must be uint8 quantized + if common.input_details(interpreter, 'dtype') != np.uint8: + raise ValueError('Only support uint8 input type.') + """ + + # Get input and output tensors. + input_details = interpreter.get_input_details() + output_details = interpreter.get_output_details() + + print(f"Debug: Input details: {input_details[0]}\n") + print(f"Debug: Output details: {output_details[0]}\n") + + return device + +def do_detect(options: Options, img: Image, score_threshold: float = 0.5): + + global interpreter + global interpreter_created + + mean = 128 # args.input_mean + std = 128 # args.input_std + top_k = 1 + + # Once an hour, refresh the interpreter + if interpreter != None: + seconds_since_created = (datetime.now() - interpreter_created).total_seconds() + if seconds_since_created > interpreter_lifespan_secs: + print("Info: Refreshing the Tensorflow Interpreter") + interpreter = None + + if interpreter == None: + init_detect(options) + + if interpreter == None: + return { + "success" : False, + "error" : "Unable to create interpreter", + "count" : 0, + "predictions" : [], + "inferenceMs" : 0 + } + + w,h = img.size + # print("Debug: Input(height, width): ", h, w) + + _, scale = common.set_resized_input( + interpreter, img.size, lambda size: img.resize(size, Image.LANCZOS)) + + """ + size = common.input_size(interpreter) + resize_im = img.convert('RGB').resize(size, Image.ANTIALIAS) + + # numpy_image = np.array(img) + # input_im = cv2.cvtColor(numpy_image, cv2.COLOR_BGR2RGB) + # resize_im = cv2.resize(input_im, size) + + # Image data must go through two transforms before running inference: + # 1. normalization: f = (input - mean) / std + # 2. quantization: q = f / scale + zero_point + # The following code combines the two steps as such: + # q = (input - mean) / (std * scale) + zero_point + # However, if std * scale equals 1, and mean - zero_point equals 0, the input + # does not need any preprocessing (but in practice, even if the results are + # very close to 1 and 0, it is probably okay to skip preprocessing for better + # efficiency; we use 1e-5 below instead of absolute zero). + + params = common.input_details(interpreter, 'quantization_parameters') + scale = params['scales'] + zero_point = params['zero_points'] + + if abs(scale * std - 1) < 1e-5 and abs(mean - zero_point) < 1e-5: + # Input data does not require preprocessing. 
+ common.set_input(interpreter, resize_im) + else: + # Input data requires preprocessing + normalized_input = (np.asarray(resize_im) - mean) / (std * scale) + zero_point + np.clip(normalized_input, 0, 255, out=normalized_input) + common.set_input(interpreter, normalized_input.astype(np.uint8)) + """ + + # Run inference + start_inference_time = time.perf_counter() + interpreter.invoke() + inferenceMs = int((time.perf_counter() - start_inference_time) * 1000) + + # Get output + outputs = [] + objs = detect.get_objects(interpreter, score_threshold, scale) + for obj in objs: + class_id = obj.id + caption = labels.get(class_id, class_id) + score = float(obj.score) + # ymin, xmin, ymax, xmax = obj.bbox + xmin, ymin, xmax, ymax = obj.bbox + + if score >= score_threshold: + detection = { + "confidence": score, + "label": caption, + "x_min": xmin, + "y_min": ymin, + "x_max": xmax, + "y_max": ymax, + } + + outputs.append(detection) + + return { + "success" : True, + "count" : len(outputs), + "predictions" : outputs, + "inferenceMs" : inferenceMs + } + +from PIL import Image +from PIL import ImageDraw + +def draw_objects(draw, objs, labels): + """Draws the bounding box and label for each object.""" + for obj in objs: + bbox = obj.bbox + draw.rectangle([(bbox.xmin, bbox.ymin), (bbox.xmax, bbox.ymax)], + outline='red') + draw.text((bbox.xmin + 10, bbox.ymin + 10), + '%s\n%.2f' % (labels.get(obj.id, obj.id), obj.score), + fill='red') + +def main(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('-m', '--model', required=True, + help='File path of .tflite file') + parser.add_argument('-i', '--input', required=True, + help='File path of image to process') + parser.add_argument('-l', '--labels', help='File path of labels file') + parser.add_argument('-t', '--threshold', type=float, default=0.4, + help='Score threshold for detected objects') + parser.add_argument('-o', '--output', + help='File path for the result image with annotations') + parser.add_argument('-c', '--count', type=int, default=5, + help='Number of times to run inference') + args = parser.parse_args() + + labels = read_label_file(args.labels) if args.labels else {} + interpreter = make_interpreter(args.model) + interpreter.allocate_tensors() + + image = Image.open(args.input) + _, scale = common.set_resized_input( + interpreter, image.size, lambda size: image.resize(size, Image.ANTIALIAS)) + + print('----INFERENCE TIME----') + print('Note: The first inference is slow because it includes', + 'loading the model into Edge TPU memory.') + for _ in range(args.count): + start = time.perf_counter() + interpreter.invoke() + inference_time = time.perf_counter() - start + objs = detect.get_objects(interpreter, args.threshold, scale) + print('%.2f ms' % (inference_time * 1000)) + + print('-------RESULTS--------') + if not objs: + print('No objects detected') + + for obj in objs: + print(labels.get(obj.id, obj.id)) + print(' id: ', obj.id) + print(' score: ', obj.score) + print(' bbox: ', obj.bbox) + + if args.output: + image = image.convert('RGB') + draw_objects(ImageDraw.Draw(image), objs, labels) + image.save(args.output) + image.show() + +if __name__ == '__main__': + main() diff --git a/src/modules/ObjectDetectionCoral/objectdetection_coral_adapter.py b/src/modules/ObjectDetectionCoral/objectdetection_coral_adapter.py new file mode 100644 index 00000000..06e9ad22 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/objectdetection_coral_adapter.py @@ -0,0 +1,135 @@ +# Import our 
general libraries
+import sys
+import time
+import threading
+
+# Import the CodeProject.AI SDK. This will add to the PATH var for future imports
+sys.path.append("../../SDK/Python")
+from common import JSON
+from request_data import RequestData
+from module_runner import ModuleRunner
+
+# Import the module's options, and PIL for image handling
+from options import Options
+from PIL import UnidentifiedImageError, Image
+
+# Import the methods of the module we're wrapping
+from objectdetection_coral import init_detect, do_detect
+
+opts = Options()
+sem  = threading.Semaphore()
+
+class CoralObjectDetector_adapter(ModuleRunner):
+
+    # async
+    def initialise(self) -> None:
+        # If the module was launched outside of the server then the queue name
+        # wasn't set. This is normally fine, but here we want the queue to be
+        # the same as the other object detection queues
+        if not self.launched_by_server:
+            self.queue_name = "objectdetection_queue"
+
+        if self.support_GPU:
+            self.support_GPU = self.hasCoralTPU
+
+        if self.support_GPU:
+            print("Edge TPU detected")
+            self.execution_provider = "TPU"
+
+        device = init_detect(opts)
+        if device.upper() == "TPU":
+            self.execution_provider = "TPU"
+        else:
+            self.execution_provider = "CPU"
+
+    # async
+    def process(self, data: RequestData) -> JSON:
+
+        # The route to here is /v1/vision/detection
+
+        if data.command == "list-custom":           # list all models available
+            return { "success": True, "models": [ 'MobileNet SSD' ] }
+
+        if data.command == "detect" or data.command == "custom":
+            threshold: float = float(data.get_value("min_confidence", opts.min_confidence))
+            img: Image       = data.get_image(0)
+
+            # response = await self.do_detection(img, threshold)
+            response = self.do_detection(img, threshold)
+        else:
+            # await self.report_error_async(None, __file__, f"Unknown command {data.command}")
+            self.report_error(None, __file__, f"Unknown command {data.command}")
+            response = { "success": False, "error": "unsupported command" }
+
+        return response
+
+    # async
+    def do_detection(self, img: Image, score_threshold: float):
+
+        start_process_time = time.perf_counter()
+
+        try:
+            # An attempt to fix "RuntimeError: There is at least 1 reference to
+            # internal data in the interpreter in the form of a numpy array or
+            # slice. Be sure to only hold the function returned from tensor() if
+            # you are using raw data access."
+            if not sem.acquire(timeout=1):
+                return {
+                    "success"     : False,
+                    "predictions" : [],
+                    "message"     : "The interpreter is in use. Please try again later",
+                    "count"       : 0,
+                    "processMs"   : int((time.perf_counter() - start_process_time) * 1000),
+                    "inferenceMs" : 0
+                }
+
+            try:
+                result = do_detect(opts, img, score_threshold)
+            finally:
+                # Always release the semaphore, even if detection throws, so
+                # later requests aren't left waiting on it forever.
+                sem.release()
+
+            if not result['success']:
+                return {
+                    "success"     : False,
+                    "predictions" : [],
+                    "message"     : '',
+                    "error"       : result["error"] if "error" in result else "Unable to perform detection",
+                    "count"       : 0,
+                    "processMs"   : int((time.perf_counter() - start_process_time) * 1000),
+                    "inferenceMs" : result['inferenceMs']
+                }
+
+            predictions = result["predictions"]
+            if len(predictions) > 3:
+                message = 'Found ' + (', '.join(det["label"] for det in predictions[0:3])) + "..."
+ elif len(predictions) > 0: + message = 'Found ' + (', '.join(det["label"] for det in predictions)) + elif "error" in result: + message = result["error"] + else: + message = "No objects found" + + # print(message) + + return { + "success" : result['success'], + "predictions" : result['predictions'], + "message" : message, + "count" : result["count"], + "processMs" : int((time.perf_counter() - start_process_time) * 1000), + "inferenceMs" : result['inferenceMs'] + } + + except UnidentifiedImageError as img_ex: + # await self.report_error_async(img_ex, __file__, "The image provided was of an unknown type") + self.report_error(img_ex, __file__, "The image provided was of an unknown type") + return { "success": False, "error": "invalid image file" } + + except Exception as ex: + # await self.report_error_async(ex, __file__) + self.report_error(ex, __file__) + return { "success": False, "error": "Error occurred on the server"} + + +if __name__ == "__main__": + CoralObjectDetector_adapter().start_loop() diff --git a/src/modules/ObjectDetectionCoral/options.py b/src/modules/ObjectDetectionCoral/options.py new file mode 100644 index 00000000..e600c804 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/options.py @@ -0,0 +1,84 @@ +import os +from module_options import ModuleOptions + +class Settings: + def __init__(self, resolution, std_model_name, tpu_model_name, labels_name): + self.resolution = resolution + self.cpu_model_name = std_model_name + self.tpu_model_name = tpu_model_name + self.labels_name = labels_name + +class Options: + + def __init__(self): + + # ------------------------------------------------------------------------- + # Setup constants + + # Models at https://coral.ai/models/object-detection/ + self.MODEL_SETTINGS = { + # Large: SSD/FPN MobileNet V1 90 objects, COCO 640x640x3 TF-lite v2 229.4 ms 31.1% mAP + "large": Settings(640, 'tf2_ssd_mobilenet_v1_fpn_640x640_coco17_ptq.tflite', + 'tf2_ssd_mobilenet_v1_fpn_640x640_coco17_ptq_edgetpu.tflite', + 'tf2_ssd_mobilenet_v1_coco_labels.txt'), + # Medium: EfficientDet-Lite3 90 objects, COCO 512x512x3 TF-lite v2 107.6 ms 39.4% mAP + "medium": Settings(512, 'efficientdet_lite3_512_ptq.tflite', + 'efficientdet_lite3_512_ptq_edgetpu.tflite', + 'efficientdet_lite3_512_ptq_labels.txt'), + # Small: SSD/FPN MobileNet V2 90 objects, COCO 300x300x3 TF-lite v2 7.6 ms 22.4% mAP + "small": Settings(300, 'tf2_ssd_mobilenet_v2_coco17_ptq.tflite', + 'tf2_ssd_mobilenet_v2_coco17_ptq_edgetpu.tflite', + 'tf2_ssd_mobilenet_v2_coco17_labels.txt'), + + # Tiny: MobileNet V2 90 objects, COCO 300x300x3 TF-lite v2 Quant + "tiny": Settings(300, 'ssd_mobilenet_v2_coco_quant_postprocess.tflite', + 'ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite', + 'ssd_mobilenet_v2_coco_quant_postprocess_labels.txt'), + } + + self.NUM_THREADS = 1 + self.MIN_CONFIDENCE = 0.5 + + # ------------------------------------------------------------------------- + # Setup values + + self._show_env_variables = True + + self.module_path = ModuleOptions.module_path + self.models_dir = os.path.normpath(ModuleOptions.getEnvVariable("MODELS_DIR", f"{self.module_path}/assets")) + self.model_size = ModuleOptions.getEnvVariable("MODEL_SIZE", "Small") # small, medium, large + + # custom_models_dir = os.path.normpath(ModuleOptions.getEnvVariable("CUSTOM_MODELS_DIR", f"{module_path}/custom-models")) + + self.num_threads = int(ModuleOptions.getEnvVariable("NUM_THREADS", self.NUM_THREADS)) + self.min_confidence = float(ModuleOptions.getEnvVariable("MIN_CONFIDENCE", self.MIN_CONFIDENCE)) + + 
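+        # Each of the settings above can be overridden per install via
+        # environment variables (MODELS_DIR, MODEL_SIZE, NUM_THREADS,
+        # MIN_CONFIDENCE), typically set in the modulesettings.*.json files.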
self.sleep_time = 0.01 + + # Normalise input + self.model_size = self.model_size.lower() + if self.model_size == "tiny": + self.model_size = "small" + if self.model_size not in [ "tiny", "small", "medium", "large" ]: + self.model_size = "small" + + # Get settings + settings = self.MODEL_SETTINGS[self.model_size] + self.cpu_model_name = settings.cpu_model_name + self.tpu_model_name = settings.tpu_model_name + self.labels_name = settings.labels_name + + # pre-chew + self.model_cpu_file = os.path.normpath(os.path.join(self.models_dir, self.cpu_model_name)) + self.model_tpu_file = os.path.normpath(os.path.join(self.models_dir, self.tpu_model_name)) + self.label_file = os.path.normpath(os.path.join(self.models_dir, self.labels_name)) + + # ------------------------------------------------------------------------- + # dump the important variables + + if self._show_env_variables: + print(f"Debug: MODULE_PATH: {self.module_path}") + print(f"Debug: MODELS_DIR: {self.models_dir}") + print(f"Debug: MODEL_SIZE: {self.model_size}") + print(f"Debug: CPU_MODEL_NAME: {self.cpu_model_name}") + print(f"Debug: TPU_MODEL_NAME: {self.tpu_model_name}") diff --git a/src/modules/ObjectDetectionCoral/package.bat b/src/modules/ObjectDetectionCoral/package.bat new file mode 100644 index 00000000..184d9cba --- /dev/null +++ b/src/modules/ObjectDetectionCoral/package.bat @@ -0,0 +1,8 @@ +@Echo off +REM Module Packaging script. To be called from create_packages.bat + +set moduleId=%~1 +set version=%~2 + +tar -caf %moduleId%-%version%.zip --exclude=__pycache__ --exclude=edgetpu_runtime --exclude=*.development.* --exclude=*.log ^ + pycoral\* *.py modulesettings.* requirements.* install.sh install.bat diff --git a/src/modules/ObjectDetectionCoral/pycoral/__init__.py b/src/modules/ObjectDetectionCoral/pycoral/__init__.py new file mode 100644 index 00000000..349dbdd0 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/__init__.py @@ -0,0 +1,17 @@ +# Lint as: python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Version information for Coral Python APIs.""" + +__version__ = "2.0.0" diff --git a/src/modules/ObjectDetectionCoral/pycoral/adapters/__init__.py b/src/modules/ObjectDetectionCoral/pycoral/adapters/__init__.py new file mode 100644 index 00000000..30257881 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/adapters/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
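Before moving on to the vendored pycoral code: a sketch of how the `Options` class above resolves `MODEL_SIZE` to concrete model files. This is illustrative only; it assumes the module's assets have already been downloaded into `assets/` and that the SDK's `module_options` is importable.

```python
# Hypothetical usage of the Options class defined above.
import os
os.environ["MODEL_SIZE"] = "medium"    # one of small / medium / large

from options import Options

opts = Options()
print(opts.model_cpu_file)   # e.g. .../assets/efficientdet_lite3_512_ptq.tflite
print(opts.model_tpu_file)   # e.g. .../assets/efficientdet_lite3_512_ptq_edgetpu.tflite
print(opts.label_file)       # e.g. .../assets/efficientdet_lite3_512_ptq_labels.txt
```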
diff --git a/src/modules/ObjectDetectionCoral/pycoral/adapters/classify.py b/src/modules/ObjectDetectionCoral/pycoral/adapters/classify.py new file mode 100644 index 00000000..eed057c1 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/adapters/classify.py @@ -0,0 +1,106 @@ +# Lint as: python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Functions to work with a classification model.""" + +import collections +import operator +import numpy as np + + +Class = collections.namedtuple('Class', ['id', 'score']) +"""Represents a single classification, with the following fields: + + .. py:attribute:: id + + The class id. + + .. py:attribute:: score + + The prediction score. +""" + + +def num_classes(interpreter): + """Gets the number of classes output by a classification model. + + Args: + interpreter: The ``tf.lite.Interpreter`` holding the model. + + Returns: + The total number of classes output by the model. + """ + return np.prod(interpreter.get_output_details()[0]['shape']) + + +def get_scores(interpreter): + """Gets the output (all scores) from a classification model, dequantizing it if necessary. + + Args: + interpreter: The ``tf.lite.Interpreter`` to query for output. + + Returns: + The output tensor (flattened and dequantized) as :obj:`numpy.array`. + """ + output_details = interpreter.get_output_details()[0] + output_data = interpreter.tensor(output_details['index'])().flatten() + + if np.issubdtype(output_details['dtype'], np.integer): + scale, zero_point = output_details['quantization'] + # Always convert to np.int64 to avoid overflow on subtraction. + return scale * (output_data.astype(np.int64) - zero_point) + + return output_data.copy() + + +def get_classes_from_scores(scores, + top_k=float('inf'), + score_threshold=-float('inf')): + """Gets results from a classification model as a list of ordered classes, based on given scores. + + Args: + scores: The output from a classification model. Must be flattened and + dequantized. + top_k (int): The number of top results to return. + score_threshold (float): The score threshold for results. All returned + results have a score greater-than-or-equal-to this value. + + Returns: + A list of :obj:`Class` objects representing the classification results, + ordered by scores. + """ + top_k = min(top_k, len(scores)) + classes = [ + Class(i, scores[i]) + for i in np.argpartition(scores, -top_k)[-top_k:] + if scores[i] >= score_threshold + ] + return sorted(classes, key=operator.itemgetter(1), reverse=True) + + +def get_classes(interpreter, top_k=float('inf'), score_threshold=-float('inf')): + """Gets results from a classification model as a list of ordered classes. + + Args: + interpreter: The ``tf.lite.Interpreter`` to query for results. + top_k (int): The number of top results to return. + score_threshold (float): The score threshold for results. All returned + results have a score greater-than-or-equal-to this value. 
+ + Returns: + A list of :obj:`Class` objects representing the classification results, + ordered by scores. + """ + return get_classes_from_scores( + get_scores(interpreter), top_k, score_threshold) diff --git a/src/modules/ObjectDetectionCoral/pycoral/adapters/common.py b/src/modules/ObjectDetectionCoral/pycoral/adapters/common.py new file mode 100644 index 00000000..e1f55e25 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/adapters/common.py @@ -0,0 +1,100 @@ +# Lint as: python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Functions to work with any model.""" + +import numpy as np + + +def output_tensor(interpreter, i): + """Gets a model's ith output tensor. + + Args: + interpreter: The ``tf.lite.Interpreter`` holding the model. + i (int): The index position of an output tensor. + Returns: + The output tensor at the specified position. + """ + return interpreter.tensor(interpreter.get_output_details()[i]['index'])() + + +def input_details(interpreter, key): + """Gets a model's input details by specified key. + + Args: + interpreter: The ``tf.lite.Interpreter`` holding the model. + key (int): The index position of an input tensor. + Returns: + The input details. + """ + return interpreter.get_input_details()[0][key] + + +def input_size(interpreter): + """Gets a model's input size as (width, height) tuple. + + Args: + interpreter: The ``tf.lite.Interpreter`` holding the model. + Returns: + The input tensor size as (width, height) tuple. + """ + _, height, width, _ = input_details(interpreter, 'shape') + return width, height + + +def input_tensor(interpreter): + """Gets a model's input tensor view as numpy array of shape (height, width, 3). + + Args: + interpreter: The ``tf.lite.Interpreter`` holding the model. + Returns: + The input tensor view as :obj:`numpy.array` (height, width, 3). + """ + tensor_index = input_details(interpreter, 'index') + return interpreter.tensor(tensor_index)()[0] + + +def set_input(interpreter, data): + """Copies data to a model's input tensor. + + Args: + interpreter: The ``tf.lite.Interpreter`` to update. + data: The input tensor. + """ + input_tensor(interpreter)[:, :] = data + + +def set_resized_input(interpreter, size, resize): + """Copies a resized and properly zero-padded image to a model's input tensor. + + Args: + interpreter: The ``tf.lite.Interpreter`` to update. + size (tuple): The original image size as (width, height) tuple. + resize: A function that takes a (width, height) tuple, and returns an + image resized to those dimensions. + + Returns: + The resized tensor with zero-padding as tuple + (resized_tensor, resize_ratio). 
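+
+  For example (sketch; assumes ``img`` is a PIL image and the interpreter's
+  tensors are already allocated)::
+
+    _, scale = set_resized_input(
+        interpreter, img.size, lambda size: img.resize(size, Image.LANCZOS))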
+ """ + width, height = input_size(interpreter) + w, h = size + scale = min(width / w, height / h) + w, h = int(w * scale), int(h * scale) + tensor = input_tensor(interpreter) + tensor.fill(0) # padding + _, _, channel = tensor.shape + result = resize((w, h)) + tensor[:h, :w] = np.reshape(result, (h, w, channel)) + return result, (scale, scale) diff --git a/src/modules/ObjectDetectionCoral/pycoral/adapters/detect.py b/src/modules/ObjectDetectionCoral/pycoral/adapters/detect.py new file mode 100644 index 00000000..f7b63cd7 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/adapters/detect.py @@ -0,0 +1,237 @@ +# Lint as: python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Functions to work with a detection model.""" + +import collections +from pycoral.adapters import common + +Object = collections.namedtuple('Object', ['id', 'score', 'bbox']) +"""Represents a detected object. + + .. py:attribute:: id + + The object's class id. + + .. py:attribute:: score + + The object's prediction score. + + .. py:attribute:: bbox + + A :obj:`BBox` object defining the object's location. +""" + + +class BBox(collections.namedtuple('BBox', ['xmin', 'ymin', 'xmax', 'ymax'])): + """The bounding box for a detected object. + + .. py:attribute:: xmin + + X-axis start point + + .. py:attribute:: ymin + + Y-axis start point + + .. py:attribute:: xmax + + X-axis end point + + .. py:attribute:: ymax + + Y-axis end point + """ + __slots__ = () + + @property + def width(self): + """The bounding box width.""" + return self.xmax - self.xmin + + @property + def height(self): + """The bounding box height.""" + return self.ymax - self.ymin + + @property + def area(self): + """The bound box area.""" + return self.width * self.height + + @property + def valid(self): + """Indicates whether bounding box is valid or not (boolean). + + A valid bounding box has xmin <= xmax and ymin <= ymax (equivalent + to width >= 0 and height >= 0). + """ + return self.width >= 0 and self.height >= 0 + + def scale(self, sx, sy): + """Scales the bounding box. + + Args: + sx (float): Scale factor for the x-axis. + sy (float): Scale factor for the y-axis. + + Returns: + A :obj:`BBox` object with the rescaled dimensions. + """ + return BBox( + xmin=sx * self.xmin, + ymin=sy * self.ymin, + xmax=sx * self.xmax, + ymax=sy * self.ymax) + + def translate(self, dx, dy): + """Translates the bounding box position. + + Args: + dx (int): Number of pixels to move the box on the x-axis. + dy (int): Number of pixels to move the box on the y-axis. + + Returns: + A :obj:`BBox` object at the new position. + """ + return BBox( + xmin=dx + self.xmin, + ymin=dy + self.ymin, + xmax=dx + self.xmax, + ymax=dy + self.ymax) + + def map(self, f): + """Maps all box coordinates to a new position using a given function. + + Args: + f: A function that takes a single coordinate and returns a new one. + + Returns: + A :obj:`BBox` with the new coordinates. 
+ """ + return BBox( + xmin=f(self.xmin), + ymin=f(self.ymin), + xmax=f(self.xmax), + ymax=f(self.ymax)) + + @staticmethod + def intersect(a, b): + """Gets a box representing the intersection between two boxes. + + Args: + a: :obj:`BBox` A. + b: :obj:`BBox` B. + + Returns: + A :obj:`BBox` representing the area where the two boxes intersect + (may be an invalid box, check with :func:`valid`). + """ + return BBox( + xmin=max(a.xmin, b.xmin), + ymin=max(a.ymin, b.ymin), + xmax=min(a.xmax, b.xmax), + ymax=min(a.ymax, b.ymax)) + + @staticmethod + def union(a, b): + """Gets a box representing the union of two boxes. + + Args: + a: :obj:`BBox` A. + b: :obj:`BBox` B. + + Returns: + A :obj:`BBox` representing the unified area of the two boxes + (always a valid box). + """ + return BBox( + xmin=min(a.xmin, b.xmin), + ymin=min(a.ymin, b.ymin), + xmax=max(a.xmax, b.xmax), + ymax=max(a.ymax, b.ymax)) + + @staticmethod + def iou(a, b): + """Gets the intersection-over-union value for two boxes. + + Args: + a: :obj:`BBox` A. + b: :obj:`BBox` B. + + Returns: + The intersection-over-union value: 1.0 meaning the two boxes are + perfectly aligned, 0 if not overlapping at all (invalid intersection). + """ + intersection = BBox.intersect(a, b) + if not intersection.valid: + return 0.0 + area = intersection.area + return area / (a.area + b.area - area) + + +def get_objects(interpreter, + score_threshold=-float('inf'), + image_scale=(1.0, 1.0)): + """Gets results from a detection model as a list of detected objects. + + Args: + interpreter: The ``tf.lite.Interpreter`` to query for results. + score_threshold (float): The score threshold for results. All returned + results have a score greater-than-or-equal-to this value. + image_scale (float, float): Scaling factor to apply to the bounding boxes as + (x-scale-factor, y-scale-factor), where each factor is from 0 to 1.0. + + Returns: + A list of :obj:`Object` objects, which each contains the detected object's + id, score, and bounding box as :obj:`BBox`. + """ + # If a model has signature, we use the signature output tensor names to parse + # the results. Otherwise, we parse the results based on some assumption of the + # output tensor order and size. 
+ # pylint: disable=protected-access + signature_list = interpreter._get_full_signature_list() + # pylint: enable=protected-access + if signature_list: + if len(signature_list) > 1: + raise ValueError('Only support model with one signature.') + signature = signature_list[next(iter(signature_list))] + count = int(interpreter.tensor(signature['outputs']['output_0'])()[0]) + scores = interpreter.tensor(signature['outputs']['output_1'])()[0] + class_ids = interpreter.tensor(signature['outputs']['output_2'])()[0] + boxes = interpreter.tensor(signature['outputs']['output_3'])()[0] + elif common.output_tensor(interpreter, 3).size == 1: + boxes = common.output_tensor(interpreter, 0)[0] + class_ids = common.output_tensor(interpreter, 1)[0] + scores = common.output_tensor(interpreter, 2)[0] + count = int(common.output_tensor(interpreter, 3)[0]) + else: + scores = common.output_tensor(interpreter, 0)[0] + boxes = common.output_tensor(interpreter, 1)[0] + count = (int)(common.output_tensor(interpreter, 2)[0]) + class_ids = common.output_tensor(interpreter, 3)[0] + + width, height = common.input_size(interpreter) + image_scale_x, image_scale_y = image_scale + sx, sy = width / image_scale_x, height / image_scale_y + + def make(i): + ymin, xmin, ymax, xmax = boxes[i] + return Object( + id=int(class_ids[i]), + score=float(scores[i]), + bbox=BBox(xmin=xmin, ymin=ymin, xmax=xmax, + ymax=ymax).scale(sx, sy).map(int)) + + return [make(i) for i in range(count) if scores[i] >= score_threshold] diff --git a/src/modules/ObjectDetectionCoral/pycoral/adapters/segment.py b/src/modules/ObjectDetectionCoral/pycoral/adapters/segment.py new file mode 100644 index 00000000..56135f12 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/adapters/segment.py @@ -0,0 +1,21 @@ +# Lint as: python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Functions to work with segmentation models.""" +import numpy as np + + +def get_output(interpreter): + output_details = interpreter.get_output_details()[0] + return interpreter.tensor(output_details['index'])()[0].astype(np.uint8) diff --git a/src/modules/ObjectDetectionCoral/pycoral/learn/__init__.py b/src/modules/ObjectDetectionCoral/pycoral/learn/__init__.py new file mode 100644 index 00000000..086a24e6 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/learn/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
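Pulled together, the adapters above (`common`, `detect`) give the complete inference loop that `objectdetection_coral.py` builds on. A minimal sketch, with placeholder file paths, assuming the PyCoral runtime is installed:

```python
from PIL import Image
from pycoral.adapters import common, detect
from pycoral.utils.dataset import read_label_file
from pycoral.utils.edgetpu import make_interpreter

interpreter = make_interpreter('model_edgetpu.tflite')  # placeholder path
interpreter.allocate_tensors()
labels = read_label_file('labels.txt')                  # placeholder path

img = Image.open('image.jpg')                           # placeholder path
_, scale = common.set_resized_input(
    interpreter, img.size, lambda size: img.resize(size, Image.LANCZOS))
interpreter.invoke()

# get_objects maps boxes back to original image coordinates via `scale`
for obj in detect.get_objects(interpreter, score_threshold=0.4, image_scale=scale):
    print(labels.get(obj.id, obj.id), obj.score, obj.bbox)
```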
diff --git a/src/modules/ObjectDetectionCoral/pycoral/learn/backprop/__init__.py b/src/modules/ObjectDetectionCoral/pycoral/learn/backprop/__init__.py new file mode 100644 index 00000000..086a24e6 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/learn/backprop/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/modules/ObjectDetectionCoral/pycoral/learn/backprop/softmax_regression.py b/src/modules/ObjectDetectionCoral/pycoral/learn/backprop/softmax_regression.py new file mode 100644 index 00000000..f2fd372d --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/learn/backprop/softmax_regression.py @@ -0,0 +1,143 @@ +# Lint as: python3 +# pylint:disable=g-doc-args,g-short-docstring-punctuation,g-no-space-after-docstring-summary +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""A softmax regression model for on-device backpropagation of the last layer.""" +from pycoral.pybind import _pywrap_coral + + +class SoftmaxRegression: + """An implementation of the softmax regression function (multinominal logistic + + regression) that operates as the last layer of your classification model, and + allows for on-device training with backpropagation (for this layer only). + + The input for this layer must be an image embedding, which should be the + output of your embedding extractor (the backbone of your model). Once given + here, the input is fed to a fully-connected layer where weights and bias are + applied, and then passed to the softmax function to receive the final + probability distribution based on the number of classes for your model: + + training/inference input (image embedding) --> fully-connected layer --> + softmax function + + When you're conducting training with :func:`train_with_sgd`, the process uses + a cross-entropy loss function to measure the error and then update the weights + of the fully-connected layer (backpropagation). + + When you're satisfied with the inference accuracy, call + :func:`serialize_model` to create a new model in `bytes` with this + retrained layer appended to your embedding extractor. You can then run + inferences with this new model as usual (using TensorFlow Lite interpreter + API). + + .. note:: + + This last layer (FC + softmax) in the retrained model always runs on the + host CPU instead of the Edge TPU. 
As long as the rest of your embedding + extractor model is compiled for the Edge TPU, then running this last layer + on the CPU should not significantly affect the inference speed. + + + """ + + def __init__(self, + feature_dim=None, + num_classes=None, + weight_scale=0.01, + reg=0.0): + """For more detail, see the `Stanford CS231 explanation of the softmax + classifier `_. + + Args: + feature_dim (int): The dimension of the input feature (length of the + feature vector). + num_classes (int): The number of output classes. + weight_scale (float): A weight factor for computing new weights. The + backpropagated weights are drawn from standard normal distribution, then + multiplied by this number to keep the scale small. + reg (float): The regularization strength. + """ + self.model = _pywrap_coral.SoftmaxRegressionModelWrapper( + feature_dim, num_classes, weight_scale, reg) + + def serialize_model(self, in_model_path): + """Appends learned weights to your TensorFlow Lite model and serializes it. + + Beware that learned weights and biases are quantized from float32 to uint8. + + Args: + in_model_path (str): Path to the embedding extractor model (``.tflite`` + file). + + Returns: + The TF Lite model with new weights, as a `bytes` object. + """ + return self.model.AppendLayersToEmbeddingExtractor(in_model_path) + + def get_accuracy(self, mat_x, labels): + """Calculates the model's accuracy (percentage correct). + + The calculation is on performing inferences on the given data and labels. + + Args: + mat_x (:obj:`numpy.array`): The input data (image embeddings) to test, + as a matrix of shape ``NxD``, where ``N`` is number of inputs to test + and ``D`` is the dimension of the input feature (length of the feature + vector). + labels (:obj:`numpy.array`): An array of the correct label indices that + correspond to the test data passed in ``mat_x`` (class label index in + one-hot vector). + + Returns: + The accuracy (the percent correct) as a float. + """ + return self.model.GetAccuracy(mat_x, labels) + + def train_with_sgd(self, + data, + num_iter, + learning_rate, + batch_size=100, + print_every=100): + """Trains your model using stochastic gradient descent (SGD). + + The training data must be structured in a dictionary as specified in the + ``data`` argument below. Notably, the training/validation images must be + passed as image embeddings, not as the original image input. That is, run + the images through your embedding extractor (the backbone of your graph) and + use the resulting image embeddings here. + + Args: + data (dict): A dictionary that maps ``'data_train'`` to an array of + training image embeddings, ``'labels_train'`` to an array of training + labels, ``'data_val'`` to an array of validation image embeddings, and + ``'labels_val'`` to an array of validation labels. + num_iter (int): The number of iterations to train. + learning_rate (float): The learning rate (step size) to use in training. + batch_size (int): The number of training examples to use in each + iteration. + print_every (int): The number of iterations for which to print the loss, + and training/validation accuracy. For example, ``20`` prints the stats + for every 20 iterations. ``0`` disables printing. 
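+
+    For example (sketch; the embedding and label arrays are assumed to
+    already exist)::
+
+      data = {'data_train': train_embeddings, 'labels_train': train_labels,
+              'data_val': val_embeddings, 'labels_val': val_labels}
+      model.train_with_sgd(data, num_iter=500, learning_rate=0.01)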
+ """ + train_config = _pywrap_coral.TrainConfigWrapper(num_iter, batch_size, + print_every) + + training_data = _pywrap_coral.TrainingDataWrapper(data['data_train'], + data['data_val'], + data['labels_train'], + data['labels_val']) + + self.model.Train(training_data, train_config, learning_rate) diff --git a/src/modules/ObjectDetectionCoral/pycoral/learn/imprinting/__init__.py b/src/modules/ObjectDetectionCoral/pycoral/learn/imprinting/__init__.py new file mode 100644 index 00000000..086a24e6 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/learn/imprinting/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/modules/ObjectDetectionCoral/pycoral/learn/imprinting/engine.py b/src/modules/ObjectDetectionCoral/pycoral/learn/imprinting/engine.py new file mode 100644 index 00000000..779a4865 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/learn/imprinting/engine.py @@ -0,0 +1,80 @@ +# Lint as: python3 +# pylint:disable=g-doc-args,g-short-docstring-punctuation,invalid-name,missing-class-docstring +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""A weight imprinting engine that performs low-shot transfer-learning for image classification models. + +For more information about how to use this API and how to create the type of +model required, see +`Retrain a classification model on-device with weight imprinting +`_. +""" + +from pycoral.pybind import _pywrap_coral + + +class ImprintingEngine: + + def __init__(self, model_path, keep_classes=False): + """Performs weight imprinting (transfer learning) with the given model. + + Args: + model_path (str): Path to the ``.tflite`` model you want to retrain. + This must be a model that's specially-designed for this API. You + can use our `weight imprinting model + `_ that + has a pre-trained base model, or you can train the base model yourself + by following our guide to `Retrain the base MobileNet model + `_. + keep_classes (bool): If True, keep the existing classes from the + pre-trained model (and use training to add additional classes). If + False, drop the existing classes and train the model to include new + classes only. 
+ """ + self._engine = _pywrap_coral.ImprintingEnginePythonWrapper( + model_path, keep_classes) + + @property + def embedding_dim(self): + """Returns number of embedding dimensions.""" + return self._engine.EmbeddingDim() + + @property + def num_classes(self): + """Returns number of currently trained classes.""" + return self._engine.NumClasses() + + def serialize_extractor_model(self): + """Returns embedding extractor model as `bytes` object.""" + return self._engine.SerializeExtractorModel() + + def serialize_model(self): + """Returns newly trained model as `bytes` object.""" + return self._engine.SerializeModel() + + def train(self, embedding, class_id): + """Trains the model with the given embedding for specified class. + + You can use this to add new classes to the model or retrain classes that you + previously added using this imprinting API. + + Args: + embedding (:obj:`numpy.array`): The embedding vector for training + specified single class. + class_id (int): The label id for this class. The index must be either the + number of existing classes (to add a new class to the model) or the + index of an existing class that was trained using this imprinting API + (you can't retrain classes from the pre-trained model). + """ + self._engine.Train(embedding, class_id) diff --git a/src/modules/ObjectDetectionCoral/pycoral/pipeline/__init__.py b/src/modules/ObjectDetectionCoral/pycoral/pipeline/__init__.py new file mode 100644 index 00000000..30257881 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/pipeline/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/modules/ObjectDetectionCoral/pycoral/pipeline/pipelined_model_runner.py b/src/modules/ObjectDetectionCoral/pycoral/pipeline/pipelined_model_runner.py new file mode 100644 index 00000000..ee92f0dd --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/pipeline/pipelined_model_runner.py @@ -0,0 +1,175 @@ +# Lint as: python3 +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""The pipeline API allows you to run a segmented model across multiple Edge TPUs. + +For more information, see `Pipeline a model with multiple Edge +TPUs `_. +""" + +import numpy as np + +from pycoral.pybind import _pywrap_coral + + +def _get_names(details): + """Returns a set of names given input/output tensor details.""" + return {d['name'] for d in details} + + +class PipelinedModelRunner: + """Manages the model pipeline. 
+ + To create an instance:: + + interpreter_a = tflite.Interpreter(model_path=model_segment_a, + experimental_delegates=delegate_a) + interpreter_a.allocate_tensors() + interpreter_b = tflite.Interpreter(model_path=model_segment_b, + experimental_delegates=delegate_b) + interpreter_b.allocate_tensors() + interpreters = [interpreter_a, interpreter_b] + runner = PipelinedModelRunner(interpreters) + """ + + def __init__(self, interpreters): + """Be sure you first call ``allocate_tensors()`` on each interpreter. + + Args: + interpreters: A list of ``tf.lite.Interpreter`` objects, one for each + segment in the pipeline. + """ + self._runner = None + + if not interpreters: + raise ValueError('At least one interpreter expected') + + # It requires that the inputs of interpreter[i] is a subset of outputs of + # interpreter[j], where j=0,...,i-1. + prev_outputs = _get_names(interpreters[0].get_input_details()) + for index, interpreter in enumerate(interpreters): + inputs = _get_names(interpreter.get_input_details()) + if not inputs.issubset(prev_outputs): + raise ValueError( + 'Interpreter {} can not get its input tensors'.format(index)) + prev_outputs.update(_get_names(interpreter.get_output_details())) + + self._interpreters = interpreters + self._runner = _pywrap_coral.PipelinedModelRunnerWrapper( + [i._native_handle() for i in interpreters]) + + self._input_types = {} + for d in self._interpreters[0].get_input_details(): + self._input_types[d['name']] = d['dtype'] + + self._output_shapes = {} + for d in self._interpreters[-1].get_output_details(): + self._output_shapes[d['name']] = d['shape'] + + def __del__(self): + if self._runner: + # Push empty request to stop the pipeline in case user forgot. + self.push({}) + num_unconsumed = 0 + # Release any unconsumed tensors if any. + while self.pop(): + num_unconsumed += 1 + if num_unconsumed: + print( + 'WARNING: {} unconsumed results in the pipeline during destruction!' + .format(num_unconsumed)) + + def set_input_queue_size(self, size): + """Sets the maximum number of inputs that may be queued for inference. + + By default, input queue size is unlimited. + + Note: It's OK to change the queue size max when PipelinedModelRunner is + active. If the new max is smaller than current queue size, pushes to + the queue are blocked until the current queue size drops below the new max. + + Args: + size (int): The input queue size max + """ + self._runner.SetInputQueueSize(size) + + def set_output_queue_size(self, size): + """Sets the maximum number of outputs that may be unconsumed. + + By default, output queue size is unlimited. + + Note: It's OK to change the queue size max when PipelinedModelRunner is + active. If the new max is smaller than current queue size, pushes to the + queue are blocked until the current queue size drops below the new max. + + Args: + size (int): The output queue size max + """ + self._runner.SetOutputQueueSize(size) + + def push(self, input_tensors): + """Pushes input tensors to trigger inference. + + Pushing an empty dict is allowed, which signals the class that no more + inputs will be added (the function will return false if inputs were pushed + after this special push). This special push allows the ``pop()`` consumer to + properly drain unconsumed output tensors. + + Caller will be blocked if the current input queue size is greater than the + queue size max (use ``set_input_queue_size()``). By default, input queue + size threshold is unlimited, in this case, call to push() is non-blocking. 
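+
+    For example, a pipeline whose first segment has a single input tensor
+    named ``x`` could be fed with (name and shape illustrative only)::
+
+      runner.push({'x': np.zeros((1, 224, 224, 3), dtype=np.uint8)})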
+
+ Args:
+ input_tensors: A dictionary with string keys and :obj:`numpy.array` values
+ representing the model's input tensors, where keys are the tensor names.
+
+ Raises:
+ RuntimeError: error while pushing the pipelined model inference request.
+ """
+ if input_tensors and len(input_tensors) != len(self._input_types):
+ raise ValueError('Expected input of length {}, but got {}'.format(
+ len(self._input_types), len(input_tensors)))
+
+ for key, tensor in input_tensors.items():
+ input_type = self._input_types[key]
+ if not isinstance(tensor, np.ndarray) or tensor.dtype != input_type:
+ raise ValueError(
+ 'Input should be a numpy array of type {}'.format(
+ input_type))
+
+ self._runner.Push(input_tensors)
+
+ def pop(self):
+ """Returns a single inference result.
+
+ This function blocks the calling thread until a result is returned.
+
+ Returns:
+ A dictionary with string keys and :obj:`numpy.array` values
+ representing the model's output tensors, where keys are the tensor names.
+ Returns None after ``push()`` receives an empty dict, indicating
+ there are no more output tensors available.
+
+ Raises:
+ RuntimeError: error while retrieving pipelined model inference results.
+ """
+ result = self._runner.Pop()
+ if result:
+ result = {k: v.reshape(self._output_shapes[k]) for k, v in result.items()}
+ return result
+
+ def interpreters(self):
+ """Returns the list of interpreters that constructed this PipelinedModelRunner."""
+ return self._interpreters diff --git a/src/modules/ObjectDetectionCoral/pycoral/utils/__init__.py b/src/modules/ObjectDetectionCoral/pycoral/utils/__init__.py new file mode 100644 index 00000000..086a24e6 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/utils/__init__.py @@ -0,0 +1,13 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. diff --git a/src/modules/ObjectDetectionCoral/pycoral/utils/dataset.py b/src/modules/ObjectDetectionCoral/pycoral/utils/dataset.py new file mode 100644 index 00000000..caa141ed --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/utils/dataset.py @@ -0,0 +1,45 @@
+# Lint as: python3
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities to help process a dataset."""
+
+import re
+
+
+def read_label_file(file_path):
+ """Reads labels from a text file and returns them as a dictionary.
+
+ This function supports label files with the following formats:
+
+ + Each line contains an id and description separated by a colon or space.
+ Example: ``0:cat`` or ``0 cat``.
+ + Each line contains a description only. The returned label ids are based on
+ the row number.
+
+ Args:
+ file_path (str): path to the label file.
+
+ Returns:
+ Dict of (int, string) that maps label ids to descriptions.
+ """
+ with open(file_path, 'r', encoding='utf-8') as f:
+ lines = f.readlines()
+ ret = {}
+ for row_number, content in enumerate(lines):
+ pair = re.split(r'[:\s]+', content.strip(), maxsplit=1)
+ if len(pair) == 2 and pair[0].strip().isdigit():
+ ret[int(pair[0])] = pair[1].strip()
+ else:
+ ret[row_number] = content.strip()
+ return ret diff --git a/src/modules/ObjectDetectionCoral/pycoral/utils/edgetpu.py b/src/modules/ObjectDetectionCoral/pycoral/utils/edgetpu.py new file mode 100644 index 00000000..2f517f61 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/pycoral/utils/edgetpu.py @@ -0,0 +1,240 @@
+# Lint as: python3
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities for using the TensorFlow Lite Interpreter with Edge TPU."""
+
+import contextlib
+import ctypes
+import ctypes.util
+import numpy as np
+
+# pylint:disable=unused-import
+# We're trying to support 5 different platforms with 3 different libs across 2
+# different packages using libraries that are tied to out-of-date OSs, and things
+# just are not consistent. We do what we can where we can.
+try:
+ from pycoral.pybind._pywrap_coral import GetRuntimeVersion as get_runtime_version
+except: pass
+try:
+ from pycoral.pybind._pywrap_coral import InvokeWithBytes as invoke_with_bytes
+except: pass
+try:
+ from pycoral.pybind._pywrap_coral import InvokeWithDmaBuffer as invoke_with_dmabuffer
+except: pass
+try:
+ from pycoral.pybind._pywrap_coral import InvokeWithMemBuffer as invoke_with_membuffer
+except: pass
+try:
+ from pycoral.pybind._pywrap_coral import ListEdgeTpus as list_edge_tpus
+except: pass
+try:
+ from pycoral.pybind._pywrap_coral import SetVerbosity as set_verbosity
+except: pass
+try:
+ from pycoral.pybind._pywrap_coral import SupportsDmabuf as supports_dmabuf
+except: pass
+
+import platform
+
+# First determine if we have the TensorFlow Lite runtime installed, or the whole TensorFlow.
+# In either case we're looking to load TFLite models
+try:
+ from tflite_runtime.interpreter import Interpreter, load_delegate
+except ImportError:
+ import tensorflow as tf
+ Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
+
+_EDGETPU_SHARED_LIB = {
+ 'Linux': 'libedgetpu.so.1',
+ 'Darwin': 'libedgetpu.1.dylib',
+ 'Windows': 'edgetpu.dll'
+}[platform.system()]
+
+
+def load_edgetpu_delegate(options=None):
+ """Loads the Edge TPU delegate with the given options.
+
+ Args:
+ options (dict): Options that are passed to the Edge TPU delegate, via
+ ``tf.lite.load_delegate``. The only option you should use is
+ "device", which defines the Edge TPU to use. Supported values are the same
+ as `device` in :func:`make_interpreter`.
+ Returns:
+ The Edge TPU delegate object.
+ """
+ return load_delegate(_EDGETPU_SHARED_LIB, options or {})
+
+
+def make_interpreter(model_path_or_content, device=None, delegate=None):
+ """Creates a new ``tf.lite.Interpreter`` instance using the given model.
+
+ **Note:** If you have multiple Edge TPUs, you should always specify the
+ ``device`` argument.
+
+ Args:
+ model_path_or_content (str or bytes): `str` object is interpreted as
+ model path, `bytes` object is interpreted as model content.
+ device (str): The Edge TPU device you want:
+
+ + "cpu" -- use the CPU
+ + None -- use any Edge TPU (this is the default)
+ + ":<N>" -- use N-th Edge TPU (this corresponds to the enumerated
+ index position from :func:`list_edge_tpus`)
+ + "usb" -- use any USB Edge TPU
+ + "usb:<N>" -- use N-th USB Edge TPU
+ + "pci" -- use any PCIe Edge TPU
+ + "pci:<N>" -- use N-th PCIe Edge TPU
+
+ If left as None, you cannot reliably predict which device you'll get.
+ So if you have multiple Edge TPUs and want to run a specific model on
+ each one, then you must specify the device.
+ delegate: A pre-loaded Edge TPU delegate object, as provided by
+ :func:`load_edgetpu_delegate`. If provided, the `device` argument
+ is ignored.
+
+ Returns:
+ New ``tf.lite.Interpreter`` instance, or None if the delegate or model
+ could not be loaded.
+ """
+ if device == "cpu":
+ return Interpreter(model_path=model_path_or_content)
+
+ try:
+ if delegate:
+ delegates = [delegate]
+ else:
+ delegates = [load_edgetpu_delegate({'device': device} if device else {})]
+
+ if isinstance(model_path_or_content, bytes):
+ return Interpreter(
+ model_content=model_path_or_content, experimental_delegates=delegates)
+ else:
+ return Interpreter(
+ model_path=model_path_or_content, experimental_delegates=delegates)
+ except:
+ return None
+
+
+# ctypes definition of GstMapInfo. This is a stable API, guaranteed to be
+# ABI compatible for any past and future GStreamer 1.0 releases.
+# Used to get the underlying memory pointer without any copies, and without
+# native library linking against libgstreamer.
+class _GstMapInfo(ctypes.Structure):
+ _fields_ = [
+ ('memory', ctypes.c_void_p), # GstMemory *memory
+ ('flags', ctypes.c_int), # GstMapFlags flags
+ ('data', ctypes.c_void_p), # guint8 *data
+ ('size', ctypes.c_size_t), # gsize size
+ ('maxsize', ctypes.c_size_t), # gsize maxsize
+ ('user_data', ctypes.c_void_p * 4), # gpointer user_data[4]
+ ('_gst_reserved', ctypes.c_void_p * 4)
+ ] # GST_PADDING
+
+
+# Try to import GStreamer but don't fail if it's not available. If not available
+# we're probably not getting GStreamer buffers as input anyway.
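(Editorial aside: a minimal usage sketch for the make_interpreter() helper defined above. The model filenames are placeholders; list_edge_tpus() and the device strings are the ones documented in this file, and the None return on failure is specific to this vendored copy, which swallows delegate errors rather than raising.)

```python
# Sketch only: 'model_edgetpu.tflite' and 'model.tflite' are placeholder paths.
from pycoral.utils.edgetpu import list_edge_tpus, make_interpreter

device = 'usb:0' if list_edge_tpus() else 'cpu'   # prefer the first USB TPU, else CPU
interpreter = make_interpreter('model_edgetpu.tflite', device=device)
if interpreter is None:                           # this copy returns None if delegate/model load fails
    interpreter = make_interpreter('model.tflite', device='cpu')
interpreter.allocate_tensors()
```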
+_libgst = None +try: + # pylint:disable=g-import-not-at-top + import gi + gi.require_version('Gst', '1.0') + gi.require_version('GstAllocators', '1.0') + # pylint:disable=g-multiple-import + from gi.repository import Gst, GstAllocators + _libgst = ctypes.CDLL(ctypes.util.find_library('gstreamer-1.0')) + _libgst.gst_buffer_map.argtypes = [ + ctypes.c_void_p, + ctypes.POINTER(_GstMapInfo), ctypes.c_int + ] + _libgst.gst_buffer_map.restype = ctypes.c_int + _libgst.gst_buffer_unmap.argtypes = [ + ctypes.c_void_p, ctypes.POINTER(_GstMapInfo) + ] + _libgst.gst_buffer_unmap.restype = None +except (ImportError, ValueError, OSError): + pass + + +def _is_valid_ctypes_input(input_data): + if not isinstance(input_data, tuple): + return False + pointer, size = input_data + if not isinstance(pointer, ctypes.c_void_p): + return False + return isinstance(size, int) + + +@contextlib.contextmanager +def _gst_buffer_map(buffer): + """Yields gst buffer map.""" + mapping = _GstMapInfo() + ptr = hash(buffer) + success = _libgst.gst_buffer_map(ptr, mapping, Gst.MapFlags.READ) + if not success: + raise RuntimeError('gst_buffer_map failed') + try: + yield ctypes.c_void_p(mapping.data), mapping.size + finally: + _libgst.gst_buffer_unmap(ptr, mapping) + + +def _check_input_size(input_size, expected_input_size): + if input_size < expected_input_size: + raise ValueError('input size={}, expected={}.'.format( + input_size, expected_input_size)) + + +def run_inference(interpreter, input_data): + """Performs interpreter ``invoke()`` with a raw input tensor. + + Args: + interpreter: The ``tf.lite.Interpreter`` to invoke. + input_data: A 1-D array as the input tensor. Input data must be uint8 + format. Data may be ``Gst.Buffer`` or :obj:`numpy.ndarray`. + """ + input_shape = interpreter.get_input_details()[0]['shape'] + expected_input_size = np.prod(input_shape) + + interpreter_handle = interpreter._native_handle() # pylint:disable=protected-access + if isinstance(input_data, bytes): + _check_input_size(len(input_data), expected_input_size) + invoke_with_bytes(interpreter_handle, input_data) + elif _is_valid_ctypes_input(input_data): + pointer, actual_size = input_data + _check_input_size(actual_size, expected_input_size) + invoke_with_membuffer(interpreter_handle, pointer.value, + expected_input_size) + elif _libgst and isinstance(input_data, Gst.Buffer): + memory = input_data.peek_memory(0) + map_buffer = not GstAllocators.is_dmabuf_memory( + memory) or not supports_dmabuf(interpreter_handle) + if not map_buffer: + _check_input_size(memory.size, expected_input_size) + fd = GstAllocators.dmabuf_memory_get_fd(memory) + try: + invoke_with_dmabuffer(interpreter_handle, fd, expected_input_size) + except RuntimeError: + # dma-buf input didn't work, likely due to old kernel driver. This + # situation can't be detected until one inference has been tried. 
+ map_buffer = True + if map_buffer: + with _gst_buffer_map(input_data) as (pointer, actual_size): + assert actual_size >= expected_input_size + invoke_with_membuffer(interpreter_handle, pointer.value, + expected_input_size) + elif isinstance(input_data, np.ndarray): + _check_input_size(len(input_data), expected_input_size) + invoke_with_membuffer(interpreter_handle, input_data.ctypes.data, + expected_input_size) + else: + raise TypeError('input data type is not supported.') diff --git a/src/modules/ObjectDetectionCoral/requirements.linux.arm64.txt b/src/modules/ObjectDetectionCoral/requirements.linux.arm64.txt new file mode 100644 index 00000000..d9ef1508 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/requirements.linux.arm64.txt @@ -0,0 +1,19 @@ +#! Python3.8 + +# Install Tensorflow. One way or another... +# tensorflow # Installing Tensorflow, the open source machine learning framework for everyone +# tflite-runtime==2.5.0.post1 # Installing Tensorflow Lite +# ./third_party/wheels/tflite_runtime-2.5.0.post1-cp39-cp39-linux_aarch64.whl # Installing Tensorflow Lite +https://github.com/google-coral/pycoral/releases/download/v2.0.0/tflite_runtime-2.5.0.post1-cp39-cp39-linux_aarch64.whl # Installing Tensorflow Lite + +numpy>=1.16.0 # Installing NumPy, the fundamental package for array computing with Python. + +Pillow>=4.0.0 # Installing Pillow, a Python Image Library + +# Support for Python 3.6-3.9 on Linux arm64 (though we use apt-get to install pycoral, not PIP) +# 3.6 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp36-cp36m-linux_aarch64.whl +# 3.7 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp37-cp37m-linux_aarch64.whl +# 3.8 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp38-cp38-linux_aarch64.whl +# 3.9 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp39-cp39-linux_aarch64.whl + +# last line empty \ No newline at end of file diff --git a/src/modules/ObjectDetectionCoral/requirements.linux.txt b/src/modules/ObjectDetectionCoral/requirements.linux.txt new file mode 100644 index 00000000..b1788fe7 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/requirements.linux.txt @@ -0,0 +1,17 @@ +#! Python3.8 + +#tflite-runtime==2.5.0.post1 # Installing Tensorflow Lite +https://github.com/google-coral/pycoral/releases/download/v2.0.0/tflite_runtime-2.5.0.post1-cp39-cp39-linux_x86_64.whl # Installing Tensorflow Lite + +# numpy>=1.16.0 # Installing NumPy, the fundamental package for array computing with Python. 
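(Editorial aside: a minimal sketch of driving run_inference() from edgetpu.py above using its plain numpy path, which expects a flattened 1-D uint8 buffer matching the model's input size; the model path is a placeholder.)

```python
# Sketch only: exercises the numpy fast path of run_inference() above.
import numpy as np
from pycoral.utils.edgetpu import make_interpreter, run_inference

interpreter = make_interpreter('model_edgetpu.tflite')   # placeholder model path
interpreter.allocate_tensors()

_, h, w, c = interpreter.get_input_details()[0]['shape']
frame = np.zeros(h * w * c, dtype=np.uint8)              # stand-in for a flattened RGB frame
run_inference(interpreter, frame)                        # invokes via the membuffer branch
output = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])
```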
+numpy>=1.16.0 # Installing NumPy, a package for scientific computing
+
+Pillow>=4.0.0 # Installing Pillow, a Python Image Library
+
+# Support for Python 3.6-3.9 on Linux x64 (though we use apt-get to install pycoral, not PIP)
+# 3.6 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp36-cp36m-linux_x86_64.whl
+# 3.7 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp37-cp37m-linux_x86_64.whl
+# 3.8 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp38-cp38-linux_x86_64.whl
+# 3.9 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp39-cp39-linux_x86_64.whl
+
+# last line empty \ No newline at end of file diff --git a/src/modules/ObjectDetectionCoral/requirements.macos.txt b/src/modules/ObjectDetectionCoral/requirements.macos.txt new file mode 100644 index 00000000..93b2f9e5 --- /dev/null +++ b/src/modules/ObjectDetectionCoral/requirements.macos.txt @@ -0,0 +1,31 @@
+#! Python3.8
+
+
+# neither tflite nor tf-macos has versions that support new OSs. We will install via script
+# tflite-runtime==2.5.0.post1 # Installing Tensorflow Lite
+# tensorflow-macos # Installing Tensorflow for macOS, the open source machine learning framework for everyone
+# tensorflow # Installing Tensorflow, the open source machine learning framework for everyone
+
+# Note: if we wanted to support Metal and keras directly, we can add
+# https://developer.apple.com/metal/tensorflow-plugin/
+
+numpy>=1.16.0 # Installing NumPy, the fundamental package for array computing with Python.
+#numpy==1.16.2 # Installing NumPy, a package for scientific computing
+
+Pillow>=4.0.0 # Installing Pillow, a Python Image Library
+
+# Support for Python 3.6-3.9 on macOS 11 only
+# 3.6 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp36-cp36m-macosx_11_0_x86_64.whl
+# 3.7 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp37-cp37m-macosx_11_0_x86_64.whl
+# 3.8 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp38-cp38-macosx_11_0_x86_64.whl
+# 3.9 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp39-cp39-macosx_11_0_x86_64.whl
+
+# Not supported on macOS > 12
+# --extra-index-url https://google-coral.github.io/py-repo/
+# pycoral~=2.0
+# We are going to have to build this library ourselves
+# - https://hub.docker.com/r/tensorflow/build for the docker build images
+# - https://www.tensorflow.org/lite/guide/build_cmake_pip - official instructions
+# - https://medium.com/@andrewlr/building-the-tensorflow-lite-python-tflite-runtime-on-a-raspberry-pi-zero-116bfa38be3f - useful instructions
+
+# last line empty \ No newline at end of file diff --git a/src/modules/ObjectDetectionCoral/requirements.txt b/src/modules/ObjectDetectionCoral/requirements.txt new file mode 100644 index 00000000..d67baaec --- /dev/null +++ b/src/modules/ObjectDetectionCoral/requirements.txt @@ -0,0 +1,22 @@ +#!
Python3.7 + +Pillow>=4.0.0 # Installing Pillow, a Python Image Library + +# For edge devices only +tflite-runtime==2.5.0.post1 # Installing Tensorflow Lite + +# Or...Tensorflow 2.10 was the last version that supported GPU on windows +# tensorflow==2.10 # Installing Tensorflow + +# Coral example has numpy>=1.16.0', +numpy==1.21.6 # Installing NumPy, a package for scientific computing + +# Support for Python 3.6-3.9 +# 3.6 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp36-cp36m-win_amd64.whl +# 3.7 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp37-cp37m-win_amd64.whl +# 3.8 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp38-cp38-win_amd64.whl +# 3.9 https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp39-cp39-win_amd64.whl +--extra-index-url https://google-coral.github.io/py-repo/ +pycoral~=2.0 + +# last line empty \ No newline at end of file diff --git a/src/modules/ObjectDetectionNet/ObjectDetectionNet.csproj b/src/modules/ObjectDetectionNet/ObjectDetectionNet.csproj index 2b4d3cec..d4f34830 100644 --- a/src/modules/ObjectDetectionNet/ObjectDetectionNet.csproj +++ b/src/modules/ObjectDetectionNet/ObjectDetectionNet.csproj @@ -8,7 +8,7 @@ ObjectDetection (YOLO .Net) CodeProject.AI.Analysis.Yolo ObjectDetectionNet - 1.2 + 1.4 enable disable dotnet-CodeProject.AI.Yolo-384BE45C-AAED-42BA-9DDB-EF37356B630F @@ -58,7 +58,7 @@ CPU DirectML CUDA - CPU + CPU CPU @@ -84,7 +84,10 @@ - + + + + @@ -96,12 +99,19 @@ + + CPU + CUDA + OpenVINO + DirectML + + AnyCPU AnyCPU AnyCPU x64 - 2.1.0 + 2.1.5 diff --git a/src/modules/ObjectDetectionNet/ObjectDetectionWorker.cs b/src/modules/ObjectDetectionNet/ObjectDetectionWorker.cs index 98e17f78..db676378 100644 --- a/src/modules/ObjectDetectionNet/ObjectDetectionWorker.cs +++ b/src/modules/ObjectDetectionNet/ObjectDetectionWorker.cs @@ -13,6 +13,8 @@ using Yolov5Net.Scorer; +#pragma warning disable CS0162 // unreachable code + namespace CodeProject.AI.Modules.ObjectDetection.Yolo { /// @@ -24,6 +26,8 @@ namespace CodeProject.AI.Modules.ObjectDetection.Yolo /// public class ObjectDetectionWorker : CommandQueueWorker { + private const bool ShowTrace = false; + private readonly ConcurrentDictionary _detectors = new (); private readonly ILogger _logger; @@ -31,9 +35,6 @@ public class ObjectDetectionWorker : CommandQueueWorker private readonly string _modelDir; private readonly string _customDir; - private string _hardwareType = "CPU"; - private string _executionProvider = string.Empty; - private string[] _customFileList = Array.Empty(); private DateTime _lastTimeCustomFileListGenerated = DateTime.MinValue; @@ -67,7 +68,15 @@ protected override void InitModule() "Object Detection module using the 'vision/detection' route and " + "'objectdetection_queue' queue (eg. ObjectDetectionYolo). " + "There will be conflicts"); - +#if CPU + Logger.LogInformation("ObjectDetection (.NET) built for CPU"); +#elif CUDA + Logger.LogInformation("ObjectDetection (.NET) built for CUDA"); +#elif OpenVINO + Logger.LogInformation("ObjectDetection (.NET) built for OpenVINO"); +#elif DirectML + Logger.LogInformation("ObjectDetection (.NET) built for DirectML"); +#endif base.InitModule(); } @@ -76,7 +85,7 @@ protected override void InitModule() /// /// The request. /// The response. 
- public override BackendResponseBase ProcessRequest(BackendRequest request) + protected override BackendResponseBase ProcessRequest(BackendRequest request) { BackendResponseBase response; @@ -114,7 +123,7 @@ public override BackendResponseBase ProcessRequest(BackendRequest request) } } else if (payload.command.EqualsIgnoreCase("detect") == true) // Perform 'standard' object detection - { + { var file = payload.files?.FirstOrDefault(); if (file is null) return new BackendErrorResponse("No File supplied for object detection."); @@ -178,8 +187,15 @@ protected BackendResponseBase DoDetection(string modelPath, RequestFormFile file { Logger.LogInformation($"Processing {file.filename}"); + Stopwatch traceSW = Stopwatch.StartNew(); + if (ShowTrace) + Console.WriteLine($"Trace: Start DoDetection: {traceSW.ElapsedMilliseconds}ms"); + if (!_detectors.TryGetValue(modelPath, out ObjectDetector? detector) || detector is null) { + if (ShowTrace) + Console.WriteLine($"Trace: Creating Detector: {traceSW.ElapsedMilliseconds}ms"); + detector = new ObjectDetector(modelPath, _logger); _detectors.TryAdd(modelPath, detector); } @@ -187,16 +203,28 @@ protected BackendResponseBase DoDetection(string modelPath, RequestFormFile file if (detector is null) return new BackendErrorResponse($"Unable to create detector for model {modelPath}"); - _hardwareType = detector.HardwareType; - _executionProvider = detector.ExecutionProvider; + if (ShowTrace) + Console.WriteLine($"Trace: Setting hardware type: {traceSW.ElapsedMilliseconds}ms"); + + HardwareType = detector.HardwareType; + ExecutionProvider = detector.ExecutionProvider; + + if (ShowTrace) + Console.WriteLine($"Trace: Start Predict: {traceSW.ElapsedMilliseconds}ms"); Stopwatch sw = Stopwatch.StartNew(); List? yoloResult = detector.Predict(file.data, minConfidence); long inferenceMs = sw.ElapsedMilliseconds; + if (ShowTrace) + Console.WriteLine($"Trace: End Predict: {traceSW.ElapsedMilliseconds}ms"); + if (yoloResult == null) return new BackendErrorResponse("Yolo returned null."); + if (ShowTrace) + Console.WriteLine($"Trace: Start Processing results: {traceSW.ElapsedMilliseconds}ms"); + var results = yoloResult.Where(x => x?.Rectangle != null && x.Score >= minConfidence); int count = results.Count(); string message = string.Empty; @@ -207,6 +235,9 @@ protected BackendResponseBase DoDetection(string modelPath, RequestFormFile file else message = "No objects found"; + if (ShowTrace) + Console.WriteLine($"Trace: Sending results: {traceSW.ElapsedMilliseconds}ms"); + return new BackendObjectDetectionResponse { count = count, @@ -224,13 +255,6 @@ protected BackendResponseBase DoDetection(string modelPath, RequestFormFile file inferenceMs = inferenceMs }; } - - protected async override void GetHardwareInfo() - { - await System.Threading.Tasks.Task.Run(() => { - HardwareType = _hardwareType; - ExecutionProvider = _executionProvider; - }); - } } } +#pragma warning restore CS0162 // unreachable code \ No newline at end of file diff --git a/src/modules/ObjectDetectionNet/ObjectDetector.cs b/src/modules/ObjectDetectionNet/ObjectDetector.cs index e47737fe..af64c4a2 100644 --- a/src/modules/ObjectDetectionNet/ObjectDetector.cs +++ b/src/modules/ObjectDetectionNet/ObjectDetector.cs @@ -66,7 +66,7 @@ public ObjectDetector(string modelPath, ILogger logger) try { var modelFileInfo = new FileInfo(modelPath); - SessionOptions sessionOpts = GetHardwareInfo(); + SessionOptions sessionOpts = GetSessionOptions(); if (modelFileInfo.Exists) { @@ -74,19 +74,19 @@ public ObjectDetector(string 
modelPath, ILogger logger) { _scorer = new YoloScorer(modelPath, sessionOpts); } - catch // something went wrong, probably the device is too old and no longer supported. + catch (Exception ex) // something went wrong, probably the device is too old and no longer supported. { // fall back to CPU only if (ExecutionProvider != "CPU") { - _logger.LogError($"Unable to load the model with {ExecutionProvider}. Falling back to CPU."); + _logger.LogError(ex, $"Unable to load the model with {ExecutionProvider}. Falling back to CPU."); _scorer = new YoloScorer(modelPath); ExecutionProvider = "CPU"; HardwareType = "CPU"; } else - _logger.LogError("Unable to load the model at " + modelPath); + _logger.LogError(ex, $"Unable to load the model at {modelPath}"); } } else @@ -98,12 +98,12 @@ public ObjectDetector(string modelPath, ILogger logger) } } - private SessionOptions GetHardwareInfo() + private SessionOptions GetSessionOptions() { var sessionOpts = new SessionOptions(); bool supportGPU = (Environment.GetEnvironmentVariable("CPAI_MODULE_SUPPORT_GPU") ?? "true").ToLower() == "true"; - _logger.LogDebug($"GetHardwareInfo supportGPU={supportGPU}"); + _logger.LogDebug($"ObjectDetection (.NET) supportGPU={supportGPU}"); if (supportGPU) { @@ -117,14 +117,14 @@ private SessionOptions GetHardwareInfo() } foreach (var providerName in providers ?? Array.Empty()) - _logger.LogDebug($"GetHardwareInfo provider: {providerName}"); + _logger.LogDebug($"ObjectDetection (.NET) provider: {providerName}"); // Enable CUDA ------------------- if (providers?.Any(p => p.StartsWithIgnoreCase("CUDA")) ?? false) { try { - _logger.LogDebug($"GetHardwareInfo setting ExecutionProvider = \"CUDA\""); + _logger.LogDebug($"ObjectDetection (.NET) setting ExecutionProvider = \"CUDA\""); sessionOpts.AppendExecutionProvider_CUDA(); @@ -133,7 +133,7 @@ private SessionOptions GetHardwareInfo() } catch (Exception ex) { - _logger.LogDebug(ex, $"GetHardwareInfo setting ExecutionProvider = \"CUDA\""); + _logger.LogDebug(ex, $"ObjectDetection (.NET) setting ExecutionProvider = \"CUDA\""); // do nothing, the provider didn't work so keep going } } @@ -143,7 +143,7 @@ private SessionOptions GetHardwareInfo() { try { - _logger.LogDebug($"GetHardwareInfo setting ExecutionProvider = \"OpenVINO\""); + _logger.LogDebug($"ObjectDetection (.NET) setting ExecutionProvider = \"OpenVINO\""); sessionOpts.AppendExecutionProvider_OpenVINO("GPU_FP16"); //sessionOpts.EnableMemoryPattern = false; //sessionOpts.ExecutionMode = ExecutionMode.ORT_PARALLEL; @@ -163,11 +163,11 @@ private SessionOptions GetHardwareInfo() { try { - _logger.LogDebug($"GetHardwareInfo setting ExecutionProvider = \"DML\""); - sessionOpts.AppendExecutionProvider_DML(); + _logger.LogDebug($"ObjectDetection (.NET) setting ExecutionProvider = \"DML\""); sessionOpts.EnableMemoryPattern = false; sessionOpts.ExecutionMode = ExecutionMode.ORT_SEQUENTIAL; - sessionOpts.GraphOptimizationLevel = GraphOptimizationLevel.ORT_DISABLE_ALL; + sessionOpts.GraphOptimizationLevel = GraphOptimizationLevel.ORT_ENABLE_ALL; + sessionOpts.AppendExecutionProvider_DML(); // Or set the device Id here in order to choose a card ExecutionProvider = "DirectML"; HardwareType = "GPU"; @@ -195,7 +195,7 @@ private SessionOptions GetHardwareInfo() if (!fi.Exists) return null; - using SKImage? image = GetImage(filename); + using SKImage? image = ImageUtils.GetImage(filename); try { List? 
predictions = Predict(image, minConfidence: minConfidence); @@ -237,7 +237,7 @@ private SessionOptions GetHardwareInfo() if (imageData == null) return null; - var image = GetImage(imageData); + var image = ImageUtils.GetImage(imageData); if (image is null) return null; @@ -255,30 +255,5 @@ private SessionOptions GetHardwareInfo() image?.Dispose(); } } - - /// - /// Loads a Bitmap from a file. - /// - /// The file name. - /// The Bitmap, or null. - /// SkiSharp handles more image formats than System.Drawing. - private SKImage? GetImage(string filename) - { - // TODO: Add error handling and port this to Maui - var skiaImage = SKImage.FromEncodedData(filename); - if (skiaImage is null) - return null; - - return skiaImage; //.ToBitmap(); - } - - private SKImage? GetImage(byte[] imageData) - { - var skiaImage = SKImage.FromEncodedData(imageData); - if (skiaImage is null) - return null; - - return skiaImage; //.ToBitmap(); - } } } diff --git a/src/modules/ObjectDetectionNet/Program.cs b/src/modules/ObjectDetectionNet/Program.cs index d26c4ff2..de2b67c4 100644 --- a/src/modules/ObjectDetectionNet/Program.cs +++ b/src/modules/ObjectDetectionNet/Program.cs @@ -12,4 +12,6 @@ }) .Build(); -await host.RunAsync(); \ No newline at end of file +#pragma warning disable CA2007 // Consider calling ConfigureAwait on the awaited task +await host.RunAsync(); +#pragma warning restore CA2007 // Consider calling ConfigureAwait on the awaited task \ No newline at end of file diff --git a/src/modules/ObjectDetectionNet/Properties/launchSettings.json b/src/modules/ObjectDetectionNet/Properties/launchSettings.json index d63280f0..135f92ff 100644 --- a/src/modules/ObjectDetectionNet/Properties/launchSettings.json +++ b/src/modules/ObjectDetectionNet/Properties/launchSettings.json @@ -5,8 +5,9 @@ "environmentVariables": { "DOTNET_ENVIRONMENT": "Development", "CPAI_MODULE_SUPPORT_GPU": "true", - "CPAI_LOG_VERBOSITY" : "info", - "CPAI_MODULE_PARALLELISM": "0" + "CPAI_LOG_VERBOSITY": "info", + "CPAI_MODULE_PARALLELISM": "0", + "CPAI_MODULE_QUEUENAME": "objectdetection_queue" }, "dotnetRunMessages": "true" }, diff --git a/src/modules/ObjectDetectionNet/YOLOv5/YoloScorer.cs b/src/modules/ObjectDetectionNet/YOLOv5/YoloScorer.cs index 15e71451..bf1204d0 100644 --- a/src/modules/ObjectDetectionNet/YOLOv5/YoloScorer.cs +++ b/src/modules/ObjectDetectionNet/YOLOv5/YoloScorer.cs @@ -36,6 +36,7 @@ public class YoloScorer : IDisposable where T : YoloModel private ObjectPool> _tensorPool; private ObjectPool> _predictionListPool; + private bool disposedValue; // To scale up we will need to create multiple InferenceSessions per model // as the InferenceSession instance is not thread safe. @@ -496,7 +497,12 @@ public YoloScorer(string weights, SessionOptions? opts = null) : this() { FilePath = weights; - _inferenceSession = new InferenceSession(File.ReadAllBytes(weights), opts ?? new SessionOptions()); + // Breaking this up so we can debug timing. + var bytes = File.ReadAllBytes(weights); + var options = opts ?? new SessionOptions(); + + _inferenceSession = new InferenceSession(bytes, options); + SetModelPropetiesFromMetadata(); } @@ -561,12 +567,40 @@ private void SetModelPropetiesFromMetadata() } } + /// + /// Disposes YoloScorer instance. 
+ /// + protected virtual void Dispose(bool disposing) + { + if (!disposedValue) + { + if (disposing) + { + // TODO: dispose managed state (managed objects) + _inferenceSession?.Dispose(); + } + + // TODO: free unmanaged resources (unmanaged objects) and override finalizer + // TODO: set large fields to null + disposedValue = true; + } + } + + // // TODO: override finalizer only if 'Dispose(bool disposing)' has code to free unmanaged resources + // ~YoloScorer() + // { + // // Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method + // Dispose(disposing: false); + // } + /// /// Disposes YoloScorer instance. /// public void Dispose() { - _inferenceSession?.Dispose(); + // Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method + Dispose(disposing: true); + GC.SuppressFinalize(this); } } } diff --git a/src/modules/ObjectDetectionNet/install.bat b/src/modules/ObjectDetectionNet/install.bat index c9bd8d7c..dced1537 100644 --- a/src/modules/ObjectDetectionNet/install.bat +++ b/src/modules/ObjectDetectionNet/install.bat @@ -12,7 +12,7 @@ ) REM Read the version from the modulesettings.json file -call "!sdkScriptsPath!\utils.bat" GetVersionFromModuleSettings "modulesettings.json" "Version" +call "!sdkScriptsPath!\utils.bat" GetVersionFromModuleSettings "!modulePath!\modulesettings.json" "Version" set version=!jsonValue! :: Pull down the correct .NET image of ObjectDetectionNet based on this OS / GPU combo @@ -55,6 +55,7 @@ if errorlevel 1 exit /b 1 :: os - "windows" :: architecture - "x86_64" or "arm64" :: platform - "windows" or "windows-arm64" +:: systemName - "Windows" :: verbosity - quiet, info or loud. Use this to determines the noise level of output. :: forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. :: GetFromServer will honour this value. Do it yourself for DownloadAndExtract diff --git a/src/modules/ObjectDetectionNet/install.sh b/src/modules/ObjectDetectionNet/install.sh index 4a252f9f..4c9f24d3 100644 --- a/src/modules/ObjectDetectionNet/install.sh +++ b/src/modules/ObjectDetectionNet/install.sh @@ -49,7 +49,7 @@ fi # Variables available: # # absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI) -# sdkScriptsPath - the path to the installation utility scripts ($rootPath/Installers) +# sdkScriptsPath - the path to the installation utility scripts ($rootPath/SDK/Scripts) # downloadPath - the path to where downloads will be stored ($sdkScriptsPath/downloads) # runtimesPath - the path to the installed runtimes ($rootPath/src/runtimes) # modulesPath - the path to all the AI modules ($rootPath/src/modules) @@ -58,6 +58,8 @@ fi # os - "linux" or "macos" # architecture - "x86_64" or "arm64" # platform - "linux", "linux-arm64", "macos" or "macos-arm64" +# systemName - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi" +# "Jetson" or "Docker" # verbosity - quiet, info or loud. Use this to determines the noise level of output. # forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. # getFromServer will honour this value. 
Do it yourself for downloadAndExtract diff --git a/src/modules/ObjectDetectionNet/modulesettings.development.json b/src/modules/ObjectDetectionNet/modulesettings.development.json index 24b46e30..3e28389e 100644 --- a/src/modules/ObjectDetectionNet/modulesettings.development.json +++ b/src/modules/ObjectDetectionNet/modulesettings.development.json @@ -1,7 +1,7 @@ { "Modules": { "ObjectDetectionNet": { - "FilePath": "bin\\Debug\\net7.0\\ObjectDetectionNet.dll" + "FilePath": "bin\\Debug\\net7.0\\ObjectDetectionNet.dll" } } } diff --git a/src/modules/ObjectDetectionNet/modulesettings.json b/src/modules/ObjectDetectionNet/modulesettings.json index 4e0a4520..5580ba3c 100644 --- a/src/modules/ObjectDetectionNet/modulesettings.json +++ b/src/modules/ObjectDetectionNet/modulesettings.json @@ -5,7 +5,7 @@ "Modules": { "ObjectDetectionNet": { "Name": "Object Detection (YOLOv5 .NET)", - "Version": "1.2", + "Version": "1.5", // Publishing info "Description": "Provides Object Detection using YOLOv5 ONNX models with DirectML. This module is best for those on Windows and Linux without CUDA enabled GPUs", @@ -14,17 +14,20 @@ "LicenseUrl": "https://opensource.org/licenses/MIT", // Which server version is compatible with each version of this module. - "VersionCompatibililty": [ - { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-06-01" }, - { "ModuleVersion": "1.1", "ServerVersionRange": [ "2.1", "2.1" ], "ReleaseDate": "2023-03-20" }, - { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-04-09" } - ], + "ModuleReleases": [ + { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-06-01" }, + { "ModuleVersion": "1.1", "ServerVersionRange": [ "2.1.0", "2.1.0" ], "ReleaseDate": "2023-03-20", "ReleaseNotes": "Updated for CodeProject.AI Server 2.1" }, + { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1.0", "2.1.6" ], "ReleaseDate": "2023-04-09", "ReleaseNotes": "Corrected installer issues" }, + { "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1.0", "2.1.6" ], "ReleaseDate": "2023-04-20", "ReleaseNotes": "Corrected module launch command" }, + { "ModuleVersion": "1.4", "ServerVersionRange": [ "2.1.8", "2.1.8" ], "ReleaseDate": "2023-04-20", "ReleaseNotes": "Minor changes in module setup", "Importance": "Minor" }, + { "ModuleVersion": "1.5", "ServerVersionRange": [ "2.1.9", "" ], "ReleaseDate": "2023-05-04", "ReleaseNotes": "Updated module settings", "Importance": "Minor" } + ], // Launch instructions "AutoStart": false, - "FilePath": "ObjectDetectionNet.dll", - "Runtime": "dotnet", - "RuntimeLocation": "Shared", // Can be Local or Shared. .NET so moot point here + "FilePath": "ObjectDetectionNet.dll", // The default for Linux / macOS + "Runtime": "dotnet", + "RuntimeLocation": "Shared", // Can be Local or Shared. .NET so moot point here // These are all optional. Defaults are usually fine "SupportGPU": true, @@ -34,7 +37,7 @@ "PostStartPauseSecs": 1, // 1 if using GPU, 0 for CPU // Deliberately not using the default queue: We make all Object detectors use the same queue. 
- "Queue": "objectdetection_queue", // default is lower(modulename) + "_queue" + "Queue": "objectdetection_queue", // default is lower(modulename) + "_queue" "EnvironmentVariables": { "MODEL_SIZE": "MEDIUM", diff --git a/src/modules/ObjectDetectionNet/modulesettings.linux.development.json b/src/modules/ObjectDetectionNet/modulesettings.linux.development.json deleted file mode 100644 index a3e7f4f8..00000000 --- a/src/modules/ObjectDetectionNet/modulesettings.linux.development.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "Modules": { - "ObjectDetectionNet": { - "FilePath": "bin/Debug/net7.0/ObjectDetectionNet.dll" - } - } -} diff --git a/src/modules/ObjectDetectionNet/modulesettings.macos.development.json b/src/modules/ObjectDetectionNet/modulesettings.macos.development.json deleted file mode 100644 index a3e7f4f8..00000000 --- a/src/modules/ObjectDetectionNet/modulesettings.macos.development.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "Modules": { - "ObjectDetectionNet": { - "FilePath": "bin/Debug/net7.0/ObjectDetectionNet.dll" - } - } -} diff --git a/src/modules/ObjectDetectionNet/modulesettings.windows.development.json b/src/modules/ObjectDetectionNet/modulesettings.windows.development.json new file mode 100644 index 00000000..c02a6f15 --- /dev/null +++ b/src/modules/ObjectDetectionNet/modulesettings.windows.development.json @@ -0,0 +1,7 @@ +{ + "Modules": { + "ObjectDetectionNet": { + "FilePath": "bin/Debug/net7.0/ObjectDetectionNet.exe" + } + } +} diff --git a/src/modules/ObjectDetectionNet/modulesettings.windows.json b/src/modules/ObjectDetectionNet/modulesettings.windows.json new file mode 100644 index 00000000..1ca2ea70 --- /dev/null +++ b/src/modules/ObjectDetectionNet/modulesettings.windows.json @@ -0,0 +1,8 @@ +{ + "Modules": { + "ObjectDetectionNet": { + "Runtime": "execute", + "FilePath": "ObjectDetectionNet.exe" + } + } +} diff --git a/src/modules/ObjectDetectionTFLite/ObjectDetectionTFLite.pyproj b/src/modules/ObjectDetectionTFLite/ObjectDetectionTFLite.pyproj index a61eac0b..5964e9d3 100644 --- a/src/modules/ObjectDetectionTFLite/ObjectDetectionTFLite.pyproj +++ b/src/modules/ObjectDetectionTFLite/ObjectDetectionTFLite.pyproj @@ -4,7 +4,7 @@ Debug ObjectDetectionTFLite 2.0 - {470d3417-36a4-49a4-b719-496477fa92fb} + {4c40a443-6a02-43f1-bd33-8f1a73349cda} objectdetection_tflite_adapter.py @@ -16,14 +16,12 @@ False False True - - + 10.0 - @@ -32,18 +30,13 @@ - - - - modulesettings.json - modulesettings.json @@ -51,7 +44,6 @@ modulesettings.json - @@ -78,6 +70,5 @@ X64 - \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/install.bat b/src/modules/ObjectDetectionTFLite/install.bat index 43a99428..ab5c4ee6 100644 --- a/src/modules/ObjectDetectionTFLite/install.bat +++ b/src/modules/ObjectDetectionTFLite/install.bat @@ -10,16 +10,19 @@ @goto:eof ) -REM "%modulePath%\install_coral.bat" +call "!sdkScriptsPath!\utils.bat" WriteLine "*** You need to run !modulePath!\install_coral.bat to complete this process" "!color_info!" 
+rem Needs admin permissions, so can't run via server install +rem "%modulePath%\install_coral.bat" REM Python setup call "%sdkScriptsPath%\utils.bat" SetupPython 3.9 "Local" if errorlevel 1 exit /b 1 -call "%sdkScriptsPath%\utils.bat" InstallPythonPackages 3.9 "%modulePath%" "Local" +REM Do SDK first, since it's a little fussy +call "%sdkScriptsPath%\utils.bat" InstallPythonPackages 3.9 "%absoluteAppRootDir%\SDK\Python" "Local" if errorlevel 1 exit /b 1 -call "%sdkScriptsPath%\utils.bat" InstallPythonPackages 3.9 "%absoluteAppRootDir%\SDK\Python" "Local" +call "%sdkScriptsPath%\utils.bat" InstallPythonPackages 3.9 "%modulePath%" "Local" if errorlevel 1 exit /b 1 :: Download the MobileNet TFLite models and store in /assets @@ -41,6 +44,7 @@ if errorlevel 1 exit /b 1 :: os - "windows" :: architecture - "x86_64" or "arm64" :: platform - "windows" or "windows-arm64" +:: systemName - "Windows" :: verbosity - quiet, info or loud. Use this to determines the noise level of output. :: forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. :: GetFromServer will honour this value. Do it yourself for DownloadAndExtract diff --git a/src/modules/ObjectDetectionTFLite/install.sh b/src/modules/ObjectDetectionTFLite/install.sh index 511b6309..23c7b652 100644 --- a/src/modules/ObjectDetectionTFLite/install.sh +++ b/src/modules/ObjectDetectionTFLite/install.sh @@ -1,8 +1,8 @@ # Development mode setup script :::::::::::::::::::::::::::::::::::::::::::::: # -# ObjectDetection (Coral) +# ObjectDetection (TFLite) # -# This script is called from the ObjectDetectionCoral directory using: +# This script is called from the ObjectDetectionTFLite directory using: # # bash ../../setup.sh # @@ -14,28 +14,45 @@ if [ "$1" != "install" ]; then exit 1 fi -# We no longer try installing the Coral libraries directly. They are no longer supported and -# Tensorflow provide access to the Coral TPU directly -# source "${modulePath}/install_coral.sh" - -if [ $(uname -n) == "raspberrypi" ]; then +if [ "${systemName}" == "Raspberry Pi" ] || [ "${systemName}" == "Orange Pi" ] || \ + [ "${systemName}" == "Jetson" ]; then if [[ $EUID -ne 0 ]]; then writeLine "=================================================================================" $color_error writeLine "Please run: sudo apt install libopenblas-dev libblas-dev m4 cmake cython python3-dev python3-yaml python3-setuptools " $color_info writeLine "to complete the setup for ObjectDetectionTFLite" $color_info writeLine "=================================================================================" $color_error else - sudo apt install libopenblas-dev libblas-dev m4 cmake cython python3-dev python3-yaml python3-setuptools + sudo apt install libopenblas-dev libblas-dev m4 cmake cython python3-dev python3-yaml python3-setuptools -y fi fi if [ "$os" == "linux" ]; then - apt-get install curl -y + write "Ensuring curl is installed (just in case)..." $color_mute + apt-get install curl -y >/dev/null 2>/dev/null & + spin $! + writeLine "Done" "$color_success" # Add the Debian package repository to your system echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - + + if [ ! -d "${downloadPath}" ]; then mkdir -p "${downloadPath}"; fi + if [ ! 
-d "${downloadPath}/Coral" ]; then mkdir -p "${downloadPath}/Coral"; fi + pushd "${downloadPath}/Coral" >/dev/null 2>/dev/null + + write "Downloading signing keys..." $color_mute + curl https://packages.cloud.google.com/apt/doc/apt-key.gpg -s --output apt-key.gpg >/dev/null 2>/dev/null & + spin $! + writeLine "Done" "$color_success" + + write "Installing signing keys..." $color_mute + # NOTE: 'add' is deprecated. We should, instead, name apt-key.gpg as coral.ai-apt-key.gpg and + # place it directly in the /etc/apt/trusted.gpg.d/ directory + sudo apt-key add apt-key.gpg >/dev/null 2>/dev/null & + spin $! + writeLine "Done" "$color_success" + + popd "${downloadPath}/Coral" >/dev/null 2>/dev/null if [[ $EUID -ne 0 ]]; then writeLine "=================================================================================" $color_error @@ -44,7 +61,12 @@ if [ "$os" == "linux" ]; then writeLine "=================================================================================" $color_error else # Install the Edge TPU runtime (standard, meaning half speed, or max, meaning full speed) - sudo apt-get update && apt-get install libedgetpu1-std + write "Installing libedgetpu1-std (the non-desk-melting version of libedgetpu1)..." $color_mute + sudo apt-get update -y >/dev/null 2>/dev/null & + spin $! + sudo apt-get install libedgetpu1-std -y >/dev/null 2>/dev/null & + spin $! + writeLine "Done" "$color_success" # BE CAREFUL. If you want your TPU to go to 11 and choose 'max' you may burn a hole in your desk # sudo apt-get update && apt-get install libedgetpu1-max @@ -76,7 +98,7 @@ if [ $? -ne 0 ]; then quit 1; fi # Variables available: # # absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI) -# sdkScriptsPath - the path to the installation utility scripts ($rootPath/Installers) +# sdkScriptsPath - the path to the installation utility scripts ($rootPath/SDK/Scripts) # downloadPath - the path to where downloads will be stored ($sdkScriptsPath/downloads) # runtimesPath - the path to the installed runtimes ($rootPath/src/runtimes) # modulesPath - the path to all the AI modules ($rootPath/src/modules) @@ -85,6 +107,8 @@ if [ $? -ne 0 ]; then quit 1; fi # os - "linux" or "macos" # architecture - "x86_64" or "arm64" # platform - "linux", "linux-arm64", "macos" or "macos-arm64" +# systemName - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi" +# "Jetson" or "Docker" # verbosity - quiet, info or loud. Use this to determines the noise level of output. # forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. # getFromServer will honour this value. 
Do it yourself for downloadAndExtract diff --git a/src/modules/ObjectDetectionTFLite/install/edgetpu_runtime.zip b/src/modules/ObjectDetectionTFLite/install/edgetpu_runtime.zip new file mode 100644 index 00000000..84a42030 Binary files /dev/null and b/src/modules/ObjectDetectionTFLite/install/edgetpu_runtime.zip differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/wheels/tflite_runtime-2.5.0.post1-cp39-cp39-linux_aarch64.whl b/src/modules/ObjectDetectionTFLite/install/tflite_runtime-2.5.0.post1-cp39-cp39-linux_aarch64.whl similarity index 100% rename from src/modules/ObjectDetectionTFLite/third_party/wheels/tflite_runtime-2.5.0.post1-cp39-cp39-linux_aarch64.whl rename to src/modules/ObjectDetectionTFLite/install/tflite_runtime-2.5.0.post1-cp39-cp39-linux_aarch64.whl diff --git a/src/modules/ObjectDetectionTFLite/install_coral.bat b/src/modules/ObjectDetectionTFLite/install_coral.bat index 453898f7..091c1de3 100644 --- a/src/modules/ObjectDetectionTFLite/install_coral.bat +++ b/src/modules/ObjectDetectionTFLite/install_coral.bat @@ -23,25 +23,40 @@ call "!sdkScriptsPath!\utils.bat" setESC rem echo modulePath = %modulePath% rem echo sdkScriptsPath = %sdkScriptsPath%
+call "!sdkScriptsPath!\utils.bat" WriteLine "Extracting EdgeTPU setup files" "!color_info!"
+pushd install
+mkdir edgetpu
+tar -xf edgetpu_runtime.zip -C edgetpu
+
+:: We'll just use the existing install.bat that comes with the EdgeTPU install instead of doing it
+:: ourselves. Use 'call' so control returns to this script afterwards.
+call "!sdkScriptsPath!\utils.bat" WriteLine "Installing EdgeTPU support" "!color_info!"
+pushd edgetpu
+call install.bat
+
+popd
+popd
+
+call "!sdkScriptsPath!\utils.bat" WriteLine "Done"
+
+
REM Coral TPU setup
-call "!sdkScriptsPath!\utils.bat" WriteLine "Installing UsbDk ==============================================" "!color_info!"
-REM See https://pi3g.com/2022/10/19/coral-usb-inference-not-working-on-windows-10-valueerror-failed-to-load-delegate-from-edgetpu-dll/
-REM start /wait msiexec /i "%modulePath%\third_party\usbdk\UsbDk_1.0.22_x64.msi" /qb! /norestart
-start /wait msiexec /i "%modulePath%\third_party\usbdk\UsbDk_1.0.21_x64.msi" /qb! /norestart
-call "!sdkScriptsPath!\utils.bat" WriteLine
-
-call "!sdkScriptsPath!\utils.bat" WriteLine "Installing Windows drivers ====================================" "!color_info!"
-pnputil /add-driver "%modulePath%\third_party\coral_accelerator_windows\*.inf" /install
-call "!sdkScriptsPath!\utils.bat" WriteLine
-
-call "!sdkScriptsPath!\utils.bat" WriteLine "Installing performance counters ===============================" "!color_info!"
-lodctr /M:"%modulePath%\third_party\coral_accelerator_windows\coral.man"
-call "!sdkScriptsPath!\utils.bat" WriteLine
-call "!sdkScriptsPath!\utils.bat" WriteLine
-
-call "!sdkScriptsPath!\utils.bat" WriteLine "Copying edgetpu and libusb to System32 ========================" "!color_info!"
-
-rem copy "%workingDir%\third_party\libedgetpu\throttled\x64_windows\edgetpu.dll" %systemroot%\system32
-copy "%modulePath%\third_party\libedgetpu\direct\x64_windows\edgetpu.dll" "%systemroot%\system32\"
-copy "%modulePath%\third_party\libusb_win\libusb-1.0.dll" "%systemroot%\system32\"
-call "!sdkScriptsPath!\utils.bat" WriteLine
+REM call "!sdkScriptsPath!\utils.bat" WriteLine "Installing UsbDk ==============================================" "!color_info!"
+REM start /wait msiexec /i "%modulePath%\third_party\usbdk\UsbDk_1.0.21_x64.msi" /qb!
/norestart +REM call "!sdkScriptsPath!\utils.bat" WriteLine + +REM call "!sdkScriptsPath!\utils.bat" WriteLine "Installing Windows drivers ====================================" "!color_info!" +REM pnputil /add-driver "%modulePath%\third_party\coral_accelerator_windows\*.inf" /install +REM call "!sdkScriptsPath!\utils.bat" WriteLine + +REM call "!sdkScriptsPath!\utils.bat" WriteLine "Installing performance counters ===============================" "!color_info!" +REM lodctr /M:"%modulePath%\third_party\coral_accelerator_windows\coral.man" +REM call "!sdkScriptsPath!\utils.bat" WriteLine +REM call "!sdkScriptsPath!\utils.bat" WriteLine + +REM call "!sdkScriptsPath!\utils.bat" WriteLine "Copying edgetpu and libusb to System32 ========================" "!color_info!" + +REM copy "%workingDir%\third_party\libedgetpu\throttled\x64_windows\edgetpu.dll" %systemroot%\system32 +REM copy "%modulePath%\third_party\libedgetpu\direct\x64_windows\edgetpu.dll" "%systemroot%\system32\" +REM copy "%modulePath%\third_party\libusb_win\libusb-1.0.dll" "%systemroot%\system32\" +REM call "!sdkScriptsPath!\utils.bat" WriteLine \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/install_coral.sh b/src/modules/ObjectDetectionTFLite/install_coral.sh deleted file mode 100644 index 04160b7e..00000000 --- a/src/modules/ObjectDetectionTFLite/install_coral.sh +++ /dev/null @@ -1,35 +0,0 @@ -# -# Install Coral Drivers -# -# bash install_coral.sh -# - - -# Setup the Coral libraries -if [[ $OSTYPE == 'darwin'* ]]; then - - curl -LO https://github.com/google-coral/libedgetpu/releases/download/release-grouper/edgetpu_runtime_20221024.zip - mv edgetpu_runtime_20221024.zip ../../downloads/ObjectDetectionTFLite/. - pushd ../../downloads/ObjectDetectionTFLite/ >/dev/null - unzip edgetpu_runtime_20221024.zip - cd edgetpu_runtime - sudo bash install.sh - popd >/dev/null - -else - - if [ $(uname -n) == "raspberrypi" ]; then - sudo apt install libopenblas-dev libblas-dev m4 cmake cython python3-dev python3-yaml python3-setuptools - fi - - # Add the Debian package repository to your system - echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - - sudo apt-get update - - # Install the Edge TPU runtime (standard, meaning half speed, or max, meaning full speed. - # BE CAREFUL. 
If you want your TPU to go to 11 and choose 'max' you may burn a hole in your desk - sudo apt-get install libedgetpu1-std - # sudo apt-get install libedgetpu1-max - -fi \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/modulesettings.docker.build.json b/src/modules/ObjectDetectionTFLite/modulesettings.docker.build.json index 777139ad..7228f552 100644 --- a/src/modules/ObjectDetectionTFLite/modulesettings.docker.build.json +++ b/src/modules/ObjectDetectionTFLite/modulesettings.docker.build.json @@ -14,7 +14,7 @@ */ // This NEEDS to be 'shared' for docker pre-installed - "RuntimeLocation": "Shared", // Can be Local or Shareds + "RuntimeLocation": "Shared", // Can be Local or Shared "PreInstalled": "true" } diff --git a/src/modules/ObjectDetectionTFLite/modulesettings.docker.build.rpi64.json b/src/modules/ObjectDetectionTFLite/modulesettings.docker.build.rpi64.json index 2810c77b..f4bc9513 100644 --- a/src/modules/ObjectDetectionTFLite/modulesettings.docker.build.rpi64.json +++ b/src/modules/ObjectDetectionTFLite/modulesettings.docker.build.rpi64.json @@ -14,7 +14,7 @@ */ // This NEEDS to be 'shared' for docker pre-installed - "RuntimeLocation": "Shared", // Can be Local or Shareds + "RuntimeLocation": "Shared", // Can be Local or Shared // This is usable on an RPi, but it needs better modules, and hopefully Coral.AI device, to // be truly fast diff --git a/src/modules/ObjectDetectionTFLite/modulesettings.json b/src/modules/ObjectDetectionTFLite/modulesettings.json index ccab16ae..03627f8c 100644 --- a/src/modules/ObjectDetectionTFLite/modulesettings.json +++ b/src/modules/ObjectDetectionTFLite/modulesettings.json @@ -2,19 +2,21 @@ "Modules": { "ObjectDetectionTFLite": { "Name": "ObjectDetection (TF-Lite)", - "Version": "1.2", + "Version": "1.4", // Publishing info - "Description": "The object detection module Tensorflow Lite to locate and classify the objects the models have been trained on.", - "Platforms": [ "windows", "linux", "linux-arm64", "macos", "macos-arm64" ], // model issues on Windows. Invoke hangs. M1 install not fixed yet + "Description": "The object detection module uses Tensorflow Lite to locate and classify the objects the models have been trained on.", + "Platforms": [ "windows", "linux", "linux-arm64", "macos", "macos-arm64" ], // model issues on Windows. Invoke hangs. "License": "Apache-2.0", "LicenseUrl": "https://opensource.org/licenses/Apache-2.0", // Which server version is compatible with each version of this module. 
- "VersionCompatibililty": [ - { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-04-10" }, - { "ModuleVersion": "1.1", "ServerVersionRange": [ "2.1", "2.1" ], "ReleaseDate": "2023-04-03" }, - { "ModuleVersion": "1.0", "ServerVersionRange": [ "2.1", "2.1" ], "ReleaseDate": "2023-03-20" } + "ModuleReleases": [ + { "ModuleVersion": "1.0", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-20" }, + { "ModuleVersion": "1.1", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-04-03" }, + { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-04-10" }, + { "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-04-10", "ReleaseNotes": "Updated Windows installer" }, + { "ModuleVersion": "1.4", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-05-17", "ReleaseNotes": "Updated module settings", "Importance": "Minor" } ], // Launch instructions diff --git a/src/modules/ObjectDetectionTFLite/modulesettings.windows.json b/src/modules/ObjectDetectionTFLite/modulesettings.windows.json deleted file mode 100644 index c8c0da4c..00000000 --- a/src/modules/ObjectDetectionTFLite/modulesettings.windows.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "Modules": { - "ObjectDetectionTFLite": { - "Command": "%CURRENT_MODULE_PATH%\\bin\\%OS%\\python39\\venv\\Scripts\\python" - } - } -} diff --git a/src/modules/ObjectDetectionTFLite/objectdetection_tflite.py b/src/modules/ObjectDetectionTFLite/objectdetection_tflite.py index 44aea548..1ab5eb0a 100644 --- a/src/modules/ObjectDetectionTFLite/objectdetection_tflite.py +++ b/src/modules/ObjectDetectionTFLite/objectdetection_tflite.py @@ -21,6 +21,7 @@ import argparse import collections +from datetime import datetime import time import cv2 @@ -32,10 +33,12 @@ from PIL import Image from PIL import ImageDraw +interpreter_lifespan_secs = 3600 # Refresh the interpreter once an hour -interpreter = None -labels = None -edge_tpu = False +interpreter = None # The model interpreter +interpreter_created = None # When was the interpreter created? +labels = None # set of labels for this model +edge_tpu = False # Are we using a TPU? Object = collections.namedtuple('Object', ['label', 'score', 'bbox']) @@ -86,6 +89,7 @@ def get_output(interpreter, score_threshold): def init_detect(options: Options): global interpreter + global interpreter_created global labels global edge_tpu @@ -94,6 +98,7 @@ def init_detect(options: Options): # Initialize TF-Lite interpreter. (interpreter, edge_tpu) = make_interpreter(options.model_tpu_file, options.model_cpu_file, options.num_threads) + interpreter_created = datetime.now() # Get input and output tensors. input_details = interpreter.get_input_details() @@ -101,22 +106,32 @@ def init_detect(options: Options): interpreter.allocate_tensors() - print(f"Input details: {input_details[0]}\n") - print(f"Output details: {output_details[0]}\n") + print(f"Debug: Input details: {input_details[0]}\n") + print(f"Debug: Output details: {output_details[0]}\n") # Read label and generate random colors. 
labels = read_label_file(options.label_file) if options.label_file else None -def do_detect(img: Image, score_threshold: float = 0.5): +def do_detect(options: Options, img: Image, score_threshold: float = 0.5): + + global interpreter + global interpreter_created w,h = img.size - print("Input(height, width): ", h, w) + print("Debug: Input(height, width): ", h, w) numpy_image = np.array(img) input_im = cv2.cvtColor(numpy_image, cv2.COLOR_BGR2RGB) + # Once an hour, refresh the interpreter + seconds_since_created = (datetime.now() - interpreter_created).total_seconds() + if seconds_since_created > interpreter_lifespan_secs: + print("Info: Refreshing the Tensorflow Interpreter") + interpreter = None + init_detect(options) + _, height, width, channel = interpreter.get_input_details()[0]["shape"] resize_im = cv2.resize(input_im, (width, height)) # resize_im = resize_im / 127.5 -1. diff --git a/src/modules/ObjectDetectionTFLite/objectdetection_tflite_adapter.py b/src/modules/ObjectDetectionTFLite/objectdetection_tflite_adapter.py index b9fcd77f..519b51c4 100644 --- a/src/modules/ObjectDetectionTFLite/objectdetection_tflite_adapter.py +++ b/src/modules/ObjectDetectionTFLite/objectdetection_tflite_adapter.py @@ -29,12 +29,19 @@ def initialise(self) -> None: if not self.launched_by_server: self.queue_name = "objectdetection_queue" + # No luck on Windows so far + import platform + if platform.system() == "Windows": + self.support_GPU = False + if self.support_GPU: self.support_GPU = self.hasCoralTPU if self.support_GPU: print("Edge TPU detected") self.execution_provider = "TPU" + else: + opts.model_tpu_file = None # disable TPU init_detect(opts) @@ -73,7 +80,7 @@ def do_detection(self, img: any, score_threshold: float): # you are using raw data access. if not sem.acquire(timeout=1): return { - "success" : "false", + "success" : False, "predictions" : [], "message" : "The interpreter is in use. 
Please try again later", "count" : 0, @@ -81,7 +88,7 @@ def do_detection(self, img: any, score_threshold: float): "inferenceMs" : 0 } - result = do_detect(img, score_threshold) + result = do_detect(opts, img, score_threshold) sem.release() predictions = result["predictions"] @@ -111,7 +118,7 @@ def do_detection(self, img: any, score_threshold: float): except Exception as ex: # await self.report_error_async(ex, __file__) self.report_error(ex, __file__) - return { "success": False, "error": "Error occured on the server"} + return { "success": False, "error": "Error occurred on the server"} if __name__ == "__main__": diff --git a/src/modules/ObjectDetectionTFLite/options.py b/src/modules/ObjectDetectionTFLite/options.py index ece4de44..138125a5 100644 --- a/src/modules/ObjectDetectionTFLite/options.py +++ b/src/modules/ObjectDetectionTFLite/options.py @@ -72,8 +72,8 @@ def __init__(self): # dump the important variables if self._show_env_variables: - print(f"MODULE_PATH: {self.module_path}") - print(f"MODELS_DIR: {self.models_dir}") - print(f"MODEL_SIZE: {self.model_size}") - print(f"CPU_MODEL_NAME: {self.cpu_model_name}") - print(f"TPU_MODEL_NAME: {self.tpu_model_name}") + print(f"Debug: MODULE_PATH: {self.module_path}") + print(f"Debug: MODELS_DIR: {self.models_dir}") + print(f"Debug: MODEL_SIZE: {self.model_size}") + print(f"Debug: CPU_MODEL_NAME: {self.cpu_model_name}") + print(f"Debug: TPU_MODEL_NAME: {self.tpu_model_name}") diff --git a/src/modules/ObjectDetectionTFLite/requirements.linux.arm64.txt b/src/modules/ObjectDetectionTFLite/requirements.linux.arm64.txt index 7b69d5e2..17248c45 100644 --- a/src/modules/ObjectDetectionTFLite/requirements.linux.arm64.txt +++ b/src/modules/ObjectDetectionTFLite/requirements.linux.arm64.txt @@ -3,15 +3,12 @@ # Install Tensorflow. One way or another... # tensorflow # Installing Tensorflow, the open source machine learning framework for everyone # tflite-runtime==2.5.0.post1 # Installing Tensorflow Lite -# ./third_party/wheels/tflite_runtime-2.5.0.post1-cp39-cp39-linux_aarch64.whl # Installing Tensorflow Lite -https://github.com/google-coral/pycoral/releases/download/v2.0.0/tflite_runtime-2.5.0.post1-cp39-cp39-linux_aarch64.whl +https://github.com/google-coral/pycoral/releases/download/v2.0.0/tflite_runtime-2.5.0.post1-cp39-cp39-linux_aarch64.whl # Installing Tensorflow Lite -# numpy>=1.16.0 # Installing NumPy, the fundamental package for array computing with Python. numpy # Installing NumPy, a package for scientific computing -Pillow>=4.0.0 # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library -#OpenCV-Python # Installing OpenCV, the Open source Computer Vision library # See https://raspberrypi-guide.github.io/programming/install-opencv opencv-python==4.5.3.56 # Installing OpenCV, the Open source Computer Vision library diff --git a/src/modules/ObjectDetectionTFLite/requirements.linux.txt b/src/modules/ObjectDetectionTFLite/requirements.linux.txt index 30b19840..4a3b7dc1 100644 --- a/src/modules/ObjectDetectionTFLite/requirements.linux.txt +++ b/src/modules/ObjectDetectionTFLite/requirements.linux.txt @@ -1,11 +1,12 @@ #! Python3.8 tensorflow # Installing Tensorflow, the open source machine learning framework for everyone +# tflite-runtime # numpy>=1.16.0 # Installing NumPy, the fundamental package for array computing with Python. 
numpy==1.16.2 # Installing NumPy, a package for scientific computing -Pillow>=4.0.0 # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library OpenCV-Python # Installing OpenCV, the Open source Computer Vision library # tflite-runtime==2.5.0.post1 # Installing Tensorflow Lite diff --git a/src/modules/ObjectDetectionTFLite/requirements.macos.arm64.txt b/src/modules/ObjectDetectionTFLite/requirements.macos.arm64.txt index adebebca..79661dd5 100644 --- a/src/modules/ObjectDetectionTFLite/requirements.macos.arm64.txt +++ b/src/modules/ObjectDetectionTFLite/requirements.macos.arm64.txt @@ -13,7 +13,7 @@ numpy==1.16.2 # Installing NumPy, a package for scientific compu # numpy -Pillow>=4.0.0 # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library OpenCV-Python # Installing OpenCV, the Open source Computer Vision library diff --git a/src/modules/ObjectDetectionTFLite/requirements.macos.txt b/src/modules/ObjectDetectionTFLite/requirements.macos.txt index 05e8e733..6a076e34 100644 --- a/src/modules/ObjectDetectionTFLite/requirements.macos.txt +++ b/src/modules/ObjectDetectionTFLite/requirements.macos.txt @@ -8,7 +8,7 @@ tensorflow # Installing Tensorflow, the open source machine l # numpy>=1.16.0 # Installing NumPy, the fundamental package for array computing with Python. numpy==1.16.2 # Installing NumPy, a package for scientific computing -Pillow>=4.0.0 # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library OpenCV-Python # Installing OpenCV, the Open source Computer Vision library # tflite-runtime==2.5.0.post1 # Installing Tensorflow Lite diff --git a/src/modules/ObjectDetectionTFLite/requirements.txt b/src/modules/ObjectDetectionTFLite/requirements.txt index 0901d879..9c3f65dc 100644 --- a/src/modules/ObjectDetectionTFLite/requirements.txt +++ b/src/modules/ObjectDetectionTFLite/requirements.txt @@ -1,13 +1,19 @@ #! 
Python3.8

+urllib3<2.0                    # Installing urllib3, the HTTP client for Python
+
 # For edge devices only
 # tflite-runtime==2.5.0.post1  # Installing Tensorflow Lite

+# If we wish to try a Coral-specific solution then
+# Use with https://github.com/google-coral/pycoral/blob/master/examples/detect_image.py
+# https://github.com/google-coral/pycoral/releases/download/v2.0.0/pycoral-2.0.0-cp36-cp36m-win_amd64.whl
+
 # Tensorflow 2.10 was the last version that supported GPU on windows
-tensorflow==2.10               # Installing Tensorflow
+tensorflow==2.10                # Installing Tensorflow

-numpy                          # Installing NumPy, a package for scientific computing
-Pillow>=4.0.0                  # Installing Pillow, a Python Image Library
-OpenCV-Python                  # Installing OpenCV, the Open source Computer Vision library
+numpy                           # Installing NumPy, a package for scientific computing
+Pillow<10.0.0                   # Installing Pillow, a Python Image Library
+OpenCV-Python                   # Installing OpenCV, the Open source Computer Vision library

 # last line empty
\ No newline at end of file
diff --git a/src/modules/ObjectDetectionTFLite/tflite_util.py b/src/modules/ObjectDetectionTFLite/tflite_util.py
index 62e86caa..158949b7 100644
--- a/src/modules/ObjectDetectionTFLite/tflite_util.py
+++ b/src/modules/ObjectDetectionTFLite/tflite_util.py
@@ -15,26 +15,25 @@
 from typing import Tuple
 import numpy as np

-def make_interpreter(model_file: str, cpu_model_file: str = None, num_of_threads: int = 1) -> Tuple[any, bool]:
+def make_interpreter(tpu_model_file: str, cpu_model_file: str = None,
+                     num_of_threads: int = 1) -> Tuple[any, bool]:
     """
     make tf-lite interpreter.
-    If model_file is provided, but no cpu_model_file, then we assume the caller has
-    determined the libraries and hardware that is available and has supplied a
-    suitable file. Otherwise, this method will assume the model file is an edgetpu
-    model but will sniff libraries and hardware and fallback to cpu_model_file if
-    edge TPU support isn't available.
+    If tpu_model_file is provided, but no cpu_model_file, then we assume the
+    caller has determined the libraries and hardware that are available and has
+    supplied a suitable file. Otherwise, this method will assume the model file
+    is an edgetpu model but will sniff libraries and hardware and fall back to
+    cpu_model_file if edge TPU support isn't available.

     Args:
-        model_file: Model file path.
+        tpu_model_file: Model file path for TPUs.
+        cpu_model_file: Model file path for CPUs.
         num_of_threads: Num of threads.
-        delegate_library: Delegate file path.

     Return:
         tf-lite interpreter.
     """
-    cpu_model_file = cpu_model_file or model_file
-
     # First determine if we have TensorFlow-Lite runtime installed, or the whole Tensorflow
     # In either case we're looking to load TFLite models
     try:
@@ -49,28 +48,37 @@ def make_interpreter(model_file: str, cpu_model_file: str = None, num_of_threads
     # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
     delegates = None

-    # On Windows, the interpreter.__init__ method accepts experimental delegates.
- # These are used in self._interpreter.ModifyGraphWithDelegate, which fails on Windows - try: - import platform - if platform.system() != "Windows": + # Only try and load delegates if we're trying to use a TPU + if tpu_model_file: + try: + import platform delegate = { 'Linux': 'libedgetpu.so.1', 'Darwin': 'libedgetpu.1.dylib', 'Windows': 'edgetpu.dll'}[platform.system()] delegates = [load_delegate(delegate)] - except Exception as ex: - pass - - edge_tpu = False - if delegates: - # TensorFlow-Lite loading a TF-Lite TPU model - interpreter = Interpreter(model_path=model_file, experimental_delegates=delegates) - edge_tpu = True + except Exception as ex: + pass + + interpreter = None + edge_tpu = False + + if delegates and tpu_model_file: + try: + # TensorFlow-Lite loading a TF-Lite TPU model + # CRASH: On Windows, the interpreter.__init__ method accepts experimental + # delegates. These are used in self._interpreter.ModifyGraphWithDelegate, + # which fails on Windows + interpreter = Interpreter(model_path=tpu_model_file, experimental_delegates=delegates) + edge_tpu = True + except Exception as ex: + # Fall back + if cpu_model_file: + interpreter = Interpreter(model_path=cpu_model_file) else: # TensorFlow loading a TF-Lite CPU model - interpreter = Interpreter(model_path=cpu_model_file) - edge_tpu = False + if cpu_model_file: + interpreter = Interpreter(model_path=cpu_model_file) return (interpreter, edge_tpu) diff --git a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/Coral_USB_Accelerator.cat b/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/Coral_USB_Accelerator.cat deleted file mode 100644 index 4e95c615..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/Coral_USB_Accelerator.cat and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/Coral_USB_Accelerator.inf b/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/Coral_USB_Accelerator.inf deleted file mode 100644 index 017db1c1..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/Coral_USB_Accelerator.inf +++ /dev/null @@ -1,56 +0,0 @@ -; WinUSB-based INF. See below URL for more details. 
-; https://docs.microsoft.com/en-us/windows-hardware/drivers/usbcon/winusb-installation#inf - -[Version] -Signature = "$Windows NT$" -Class = USBDevice -ClassGUID = {88BAE032-5A81-49f0-BC3D-A4FF138216D6} -Provider = %ManufacturerName% -CatalogFile = Coral_USB_Accelerator.cat -DriverVer = 09/23/2019,11.25.00.000 - -[Manufacturer] -%ManufacturerName% = Standard,NTamd64 - -[Standard.NTamd64] -%DeviceName% = USB_Install, USB\VID_18D1&PID_9302 - -[USB_Install] -Include = winusb.inf -Needs = WINUSB.NT - -[USB_Install.Services] -Include = winusb.inf -Needs = WINUSB.NT.Services - -[USB_Install.HW] -AddReg = Dev_AddReg - -[Dev_AddReg] -HKR,,DeviceInterfaceGUIDs,0x10000,"{01152DA2-8235-43B7-A67C-28FF9BD3F8A4}" - -[USB_Install.CoInstallers] -AddReg = CoInstallers_AddReg -CopyFiles = CoInstallers_CopyFiles - -[CoInstallers_AddReg] -HKR,,CoInstallers32,0x00010000,"WdfCoInstaller01009.dll,WdfCoInstaller","WinUsbCoInstaller2.dll" - -[CoInstallers_CopyFiles] -WinUsbCoInstaller2.dll -WdfCoInstaller01009.dll - -[DestinationDirs] -CoInstallers_CopyFiles = 11 - -[SourceDisksNames] -1 = %DiskName% - -[SourceDisksFiles.amd64] -WinUsbCoInstaller2.dll = 1,amd64 -WdfCoInstaller01009.dll = 1,amd64 - -[Strings] -ManufacturerName = "Google" -DeviceName = "Coral USB Accelerator" -DiskName = "Coral USB Accelerator Install Disk" \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/Coral_USB_Accelerator_(DFU).cat b/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/Coral_USB_Accelerator_(DFU).cat deleted file mode 100644 index a977e645..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/Coral_USB_Accelerator_(DFU).cat and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/Coral_USB_Accelerator_(DFU).inf b/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/Coral_USB_Accelerator_(DFU).inf deleted file mode 100644 index a5bc268b..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/Coral_USB_Accelerator_(DFU).inf +++ /dev/null @@ -1,56 +0,0 @@ -; WinUSB-based INF. See below URL for more details. 
-; https://docs.microsoft.com/en-us/windows-hardware/drivers/usbcon/winusb-installation#inf - -[Version] -Signature = "$Windows NT$" -Class = USBDevice -ClassGUID = {88BAE032-5A81-49f0-BC3D-A4FF138216D6} -Provider = %ManufacturerName% -CatalogFile = Coral_USB_Accelerator_(DFU).cat -DriverVer = 09/23/2019,11.25.00.000 - -[Manufacturer] -%ManufacturerName% = Standard,NTamd64 - -[Standard.NTamd64] -%DeviceName% = USB_Install, USB\VID_1A6E&PID_089A - -[USB_Install] -Include = winusb.inf -Needs = WINUSB.NT - -[USB_Install.Services] -Include = winusb.inf -Needs = WINUSB.NT.Services - -[USB_Install.HW] -AddReg = Dev_AddReg - -[Dev_AddReg] -HKR,,DeviceInterfaceGUIDs,0x10000,"{3413A96E-B1D1-4026-961B-6082E4DD4947}" - -[USB_Install.CoInstallers] -AddReg = CoInstallers_AddReg -CopyFiles = CoInstallers_CopyFiles - -[CoInstallers_AddReg] -HKR,,CoInstallers32,0x00010000,"WdfCoInstaller01009.dll,WdfCoInstaller","WinUsbCoInstaller2.dll" - -[CoInstallers_CopyFiles] -WinUsbCoInstaller2.dll -WdfCoInstaller01009.dll - -[DestinationDirs] -CoInstallers_CopyFiles = 11 - -[SourceDisksNames] -1 = %DiskName% - -[SourceDisksFiles.amd64] -WinUsbCoInstaller2.dll = 1,amd64 -WdfCoInstaller01009.dll = 1,amd64 - -[Strings] -ManufacturerName = "Google" -DeviceName = "Coral USB Accelerator (DFU)" -DiskName = "Coral USB Accelerator Install Disk" \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/amd64/WdfCoInstaller01009.dll b/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/amd64/WdfCoInstaller01009.dll deleted file mode 100644 index 1731b962..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/amd64/WdfCoInstaller01009.dll and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/amd64/license.rtf b/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/amd64/license.rtf deleted file mode 100644 index 00269419..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/amd64/license.rtf +++ /dev/null @@ -1,103 +0,0 @@ -{\rtf1\ansi\ansicpg1252\deff0\nouicompat\deflang1033\deflangfe1033{\fonttbl{\f0\fswiss\fprq2\fcharset0 Tahoma;}{\f1\froman\fprq2\fcharset2 Symbol;}} -{\colortbl ;\red0\green0\blue255;\red255\green0\blue0;\red0\green0\blue0;} -{\stylesheet{ Normal;}{\s1 heading 1;}{\s2 heading 2;}{\s3 heading 3;}} -{\*\generator Riched20 6.2.8426}{\*\mmathPr\mnaryLim0\mdispDef1\mwrapIndent1440 }\viewkind4\uc1 -\pard\nowidctlpar\sb120\sa120\b\f0\fs20 MICROSOFT SOFTWARE LICENSE TERMS\par - -\pard\brdrb\brdrs\brdrw10\brsp20 \nowidctlpar\sb120\sa120 MICROSOFT WINDOWS DRIVER FRAMEWORKS 1.11\par - -\pard\nowidctlpar\sb120\sa120\b0 These license terms are an agreement between Microsoft Corporation (or based on where you live, one of its affiliates) and you. Please read them. They apply to the software named above, which includes the media on which you received it, if any. The terms also apply to any Microsoft\par - -\pard\nowidctlpar\fi-360\li360\sb120\sa120\f1\'b7\tab\f0 updates,\par -\f1\'b7\tab\f0 supplements,\par -\f1\'b7\tab\f0 Internet-based services, and\par -\f1\'b7\tab\f0 support services\par - -\pard\nowidctlpar\sb120\sa120 for this software, unless other terms accompany those items. If so, those terms apply.\par -\b By using the software, you accept these terms. 
If you do not accept them, do not use the software.\par - -\pard\brdrt\brdrs\brdrw10\brsp20 \nowidctlpar\sb120\sa120 If you comply with these license terms, you have the perpetual rights below.\par - -\pard\nowidctlpar\fi-357\li357\sb120\sa120 1.\tab INSTALLATION AND USE RIGHTS. \b0\par - -\pard\widctlpar\fi-363\li720\sb120\sa120\b a.\tab\b0 One user may install and use any number of copies of the software on your devices\b\fs19 \b0\fs20 to design, develop and test your programs.\par -\b\lang9 b.\tab Build Server List.\~ \b0 The software contains certain components that are identified in the Build Server List located at http://go.microsoft.com/fwlink/?LinkId=249022.\~\~ You may install copies of the files listed in it, onto your build machines, solely for the purpose of compiling, building, verifying and archiving your programs.\~ These components may only be used in order to create and configure build systems internal to your organization to support your internal build environment.\~ These\~components\cf2 \cf0 do not provide external distribution rights to any of the software or enable you to provide a build environment as a service to third parties.\~We may add additional files to this list from time to time.\~\~ \lang1033\par - -\pard\nowidctlpar\fi-357\li357\sb120\sa120\b 2.\tab ADDITIONAL LICENSING REQUIREMENTS AND/OR USE RIGHTS.\par - -\pard\nowidctlpar\fi-363\li720\sb120\sa120 a.\tab Distributable Code.\b0 The software contains code that you are permitted to distribute in programs you develop if you comply with the terms below.\par - -\pard\nowidctlpar\s3\fi-357\li1077\sb120\sa120\b i.\tab Right to Use and Distribute. The code and text files listed below are \ldblquote Distributable Code.\rdblquote\par - -\pard\nowidctlpar\fi-358\li1435\sb120\sa120\cf3\b0\f1\'b7\tab\cf0\ul\f0 REDIST.TXT Files\ulnone . You may copy and distribute the object code form of the code listed in REDIST.TXT files.\par -\cf3\f1\'b7\tab\cf0\ul\f0 Third Party Distribution\ulnone . You may permit distributors of your programs to copy and distribute the Distributable Code as part of those programs.\par - -\pard\nowidctlpar\s3\fi-357\li1077\sb120\sa120\b ii.\tab Distribution Requirements. For any Distributable Code you distribute, you must\par - -\pard\nowidctlpar\fi-358\li1435\sb120\sa120\cf3\b0\f1\'b7\tab\cf0\f0 add significant primary functionality to it in your programs;\par -\cf3\f1\'b7\tab\cf0\f0 for any Distributable Code having a filename extension of .lib, distribute only the results of running such Distributable Code through a linker with your program;\par -\cf3\f1\'b7\tab\cf0\f0 distribute Distributable Code included in a setup program only as part of that setup program without modification;\par -\cf3\f1\'b7\tab\cf0\f0 require distributors and external end users to agree to terms that protect it at least as much as this agreement; \par -\cf3\f1\'b7\tab\cf0\f0 display your valid copyright notice on your programs; and\par -\cf3\f1\'b7\tab\cf0\f0 indemnify, defend, and hold harmless Microsoft from any claims, including attorneys\rquote fees, related to the distribution or use of your programs.\par - -\pard\nowidctlpar\s3\fi-357\li1077\sb120\sa120\b iii.\tab Distribution Restrictions. 
You may not\par - -\pard\nowidctlpar\fi-358\li1435\sb120\sa120\cf3\b0\f1\'b7\tab\cf0\f0 alter any copyright, trademark or patent notice in the Distributable Code;\par -\cf3\f1\'b7\tab\cf0\f0 use Microsoft\rquote s trademarks in your programs\rquote names or in a way that suggests your programs come from or are endorsed by Microsoft;\par -\cf3\f1\'b7\tab\cf0\f0 distribute Distributable Code to run on a platform other than the Windows platform;\par -\cf3\f1\'b7\tab\cf0\f0 include Distributable Code in malicious, deceptive or unlawful programs; or\par -\cf3\f1\'b7\tab\cf0\f0 modify or distribute the source code of any Distributable Code so that any part of it becomes subject to an Excluded License. An Excluded License is one that requires, as a condition of use, modification or distribution, that\par - -\pard\nowidctlpar\fi-357\li1792\sb120\sa120\f1\'b7\tab\f0 the code be disclosed or distributed in source code form; or\par -\f1\'b7\tab\f0 others have the right to modify it.\par - -\pard\nowidctlpar\fi-357\li357\sb120\sa120\b 3.\tab SCOPE OF LICENSE.\b0 The software is licensed, not sold. This agreement only gives you some rights to use the software. Microsoft reserves all other rights. Unless applicable law gives you more rights despite this limitation, you may use the software only as expressly permitted in this agreement. In doing so, you must comply with any technical limitations in the software that only allow you to use it in certain ways. You may not\par - -\pard\nowidctlpar\fi-363\li720\sb120\sa120\f1\'b7\tab\f0 work around any technical limitations in the software;\par -\f1\'b7\tab\f0 reverse engineer, decompile or disassemble the software, except and only to the extent that applicable law expressly permits, despite this limitation;\par -\f1\'b7\tab\f0 make more copies of the software than specified in this agreement or allowed by applicable law, despite this limitation;\par -\f1\'b7\tab\f0 publish the software for others to copy;\par -\f1\'b7\tab\f0 rent, lease or lend the software;\par -\f1\'b7\tab\f0 transfer the software or this agreement to any third party; or\par -\f1\'b7\tab\f0 use the software for commercial software hosting services.\par - -\pard\nowidctlpar\fi-357\li357\sb120\sa120\b 4.\tab BACKUP COPY.\b0 You may make one backup copy of the software. You may use it only to reinstall the software.\par -\b 5.\tab DOCUMENTATION.\b0 Any person that has valid access to your computer or internal network may copy and use the documentation for your internal, reference purposes.\par -\b 6.\tab EXPORT RESTRICTIONS.\b0 The software is subject to United States export laws and regulations. You must comply with all domestic and international export laws and regulations that apply to the software. These laws include restrictions on destinations, end users and end use. For additional information, see {{\field{\*\fldinst{HYPERLINK www.microsoft.com/exporting }}{\fldrslt{www.microsoft.com/exporting\ul0\cf0}}}}\f0\fs20 .\par -\b 7.\tab SUPPORT SERVICES. 
\b0 Because this software is \ldblquote as is,\rdblquote we may not provide support services for it.\par -\b 8.\tab ENTIRE AGREEMENT.\b0 This agreement, and the terms for supplements, updates, Internet-based services and support services that you use, are the entire agreement for the software and support services.\par - -\pard\nowidctlpar\fi-360\li360\sb120\sa120\b 9.\tab APPLICABLE LAW.\par - -\pard\nowidctlpar\fi-363\li720\sb120\sa120 a.\tab United States.\b0 If you acquired the software in the United States, Washington state law governs the interpretation of this agreement and applies to claims for breach of it, regardless of conflict of laws principles. The laws of the state where you live govern all other claims, including claims under state consumer protection laws, unfair competition laws, and in tort.\par -\b b.\tab Outside the United States.\b0 If you acquired the software in any other country, the laws of that country apply.\par - -\pard\nowidctlpar\fi-357\li357\sb120\sa120\b 10.\tab LEGAL EFFECT.\b0 This agreement describes certain legal rights. You may have other rights under the laws of your country. You may also have rights with respect to the party from whom you acquired the software. This agreement does not change your rights under the laws of your country if the laws of your country do not permit it to do so.\par - -\pard\widctlpar\fi-357\li357\sb120\sa120\b 11.\tab DISCLAIMER OF WARRANTY. The software is licensed \ldblquote as-is.\rdblquote You bear the risk of using it. Microsoft gives no express warranties, guarantees or conditions. You may have additional consumer rights or statutory guarantees under your local laws which this agreement cannot change. To the extent permitted under your local laws, Microsoft excludes the implied warranties of merchantability, fitness for a particular purpose and non-infringement.\par - -\pard\nowidctlpar\li357\sb120\sa120 FOR AUSTRALIA \endash You have statutory guarantees under the Australian Consumer Law and nothing in these terms is intended to affect those rights.\par - -\pard\widctlpar\fi-357\li357\sb120\sa120 12.\tab LIMITATION ON AND EXCLUSION OF REMEDIES AND DAMAGES. You can recover from Microsoft and its suppliers only direct damages up to U.S. $5.00. You cannot recover any other damages, including consequential, lost profits, special, indirect or incidental damages.\par - -\pard\nowidctlpar\li357\sb120\sa120\b0 This limitation applies to\par - -\pard\nowidctlpar\fi-363\li720\sb120\sa120\f1\'b7\tab\f0 anything related to the software, services, content (including code) on third party Internet sites, or third party programs; and\par -\f1\'b7\tab\f0 claims for breach of contract, breach of warranty, guarantee or condition, strict liability, negligence, or other tort to the extent permitted by applicable law.\par - -\pard\nowidctlpar\li360\sb120\sa120 It also applies even if Microsoft knew or should have known about the possibility of the damages. The above limitation or exclusion may not apply to you because your country may not allow the exclusion or limitation of incidental, consequential or other damages.\par -\b Please note: As this software is distributed in Quebec, Canada, some of the clauses in this agreement are provided below in French.\b0\par -\b\lang1036 Remarque : Ce logiciel \'e9tant distribu\'e9 au Qu\'e9bec, Canada, certaines des clauses dans ce contrat sont fournies ci-dessous en fran\'e7ais.\b0\par -\b EXON\'c9RATION DE GARANTIE.\b0 Le logiciel vis\'e9 par une licence est offert \'ab tel quel \'bb. 
Toute utilisation de ce logiciel est \'e0 votre seule risque et p\'e9ril. Microsoft n\rquote accorde aucune autre garantie expresse. Vous pouvez b\'e9n\'e9ficier de droits additionnels en vertu du droit local sur la protection des consommateurs, que ce contrat ne peut modifier. La ou elles sont permises par le droit locale, les garanties implicites de qualit\'e9 marchande, d\rquote ad\'e9quation \'e0 un usage particulier et d\rquote absence de contrefa\'e7on sont exclues.\par -\b LIMITATION DES DOMMAGES-INT\'c9R\'caTS ET EXCLUSION DE RESPONSABILIT\'c9 POUR LES DOMMAGES.\b0 Vous pouvez obtenir de Microsoft et de ses fournisseurs une indemnisation en cas de dommages directs uniquement \'e0 hauteur de 5,00 $ US. Vous ne pouvez pr\'e9tendre \'e0 aucune indemnisation pour les autres dommages, y compris les dommages sp\'e9ciaux, indirects ou accessoires et pertes de b\'e9n\'e9fices.\par -\lang1033 Cette limitation concerne :\par - -\pard\nowidctlpar\fi-360\li720\sb120\sa120\f1\lang1036\'b7\tab\f0 tout ce qui est reli\'e9 au logiciel, aux services ou au contenu (y compris le code) figurant sur des sites Internet tiers ou dans des programmes tiers ; et\par -\f1\'b7\tab\f0 les r\'e9clamations au titre de violation de contrat ou de garantie, ou au titre de responsabilit\'e9 stricte, de n\'e9gligence ou d\rquote une autre faute dans la limite autoris\'e9e par la loi en vigueur.\par - -\pard\nowidctlpar\li360\sb120\sa120 Elle s\rquote applique \'e9galement, m\'eame si Microsoft connaissait ou devrait conna\'eetre l\rquote\'e9ventualit\'e9 d\rquote un tel dommage. Si votre pays n\rquote autorise pas l\rquote exclusion ou la limitation de responsabilit\'e9 pour les dommages indirects, accessoires ou de quelque nature que ce soit, il se peut que la limitation ou l\rquote exclusion ci-dessus ne s\rquote appliquera pas \'e0 votre \'e9gard.\par -\b EFFET JURIDIQUE. Le pr\'e9sent contrat d\'e9crit certains droits juridiques. Vous pourriez avoir d\rquote autres droits pr\'e9vus par les lois de votre pays. Le pr\'e9sent contrat ne modifie pas les droits que vous conf\'e8rent les lois de votre pays si celles-ci ne le permettent pas.\b0\par -\lang1033\par -} - \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/amd64/winusbcoinstaller2.dll b/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/amd64/winusbcoinstaller2.dll deleted file mode 100644 index 30e55025..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/amd64/winusbcoinstaller2.dll and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/coral.cat b/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/coral.cat deleted file mode 100644 index ff177070..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/coral.cat and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/coral.inf b/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/coral.inf deleted file mode 100644 index 2c3cb4eb..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/coral.inf +++ /dev/null @@ -1,119 +0,0 @@ -;// SPDX -;/* -; * Module name: coral.inf; install file for the windows coral driver -; * -; * Driver for the Apex chip. -; * -; * Copyright (C) 2020 Google, Inc. 
-; */ - -[Version] -Signature="$WINDOWS NT$" -; -Class=coral_Device -ClassGuid={E8C7BB5B-6BD1-44C5-B852-2E5038239871} -; -Provider=%ManufacturerName% -; -CatalogFile=coral.cat -; -DriverVer = 07/09/2021,16.56.20.950 - -[DestinationDirs] -DefaultDestDir = 12 -coral_Device_CoInstaller_CopyFiles = 11 - -; ================= Class section ===================== -[ClassInstall32] -Addreg=coralClassReg - -[coralClassReg] -HKR,,,0,%coralstr% -HKR,,Icon,,-14 - -[SourceDisksNames] -1 = %DiskName%,,,"" - -[SourceDisksFiles] -coral.sys = 1,, -; - - -;***************************************** -; Install Section -;***************************************** -[Manufacturer] -%ManufacturerName%=Standard,NTamd64 - -[Standard.NTamd64] -%coral.DeviceDesc%=coral_Device, PCI\VEN_1AC1&DEV_089A&SUBSYS_089A1AC1&REV_00 - -[coral_Device.NT] -CopyFiles=Drivers_Dir - -[Drivers_Dir] -coral.sys - -;;;MSI interrupt configuration -[coral_Device.NT.HW] -AddReg = coral_Device.NT.HW.AddReg - -[coral_Device.NT.HW.AddReg] -HKR, "Interrupt Management", , 0x00000010 -HKR, "Interrupt Management\MessageSignaledInterruptProperties", , 0x00000010 -HKR, "Interrupt Management\MessageSignaledInterruptProperties", MSISupported, 0x00010001, 1 -HKR, "Interrupt Management\MessageSignaledInterruptProperties", MessageNumberLimit, 0x00010001, 0x00000004 - -;-------------- Service installation -[coral_Device.NT.Services] -AddService = coral,%SPSVCINST_ASSOCSERVICE%, coral_Service_Inst - -; -------------- coral driver install sections -; -[coral_Service_Inst] -;DisplayName = %coral.SVCDESC% -ServiceType = 1 ; SERVICE_KERNEL_DRIVER -StartType = 3 ; SERVICE_DEMAND_START -ErrorControl = 1 ; SERVICE_ERROR_NORMAL -ServiceBinary = %12%\coral.sys -AddReg = coral_Device.NT.Services.AddReg - -[coral_Device.NT.Services.AddReg] -HKR, "Parameters", "trip_point0_temp", 0x00010001, 85000 -HKR, "Parameters", "trip_point1_temp", 0x00010001, 90000 -HKR, "Parameters", "trip_point2_temp", 0x00010001, 95000 -; -HKR, "Parameters", "hw_temp_warn1_en", 0x00010001, 0 -HKR, "Parameters", "hw_temp_warn2_en", 0x00010001, 1 -HKR, "Parameters", "hw_temp_warn1", 0x00010001, 100000 -HKR, "Parameters", "hw_temp_warn2", 0x00010001, 100000 -HKR, "Parameters", "temp_poll_interval", 0x00010001, 5000 - -;--- coral_Device Coinstaller installation ------ -; -[coral_Device.NT.CoInstallers] -AddReg=coral_Device_CoInstaller_AddReg -CopyFiles=coral_Device_CoInstaller_CopyFiles - -[coral_Device_CoInstaller_AddReg] -; - - -[coral_Device_CoInstaller_CopyFiles] -; - - -[coral_Device.NT.Wdf] -KmdfService = coral, coral_wdfsect -; -[coral_wdfsect] -KmdfLibraryVersion = 1.27 - -[Strings] -SPSVCINST_ASSOCSERVICE= 0x00000002 -ManufacturerName="Google LLC" -ClassName="CoralPCIeClass" ; -DiskName = "Coral Installation Disk" -coralstr = "Coral Accelerator devices" -coral.DeviceDesc = "Coral PCIe Accelerator" -coral.SVCDESC = "Coral PCIe Accelerator Service" diff --git a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/coral.man b/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/coral.man deleted file mode 100644 index 419d3748..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/coral.man +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/coral.sys b/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/coral.sys deleted file mode 100644 index 
7c71f33d..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/coral_accelerator_windows/coral.sys and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/aarch64/libedgetpu.so.1 b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/aarch64/libedgetpu.so.1 deleted file mode 100644 index 90ac68cb..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/aarch64/libedgetpu.so.1 +++ /dev/null @@ -1 +0,0 @@ -libedgetpu.so.1.0 \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/aarch64/libedgetpu.so.1.0 b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/aarch64/libedgetpu.so.1.0 deleted file mode 100644 index b64dfa76..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/aarch64/libedgetpu.so.1.0 and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/armv7a/libedgetpu.so.1 b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/armv7a/libedgetpu.so.1 deleted file mode 100644 index 90ac68cb..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/armv7a/libedgetpu.so.1 +++ /dev/null @@ -1 +0,0 @@ -libedgetpu.so.1.0 \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/armv7a/libedgetpu.so.1.0 b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/armv7a/libedgetpu.so.1.0 deleted file mode 100644 index 6945e207..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/armv7a/libedgetpu.so.1.0 and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/darwin_arm64/libedgetpu.1.0.dylib b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/darwin_arm64/libedgetpu.1.0.dylib deleted file mode 100644 index 0af1389a..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/darwin_arm64/libedgetpu.1.0.dylib and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/darwin_arm64/libedgetpu.1.dylib b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/darwin_arm64/libedgetpu.1.dylib deleted file mode 100644 index e2c75846..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/darwin_arm64/libedgetpu.1.dylib +++ /dev/null @@ -1 +0,0 @@ -libedgetpu.1.0.dylib \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/darwin_x86_64/libedgetpu.1.0.dylib b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/darwin_x86_64/libedgetpu.1.0.dylib deleted file mode 100644 index 204804de..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/darwin_x86_64/libedgetpu.1.0.dylib and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/darwin_x86_64/libedgetpu.1.dylib b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/darwin_x86_64/libedgetpu.1.dylib deleted file mode 100644 index e2c75846..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/darwin_x86_64/libedgetpu.1.dylib +++ /dev/null @@ -1 +0,0 @@ -libedgetpu.1.0.dylib \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/k8/libedgetpu.so.1 b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/k8/libedgetpu.so.1 deleted 
file mode 100644 index 90ac68cb..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/k8/libedgetpu.so.1 +++ /dev/null @@ -1 +0,0 @@ -libedgetpu.so.1.0 \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/k8/libedgetpu.so.1.0 b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/k8/libedgetpu.so.1.0 deleted file mode 100644 index 774993bb..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/k8/libedgetpu.so.1.0 and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/x64_windows/edgetpu.dll b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/x64_windows/edgetpu.dll deleted file mode 100644 index 606b1e8e..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/x64_windows/edgetpu.dll and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/x64_windows/edgetpu.dll.if.lib b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/x64_windows/edgetpu.dll.if.lib deleted file mode 100644 index 24e6a48f..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/direct/x64_windows/edgetpu.dll.if.lib and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/aarch64/libedgetpu.so.1 b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/aarch64/libedgetpu.so.1 deleted file mode 100644 index 90ac68cb..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/aarch64/libedgetpu.so.1 +++ /dev/null @@ -1 +0,0 @@ -libedgetpu.so.1.0 \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/aarch64/libedgetpu.so.1.0 b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/aarch64/libedgetpu.so.1.0 deleted file mode 100644 index b3d4d21a..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/aarch64/libedgetpu.so.1.0 and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/armv7a/libedgetpu.so.1 b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/armv7a/libedgetpu.so.1 deleted file mode 100644 index 90ac68cb..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/armv7a/libedgetpu.so.1 +++ /dev/null @@ -1 +0,0 @@ -libedgetpu.so.1.0 \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/armv7a/libedgetpu.so.1.0 b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/armv7a/libedgetpu.so.1.0 deleted file mode 100644 index 7a4821b3..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/armv7a/libedgetpu.so.1.0 and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/darwin_arm64/libedgetpu.1.0.dylib b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/darwin_arm64/libedgetpu.1.0.dylib deleted file mode 100644 index 66b3636e..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/darwin_arm64/libedgetpu.1.0.dylib and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/darwin_arm64/libedgetpu.1.dylib b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/darwin_arm64/libedgetpu.1.dylib deleted file mode 100644 index 
e2c75846..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/darwin_arm64/libedgetpu.1.dylib +++ /dev/null @@ -1 +0,0 @@ -libedgetpu.1.0.dylib \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/darwin_x86_64/libedgetpu.1.0.dylib b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/darwin_x86_64/libedgetpu.1.0.dylib deleted file mode 100644 index 217ab205..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/darwin_x86_64/libedgetpu.1.0.dylib and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/darwin_x86_64/libedgetpu.1.dylib b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/darwin_x86_64/libedgetpu.1.dylib deleted file mode 100644 index e2c75846..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/darwin_x86_64/libedgetpu.1.dylib +++ /dev/null @@ -1 +0,0 @@ -libedgetpu.1.0.dylib \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/k8/libedgetpu.so.1 b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/k8/libedgetpu.so.1 deleted file mode 100644 index 90ac68cb..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/k8/libedgetpu.so.1 +++ /dev/null @@ -1 +0,0 @@ -libedgetpu.so.1.0 \ No newline at end of file diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/k8/libedgetpu.so.1.0 b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/k8/libedgetpu.so.1.0 deleted file mode 100644 index 19167da0..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/k8/libedgetpu.so.1.0 and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/x64_windows/edgetpu.dll b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/x64_windows/edgetpu.dll deleted file mode 100644 index ef9e415b..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/x64_windows/edgetpu.dll and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/x64_windows/edgetpu.dll.if.lib b/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/x64_windows/edgetpu.dll.if.lib deleted file mode 100644 index 738407f5..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/libedgetpu/throttled/x64_windows/edgetpu.dll.if.lib and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/libusb_win/README b/src/modules/ObjectDetectionTFLite/third_party/libusb_win/README deleted file mode 100644 index d63f6133..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/libusb_win/README +++ /dev/null @@ -1,3 +0,0 @@ -This folder contains an unmodified libusb-1.0.26 for 64-bit Windows. -It is extracted from the archive available at https://github.com/libusb/libusb/releases/download/v1.0.26/libusb-1.0.26-binaries.7z -The library is licensed under LGPL 2.1. 
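
Note on the tflite_util.py rework above: make_interpreter() now only attempts to load the Edge TPU delegate when a TPU model file is supplied, and falls back to the CPU model if delegate loading or interpreter creation fails. A condensed sketch of that pattern (a sketch only, assuming the tflite_runtime package is installed; function and variable names here are illustrative):

    import platform
    from tflite_runtime.interpreter import Interpreter, load_delegate

    # Platform-specific Edge TPU delegate library, as used in tflite_util.py
    EDGETPU_LIB = {'Linux':   'libedgetpu.so.1',
                   'Darwin':  'libedgetpu.1.dylib',
                   'Windows': 'edgetpu.dll'}

    def make_interpreter_sketch(tpu_model_file, cpu_model_file=None):
        """Returns (interpreter, using_edge_tpu), preferring the TPU model."""
        if tpu_model_file:
            try:
                delegate = load_delegate(EDGETPU_LIB[platform.system()])
                interpreter = Interpreter(model_path=tpu_model_file,
                                          experimental_delegates=[delegate])
                return interpreter, True
            except Exception:
                pass   # no TPU runtime or hardware: drop through to the CPU model
        if cpu_model_file:
            return Interpreter(model_path=cpu_model_file), False
        return None, False
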
diff --git a/src/modules/ObjectDetectionTFLite/third_party/libusb_win/libusb-1.0.dll b/src/modules/ObjectDetectionTFLite/third_party/libusb_win/libusb-1.0.dll deleted file mode 100644 index 137897ca..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/libusb_win/libusb-1.0.dll and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/usbdk/Coral USB inference not working on Windows 10 (ValueError- Failed to load delegate from edgetpu.dll) - pi3g.com.url b/src/modules/ObjectDetectionTFLite/third_party/usbdk/Coral USB inference not working on Windows 10 (ValueError- Failed to load delegate from edgetpu.dll) - pi3g.com.url deleted file mode 100644 index 6fa9a8e5..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/usbdk/Coral USB inference not working on Windows 10 (ValueError- Failed to load delegate from edgetpu.dll) - pi3g.com.url +++ /dev/null @@ -1,2 +0,0 @@ -[InternetShortcut] -URL=https://pi3g.com/2022/10/19/coral-usb-inference-not-working-on-windows-10-valueerror-failed-to-load-delegate-from-edgetpu-dll/ diff --git a/src/modules/ObjectDetectionTFLite/third_party/usbdk/LICENSE b/src/modules/ObjectDetectionTFLite/third_party/usbdk/LICENSE deleted file mode 100644 index 67db8588..00000000 --- a/src/modules/ObjectDetectionTFLite/third_party/usbdk/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/src/modules/ObjectDetectionTFLite/third_party/usbdk/UsbDk_1.0.21_x64.msi b/src/modules/ObjectDetectionTFLite/third_party/usbdk/UsbDk_1.0.21_x64.msi deleted file mode 100644 index 2c616ccc..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/usbdk/UsbDk_1.0.21_x64.msi and /dev/null differ diff --git a/src/modules/ObjectDetectionTFLite/third_party/usbdk/UsbDk_1.0.22_x64.msi b/src/modules/ObjectDetectionTFLite/third_party/usbdk/UsbDk_1.0.22_x64.msi deleted file mode 100644 index b95c5b95..00000000 Binary files a/src/modules/ObjectDetectionTFLite/third_party/usbdk/UsbDk_1.0.22_x64.msi and /dev/null differ diff --git a/src/modules/ObjectDetectionYolo/ObjectDetectionYolo.pyproj b/src/modules/ObjectDetectionYolo/ObjectDetectionYolo.pyproj index ec7e05a5..82798bd5 100644 --- a/src/modules/ObjectDetectionYolo/ObjectDetectionYolo.pyproj +++ b/src/modules/ObjectDetectionYolo/ObjectDetectionYolo.pyproj @@ -46,6 +46,9 @@ modulesettings.json + + modulesettings.json + modulesettings.json diff --git a/src/modules/ObjectDetectionYolo/detect.py b/src/modules/ObjectDetectionYolo/detect.py index 5b750bcc..4d38ecda 100644 --- a/src/modules/ObjectDetectionYolo/detect.py +++ b/src/modules/ObjectDetectionYolo/detect.py @@ -2,7 +2,6 @@ from os.path import exists import sys import time -import traceback from threading import Lock import torch @@ -15,32 +14,11 @@ # Setup a global bucket of YOLO detectors. One for each model detectors = {} # We'll use this to cache the detectors based on models -models_lock = Lock() - - -def init_detect(opts: Options): - - # This method needs to be rewritten. opts should be checked and updated - # in the adapter, not here. 
- if opts.use_CUDA: - try: - opts.use_CUDA = torch.cuda.is_available() - except: - print("Unable to test for CUDA support: " + str(ex)) - opts.use_CUDA = False - - try: - import cpuinfo - cpu_brand = cpuinfo.get_cpu_info().get('brand_raw') - if cpu_brand and cpu_brand.startswith("Apple M"): - opts.use_MPS = hasattr(torch.backends, "mps") and torch.backends.mps.is_available() - except Exception as ex: - print("Unable to import test for Apple Silicon: " + str(ex)) - +ODYOLO_models_lock = Lock() def get_detector(module_runner, models_dir: str, model_name: str, resolution: int, use_Cuda: bool, accel_device_name: int, use_MPS: bool, - half_precision: str) -> any: + use_DirectML: bool, half_precision: str) -> any: """ We have a detector for each custom model. Lookup the detector, or if it's @@ -49,31 +27,26 @@ def get_detector(module_runner, models_dir: str, model_name: str, resolution: in detector = detectors.get(model_name, None) if detector is None: - with models_lock: + with ODYOLO_models_lock: detector = detectors.get(model_name, None) + half = False if detector is None: model_path = os.path.join(models_dir, model_name + ".pt") if use_Cuda: device_type = "cuda" - if accel_device_name: device = torch.device(accel_device_name) else: device = torch.device("cuda") - device_name = torch.cuda.get_device_name(device) print(f"GPU compute capability is {torch.cuda.get_device_capability()[0]}.{torch.cuda.get_device_capability()[1]}") # Use half-precision if possible. There's a bunch of Nvidia cards where # this won't work - if half_precision == 'disable': - half = False - else: - half = half_precision == 'force' or torch.cuda.get_device_capability()[0] >= 6 - + half = half_precision != 'disable' if half: print(f"Using half-precision for the device '{device_name}'") else: @@ -81,15 +54,20 @@ def get_detector(module_runner, models_dir: str, model_name: str, resolution: in elif use_MPS: device_type = "mps" - device = torch.device(device_type) device_name = "Apple Silicon GPU" - half = False + device = torch.device(device_type) + + elif use_DirectML: + device_type = "cpu" + device_name = "DirectML" + # Torch-DirectML throws "Cannot set version_counter for inference tensor" + import torch_directml + device = torch_directml.device() else: device_type = "cpu" - device = torch.device(device_type) device_name = "CPU" - half = False + device = torch.device(device_type) print(f"Inference processing will occur on device '{device_name}'") @@ -133,7 +111,7 @@ def do_detection(module_runner, models_dir: str, model_name: str, resolution: int, use_Cuda: bool, accel_device_name: int, use_MPS: bool, - half_precision: str, img: any, threshold: float): + use_DirectML: bool, half_precision: str, img: any, threshold: float): # We have a detector for each custom model. Lookup the detector, or if it's
@@ -142,10 +120,11 @@ def do_detection(module_runner, models_dir: str, model_name: str, resolution: in start_process_time = time.perf_counter() + detector = None try: detector = get_detector(module_runner, models_dir, model_name, resolution, use_Cuda, accel_device_name, use_MPS, - half_precision) + use_DirectML, half_precision) except Exception as ex: create_err_msg = f"{create_err_msg} ({str(ex)})" @@ -159,7 +138,9 @@ def do_detection(module_runner, models_dir: str, model_name: str, resolution: in # YoloV5?6 is 1280 start_inference_time = time.perf_counter() + det = detector(img, size=640) + inferenceMs = int((time.perf_counter() - start_inference_time) * 1000) outputs = [] @@ -207,5 +188,5 @@ def do_detection(module_runner, models_dir: str, model_name: str, resolution: in except Exception as ex: module_runner.report_error(ex, __file__) - return { "success": False, "error": "Error occured on the server" } + return { "success": False, "error": "Error occurred on the server" } diff --git a/src/modules/ObjectDetectionYolo/detect_adapter.py b/src/modules/ObjectDetectionYolo/detect_adapter.py index 4bb3dc3f..85b87e82 100644 --- a/src/modules/ObjectDetectionYolo/detect_adapter.py +++ b/src/modules/ObjectDetectionYolo/detect_adapter.py @@ -17,7 +17,7 @@ from PIL import Image from options import Options -from detect import init_detect, do_detection +from detect import do_detection class YOLO62_adapter(ModuleRunner): @@ -28,6 +28,15 @@ def __init__(self): self.models_last_checked = None self.model_names = [] # We'll use this to cache the available model names + # These will be adjusted based on the hardware / packages found + self.use_CUDA = self.opts.use_CUDA + self.use_MPS = self.opts.use_MPS + self.use_DirectML = self.opts.use_DirectML + + if self.use_CUDA and self.half_precision == 'enable' and not self.hasTorchHalfPrecision: + self.half_precision = 'disable' + + def initialise(self): # if the module was launched outside of the server then the queue name @@ -36,12 +45,34 @@ def initialise(self): if not self.launched_by_server: self.queue_name = "objectdetection_queue" - init_detect(self.opts) + # CUDA takes precedence + if self.use_CUDA: + self.use_CUDA = self.hasTorchCuda - if self.opts.use_CUDA: + # If no CUDA, maybe we're on an Apple Silicon Mac? + if self.use_CUDA: + self.use_MPS = False + self.use_DirectML = False + else: + self.use_MPS = self.hasTorchMPS + + # If we're not on Apple Silicon and we're not already using CUDA, and we're in WSL or + # Windows, then DirectML is a good option if allowed and available. 
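Condensed, the provider-preference cascade the adapter builds is: CUDA first, then Apple Silicon's MPS, then DirectML (kept disabled for now, as the commented-out block that follows explains), and finally CPU. A sketch of that cascade, using the `hasTorch*`, `in_WSL` and `system` members referenced in the surrounding code; the method itself is illustrative, not the module's actual API:

```python
def resolve_execution_provider(self) -> str:
    """Sketch: pick the best available provider in preference order."""
    if self.use_CUDA and self.hasTorchCuda:
        return "CUDA"
    if self.hasTorchMPS:                        # Apple Silicon GPU
        return "MPS"
    if self.use_DirectML and not self.use_CUDA and \
       (self.in_WSL or self.system == "Windows") and self.hasTorchDirectML:
        return "DirectML"                       # disabled in practice, see below
    return "CPU"
```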
+ # if self.use_DirectML and \ + # (self.in_WSL or self.system == "Windows") and \ + # not self.use_CUDA and not self.use_MPS: + # self.use_DirectML = self.hasTorchDirectML + # else: + # self.use_DirectML = False + self.use_DirectML = False # Unfortunately we can't get PyTorch-DirectML working + + if self.use_CUDA: self.execution_provider = "CUDA" - elif self.opts.use_MPS: + elif self.use_MPS: self.execution_provider = "MPS" + elif self.use_DirectML: + self.execution_provider = "DirectML" + def process(self, data: RequestData) -> JSON: @@ -62,8 +93,9 @@ def process(self, data: RequestData) -> JSON: response = do_detection(self, self.opts.models_dir, self.opts.std_model_name, self.opts.resolution_pixels, - self.opts.use_CUDA, self.accel_device_name, - self.opts.use_MPS, self.half_precision, img, threshold) + self.use_CUDA, self.accel_device_name, + self.use_MPS, self.use_DirectML, self.half_precision, + img, threshold) elif data.command == "custom": # Perform custom object detection @@ -97,9 +129,10 @@ def process(self, data: RequestData) -> JSON: use_mX_GPU = False # self.opts.use_MPS - Custom models don't currently work with pyTorch on MPS response = do_detection(self, model_dir, model_name, - self.opts.resolution_pixels, self.opts.use_CUDA, + self.opts.resolution_pixels, self.use_CUDA, self.accel_device_name, use_mX_GPU, - self.half_precision, img, threshold) + self.use_DirectML, self.half_precision, + img, threshold) else: self.report_error(None, __file__, f"Unknown command {data.command}") @@ -107,6 +140,21 @@ def process(self, data: RequestData) -> JSON: return response + def selftest(self) -> None: + + file_name = os.path.join("test", "pexels-huseyn-kamaladdin-667838.jpg") + + request_data = RequestData() + request_data.queue = self.queue_name + request_data.command = "detect" + request_data.add_file(file_name) + request_data.add_value("confidence", 0.4) + + result = self.process(request_data) + print(f"[INFO] Self-test for {self.module_id}. Success: {result['success']}") + print(f"[INFO] Self-test output for {self.module_id}: {result}") + + def list_models(self, models_path: str): """ diff --git a/src/modules/ObjectDetectionYolo/install.bat b/src/modules/ObjectDetectionYolo/install.bat index 72a13873..ad4d8edd 100644 --- a/src/modules/ObjectDetectionYolo/install.bat +++ b/src/modules/ObjectDetectionYolo/install.bat @@ -10,6 +10,8 @@ @goto:eof ) +:: set verbosity=loud + :: Install python and the required dependencies call "%sdkScriptsPath%\utils.bat" SetupPython 3.7 "Shared" if errorlevel 1 exit /b 1 @@ -42,6 +44,7 @@ call "%sdkScriptsPath%\utils.bat" GetFromServer "custom-models-yolo5-pt.zip" "cu :: os - "windows" :: architecture - "x86_64" or "arm64" :: platform - "windows" or "windows-arm64" +:: systemName - "Windows" :: verbosity - quiet, info or loud. Use this to determines the noise level of output. :: forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. :: GetFromServer will honour this value. 
Do it yourself for DownloadAndExtract diff --git a/src/modules/ObjectDetectionYolo/install.sh b/src/modules/ObjectDetectionYolo/install.sh index 0033baaa..9f0b4b40 100644 --- a/src/modules/ObjectDetectionYolo/install.sh +++ b/src/modules/ObjectDetectionYolo/install.sh @@ -14,30 +14,79 @@ if [ "$1" != "install" ]; then exit 1 fi -if [ "$os" == "linux" ] && [ "$hasCUDA" == "true" ]; then - sudo apt install nvidia-cudnn +# verbosity="loud" + +location="Shared" +pythonVersion="3.8" + +pythonName="python${pythonVersion/./}" +if [ "${location}" == "Local" ]; then + virtualEnv="${modulePath}/bin/${os}/${pythonName}/venv" +else + virtualEnv="${runtimesPath}/bin/${os}/${pythonName}/venv" +fi +pythonCmd="${virtualEnv}/bin/python${pythonVersion}" +packagesPath="${virtualEnv}/lib/python${pythonVersion}/site-packages/" + +# Docker images already have the drivers installed +if [ "$inDocker" != "true" ] && [ "$os" == "linux" ]; then + + # cuDNN needed for linux + if [ "$hasCUDA" == "true" ]; then + writeLine 'Installing nvidia-cudnn...' + sudo apt install nvidia-cudnn -y >/dev/null 2>/dev/null & + spin $! + writeLine "Done" "$color_success" + fi + + # ROCm needed for linux + # if [ "$hasROCm" == "true" ]; then + # writeLine 'Installing ROCm driver scripts...' + # sudo apt-get update + # #Ubuntu v20.04 + # #wget https://repo.radeon.com/amdgpu-install/5.4.2/ubuntu/focal/amdgpu-install_5.4.50402-1_all.deb + # + # #Ubuntu v22.04 + # wget https://repo.radeon.com/amdgpu-install/5.4.2/ubuntu/jammy/amdgpu-install_5.4.50402-1_all.deb + # + # sudo apt-get install ./amdgpu-install_5.4.50402-1_all.deb + # spin $! + # writeLine "Done" "$color_success" + # + # writeLine 'Installing ROCm drivers...' + # sudo amdgpu-install --usecase=dkms,graphics,multimedia,opencl,hip,hiplibsdk,rocm + # spin $! + # writeLine "Done" "$color_success" + #fi + fi # Install python and the required dependencies. If we find torch then asssume it's all there -setupPython 3.8 +setupPython "${pythonVersion}" "${location}" if [ $? -ne 0 ]; then quit 1; fi -installPythonPackages 3.8 "${modulePath}" "Shared" + +# PyTorch-DirectML not working for this module +# if [ "$hasCUDA" != "true" ] && [ "$os" == "linux" ]; then +# writeLine 'Installing PyTorch-DirectML...' +# "${pythonCmd}" -m pip install torch-directml --target "${packagesPath}" +# fi + +installPythonPackages "${pythonVersion}" "${modulePath}" "${location}" if [ $? -ne 0 ]; then quit 1; fi -installPythonPackages 3.8 "${absoluteAppRootDir}/SDK/Python" "Shared" +installPythonPackages "${pythonVersion}" "${absoluteAppRootDir}/SDK/Python" "${location}" if [ $? -ne 0 ]; then quit 1; fi -# Download the models and store in /assets and /custom-models +# Download the models and store in /assets and /custom-models (already in place in docker) getFromServer "models-yolo5-pt.zip" "assets" "Downloading Standard YOLO models..." if [ $? -ne 0 ]; then quit 1; fi getFromServer "custom-models-yolo5-pt.zip" "custom-models" "Downloading Custom YOLO models..." 
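`getFromServer` caches each archive under the shared download path and honours `forceOverwrite`, as the cheatsheet that follows notes. A rough Python equivalent of that behaviour, with a placeholder server URL (the real endpoint is not shown in this diff):

```python
import os
import urllib.request
import zipfile

ASSET_SERVER = "https://example.com/assets/"    # placeholder, not the real URL

def get_from_server(filename: str, extract_dir: str, download_path: str,
                    force_overwrite: bool = False) -> None:
    """Download filename into the cache (unless already there), then extract."""
    cached = os.path.join(download_path, filename)
    if force_overwrite and os.path.exists(cached):
        os.remove(cached)                       # force a fresh download
    if not os.path.exists(cached):
        os.makedirs(download_path, exist_ok=True)
        urllib.request.urlretrieve(ASSET_SERVER + filename, cached)
    os.makedirs(extract_dir, exist_ok=True)
    with zipfile.ZipFile(cached) as archive:    # module assets ship as .zip files
        archive.extractall(extract_dir)
```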
- # -- Install script cheatsheet -- # # Variables available: # # absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI) -# sdkScriptsPath - the path to the installation utility scripts ($rootPath/Installers) +# sdkScriptsPath - the path to the installation utility scripts ($rootPath/SDK/Scripts) # downloadPath - the path to where downloads will be stored ($sdkScriptsPath/downloads) # runtimesPath - the path to the installed runtimes ($rootPath/src/runtimes) # modulesPath - the path to all the AI modules ($rootPath/src/modules) @@ -46,6 +95,8 @@ getFromServer "custom-models-yolo5-pt.zip" "custom-models" "Downloading Custom Y # os - "linux" or "macos" # architecture - "x86_64" or "arm64" # platform - "linux", "linux-arm64", "macos" or "macos-arm64" +# systemName - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi" +# "Jetson" or "Docker" # verbosity - quiet, info or loud. Use this to determines the noise level of output. # forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. # getFromServer will honour this value. Do it yourself for downloadAndExtract diff --git a/src/modules/ObjectDetectionYolo/modulesettings.docker.build.arm64.json b/src/modules/ObjectDetectionYolo/modulesettings.docker.build.arm64.json index 57dce377..2b563970 100644 --- a/src/modules/ObjectDetectionYolo/modulesettings.docker.build.arm64.json +++ b/src/modules/ObjectDetectionYolo/modulesettings.docker.build.arm64.json @@ -14,7 +14,7 @@ */ // This NEEDS to be 'shared' for docker pre-installed - "RuntimeLocation": "Shared", // Can be Local or Shareds + "RuntimeLocation": "Shared", // Can be Local or Shared // ObjectDetectNet is faster, but set this up just in case "AutoStart": false, diff --git a/src/modules/ObjectDetectionYolo/modulesettings.docker.build.gpu.json b/src/modules/ObjectDetectionYolo/modulesettings.docker.build.gpu.json new file mode 100644 index 00000000..4269850f --- /dev/null +++ b/src/modules/ObjectDetectionYolo/modulesettings.docker.build.gpu.json @@ -0,0 +1,23 @@ +{ + "Modules": { + "ObjectDetectionYolo": { + /* In Docker, when building the image, we copy over the code and config files, which includes + this special modulesettings.docker.build.json file that enables us to point the Python + interpreter to the shared, pre-installed python venv in the Docker image. This file will be + renamed to modulesettings.docker.json during the Docker image build process. + + If this module were downloaded and installed during runtime, then the usual + modulesettings.linux.json would be loaded, followed by the modulesettings.docker.json file. + The modulesettings.docker.build.json file would be ignored. Downloaded modules would have + their Python intepreter point to a Local install of Python, not the shared, so that Python + packages can be installed and persisted. 
+ */ + + // This NEEDS to be 'shared' for docker pre-installed, but due to PIP issues on GPU docker we + // added this file so we could experiment with local installed packages + "RuntimeLocation": "Shared", // Can be Local or Shared + + "PreInstalled": "true" + } + } +} \ No newline at end of file diff --git a/src/modules/ObjectDetectionYolo/modulesettings.docker.build.json b/src/modules/ObjectDetectionYolo/modulesettings.docker.build.json index 5a470302..a80d1de0 100644 --- a/src/modules/ObjectDetectionYolo/modulesettings.docker.build.json +++ b/src/modules/ObjectDetectionYolo/modulesettings.docker.build.json @@ -14,7 +14,7 @@ */ // This NEEDS to be 'shared' for docker pre-installed - "RuntimeLocation": "Shared", // Can be Local or Shareds + "RuntimeLocation": "Shared", // Can be Local or Shared "AutoStart": true, "PreInstalled": "true" diff --git a/src/modules/ObjectDetectionYolo/modulesettings.docker.build.rpi64.json b/src/modules/ObjectDetectionYolo/modulesettings.docker.build.rpi64.json index 59ae0f9d..c464fde4 100644 --- a/src/modules/ObjectDetectionYolo/modulesettings.docker.build.rpi64.json +++ b/src/modules/ObjectDetectionYolo/modulesettings.docker.build.rpi64.json @@ -14,9 +14,9 @@ */ // This NEEDS to be 'shared' for docker pre-installed - "RuntimeLocation": "Shared", // Can be Local or Shareds + "RuntimeLocation": "Shared", // Can be Local or Shared - // YOLO too slow for Raspberry Pi, but set it up just in case + // YOLO too slow for Raspberry Pi / Orange Pi, but set it up just in case "AutoStart": false, "PreInstalled": "true", diff --git a/src/modules/ObjectDetectionYolo/modulesettings.json b/src/modules/ObjectDetectionYolo/modulesettings.json index d128fe6d..601335b9 100644 --- a/src/modules/ObjectDetectionYolo/modulesettings.json +++ b/src/modules/ObjectDetectionYolo/modulesettings.json @@ -3,19 +3,21 @@ "ObjectDetectionYolo": { "Name": "Object Detection (YOLOv5 6.2)", - "Version": "1.2", + "Version": "1.4", // Publishing info - "Description": "Provides Object Detection using YOLOv5 v6.2 library with support for CPUs and CUDA enabled GPUs.", + "Description": "Provides Object Detection using YOLOv5 6.2 targeting CUDA 11.7/Torch 1.13 for newer GPUs.", "Platforms": [ "all" ], "License": "GPL-3.0", "LicenseUrl": "https://opensource.org/licenses/GPL-3.0", // Which server version is compatible with each version of this module. 
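The renamed `ModuleReleases` array below pairs each module version with a `ServerVersionRange` of `[min, max]`, where an empty `max` means open-ended. A hypothetical sketch of how such a range could be checked (the helper names are invented for illustration):

```python
def _ver(version: str) -> tuple:
    """Turn '2.1.6' into (2, 1, 6); tuples compare element by element."""
    return tuple(int(part) for part in version.split("."))

def server_in_range(server_version: str, server_range: list) -> bool:
    minimum, maximum = server_range
    if _ver(server_version) < _ver(minimum):
        return False
    return maximum == "" or _ver(server_version) <= _ver(maximum)

# server_in_range("2.1.3", ["2.1.0", "2.1.6"])  -> True
# server_in_range("2.2.0", ["2.1", ""])         -> True (open upper bound)
```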
- "VersionCompatibililty": [ - { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-03-01" }, - { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-20" } - ], + "ModuleReleases": [ + { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-03-01" }, + { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1.0", "2.1.6" ], "ReleaseDate": "2023-03-20", "ReleaseNotes": "Updated for CodeProject.AI Server 2.1" }, + { "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1.7", "" ], "ReleaseDate": "2023-04-29", "ReleaseNotes": "Updated module settings", "Importance": "Minor" }, + { "ModuleVersion": "1.4", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-08-12", "ReleaseNotes": "PyTorch version downgrade" } + ], // Launch instructions "AutoStart": true, diff --git a/src/modules/ObjectDetectionYolo/options.py b/src/modules/ObjectDetectionYolo/options.py index 480ba5b8..fa7c7436 100644 --- a/src/modules/ObjectDetectionYolo/options.py +++ b/src/modules/ObjectDetectionYolo/options.py @@ -36,7 +36,8 @@ def __init__(self): self.model_size = ModuleOptions.getEnvVariable("MODEL_SIZE", "Medium") # small, medium, large //, nano, x-large self.use_CUDA = ModuleOptions.getEnvVariable("USE_CUDA", "True") # True / False - self.use_MPS = False # Default is False, but we'll enable if possible + self.use_MPS = ModuleOptions.support_GPU # only if available... + self.use_DirectML = ModuleOptions.support_GPU # only if available... # Normalise input self.model_size = self.model_size.lower() @@ -54,6 +55,6 @@ def __init__(self): # dump the important variables if self._show_env_variables: - print(f"APPDIR: {self.app_dir}") - print(f"MODEL_SIZE: {self.model_size}") - print(f"MODELS_DIR: {self.models_dir}") + print(f"Debug: APPDIR: {self.app_dir}") + print(f"Debug: MODEL_SIZE: {self.model_size}") + print(f"Debug: MODELS_DIR: {self.models_dir}") diff --git a/src/modules/ObjectDetectionYolo/requirements.linux.arm64.txt b/src/modules/ObjectDetectionYolo/requirements.linux.arm64.txt index 0d310fb8..e111d1e7 100644 --- a/src/modules/ObjectDetectionYolo/requirements.linux.arm64.txt +++ b/src/modules/ObjectDetectionYolo/requirements.linux.arm64.txt @@ -1,26 +1,26 @@ #! Python3.7 -Pandas # Installing Pandas, a data analysis / data manipulation tool -CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models -OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library -SciPy # Installing SciPy, a library for mathematics, science, and engineering -PyYAML # Installing PyYAML, a library for reading configuration files +Pandas # Installing Pandas, a data analysis / data manipulation tool +CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models +OpenCV-Python # Installing OpenCV, the Open source Computer Vision library +Pillow<10.0.0 # Installing Pillow, a Python Image Library +SciPy # Installing SciPy, a library for mathematics, science, and engineering +PyYAML # Installing PyYAML, a library for reading configuration files # Specific versions that match the models we're using. This requires <= Python 3.9. 
Any # version higher can use Python 3.10 -# Torch==1.10.2 # Installing Torch, for Tensor computation and Deep neural networks -# TorchVision==0.11.3 # Installing TorchVision, for Computer Vision based AI +# Torch==1.10.2 # Installing Torch, for Tensor computation and Deep neural networks +# TorchVision==0.11.3 # Installing TorchVision, for Computer Vision based AI -# This is annoying. +# Annoyingly, we need to drop down a version. # https://discuss.pytorch.org/t/failed-to-load-image-python-extension-could-not-find-module/140278/15 -torch==1.9.0 -torchvision==0.10.0 +torch==1.9.0 # Installing Torch, for Tensor computation and Deep neural networks +torchvision==0.10.0 # Installing TorchVision, for Computer Vision based AI # the Ultralytics Yolov5 package -yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images +yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images # We need this, but we don't need this. -Seaborn # Installing Seaborn, a data visualization library based on matplotlib +Seaborn # Installing Seaborn, a data visualization library based on matplotlib # last line empty. \ No newline at end of file diff --git a/src/modules/ObjectDetectionYolo/requirements.linux.cuda.txt b/src/modules/ObjectDetectionYolo/requirements.linux.cuda.txt index 22f87485..9b1b278f 100644 --- a/src/modules/ObjectDetectionYolo/requirements.linux.cuda.txt +++ b/src/modules/ObjectDetectionYolo/requirements.linux.cuda.txt @@ -1,21 +1,21 @@ #! Python3.7 -Pandas # Installing Pandas, a data analysis / data manipulation tool -CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models -OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library -SciPy # Installing SciPy, a library for mathematics, science, and engineering -PyYAML # Installing PyYAML, a library for reading configuration files +Pandas # Installing Pandas, a data analysis / data manipulation tool +CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models +OpenCV-Python # Installing OpenCV, the Open source Computer Vision library +Pillow<10.0.0 # Installing Pillow, a Python Image Library +SciPy # Installing SciPy, a library for mathematics, science, and engineering +PyYAML # Installing PyYAML, a library for reading configuration files -# For CUDA: (Using v1.10 since 1.11 has an issue with UpSample Module Layer) +## For CUDA 11.7 (NOT torch 2.0+) --find-links https://download.pytorch.org/whl/torch_stable.html -torch==1.10.2+cu113 # Installing PyTorch, an open source machine learning framework +torch==1.13.0+cu117 # Installing PyTorch, an open source machine learning framework --find-links https://download.pytorch.org/whl/torch_stable.html -torchvision==0.11.3+cu113 # Installing TorchVision, for working with computer vision models +torchvision==0.14.0+cu117 # Installing TorchVision, for working with computer vision models -yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images +yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images # We need this, but we don't need this. -Seaborn # Installing Seaborn, a data visualization library based on matplotlib +Seaborn # Installing Seaborn, a data visualization library based on matplotlib # last line empty.. 
\ No newline at end of file diff --git a/src/modules/ObjectDetectionYolo/requirements.linux.rocm.txt b/src/modules/ObjectDetectionYolo/requirements.linux.rocm.txt new file mode 100644 index 00000000..495296f8 --- /dev/null +++ b/src/modules/ObjectDetectionYolo/requirements.linux.rocm.txt @@ -0,0 +1,20 @@ +#! Python3.7 + +Pandas # Installing Pandas, a data analysis / data manipulation tool +CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models +OpenCV-Python # Installing OpenCV, the Open source Computer Vision library +Pillow<10.0.0 # Installing Pillow, a Python Image Library +SciPy # Installing SciPy, a library for mathematics, science, and engineering +PyYAML # Installing PyYAML, a library for reading configuration files + +--index-url https://download.pytorch.org/whl/rocm5.4.2 +torch # Installing PyTorch, an open source machine learning framework +--index-url https://download.pytorch.org/whl/rocm5.4.2 +torchvision # Installing TorchVision, for working with computer vision models + +yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images + +# We need this, but we don't need this. +Seaborn # Installing Seaborn, a data visualization library based on matplotlib + +# last line empty.. \ No newline at end of file diff --git a/src/modules/ObjectDetectionYolo/requirements.linux.txt b/src/modules/ObjectDetectionYolo/requirements.linux.txt index a0eed566..8fcaea67 100644 --- a/src/modules/ObjectDetectionYolo/requirements.linux.txt +++ b/src/modules/ObjectDetectionYolo/requirements.linux.txt @@ -1,22 +1,31 @@ #! Python3.7 -Pandas # Installing Pandas, a data analysis / data manipulation tool -CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models -OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library -SciPy # Installing SciPy, a library for mathematics, science, and engineering -PyYAML # Installing PyYAML, a library for reading configuration files +Pandas # Installing Pandas, a data analysis / data manipulation tool +CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models +OpenCV-Python # Installing OpenCV, the Open source Computer Vision library +Pillow<10.0.0 # Installing Pillow, a Python Image Library +SciPy # Installing SciPy, a library for mathematics, science, and engineering +PyYAML # Installing PyYAML, a library for reading configuration files -# Specific versions that match the models we're using. +# PyTorch-DirectML not working for this module +# torch-directml # Installing the PyTorch DirectML plugin + +# Specific versions that match the models we're using. Size is ~830Mb +# ** Don't do this if we're installing Torch-DirectML ** --extra-index-url https://download.pytorch.org/whl/cpu -Torch==1.10.2+cpu # Installing Torch, for Tensor computation and Deep neural networks +Torch==1.10.2+cpu # Installing Torch, for Tensor computation and Deep neural networks --extra-index-url https://download.pytorch.org/whl/cpu -TorchVision==0.11.3+cpu # Installing TorchVision, for Computer Vision based AI +TorchVision==0.11.3+cpu # Installing TorchVision, for Computer Vision based AI -# We need this, but we don't need this. -Seaborn # Installing Seaborn, a data visualization library based on matplotlib +# CPU specific Torch for Linux. This is Torch 2.0, though, which seems to be...troublesome. 
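Given how touchy these pins are (the Torch 2.0 alternative stays commented out below), a quick post-install sanity check confirms which wheel actually landed. The expected strings simply restate the pins above:

```python
import torch
import torchvision

print(torch.__version__)          # expect "1.10.2+cpu" from the pin above
print(torchvision.__version__)    # expect "0.11.3+cpu"
print(torch.cuda.is_available())  # False: these are the CPU-only wheels
```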
+# --index-url https://download.pytorch.org/whl/cpu +# Torch # Installing Torch, for Tensor computation and Deep neural networks +# --index-url https://download.pytorch.org/whl/cpu +# TorchVision # Installing TorchVision, for Computer Vision based AI -# the Ultralytics Yolov5 package -yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images +yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images + +# We need this, but we don't need this. +Seaborn # Installing Seaborn, a data visualization library based on matplotlib # last line empty. \ No newline at end of file diff --git a/src/modules/ObjectDetectionYolo/requirements.macos.arm64.txt b/src/modules/ObjectDetectionYolo/requirements.macos.arm64.txt index 83747cd2..b35a5d75 100644 --- a/src/modules/ObjectDetectionYolo/requirements.macos.arm64.txt +++ b/src/modules/ObjectDetectionYolo/requirements.macos.arm64.txt @@ -2,25 +2,19 @@ # Looking for more info on M1 chips? https://developer.apple.com/forums/thread/695963 -Pandas # Installing Pandas, a data analysis / data manipulation tool -CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models -OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library -SciPy # Installing SciPy, a library for mathematics, science, and engineering -PyYAML # Installing PyYAML, a library for reading configuration files +Pandas # Installing Pandas, a data analysis / data manipulation tool +CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models +OpenCV-Python # Installing OpenCV, the Open source Computer Vision library +Pillow<10.0.0 # Installing Pillow, a Python Image Library +SciPy # Installing SciPy, a library for mathematics, science, and engineering +PyYAML # Installing PyYAML, a library for reading configuration files -## Bleeding edge versions of torch for Apple Silicon. -#--pre -#--extra-index-url https://download.pytorch.org/whl/nightly/cpu -torch # Installing PyTorch, for Tensor computation and Deep neural networks -#--pre -#s -torchvision # Installing TorchVision, for Computer Vision based AI +torch # Installing PyTorch, for Tensor computation and Deep neural networks +torchvision # Installing TorchVision, for Computer Vision based AI -# the Ultralytics Yolov5 package -yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images +yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images # We need this, but we don't need this. -Seaborn # Installing Seaborn, a data visualization library based on matplotlib +Seaborn # Installing Seaborn, a data visualization library based on matplotlib # last line empty. \ No newline at end of file diff --git a/src/modules/ObjectDetectionYolo/requirements.macos.txt b/src/modules/ObjectDetectionYolo/requirements.macos.txt index bcbd70ed..e7beea3a 100644 --- a/src/modules/ObjectDetectionYolo/requirements.macos.txt +++ b/src/modules/ObjectDetectionYolo/requirements.macos.txt @@ -1,20 +1,21 @@ #! 
Python3.7 -Pandas # Installing Pandas, a data analysis / data manipulation tool -CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models -OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library -SciPy # Installing SciPy, a library for mathematics, science, and engineering -PyYAML # Installing PyYAML, a library for reading configuration files +Pandas # Installing Pandas, a data analysis / data manipulation tool +CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models +OpenCV-Python # Installing OpenCV, the Open source Computer Vision library +Pillow<10.0.0 # Installing Pillow, a Python Image Library +SciPy # Installing SciPy, a library for mathematics, science, and engineering +PyYAML # Installing PyYAML, a library for reading configuration files -# Specific versions that match the models we're using. -Torch==1.10.2 # Installing Torch, for Tensor computation and Deep neural networks -TorchVision==0.11.3 # Installing TorchVision, for Computer Vision based AI +# Torch==1.10.2 # Installing Torch, for Tensor computation and Deep neural networks +# TorchVision==0.11.3 # Installing TorchVision, for Computer Vision based AI -# the Ultralytics Yolov5 package -yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images +Torch # Installing Torch, for Tensor computation and Deep neural networks +TorchVision # Installing TorchVision, for Computer Vision based AI + +yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images # We need this, but we don't need this. -Seaborn # Installing Seaborn, a data visualization library based on matplotlib +Seaborn # Installing Seaborn, a data visualization library based on matplotlib # last line empty. \ No newline at end of file diff --git a/src/modules/ObjectDetectionYolo/requirements.txt b/src/modules/ObjectDetectionYolo/requirements.txt index f7bb17f5..06efa6dd 100644 --- a/src/modules/ObjectDetectionYolo/requirements.txt +++ b/src/modules/ObjectDetectionYolo/requirements.txt @@ -1,20 +1,200 @@ #! 
Python3.7 -Pandas # Installing Pandas, a data analysis / data manipulation tool -CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models -OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library -SciPy # Installing SciPy, a library for mathematics, science, and engineering -PyYAML # Installing PyYAML, a library for reading configuration files +urllib3<2.0 # Installing urllib3, the HTTP client for Python ---find-links https://download.pytorch.org/whl/torch_stable.html -torch==1.10.1+cpu # Installing PyTorch, for Tensor computation and Deep neural networks ---find-links https://download.pytorch.org/whl/torch_stable.html -torchvision==0.11.2+cpu # Installing TorchVision, for Computer Vision based AI +Pandas # Installing Pandas, a data analysis / data manipulation tool +CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models +OpenCV-Python # Installing OpenCV, the Open source Computer Vision library +Pillow<10.0.0 # Installing Pillow, a Python Image Library +SciPy # Installing SciPy, a library for mathematics, science, and engineering +PyYAML # Installing PyYAML, a library for reading configuration files -yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images +# PyTorch-DirectML not working for this module +# torch-directml # Installing the PyTorch DirectML plugin + +# CPU specific Torch 1.13.0. Size ~830Mb +--extra-index-url https://download.pytorch.org/whl/cpu +torch==1.13.0+cpu # Installing PyTorch, for Tensor computation and Deep neural networks +--extra-index-url https://download.pytorch.org/whl/cpu +torchvision==0.14.0+cpu # Installing TorchVision, for Computer Vision based AI + +# as per https://pytorch.org/get-started/locally/, this should install CPU versions +# (currently 2.0.1 on python 3.8+, 1.13.1 on Python 3.7). ~1.3GB for CPU, ~3.9GB for GPU +# torch # Installing PyTorch, for Tensor computation and Deep neural networks +# torchvision # Installing TorchVision, for Computer Vision based AI + +yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images # We need this, but we don't need this. -Seaborn # Installing Seaborn, a data visualization library based on matplotlib +Seaborn # Installing Seaborn, a data visualization library based on matplotlib + +# last line empty. + +# In case you were morbidly curious: -# last line empty. 
\ No newline at end of file +# yolov5==6.2.3 +# - boto3 [required: >=1.19.1, installed: 1.26.132] +# - botocore [required: >=1.29.132,<1.30.0, installed: 1.29.132] +# - jmespath [required: >=0.7.1,<2.0.0, installed: 1.0.1] +# - python-dateutil [required: >=2.1,<3.0.0, installed: 2.8.2] +# - six [required: >=1.5, installed: 1.16.0] +# - urllib3 [required: >=1.25.4,<1.27, installed: 1.26.15] +# - jmespath [required: >=0.7.1,<2.0.0, installed: 1.0.1] +# - s3transfer [required: >=0.6.0,<0.7.0, installed: 0.6.1] +# - botocore [required: >=1.12.36,<2.0a.0, installed: 1.29.132] +# - jmespath [required: >=0.7.1,<2.0.0, installed: 1.0.1] +# - python-dateutil [required: >=2.1,<3.0.0, installed: 2.8.2] +# - six [required: >=1.5, installed: 1.16.0] +# - urllib3 [required: >=1.25.4,<1.27, installed: 1.26.15] +# - fire [required: Any, installed: 0.5.0] +# - six [required: Any, installed: 1.16.0] +# - termcolor [required: Any, installed: 2.3.0] +# - ipython [required: Any, installed: 7.34.0] +# - backcall [required: Any, installed: 0.2.0] +# - colorama [required: Any, installed: 0.4.6] +# - decorator [required: Any, installed: 5.1.1] +# - jedi [required: >=0.16, installed: 0.18.2] +# - parso [required: >=0.8.0,<0.9.0, installed: 0.8.3] +# - matplotlib-inline [required: Any, installed: 0.1.6] +# - traitlets [required: Any, installed: 5.9.0] +# - pickleshare [required: Any, installed: 0.7.5] +# - prompt-toolkit [required: >=2.0.0,<3.1.0,!=3.0.1,!=3.0.0, installed: 3.0.38] +# - wcwidth [required: Any, installed: 0.2.6] +# - pygments [required: Any, installed: 2.15.1] +# - setuptools [required: >=18.5, installed: 47.1.0] +# - traitlets [required: >=4.2, installed: 5.9.0] +# - matplotlib [required: >=3.2.2, installed: 3.5.3] +# - cycler [required: >=0.10, installed: 0.11.0] +# - fonttools [required: >=4.22.0, installed: 4.38.0] +# - kiwisolver [required: >=1.0.1, installed: 1.4.4] +# - typing-extensions [required: Any, installed: 4.5.0] +# - numpy [required: >=1.17, installed: 1.21.6] +# - packaging [required: >=20.0, installed: 23.1] +# - pillow [required: >=6.2.0, installed: 9.5.0] +# - pyparsing [required: >=2.2.1, installed: 3.0.9] +# - python-dateutil [required: >=2.7, installed: 2.8.2] +# - six [required: >=1.5, installed: 1.16.0] +# - numpy [required: >=1.18.5, installed: 1.21.6] +# - opencv-python [required: >=4.1.1, installed: 4.7.0.72] +# - numpy [required: >=1.17.0, installed: 1.21.6] +# - pandas [required: >=1.1.4, installed: 1.3.5] +# - numpy [required: >=1.17.3, installed: 1.21.6] +# - python-dateutil [required: >=2.7.3, installed: 2.8.2] +# - six [required: >=1.5, installed: 1.16.0] +# - pytz [required: >=2017.3, installed: 2023.3] +# - Pillow [required: >=7.1.2, installed: 9.5.0] +# - psutil [required: Any, installed: 5.9.5] +# - PyYAML [required: >=5.3.1, installed: 6.0] +# - requests [required: >=2.23.0, installed: 2.30.0] +# - certifi [required: >=2017.4.17, installed: 2023.5.7] +# - charset-normalizer [required: >=2,<4, installed: 2.1.1] +# - idna [required: >=2.5,<4, installed: 3.4] +# - urllib3 [required: >=1.21.1,<3, installed: 1.26.15] +# - sahi [required: >=0.10.5, installed: 0.11.13] +# - click [required: ==8.0.4, installed: 8.0.4] +# - colorama [required: Any, installed: 0.4.6] +# - importlib-metadata [required: Any, installed: 6.6.0] +# - typing-extensions [required: >=3.6.4, installed: 4.5.0] +# - zipp [required: >=0.5, installed: 3.15.0] +# - fire [required: Any, installed: 0.5.0] +# - six [required: Any, installed: 1.16.0] +# - termcolor [required: Any, installed: 2.3.0] +# - 
opencv-python [required: >=4.2.0.32, installed: 4.7.0.72] +# - numpy [required: >=1.17.0, installed: 1.21.6] +# - pillow [required: >=8.2.0, installed: 9.5.0] +# - pybboxes [required: ==0.1.6, installed: 0.1.6] +# - numpy [required: Any, installed: 1.21.6] +# - pyyaml [required: Any, installed: 6.0] +# - requests [required: Any, installed: 2.30.0] +# - certifi [required: >=2017.4.17, installed: 2023.5.7] +# - charset-normalizer [required: >=2,<4, installed: 2.1.1] +# - idna [required: >=2.5,<4, installed: 3.4] +# - urllib3 [required: >=1.21.1,<3, installed: 1.26.15] +# - shapely [required: >=1.8.0, installed: 2.0.1] +# - numpy [required: >=1.14, installed: 1.21.6] +# - terminaltables [required: Any, installed: 3.1.10] +# - tqdm [required: >=4.48.2, installed: 4.65.0] +# - colorama [required: Any, installed: 0.4.6] +# - scipy [required: >=1.4.1, installed: 1.7.3] +# - numpy [required: >=1.16.5,<1.23.0, installed: 1.21.6] +# - seaborn [required: >=0.11.0, installed: 0.12.2] +# - matplotlib [required: >=3.1,!=3.6.1, installed: 3.5.3] +# - cycler [required: >=0.10, installed: 0.11.0] +# - fonttools [required: >=4.22.0, installed: 4.38.0] +# - kiwisolver [required: >=1.0.1, installed: 1.4.4] +# - typing-extensions [required: Any, installed: 4.5.0] +# - numpy [required: >=1.17, installed: 1.21.6] +# - packaging [required: >=20.0, installed: 23.1] +# - pillow [required: >=6.2.0, installed: 9.5.0] +# - pyparsing [required: >=2.2.1, installed: 3.0.9] +# - python-dateutil [required: >=2.7, installed: 2.8.2] +# - six [required: >=1.5, installed: 1.16.0] +# - numpy [required: >=1.17,!=1.24.0, installed: 1.21.6] +# - pandas [required: >=0.25, installed: 1.3.5] +# - numpy [required: >=1.17.3, installed: 1.21.6] +# - python-dateutil [required: >=2.7.3, installed: 2.8.2] +# - six [required: >=1.5, installed: 1.16.0] +# - pytz [required: >=2017.3, installed: 2023.3] +# - typing-extensions [required: Any, installed: 4.5.0] +# - tensorboard [required: >=2.4.1, installed: 2.11.2] +# - absl-py [required: >=0.4, installed: 1.4.0] +# - google-auth [required: >=1.6.3,<3, installed: 2.18.0] +# - cachetools [required: >=2.0.0,<6.0, installed: 5.3.0] +# - pyasn1-modules [required: >=0.2.1, installed: 0.3.0] +# - pyasn1 [required: >=0.4.6,<0.6.0, installed: 0.5.0] +# - rsa [required: >=3.1.4,<5, installed: 4.9] +# - pyasn1 [required: >=0.1.3, installed: 0.5.0] +# - six [required: >=1.9.0, installed: 1.16.0] +# - urllib3 [required: <2.0, installed: 1.26.15] +# - google-auth-oauthlib [required: >=0.4.1,<0.5, installed: 0.4.6] +# - google-auth [required: >=1.0.0, installed: 2.18.0] +# - cachetools [required: >=2.0.0,<6.0, installed: 5.3.0] +# - pyasn1-modules [required: >=0.2.1, installed: 0.3.0] +# - pyasn1 [required: >=0.4.6,<0.6.0, installed: 0.5.0] +# - rsa [required: >=3.1.4,<5, installed: 4.9] +# - pyasn1 [required: >=0.1.3, installed: 0.5.0] +# - six [required: >=1.9.0, installed: 1.16.0] +# - urllib3 [required: <2.0, installed: 1.26.15] +# - requests-oauthlib [required: >=0.7.0, installed: 1.3.1] +# - oauthlib [required: >=3.0.0, installed: 3.2.2] +# - requests [required: >=2.0.0, installed: 2.30.0] +# - certifi [required: >=2017.4.17, installed: 2023.5.7] +# - charset-normalizer [required: >=2,<4, installed: 2.1.1] +# - idna [required: >=2.5,<4, installed: 3.4] +# - urllib3 [required: >=1.21.1,<3, installed: 1.26.15] +# - grpcio [required: >=1.24.3, installed: 1.54.0] +# - markdown [required: >=2.6.8, installed: 3.4.3] +# - importlib-metadata [required: >=4.4, installed: 6.6.0] +# - typing-extensions [required: 
>=3.6.4, installed: 4.5.0] +# - zipp [required: >=0.5, installed: 3.15.0] +# - numpy [required: >=1.12.0, installed: 1.21.6] +# - protobuf [required: >=3.9.2,<4, installed: 3.20.3] +# - requests [required: >=2.21.0,<3, installed: 2.30.0] +# - certifi [required: >=2017.4.17, installed: 2023.5.7] +# - charset-normalizer [required: >=2,<4, installed: 2.1.1] +# - idna [required: >=2.5,<4, installed: 3.4] +# - urllib3 [required: >=1.21.1,<3, installed: 1.26.15] +# - setuptools [required: >=41.0.0, installed: 47.1.0] +# - tensorboard-data-server [required: >=0.6.0,<0.7.0, installed: 0.6.1] +# - tensorboard-plugin-wit [required: >=1.6.0, installed: 1.8.1] +# - werkzeug [required: >=1.0.1, installed: 2.2.3] +# - MarkupSafe [required: >=2.1.1, installed: 2.1.2] +# - wheel [required: >=0.26, installed: 0.40.0] +# - thop [required: >=0.1.1, installed: 0.1.1.post2209072238] +# - torch [required: Any, installed: 1.13.1] +# - typing-extensions [required: Any, installed: 4.5.0] +# - torch [required: >=1.7.0, installed: 1.13.1] +# - typing-extensions [required: Any, installed: 4.5.0] +# - torchvision [required: >=0.8.1, installed: 0.14.1] +# - numpy [required: Any, installed: 1.21.6] +# - pillow [required: >=5.3.0,!=8.3.*, installed: 9.5.0] +# - requests [required: Any, installed: 2.30.0] +# - certifi [required: >=2017.4.17, installed: 2023.5.7] +# - charset-normalizer [required: >=2,<4, installed: 2.1.1] +# - idna [required: >=2.5,<4, installed: 3.4] +# - urllib3 [required: >=1.21.1,<3, installed: 1.26.15] +# - torch [required: ==1.13.1, installed: 1.13.1] +# - typing-extensions [required: Any, installed: 4.5.0] +# - typing-extensions [required: Any, installed: 4.5.0] +# - tqdm [required: >=4.64.0, installed: 4.65.0] +# - colorama [required: Any, installed: 0.4.6] \ No newline at end of file diff --git a/src/modules/ObjectDetectionYolo/requirements.windows.cuda.txt b/src/modules/ObjectDetectionYolo/requirements.windows.cuda.txt index e5ba2f9d..b567b5d1 100644 --- a/src/modules/ObjectDetectionYolo/requirements.windows.cuda.txt +++ b/src/modules/ObjectDetectionYolo/requirements.windows.cuda.txt @@ -1,21 +1,28 @@ #! 
Python3.7 -Pandas # Installing Pandas, a data analysis / data manipulation tool -CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models -OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library -SciPy # Installing SciPy, a library for mathematics, science, and engineering -PyYAML # Installing PyYAML, a library for reading configuration files +Pandas # Installing Pandas, a data analysis / data manipulation tool +CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models +OpenCV-Python # Installing OpenCV, the Open source Computer Vision library +Pillow<10.0.0 # Installing Pillow, a Python Image Library +SciPy # Installing SciPy, a library for mathematics, science, and engineering +PyYAML # Installing PyYAML, a library for reading configuration files -# For CUDA: (Using v1.10 since 1.11 has an issue with UpSample Module Layer) +# https://pytorch.org/get-started/locally/ says to do this, but if you do, (a) Pandas used to get +# upset, and (b) this installs Torch 2.0 or 1.13 depending on the python version, and 2.0 has issues +# --index-url https://download.pytorch.org/whl/cu117 +# torch # Installing PyTorch, an open source machine learning framework +# --index-url https://download.pytorch.org/whl/cu117 +# torchvision # Installing TorchVision, for working with computer vision models + +## For CUDA 11.7 --find-links https://download.pytorch.org/whl/torch_stable.html -torch==1.10.2+cu113 # Installing PyTorch, an open source machine learning framework +torch==1.13.0+cu117 # Installing PyTorch, an open source machine learning framework --find-links https://download.pytorch.org/whl/torch_stable.html -torchvision==0.11.3+cu113 # Installing TorchVision, for working with computer vision models +torchvision==0.14.0+cu117 # Installing TorchVision, for working with computer vision models -yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images +yolov5==6.2.3 # Installing Ultralytics YoloV5 package for object detection in images # We need this, but we don't need this. -Seaborn # Installing Seaborn, a data visualization library based on matplotlib +Seaborn # Installing Seaborn, a data visualization library based on matplotlib # last line empty. \ No newline at end of file diff --git a/src/modules/ObjectDetectionYolo/test/pexels-huseyn-kamaladdin-667838.jpg b/src/modules/ObjectDetectionYolo/test/pexels-huseyn-kamaladdin-667838.jpg new file mode 100644 index 00000000..a314a8f4 Binary files /dev/null and b/src/modules/ObjectDetectionYolo/test/pexels-huseyn-kamaladdin-667838.jpg differ diff --git a/src/modules/ObjectDetectionYoloRKNN/install.sh b/src/modules/ObjectDetectionYoloRKNN/install.sh new file mode 100644 index 00000000..59c16dd5 --- /dev/null +++ b/src/modules/ObjectDetectionYoloRKNN/install.sh @@ -0,0 +1,102 @@ +# Development mode setup script :::::::::::::::::::::::::::::::::::::::::::::: +# +# ObjectDetection (Fast Deploy RKNN) +# +# This script is called from the ObjectDetectionFastDeployRKNN directory using: +# +# bash ../../setup.sh +# +# The setup.sh script will find this install.sh file and execute it. + +if [ "$1" != "install" ]; then + read -t 3 -p "This script is only called from: bash ../../setup.sh" + echo + exit 1 +fi + +# FastDeploy requires a version of GLIBC higher than what's in the RPi Ubuntu OS. +# We need to work around this - except this lib is tied hard to the OS.
Most likely +# solution is we use the standard FastDeploy libs for non-RockNPU hardware + +# systemName wasn't getting the correct name in a docker environment. This fixes it. +modelInfo=$(tr -d '\0' /venv. +# install-location - [optional] "Local" or "Shared" (see above) +# +# installPythonPackages Version requirements-file-directory +# Version - version number, as per SetupPython +# requirements-file-directory - directory containing the requirements.txt file +# install-location - [optional] "Local" (installed in the module's local venv) or +# "Shared" (installed in the shared $runtimesPath/bin venv folder) \ No newline at end of file diff --git a/src/modules/ObjectDetectionYoloRKNN/modulesettings.json b/src/modules/ObjectDetectionYoloRKNN/modulesettings.json new file mode 100644 index 00000000..d65dbb7e --- /dev/null +++ b/src/modules/ObjectDetectionYoloRKNN/modulesettings.json @@ -0,0 +1,167 @@ +{ + "Modules": { + "ObjectDetectionYoloRKNN": { + "Name": "Object Detection (YOLOv5 RKNN)", + "Version": "1.1", + + // Publishing info + "Description": "Provides Object Detection using YOLOv5 RKNN models. This module only works with Rockchip RK3588/RK3588S NPUs like the Orange Pi 5/5B/5 Plus", + "Platforms": [ "linux-arm64" ], // OrangePi, specifically + "License": "Apache-2.0", + "LicenseUrl": "https://opensource.org/licenses/Apache-2.0", + + // Which server version is compatible with each version of this module. + "ModuleReleases": [ + { "ModuleVersion": "1.0", "ServerVersionRange": ["2.1", ""], "ReleaseDate": "2023-08-06" }, + { "ModuleVersion": "1.1", "ServerVersionRange": ["2.1", ""], "ReleaseDate": "2023-08-06", "ReleaseNotes": "Corrected installer in docker environment" } + ], + + // Launch instructions + "AutoStart": true, + "FilePath": "objectdetection_fd_rknn_adapter.py", + "Runtime": "python39", + "RuntimeLocation": "Local", // Can be Local or Shared + + // These are all optional. Defaults are usually fine + "SupportGPU": true, + "AcceleratorDeviceName": null, // = default + "Parallelism": 1, // 0 = Default (number of CPUs - 1) + "HalfPrecision": "enable", // "Force", "Enable", "Disable": whether to force on, allow, or disable half-precision ops + "PostStartPauseSecs": 1, // 1 if using GPU, 0 for CPU + + // Deliberately not using the default queue: We make all Object detectors use the same queue. + "Queue": "objectdetection_queue", + + "EnvironmentVariables": { + "MODELS_DIR": "%CURRENT_MODULE_PATH%/assets", + "CUSTOM_MODELS_DIR": "%CURRENT_MODULE_PATH%/custom-models", + "MODEL_SIZE": "Small" + }, + + "RouteMaps": [ + { + "Name": "Object Detector", + "Path": "vision/detection", + "Method": "POST", + "Command": "detect", + "Description": "Detects multiple objects in an image.", + "Inputs": [ + { + "Name": "image", + "Type": "File", + "Description": "The HTTP file object (image) to be analyzed." + }, + { + "Name": "min_confidence", + "Type": "Float", + "Description": "The minimum confidence level for an object will be detected. In the range 0.0 to 1.0. Default 0.3.", + "DefaultValue": 0.30, + "MinValue": 0.0, + "MaxValue": 1.0 + } + ], + "Outputs": [ + { + "Name": "success", + "Type": "Boolean", + "Description": "True if successful." + }, + { + "Name": "predictions", + "Type": "Object", + "Description": "An array of objects with the x_max, x_min, max, y_min, label and confidence." + }, + { + "Name": "inferenceMs", + "Type": "Integer", + "Description": "The time (ms) to perform the AI inference." 
+ }, + { + "Name": "processMs", + "Type": "Integer", + "Description": "The time (ms) to process the image (includes inference and image manipulation operations)." + }, + { + "Name": "analysisRoundTripMs", + "Type": "Integer", + "Description": "The time (ms) for the round trip to the analysis module and back." + } + ] + }, + + { + "Name": "Custom Object Detector RKNN", + "Path": "vision/custom", + "Method": "POST", + "Command": "custom", + "Description": "Detects objects based on YOLO PyTorch models. Models are stored as .rknn files in the /ObjectDetectionYoloRKNN/custom-models directory, and to make a call to a specific model use /vision/custom/model-name, where 'model-name' is the name of the model's .rknn file", + "Inputs": [ + { + "Name": "image", + "Type": "File", + "Description": "The HTTP file object (image) to be analyzed." + }, + { + "Name": "min_confidence", + "Type": "Float", + "Description": "The minimum confidence level for an object will be detected. In the range 0.0 to 1.0. Default 0.4." + } + ], + "Outputs": [ + { + "Name": "success", + "Type": "Boolean", + "Description": "True if successful." + }, + { + "Name": "predictions", + "Type": "Object[]", + "Description": "An array of objects with the x_max, x_min, max, y_min, label and confidence." + }, + { + "Name": "inferenceMs", + "Type": "Integer", + "Description": "The time (ms) to perform the AI inference." + }, + { + "Name": "processMs", + "Type": "Integer", + "Description": "The time (ms) to process the image (includes inference and image manipulation operations)." + }, + { + "Name": "analysisRoundTripMs", + "Type": "Integer", + "Description": "The time (ms) for the round trip to the analysis module and back." + } + ] + }, + + { + "Name": "Object Detector List Custom Models (RKNN)", + "Path": "vision/custom/list", + "Method": "POST", + "Command": "list-custom", + "Description": "Returns a list of models available.", + "Inputs": [], + "Outputs": [ + { + "Name": "success", + "Type": "Boolean", + "Description": "True if successful." + }, + { + "Name": "models", + "Type": "String", + "Description": "An array of strings containing the names of the models installed." + } + ] + } + ] + } + } +} + + + + + diff --git a/src/modules/ObjectDetectionYoloRKNN/objectdetection_fd_rknn.py b/src/modules/ObjectDetectionYoloRKNN/objectdetection_fd_rknn.py new file mode 100644 index 00000000..4f81bde0 --- /dev/null +++ b/src/modules/ObjectDetectionYoloRKNN/objectdetection_fd_rknn.py @@ -0,0 +1,181 @@ +""" + Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+""" + +import os +from os.path import exists +import sys +import time +from threading import Lock + +from numpy import array +from PIL import UnidentifiedImageError + +from module_logging import LogMethod +from options import Options + +# import fastdeploy as fd # rknn +from fastdeploy import RuntimeOption, vision, ModelFormat +from utils.tools import resize_image, convert_bounding_boxes, count_labels, extract_label_from_file + +# Setup a global bucket of YOLO detectors. One for each model +detectors = {} # We'll use this to cache the detectors based on models +models_lock = Lock() +max_size = None + + +def init_detect(opts: Options) -> None: + + global max_size + max_size = opts.resolution + +def get_detector(module_runner, models_dir: str, model_name: str) -> any: + + """ + We have a detector for each custom model. Lookup the detector, or if it's + not found, create a new one and add it to our lookup. + """ + + detector = detectors.get(model_name, None) + + if detector is None: + + with models_lock: + detector = detectors.get(model_name, None) + + if detector is None: + + model_path = os.path.join(models_dir, model_name + ".rknn") + label_path = os.path.join(models_dir, model_name + ".txt") + + if exists(model_path): + + try: + runtime_option = RuntimeOption() + runtime_option.use_rknpu2() + + detector = vision.detection.RKYOLOV5(model_path, + runtime_option=runtime_option, + model_format=ModelFormat.RKNN) + + detector.postprocessor.class_num = count_labels(label_path) + + detectors[model_name] = detector + + module_runner.log(LogMethod.Server, + { + "filename": __file__, + "method": sys._getframe().f_code.co_name, + "loglevel": "debug", + "message": f"Model Path is {model_path}" + }) + + except Exception as ex: + module_runner.report_error(ex, __file__, f"Unable to load model at {model_path} ({str(ex)})") + detector = None + + else: + module_runner.report_error(None, __file__, f"{model_path} does not exist") + + return detector + + +def do_detect(module_runner, models_dir, model_name, img: any, score_threshold: float):# rknn + + create_err_msg = f"Unable to create YOLO detector for model {model_name}" + + start_process_time = time.perf_counter() + + try: + detector = get_detector(module_runner, models_dir, model_name) + + except Exception as ex: + create_err_msg = f"{create_err_msg} ({str(ex)})" + + if detector is None: + module_runner.report_error(None, __file__, create_err_msg) + return { "success": False, "error": create_err_msg } + + # We have a detector for this model, so let's go ahead and detect + try: + + # Predicting Image Results + im = array(img) + + # Resize the image to a maximum size of 640 + resized_image, x_scaling_factor, y_scaling_factor = resize_image(im, max_size) + + start_inference_time = time.perf_counter() + result = detector.predict(resized_image, conf_threshold=score_threshold, nms_iou_threshold=0.45) + inferenceMs = int((time.perf_counter() - start_inference_time) * 1000) + + result = str(result) + lines = result.strip().split("\n") + + label_path = os.path.join(models_dir, model_name + ".txt") + outputs = [] + + for line in lines[1:]: + # Split the line by comma to get a list of values + values = line.split(",") + values = [x.strip(' ') for x in values] + + box = values[0], values[1], values[2], values[3] + + box = convert_bounding_boxes(box, x_scaling_factor, y_scaling_factor) + + # Convert the values to appropriate data types + xmin = int(float(box[0])) + ymin = int(float(box[1])) + xmax = int(float(box[2])) + ymax = int(float(box[3])) + score = 
diff --git a/src/modules/ObjectDetectionYoloRKNN/objectdetection_fd_rknn_adapter.py b/src/modules/ObjectDetectionYoloRKNN/objectdetection_fd_rknn_adapter.py
new file mode 100644
index 00000000..1a121023
--- /dev/null
+++ b/src/modules/ObjectDetectionYoloRKNN/objectdetection_fd_rknn_adapter.py
@@ -0,0 +1,117 @@
+# Import our general libraries
+import os
+import sys
+from time import time
+
+# Import the CodeProject.AI SDK. This will add to the PATH var for future imports
+sys.path.append("../../SDK/Python")
+from common import JSON
+from request_data import RequestData
+from module_runner import ModuleRunner
+from module_logging import LogMethod
+
+# Import the options and image helpers for the module we're wrapping
+from options import Options
+from PIL import Image
+
+# Import the methods of the module we're wrapping
+from objectdetection_fd_rknn import init_detect, do_detect
+
+
+class FastDeploy_adapter(ModuleRunner):
+
+    def __init__(self):
+        super().__init__()
+        self.opts                = Options()
+        self.models_last_checked = None
+        self.model_names         = []  # We'll use this to cache the available model names
+
+    def initialise(self) -> None:
+        # If the module was launched outside of the server then the queue name
+        # wasn't set. This is normally fine, but here we want the queue to be
+        # the same as the other object detection queues
+        if not self.launched_by_server:
+            self.queue_name = "objectdetection_queue"
+
+        if self.support_GPU:
+            self.support_GPU = self.hasFastDeployRockNPU
+
+        if self.support_GPU:
+            print("Rockchip NPU detected")
+            self.execution_provider = "RKNPU"
+
+        init_detect(self.opts)
+
+    def process(self, data: RequestData) -> JSON:
+
+        response = None
+
+        # The route to here is /v1/vision/custom/list. List all models available
+        if data.command == "list-custom":
+            response = self.list_models(self.opts.custom_models_dir)
+
+        elif data.command == "detect":          # Perform 'standard' object detection
+
+            # The route to here is /v1/vision/detection
+            threshold: float = float(data.get_value("min_confidence", self.opts.min_confidence))
+            img: Image       = data.get_image(0)
+
+            response = do_detect(self, self.opts.models_dir, self.opts.std_model_name, img, threshold)
+
+        elif data.command == "custom":          # Perform custom object detection
+
+            threshold: float = float(data.get_value("min_confidence", self.opts.min_confidence))
+            img: Image       = data.get_image(0)
+
+            # The route to here is /v1/vision/custom/<model-name>. If model-name
+            # is 'general', or no model is provided, then a built-in general
+            # purpose model will be used.
+            models_dir: str = self.opts.custom_models_dir
+            model_name: str = "general"
+            if data.segments and data.segments[0]:
+                model_name = data.segments[0]
+
+            # Map the "general" model to our current "general" model
+            if model_name == "general":         # Use the custom IP Cam general model
+                models_dir = self.opts.custom_models_dir
+                model_name = "ipcam-general-small"
+
+            self.log(LogMethod.Info | LogMethod.Server,
+                     {
+                         "filename": __file__,
+                         "loglevel": "information",
+                         "method": sys._getframe().f_code.co_name,
+                         "message": f"Detecting using {model_name}"
+                     })
+
+            response = do_detect(self, models_dir, model_name, img, threshold)
+
+        else:
+            self.report_error(None, __file__, f"Unknown command {data.command}")
+            response = { "success": False, "error": "unsupported command" }
+
+        return response
+
+    def list_models(self, models_path: str):
+        """
+        Lists the custom models we have in the models folder. This ignores the
+        yolo* files.
+        """
+        # We'll only refresh the list of models at most once a minute
+        if self.models_last_checked is None or (time() - self.models_last_checked) >= 60:
+            self.model_names = [entry.name[:-5] for entry in os.scandir(models_path)
+                                if (entry.is_file()
+                                    and entry.name.endswith(".rknn")
+                                    and not entry.name.startswith("yolov5"))]
+            self.models_last_checked = time()
+
+        return { "success": True, "models": self.model_names }
+
+
+if __name__ == "__main__":
+    FastDeploy_adapter().start_loop()
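To make the adapter's routing concrete, this small sketch (function name and paths hypothetical) mirrors how process() maps a request URL to a custom model file: a request to /v1/vision/custom/license-plate arrives with data.segments == ["license-plate"], so the adapter looks for license-plate.rknn in the custom models directory.

    from os.path import join

    def resolve_model(segments, custom_models_dir):
        # Default to "general" when no model name is present in the URL
        model_name = segments[0] if segments and segments[0] else "general"
        if model_name == "general":          # mapped to the IP cam general model
            model_name = "ipcam-general-small"
        return model_name, join(custom_models_dir, model_name + ".rknn")

    print(resolve_model(["license-plate"], "custom-models"))
    # -> ('license-plate', 'custom-models/license-plate.rknn')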
diff --git a/src/modules/ObjectDetectionYoloRKNN/options.py b/src/modules/ObjectDetectionYoloRKNN/options.py
new file mode 100644
index 00000000..a3ab056e
--- /dev/null
+++ b/src/modules/ObjectDetectionYoloRKNN/options.py
@@ -0,0 +1,63 @@
+import os
+from module_options import ModuleOptions
+
+class Settings:
+    def __init__(self, RESOLUTION, STD_MODEL_NAME):
+        self.RESOLUTION     = RESOLUTION
+        self.STD_MODEL_NAME = STD_MODEL_NAME
+
+
+class Options:
+
+    def __init__(self):
+
+        # -------------------------------------------------------------------
+        # Setup constants
+
+        # Models at https://github.com/MikeLud/CodeProject.AI-Custom-IPcam-Models/tree/main/RKNN_Models/yolov5
+        self.MODEL_SETTINGS = {
+            # Large:  yolov5-large  80 objects, COCO 640x640x3 RKNN-2
+            "large":  Settings(640, 'yolov5-large'),
+            # Medium: yolov5-medium 80 objects, COCO 640x640x3 RKNN-2
+            "medium": Settings(640, 'yolov5-medium'),
+            # Small:  yolov5-small  80 objects, COCO 640x640x3 RKNN-2
+            "small":  Settings(640, 'yolov5-small'),
+            # Tiny:   yolov5-tiny   80 objects, COCO 640x640x3 RKNN-2
+            "tiny":   Settings(640, 'yolov5-tiny')
+        }
+
+        self.NUM_THREADS    = 1
+        self.MIN_CONFIDENCE = 0.30
+
+        # -------------------------------------------------------------------
+        # Setup values
+
+        self._show_env_variables = True
+
+        self.module_path       = ModuleOptions.module_path
+        self.models_dir        = os.path.normpath(ModuleOptions.getEnvVariable("MODELS_DIR", f"{self.module_path}/assets"))
+        self.model_size        = ModuleOptions.getEnvVariable("MODEL_SIZE", "Small").lower()   # tiny, small, medium, large
+        self.custom_models_dir = os.path.normpath(ModuleOptions.getEnvVariable("CUSTOM_MODELS_DIR", f"{self.module_path}/custom-models"))
+
+        self.num_threads    = int(ModuleOptions.getEnvVariable("NUM_THREADS", self.NUM_THREADS))
+        self.min_confidence = float(ModuleOptions.getEnvVariable("MIN_CONFIDENCE", self.MIN_CONFIDENCE))
+
+        self.sleep_time = 0.01
+
+        if self.model_size not in [ "tiny", "small", "medium", "large" ]:
+            self.model_size = "small"
+
+        # Get settings
+        settings            = self.MODEL_SETTINGS[self.model_size]
+        self.resolution     = settings.RESOLUTION
+        self.std_model_name = settings.STD_MODEL_NAME
+
+        # -------------------------------------------------------------------
+        # Dump the important variables
+
+        if self._show_env_variables:
+            print(f"MODULE_PATH:       {self.module_path}")
+            print(f"MODELS_DIR:        {self.models_dir}")
+            print(f"CUSTOM_MODELS_DIR: {self.custom_models_dir}")
+            print(f"MODEL_SIZE:        {self.model_size}")
+            print(f"STD_MODEL_NAME:    {self.std_model_name}")
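As a usage sketch, the module's resolution and default model follow from the MODEL_SIZE environment variable. This hypothetical snippet mirrors the lookup performed by Options above (the tuple form of the settings table is a simplification of the Settings class):

    import os

    MODEL_SETTINGS = {                      # mirrors Options.MODEL_SETTINGS
        "large":  (640, "yolov5-large"),
        "medium": (640, "yolov5-medium"),
        "small":  (640, "yolov5-small"),
        "tiny":   (640, "yolov5-tiny"),
    }

    size = os.getenv("MODEL_SIZE", "Small").lower()
    if size not in MODEL_SETTINGS:
        size = "small"                      # fall back to the default size

    resolution, std_model_name = MODEL_SETTINGS[size]
    print(resolution, std_model_name)       # 640 yolov5-small by default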
diff --git a/src/modules/ObjectDetectionYoloRKNN/package.bat b/src/modules/ObjectDetectionYoloRKNN/package.bat
new file mode 100644
index 00000000..a210f08c
--- /dev/null
+++ b/src/modules/ObjectDetectionYoloRKNN/package.bat
@@ -0,0 +1,10 @@
+@Echo off
+REM Module Packaging script. To be called from create_packages.bat
+
+set moduleId=%~1
+set version=%~2
+
+REM NOTE: No install.bat is included, since this module doesn't run on Windows
+
+tar -caf %moduleId%-%version%.zip --exclude=__pycache__ --exclude=*.development.* --exclude=*.log ^
+    utils\* *.py modulesettings.* requirements.* install.sh
\ No newline at end of file
diff --git a/src/modules/ObjectDetectionYoloRKNN/requirements.linux.arm64.txt b/src/modules/ObjectDetectionYoloRKNN/requirements.linux.arm64.txt
new file mode 100644
index 00000000..40e8b058
--- /dev/null
+++ b/src/modules/ObjectDetectionYoloRKNN/requirements.linux.arm64.txt
@@ -0,0 +1,14 @@
+#! Python3.9
+
+# Note that this wheel was built on, and for, an Orange Pi. It will not run on a Raspberry Pi
+https://github.com/MikeLud/CodeProject.AI-Custom-IPcam-Models/raw/main/RKNN_Models/fastdeploy_python-1.4.2b0-cp39-cp39-linux_aarch64.whl   # Installing the RK NPU specific FastDeploy 1.4
+
+# numpy>=1.16.0             # Installing NumPy, the fundamental package for array computing with Python.
+numpy                       # Installing NumPy, a package for scientific computing
+Pillow>=4.0.0               # Installing Pillow, a Python Image Library
+
+#OpenCV-Python              # Installing OpenCV, the Open source Computer Vision library
+# See https://raspberrypi-guide.github.io/programming/install-opencv
+opencv-python==4.5.3.56     # Installing OpenCV, the Open source Computer Vision library
+
+# last line empty test
\ No newline at end of file
diff --git a/src/modules/ObjectDetectionYoloRKNN/utils/tools.py b/src/modules/ObjectDetectionYoloRKNN/utils/tools.py
new file mode 100644
index 00000000..9617bd76
--- /dev/null
+++ b/src/modules/ObjectDetectionYoloRKNN/utils/tools.py
@@ -0,0 +1,97 @@
+import cv2
+import numpy as np
+
+stored_lines = {}   # Dictionary to store the label lines for each file_path
+
+
+def resize_image(img, max_size):
+    """
+    Letterbox-resizes img onto a max_size x max_size canvas, preserving aspect
+    ratio, and returns the canvas plus the x and y scaling factors needed to
+    map detections back to the original image.
+    """
+    orig_height, orig_width, _ = img.shape
+    target_shape = (max_size, max_size)
+
+    # Calculate the aspect ratio of the image
+    img_aspect_ratio = orig_width / orig_height
+
+    # Calculate the target width and height while maintaining aspect ratio
+    if target_shape[1] / target_shape[0] > img_aspect_ratio:
+        target_width  = int(target_shape[0] * img_aspect_ratio)
+        target_height = target_shape[0]
+    else:
+        target_width  = target_shape[1]
+        target_height = int(target_shape[1] / img_aspect_ratio)
+
+    # Resize the image to the target dimensions
+    resized_img = cv2.resize(img, (target_width, target_height), interpolation=cv2.INTER_AREA)
+
+    # Create the output image with the target shape
+    output_img = np.zeros((*target_shape, 3), dtype=np.uint8)
+
+    # Embed the resized image into the output image
+    output_img[:target_height, :target_width] = resized_img
+
+    # Calculate the empty dimensions
+    empty_height = target_shape[0] - target_height
+    empty_width  = target_shape[1] - target_width
+
+    # Calculate the scaling factors
+    y_scaling_factor = (max_size - empty_height) / orig_height
+    x_scaling_factor = (max_size - empty_width)  / orig_width
+
+    # Convert color space
+    output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2RGB)
+
+    return output_img, x_scaling_factor, y_scaling_factor
+
+
+def convert_bounding_boxes(bounding_boxes, x_scaling_factor, y_scaling_factor):
+    """ Maps a box from the resized image back to original image coordinates. """
+    xmin, ymin, xmax, ymax = bounding_boxes
+
+    # Convert bounding box coordinates to the original image size
+    xmin = int(float(xmin) / x_scaling_factor)
+    ymin = int(float(ymin) / y_scaling_factor)
+    xmax = int(float(xmax) / x_scaling_factor)
+    ymax = int(float(ymax) / y_scaling_factor)
+
+    return xmin, ymin, xmax, ymax
+
+
+def count_labels(file_path):
+    """ Returns the number of labels (lines) in a label file. """
+    with open(file_path, 'r') as file:
+        label_count = sum(1 for line in file)
+    return label_count
+
+
+def read_file(file_path):
+    """ Reads and caches the lines of a label file. """
+    global stored_lines
+    if file_path not in stored_lines:
+        with open(file_path, 'r') as file:
+            stored_lines[file_path] = file.readlines()
+
+
+def extract_label_from_file(line_number, file_path):
+    """
+    Extracts the label from the stored lines at the given line number.
+
+    Args:
+        line_number (int): The line number to extract the label from.
+        file_path (str): The path to the text file.
+
+    Returns:
+        str: The extracted label.
+ """ + global stored_lines + if file_path not in stored_lines: + read_file(file_path) + + lines = stored_lines.get(file_path) + if lines is not None: + if line_number >= 0 and line_number < len(lines): + label = lines[line_number].strip() + return label + else: + # print(f"Line number {line_number} is out of range.") + return None diff --git a/src/modules/PortraitFilter/Lib/DeepPersonLab.cs b/src/modules/PortraitFilter/Lib/DeepPersonLab.cs index f18943c0..67adc477 100644 --- a/src/modules/PortraitFilter/Lib/DeepPersonLab.cs +++ b/src/modules/PortraitFilter/Lib/DeepPersonLab.cs @@ -29,10 +29,10 @@ public DeepPersonLab(string modelPath, SessionOptions? sessionOptions = null) { sessionOptions = sessionOptions ?? new SessionOptions(); - var tic = Environment.TickCount; + var tickCount = Environment.TickCount; // Console.WriteLine("Starting inference session..."); _session = new InferenceSession(modelPath, sessionOptions); - // Console.WriteLine($"Session started in {Environment.TickCount - tic} mls."); + // Console.WriteLine($"Session started in {Environment.TickCount - tickCount} ms."); } /// /// Returns segmentation mask. @@ -45,33 +45,33 @@ public DeepPersonLab(string modelPath, SessionOptions? sessionOptions = null) public Bitmap Fit(Bitmap image) { // scaling image - var width = image.Width; + var width = image.Width; var height = image.Height; - var ratio = 1.0f * _size / Math.Max(width, height); - var size = new Size( + var ratio = 1.0f * _size / Math.Max(width, height); + var size = new Size( (int)(ratio * width), (int)(ratio * height)); var resized = new Bitmap(image, size); // creating tensor // Console.WriteLine("Creating image tensor..."); - var tic = Environment.TickCount; - var inputMeta = _session.InputMetadata; - var name = inputMeta.Keys.ToArray()[0]; + var tickCount = Environment.TickCount; + var inputMeta = _session.InputMetadata; + var name = inputMeta.Keys.ToArray()[0]; var dimentions = new int[] { 1, size.Height, size.Width, 3 }; - var inputData = Onnx.ToTensor(resized); + var inputData = Onnx.ToTensor(resized); resized.Dispose(); - // Console.WriteLine($"Tensor was created in {Environment.TickCount - tic} mls."); + // Console.WriteLine($"Tensor was created in {Environment.TickCount - tickCount} ms."); // prediction // Console.WriteLine("Creating segmentation mask..."); - tic = Environment.TickCount; - var t1 = new DenseTensor(inputData, dimentions); - var inputs = new List() { NamedOnnxValue.CreateFromTensor(name, t1) }; + tickCount = Environment.TickCount; + var t1 = new DenseTensor(inputData, dimentions); + var inputs = new List() { NamedOnnxValue.CreateFromTensor(name, t1) }; var results = _session.Run(inputs).ToArray(); - var map = results[0].AsTensor().ToArray(); - var mask = DeepPersonLab.FromSegmentationMap(map, size.Width, size.Height); - // Console.WriteLine($"Segmentation was created in {Environment.TickCount - tic} mls."); + var map = results[0].AsTensor().ToArray(); + var mask = DeepPersonLab.FromSegmentationMap(map, size.Width, size.Height); + // Console.WriteLine($"Segmentation was created in {Environment.TickCount - tickCount} ms."); // return mask return new Bitmap(mask, width, height); diff --git a/src/modules/PortraitFilter/PortraitFilter.csproj b/src/modules/PortraitFilter/PortraitFilter.csproj index dcb8361d..2978b8b9 100644 --- a/src/modules/PortraitFilter/PortraitFilter.csproj +++ b/src/modules/PortraitFilter/PortraitFilter.csproj @@ -13,7 +13,7 @@ PortraitFilter CodeProject.AI.Modules.PortraitFilter PortraitFilter - 2.1.0 + 1.3 CodeProject true 
enable @@ -87,6 +87,7 @@ --> + diff --git a/src/modules/PortraitFilter/PortraitFilterWorker.cs b/src/modules/PortraitFilter/PortraitFilterWorker.cs index c21d8a40..6dc89360 100644 --- a/src/modules/PortraitFilter/PortraitFilterWorker.cs +++ b/src/modules/PortraitFilter/PortraitFilterWorker.cs @@ -8,8 +8,8 @@ using Microsoft.Extensions.Logging; using Microsoft.ML.OnnxRuntime; -using SkiaSharp; using SkiaSharp.Views.Desktop; + using CodeProject.AI.SDK.Common; using CodeProject.AI.SDK; @@ -57,6 +57,67 @@ public PortraitFilterWorker(ILogger logger, } } + /// + /// The work happens here. + /// + /// The request. + /// The response. + protected override BackendResponseBase ProcessRequest(BackendRequest request) + { + if (_deepPersonLab == null) + return new BackendErrorResponse($"{ModuleName} missing _deepPersonLab object."); + + // ignoring the file name + var file = request.payload?.files?.FirstOrDefault(); + var strengthStr = request.payload?.GetValue("strength", "0.5"); + + if (!float.TryParse(strengthStr, out var strength)) + strength = 0.5f; + + if (file?.data is null) + return new BackendErrorResponse("Portrait Filter File or file data is null."); + + Logger.LogInformation($"Processing {file.filename}"); + + Stopwatch sw = Stopwatch.StartNew(); + + // dummy result + byte[]? result = null; + + try + { + var portraitModeFilter = new PortraitModeFilter(strength); + + byte[]? imageData = file.data; + Bitmap? image = ImageUtils.GetImage(imageData)?.ToBitmap(); + + if (image is null) + return new BackendErrorResponse("Portrait Filter unable to get image from file data."); + + Stopwatch stopWatch = Stopwatch.StartNew(); + Bitmap mask = _deepPersonLab.Fit(image); + stopWatch.Stop(); + + if (mask is not null) + { + Bitmap? filteredImage = portraitModeFilter.Apply(image, mask); + result = ImageToByteArray(filteredImage); + } + } + catch (Exception ex) + { + return new BackendErrorResponse($"Portrait Filter Error for {file.filename}: {ex.Message}."); + } + + if (result is null) + return new BackendErrorResponse("Portrait Filter returned null."); + + return new PortraitResponse { + filtered_image = result, + inferenceMs = sw.ElapsedMilliseconds + }; + } + private SessionOptions GetSessionOptions() { var sessionOpts = new SessionOptions(); @@ -128,89 +189,6 @@ private SessionOptions GetSessionOptions() return sessionOpts; } - /// - /// Sniff the hardware in use so we can report to the API server. This method is empty - /// since we already sniffed hardware in GetSessionOptions. - /// - protected async override void GetHardwareInfo() - { - await System.Threading.Tasks.Task.Delay(0); - } - - /// - /// The work happens here. - /// - /// The request. - /// The response. - public override BackendResponseBase ProcessRequest(BackendRequest request) - { - if (_deepPersonLab == null) - return new BackendErrorResponse($"{ModuleName} missing _deepPersonLab object."); - - // ignoring the file name - var file = request.payload?.files?.FirstOrDefault(); - var strengthStr = request.payload?.GetValue("strength", "0.5"); - - if (!float.TryParse(strengthStr, out var strength)) - strength = 0.5f; - - if (file?.data is null) - return new BackendErrorResponse("Portrait Filter File or file data is null."); - - Logger.LogInformation($"Processing {file.filename}"); - - Stopwatch sw = Stopwatch.StartNew(); - - // dummy result - byte[]? result = null; - - try - { - var portraitModeFilter = new PortraitModeFilter(strength); - - byte[]? imageData = file.data; - Bitmap? 
image = GetImage(imageData); - - if (image is null) - return new BackendErrorResponse("Portrait Filter unable to get image from file data."); - - Stopwatch stopWatch = Stopwatch.StartNew(); - Bitmap mask = _deepPersonLab.Fit(image); - stopWatch.Stop(); - - if (mask is not null) - { - Bitmap? filteredImage = portraitModeFilter.Apply(image, mask); - result = ImageToByteArray(filteredImage); - } - } - catch (Exception ex) - { - return new BackendErrorResponse($"Portrait Filter Error for {file.filename}: {ex.Message}."); - } - - if (result is null) - return new BackendErrorResponse("Portrait Filter returned null."); - - return new PortraitResponse { - filtered_image = result, - inferenceMs = sw.ElapsedMilliseconds - }; - } - - // Using SkiaSharp as it handles more formats. - private static Bitmap? GetImage(byte[] imageData) - { - if (imageData == null) - return null; - - var skiaImage = SKImage.FromEncodedData(imageData); - if (skiaImage is null) - return null; - - return skiaImage.ToBitmap(); - } - public static byte[]? ImageToByteArray(Image img) { if (img is null) @@ -228,6 +206,6 @@ public override BackendResponseBase ProcessRequest(BackendRequest request) #pragma warning restore CA1416 // Validate platform compatibility return stream.ToArray(); - } + } } } \ No newline at end of file diff --git a/src/modules/PortraitFilter/Program.cs b/src/modules/PortraitFilter/Program.cs index 216b56a0..34b31c0c 100644 --- a/src/modules/PortraitFilter/Program.cs +++ b/src/modules/PortraitFilter/Program.cs @@ -10,4 +10,6 @@ }) .Build(); +#pragma warning disable CA2007 // Consider calling ConfigureAwait on the awaited task await host.RunAsync(); +#pragma warning restore CA2007 // Consider calling ConfigureAwait on the awaited task diff --git a/src/modules/PortraitFilter/install.bat b/src/modules/PortraitFilter/install.bat index b45c3689..4f16f119 100644 --- a/src/modules/PortraitFilter/install.bat +++ b/src/modules/PortraitFilter/install.bat @@ -26,6 +26,7 @@ :: os - "windows" :: architecture - "x86_64" or "arm64" :: platform - "windows" or "windows-arm64" +:: systemName - "Windows" :: verbosity - quiet, info or loud. Use this to determines the noise level of output. :: forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. :: GetFromServer will honour this value. Do it yourself for DownloadAndExtract diff --git a/src/modules/PortraitFilter/install.sh b/src/modules/PortraitFilter/install.sh index 206bd4e6..8951e05a 100644 --- a/src/modules/PortraitFilter/install.sh +++ b/src/modules/PortraitFilter/install.sh @@ -21,7 +21,7 @@ fi # Variables available: # # absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI) -# sdkScriptsPath - the path to the installation utility scripts ($rootPath/Installers) +# sdkScriptsPath - the path to the installation utility scripts ($rootPath/SDK/Scripts) # downloadPath - the path to where downloads will be stored ($sdkScriptsPath/downloads) # runtimesPath - the path to the installed runtimes ($rootPath/src/runtimes) # modulesPath - the path to all the AI modules ($rootPath/src/modules) @@ -30,6 +30,8 @@ fi # os - "linux" or "macos" # architecture - "x86_64" or "arm64" # platform - "linux", "linux-arm64", "macos" or "macos-arm64" +# systemName - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi" +# "Jetson" or "Docker" # verbosity - quiet, info or loud. Use this to determines the noise level of output. # forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. 
# getFromServer will honour this value. Do it yourself for downloadAndExtract diff --git a/src/modules/PortraitFilter/modulesettings.json b/src/modules/PortraitFilter/modulesettings.json index bdef3688..81ce7816 100644 --- a/src/modules/PortraitFilter/modulesettings.json +++ b/src/modules/PortraitFilter/modulesettings.json @@ -4,7 +4,7 @@ "Modules": { "PortraitFilter": { "Name": "Portrait Filter", - "Version": "1.1", + "Version": "1.4", "Description": "Provides a depth-of-field (bokeh) effect on images. Great for selfies.", "Platforms": [ "windows" ], // errors with Microsoft.ML.OnnxRuntime.NativeMethods in macOS, and System.Drawing issues in Linux @@ -12,9 +12,12 @@ "LicenseUrl": "https://opensource.org/licenses/MIT", // Which server version is compatible with each version of this module. - "VersionCompatibililty": [ - // { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-06-01" }, - { "ModuleVersion": "1.1", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-20" } + "ModuleReleases": [ + { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-06-01" }, + { "ModuleVersion": "1.1", "ServerVersionRange": [ "2.1", "2.1.6" ], "ReleaseDate": "2023-03-20", "ReleaseNotes": "Updated for CodeProject.AI Server 2.1" }, + { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "2.1.7" ], "ReleaseDate": "2023-04-20", "ReleaseNotes": "Updated launch command" }, + { "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1", "2.1.8" ], "ReleaseDate": "2023-05-03", "ReleaseNotes": "Minor module initialisation changes" }, + { "ModuleVersion": "1.4", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-05-17", "ReleaseNotes": "Updated module settings", "Importance": "Minor" } ], // Launch instructions diff --git a/src/modules/PortraitFilter/modulesettings.linux.development.json b/src/modules/PortraitFilter/modulesettings.linux.development.json index f0f6cedf..87c56aaf 100644 --- a/src/modules/PortraitFilter/modulesettings.linux.development.json +++ b/src/modules/PortraitFilter/modulesettings.linux.development.json @@ -1,8 +1,7 @@ { "Modules": { "PortraitFilter": { - "FilePath": "bin/Debug/net7.0/PortraitFilter.dll", - "WorkingDirectory": "PortraitFilter" + "FilePath": "bin/Debug/net7.0/PortraitFilter.dll" } } } diff --git a/src/modules/PortraitFilter/modulesettings.linux.json b/src/modules/PortraitFilter/modulesettings.linux.json index 6be81ac6..fcf9c61b 100644 --- a/src/modules/PortraitFilter/modulesettings.linux.json +++ b/src/modules/PortraitFilter/modulesettings.linux.json @@ -1,7 +1,8 @@ { "Modules": { "PortraitFilter": { - "FilePath": "PortraitFilter" + "Runtime": "dotnet", + "FilePath": "PortraitFilter.dll" } } } diff --git a/src/modules/PortraitFilter/modulesettings.macos.development.json b/src/modules/PortraitFilter/modulesettings.macos.development.json index f196a283..87c56aaf 100644 --- a/src/modules/PortraitFilter/modulesettings.macos.development.json +++ b/src/modules/PortraitFilter/modulesettings.macos.development.json @@ -1,7 +1,7 @@ { "Modules": { "PortraitFilter": { - "FilePath": "bin/Debug/net7.0/PortraitFilter" + "FilePath": "bin/Debug/net7.0/PortraitFilter.dll" } } } diff --git a/src/modules/PortraitFilter/modulesettings.macos.json b/src/modules/PortraitFilter/modulesettings.macos.json new file mode 100644 index 00000000..fcf9c61b --- /dev/null +++ b/src/modules/PortraitFilter/modulesettings.macos.json @@ -0,0 +1,8 @@ +{ + "Modules": { + "PortraitFilter": { + "Runtime": "dotnet", + "FilePath": 
"PortraitFilter.dll" + } + } +} diff --git a/src/modules/SceneClassifier/install.bat b/src/modules/SceneClassifier/install.bat index 8b2bce7a..3684d9df 100644 --- a/src/modules/SceneClassifier/install.bat +++ b/src/modules/SceneClassifier/install.bat @@ -39,6 +39,7 @@ if errorlevel 1 exit /b 1 :: os - "windows" :: architecture - "x86_64" or "arm64" :: platform - "windows" or "windows-arm64" +:: systemName - "Windows" :: verbosity - quiet, info or loud. Use this to determines the noise level of output. :: forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. :: GetFromServer will honour this value. Do it yourself for DownloadAndExtract diff --git a/src/modules/SceneClassifier/install.sh b/src/modules/SceneClassifier/install.sh index 8d7c73cd..1106b1ea 100644 --- a/src/modules/SceneClassifier/install.sh +++ b/src/modules/SceneClassifier/install.sh @@ -34,7 +34,7 @@ if [ $? -ne 0 ]; then quit 1; fi # Variables available: # # absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI) -# sdkScriptsPath - the path to the installation utility scripts ($rootPath/Installers) +# sdkScriptsPath - the path to the installation utility scripts ($rootPath/SDK/Scripts) # downloadPath - the path to where downloads will be stored ($sdkScriptsPath/downloads) # runtimesPath - the path to the installed runtimes ($rootPath/src/runtimes) # modulesPath - the path to all the AI modules ($rootPath/src/modules) @@ -43,6 +43,8 @@ if [ $? -ne 0 ]; then quit 1; fi # os - "linux" or "macos" # architecture - "x86_64" or "arm64" # platform - "linux", "linux-arm64", "macos" or "macos-arm64" +# systemName - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi" +# "Jetson" or "Docker" # verbosity - quiet, info or loud. Use this to determines the noise level of output. # forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. # getFromServer will honour this value. Do it yourself for downloadAndExtract diff --git a/src/modules/SceneClassifier/modulesettings.json b/src/modules/SceneClassifier/modulesettings.json index f862967d..4b8fb4d6 100644 --- a/src/modules/SceneClassifier/modulesettings.json +++ b/src/modules/SceneClassifier/modulesettings.json @@ -3,7 +3,7 @@ "SceneClassifier": { "Name": "Scene Classification", - "Version": "1.2", + "Version": "1.3", // Publishing info "Description": "Classifies an image according to one of 365 pre-trained scenes", @@ -12,9 +12,11 @@ "LicenseUrl": "http://www.apache.org/licenses/", // Which server version is compatible with each version of this module. 
- "VersionCompatibililty": [ + "ModuleReleases": [ { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-03-01" }, - { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-20" } + { "ModuleVersion": "1.1", "ServerVersionRange": [ "2.1", "2.1.6" ], "ReleaseDate": "2023-03-20", "ReleaseNotes": "Updated for CodeProject.AI Server 2.1" }, + { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "2.1.8" ], "ReleaseDate": "2023-05-03", "ReleaseNotes": "Minor module initialisation changes" }, + { "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-05-17", "ReleaseNotes": "Updated module settings", "Importance": "Minor" } ], // Launch instructions diff --git a/src/modules/SceneClassifier/options.py b/src/modules/SceneClassifier/options.py index 21406018..84597441 100644 --- a/src/modules/SceneClassifier/options.py +++ b/src/modules/SceneClassifier/options.py @@ -20,6 +20,6 @@ def __init__(self): # dump the important variables if self._show_env_variables: - print(f"APPDIR: {self.app_dir}") - print(f"MODEL_SIZE: {self.model_size}") - print(f"MODELS_DIR: {self.models_dir}") + print(f"Debug: APPDIR: {self.app_dir}") + print(f"Debug: MODEL_SIZE: {self.model_size}") + print(f"Debug: MODELS_DIR: {self.models_dir}") diff --git a/src/modules/SceneClassifier/requirements.linux.arm64.txt b/src/modules/SceneClassifier/requirements.linux.arm64.txt index 2caede89..1f979f1a 100644 --- a/src/modules/SceneClassifier/requirements.linux.arm64.txt +++ b/src/modules/SceneClassifier/requirements.linux.arm64.txt @@ -3,7 +3,7 @@ Pandas # Installing Pandas, a data analysis / data manipulation tool CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library SciPy # Installing SciPy, a library for mathematics, science, and engineering PyYAML # Installing PyYAML, a library for reading configuration files diff --git a/src/modules/SceneClassifier/requirements.linux.txt b/src/modules/SceneClassifier/requirements.linux.txt index 1d3f0ee3..a6b993f4 100644 --- a/src/modules/SceneClassifier/requirements.linux.txt +++ b/src/modules/SceneClassifier/requirements.linux.txt @@ -3,16 +3,25 @@ Pandas # Installing Pandas, a data analysis / data manipulation tool CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library SciPy # Installing SciPy, a library for mathematics, science, and engineering PyYAML # Installing PyYAML, a library for reading configuration files -## Specific versions that match the models we're using. -Torch==1.10.2 # Installing Torch, for Tensor computation and Deep neural networks -TorchVision==0.11.3 # Installing TorchVision, for Computer Vision based AI + +# Specific versions that match the models we're using. Size is ~830Mb +# ** Don't do this if we're installing Torch-DirectML ** +--extra-index-url https://download.pytorch.org/whl/cpu +Torch==1.10.2+cpu # Installing Torch, for Tensor computation and Deep neural networks +--extra-index-url https://download.pytorch.org/whl/cpu +TorchVision==0.11.3+cpu # Installing TorchVision, for Computer Vision based AI + +# CPU specific Torch for Linux. 
This is Torch 2.0, though, which seems to be...troublesome. +# --index-url https://download.pytorch.org/whl/cpu +# Torch # Installing Torch, for Tensor computation and Deep neural networks +# --index-url https://download.pytorch.org/whl/cpu +# TorchVision # Installing TorchVision, for Computer Vision based AI ## These to be removed (not needed for inference) -# matlabplotlib Seaborn # Installing Seaborn, a data visualization library based on matplotlib ## last line empty. \ No newline at end of file diff --git a/src/modules/SceneClassifier/requirements.macos.arm64.txt b/src/modules/SceneClassifier/requirements.macos.arm64.txt index 85d62a9f..d7b71193 100644 --- a/src/modules/SceneClassifier/requirements.macos.arm64.txt +++ b/src/modules/SceneClassifier/requirements.macos.arm64.txt @@ -3,7 +3,7 @@ Pandas # Installing Pandas, a data analysis / data manipulation tool CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library SciPy # Installing SciPy, a library for mathematics, science, and engineering PyYAML # Installing PyYAML, a library for reading configuration files diff --git a/src/modules/SceneClassifier/requirements.macos.txt b/src/modules/SceneClassifier/requirements.macos.txt index d27888cc..a852f09c 100644 --- a/src/modules/SceneClassifier/requirements.macos.txt +++ b/src/modules/SceneClassifier/requirements.macos.txt @@ -3,7 +3,7 @@ Pandas # Installing Pandas, a data analysis / data manipulation tool CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library SciPy # Installing SciPy, a library for mathematics, science, and engineering PyYAML # Installing PyYAML, a library for reading configuration files diff --git a/src/modules/SceneClassifier/requirements.txt b/src/modules/SceneClassifier/requirements.txt index 9916f661..15aa7041 100644 --- a/src/modules/SceneClassifier/requirements.txt +++ b/src/modules/SceneClassifier/requirements.txt @@ -3,7 +3,7 @@ Pandas # Installing Pandas, a data analysis / data manipulation tool CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library SciPy # Installing SciPy, a library for mathematics, science, and engineering PyYAML # Installing PyYAML, a library for reading configuration files @@ -12,8 +12,7 @@ torch==1.10.1+cpu # Installing PyTorch, for Tensor computation and Deep neural --find-links https://download.pytorch.org/whl/torch_stable.html torchvision==0.11.2+cpu # Installing TorchVision, for Computer Vision based AI -## These to be removed (not needed for inference) -# matlabplotlib +# We need this, but we don't need this. Seaborn # Installing Seaborn, a data visualization library based on matplotlib ## YOLOv5 base ---------------------------------------- @@ -31,5 +30,4 @@ Seaborn # Installing Seaborn, a data visualization library based on matp ## tqdm>=4.64.0 ## protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 - ## last line empty. 
\ No newline at end of file diff --git a/src/modules/SceneClassifier/requirements.windows.cuda.txt b/src/modules/SceneClassifier/requirements.windows.cuda.txt index 5af6653d..177fed70 100644 --- a/src/modules/SceneClassifier/requirements.windows.cuda.txt +++ b/src/modules/SceneClassifier/requirements.windows.cuda.txt @@ -3,7 +3,7 @@ Pandas # Installing Pandas, a data analysis / data manipulation tool CoreMLTools # Installing CoreMLTools, for working with .mlmodel format models OpenCV-Python # Installing OpenCV, the Open source Computer Vision library -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library SciPy # Installing SciPy, a library for mathematics, science, and engineering PyYAML # Installing PyYAML, a library for reading configuration files diff --git a/src/modules/SceneClassifier/scene_adapter.py b/src/modules/SceneClassifier/scene_adapter.py index 903c4743..fe70ed19 100644 --- a/src/modules/SceneClassifier/scene_adapter.py +++ b/src/modules/SceneClassifier/scene_adapter.py @@ -51,6 +51,7 @@ def __init__(self): super().__init__() self.opts = Options() self.models_lock = Lock() + self.classes = list() self.place_names = None self.classifier = None # Lazy load later on @@ -94,7 +95,9 @@ def process(self: ModuleRunner, data: RequestData) -> JSON: self.init_models() start_inference_time = time.perf_counter() + name, conf = self.classifier.predict(img) + inferenceMs = int((time.perf_counter() - start_inference_time) * 1000) name = self.place_names[name] @@ -111,11 +114,11 @@ def process(self: ModuleRunner, data: RequestData) -> JSON: except UnidentifiedImageError as img_ex: self.report_error(img_ex, __file__, "The image provided was of an unknown type") - return { "success": False, "error": "Error occured on the server" } + return { "success": False, "error": "Error occurred on the server" } except Exception as ex: self.report_error(ex, __file__) - return { "success": False, "error": "Error occured on the server" } + return { "success": False, "error": "Error occurred on the server" } def init_models(self, re_entered: bool = False) -> None: diff --git a/src/modules/SentimentAnalysis/Program.cs b/src/modules/SentimentAnalysis/Program.cs index b5aa2654..711dd6ab 100644 --- a/src/modules/SentimentAnalysis/Program.cs +++ b/src/modules/SentimentAnalysis/Program.cs @@ -12,4 +12,6 @@ }) .Build(); +#pragma warning disable CA2007 // Consider calling ConfigureAwait on the awaited task await host.RunAsync(); +#pragma warning restore CA2007 // Consider calling ConfigureAwait on the awaited task diff --git a/src/modules/SentimentAnalysis/SentimentAnalysis.csproj b/src/modules/SentimentAnalysis/SentimentAnalysis.csproj index efa2881b..a61b6dcb 100644 --- a/src/modules/SentimentAnalysis/SentimentAnalysis.csproj +++ b/src/modules/SentimentAnalysis/SentimentAnalysis.csproj @@ -7,7 +7,7 @@ SentimentAnalysis CodeProject.AI.Modules.SentimentAnalysis - 2.1.0 + 2.1.5 enable disable dotnet-SentimentAnalysis-EBFB5149-7C8C-4D4E-96D1-6C0227E67E29 diff --git a/src/modules/SentimentAnalysis/SentimentAnalysisWorker.cs b/src/modules/SentimentAnalysis/SentimentAnalysisWorker.cs index edfb8962..0aa3354b 100644 --- a/src/modules/SentimentAnalysis/SentimentAnalysisWorker.cs +++ b/src/modules/SentimentAnalysis/SentimentAnalysisWorker.cs @@ -38,12 +38,21 @@ public SentimentAnalysisWorker(ILogger logger, _textClassifier = textClassifier; } + /// + /// Called before the main processing loops are started + /// + protected override void InitModule() + { + HardwareType 
= _textClassifier.HardwareType; + ExecutionProvider = _textClassifier.ExecutionProvider; + } + /// /// The work happens here. /// /// The request. /// The response. - public override BackendResponseBase ProcessRequest(BackendRequest request) + protected override BackendResponseBase ProcessRequest(BackendRequest request) { string? text = request?.payload?.GetValue("text"); if (text is null) @@ -66,13 +75,5 @@ public override BackendResponseBase ProcessRequest(BackendRequest request) return response; } - - protected async override void GetHardwareInfo() - { - await System.Threading.Tasks.Task.Run(() => { - HardwareType = _textClassifier.HardwareType; - ExecutionProvider = _textClassifier.ExecutionProvider; - }); - } } } \ No newline at end of file diff --git a/src/modules/SentimentAnalysis/TextClassifier.cs b/src/modules/SentimentAnalysis/TextClassifier.cs index d8c011bb..0221c256 100644 --- a/src/modules/SentimentAnalysis/TextClassifier.cs +++ b/src/modules/SentimentAnalysis/TextClassifier.cs @@ -21,6 +21,52 @@ namespace CodeProject.AI.Modules.SentimentAnalysis { public class TextClassifier { + /// + /// Class to hold original input data. + /// + public class InputData + { + public string? Text { get; set; } + } + + /// + /// Class to contain the output values from the transformation. + /// + public class SentimentPrediction + { + [VectorType(2)] + public float[]? Prediction { get; set; } + } + + /// + /// Class to hold the variable length feature vector. Used to define the + /// column names used as input to the custom mapping action. + /// + public class VariableLength + { + /// + /// This is a variable length vector designated by VectorType attribute. Variable length + /// vectors are produced by applying operations such as 'TokenizeWords' on strings + /// resulting in vectors of tokens of variable lengths. + /// + [VectorType] + public int[]? VariableLengthFeatures { get; set; } + } + + /// + /// Class to hold the fixed length feature vector. Used to define the + /// column names used as output from the custom mapping action, + /// + public class FixedLength + { + /// + /// This is a fixed length vector designated by VectorType attribute. + /// + [VectorType(FeatureLength)] + public int[]? Features { get; set; } + } + + public const int FeatureLength = 600; private readonly MLContext _mlContext; @@ -86,7 +132,7 @@ public TextClassifier(ILogger logger, IConfiguration config) IEstimator pipeline = // Split the text into individual words - _mlContext.Transforms.Text.TokenizeIntoWords("TokenizedWords", "ReviewText") + _mlContext.Transforms.Text.TokenizeIntoWords("TokenizedWords", "Text") // Map each word to an integer value. The array of integer makes up the input features. 
.Append(_mlContext.Transforms.Conversion.MapValue("VariableLengthFeatures", lookupMap, @@ -102,67 +148,22 @@ public TextClassifier(ILogger logger, IConfiguration config) .Append(_mlContext.Transforms.CopyColumns("Prediction", "Prediction/Softmax")); // Create an executable model from the estimator pipeline - IDataView dataView = _mlContext.Data.LoadFromEnumerable(new List()); + IDataView dataView = _mlContext.Data.LoadFromEnumerable(new List()); _model = pipeline.Fit(dataView); } - public MovieReviewSentimentPrediction PredictSentiment(string reviewText) + public SentimentPrediction PredictSentiment(string inputText) { - var engine = _mlContext.Model.CreatePredictionEngine(_model); + var engine = _mlContext.Model.CreatePredictionEngine(_model); - var review = new MovieReview() + var review = new InputData() { - ReviewText = reviewText + Text = inputText }; // Predict with TensorFlow pipeline. var sentimentPrediction = engine.Predict(review); return sentimentPrediction; } - - /// - /// Class to hold original sentiment data. - /// - public class MovieReview - { - public string? ReviewText { get; set; } - } - - /// - /// Class to contain the output values from the transformation. - /// - public class MovieReviewSentimentPrediction - { - [VectorType(2)] - public float[]? Prediction { get; set; } - } - - /// - /// Class to hold the variable length feature vector. Used to define the - /// column names used as input to the custom mapping action. - /// - public class VariableLength - { - /// - /// This is a variable length vector designated by VectorType attribute. - /// Variable length vectors are produced by applying operations such as 'TokenizeWords' on strings - /// resulting in vectors of tokens of variable lengths. - /// - [VectorType] - public int[]? VariableLengthFeatures { get; set; } - } - - /// - /// Class to hold the fixed length feature vector. Used to define the - /// column names used as output from the custom mapping action, - /// - public class FixedLength - { - /// - /// This is a fixed length vector designated by VectorType attribute. - /// - [VectorType(FeatureLength)] - public int[]? Features { get; set; } - } } } diff --git a/src/modules/SentimentAnalysis/install.bat b/src/modules/SentimentAnalysis/install.bat index b45c3689..4f16f119 100644 --- a/src/modules/SentimentAnalysis/install.bat +++ b/src/modules/SentimentAnalysis/install.bat @@ -26,6 +26,7 @@ :: os - "windows" :: architecture - "x86_64" or "arm64" :: platform - "windows" or "windows-arm64" +:: systemName - "Windows" :: verbosity - quiet, info or loud. Use this to determines the noise level of output. :: forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. :: GetFromServer will honour this value. 
Do it yourself for DownloadAndExtract diff --git a/src/modules/SentimentAnalysis/install.sh b/src/modules/SentimentAnalysis/install.sh index 206bd4e6..8951e05a 100644 --- a/src/modules/SentimentAnalysis/install.sh +++ b/src/modules/SentimentAnalysis/install.sh @@ -21,7 +21,7 @@ fi # Variables available: # # absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI) -# sdkScriptsPath - the path to the installation utility scripts ($rootPath/Installers) +# sdkScriptsPath - the path to the installation utility scripts ($rootPath/SDK/Scripts) # downloadPath - the path to where downloads will be stored ($sdkScriptsPath/downloads) # runtimesPath - the path to the installed runtimes ($rootPath/src/runtimes) # modulesPath - the path to all the AI modules ($rootPath/src/modules) @@ -30,6 +30,8 @@ fi # os - "linux" or "macos" # architecture - "x86_64" or "arm64" # platform - "linux", "linux-arm64", "macos" or "macos-arm64" +# systemName - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi" +# "Jetson" or "Docker" # verbosity - quiet, info or loud. Use this to determines the noise level of output. # forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. # getFromServer will honour this value. Do it yourself for downloadAndExtract diff --git a/src/modules/SentimentAnalysis/modulesettings.json b/src/modules/SentimentAnalysis/modulesettings.json index db5d0eca..83e8a2f2 100644 --- a/src/modules/SentimentAnalysis/modulesettings.json +++ b/src/modules/SentimentAnalysis/modulesettings.json @@ -4,24 +4,27 @@ "Modules": { "SentimentAnalysis": { "Name": "Sentiment Analysis", - "Version": "1.1", + "Version": "1.3", // Publishing info - "Description": "Provides an alaysis of the sentiment of a piece of text. Positive or negative?", - "Platforms": [ "windows", "macos" ], // No tensorflow found in linux, and no "*-arm64" because ML.NET only suports x86 and x64 chips + "Description": "Provides an analysis of the sentiment of a piece of text. Positive or negative?", + "Platforms": [ "windows", "macos" ], // No tensorflow found in linux, and no "*-arm64" because ML.NET only supports x86 and x64 chips "License": "CC-BY-4.0", "LicenseUrl": "https://github.com/dotnet/samples/blob/main/LICENSE", // Which server version is compatible with each version of this module. - "VersionCompatibililty": [ - // { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-06-01" }, - { "ModuleVersion": "1.1", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-20" } + "ModuleReleases": [ + { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-06-01" }, + { "ModuleVersion": "1.1", "ServerVersionRange": [ "2.1", "2.1.6" ], "ReleaseDate": "2023-03-20", "ReleaseNotes": "Updated for CodeProject.AI Server 2.1" }, + { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "2.1.8" ], "ReleaseDate": "2023-05-03", "ReleaseNotes": "Minor module initialisation changes" }, + { "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-05-17", "ReleaseNotes": "Updated module settings", "Importance": "Minor" } ], // Launch instructions "AutoStart": true, - "FilePath": "SentimentAnalysis.exe", "Runtime": "execute", + "FilePath": "SentimentAnalysis.exe", + "RuntimeLocation": "Shared", // Can be Local or Shared. .NET so moot point here // These are all optional. 
Defaults are usually fine diff --git a/src/modules/SentimentAnalysis/modulesettings.linux.development.json b/src/modules/SentimentAnalysis/modulesettings.linux.development.json index 5e0f92c9..37b9e777 100644 --- a/src/modules/SentimentAnalysis/modulesettings.linux.development.json +++ b/src/modules/SentimentAnalysis/modulesettings.linux.development.json @@ -1,8 +1,7 @@ { "Modules": { "SentimentAnalysis": { - "FilePath": "bin/Debug/net7.0/SentimentAnalysis.dll", - "WorkingDirectory": "SentimentAnalysis" + "FilePath": "bin/Debug/net7.0/SentimentAnalysis.dll" } } } diff --git a/src/modules/SentimentAnalysis/modulesettings.linux.json b/src/modules/SentimentAnalysis/modulesettings.linux.json index 72aabc67..a4682862 100644 --- a/src/modules/SentimentAnalysis/modulesettings.linux.json +++ b/src/modules/SentimentAnalysis/modulesettings.linux.json @@ -1,7 +1,8 @@ { "Modules": { "SentimentAnalysis": { - "FilePath": "SentimentAnalysis" + "Runtime": "dotnet", + "FilePath": "SentimentAnalysis.dll" } } } diff --git a/src/modules/SentimentAnalysis/modulesettings.macos.development.json b/src/modules/SentimentAnalysis/modulesettings.macos.development.json index 25aa2df1..472ea9de 100644 --- a/src/modules/SentimentAnalysis/modulesettings.macos.development.json +++ b/src/modules/SentimentAnalysis/modulesettings.macos.development.json @@ -1,8 +1,7 @@ { "Modules": { "SentimentAnalysis": { - "FilePath": "bin/Debug/net7.0/SentimentAnalysis", - "WorkingDirectory": "SentimentAnalysis" - } + "FilePath": "bin/Debug/net7.0/SentimentAnalysis.dll" + } } } diff --git a/src/modules/SentimentAnalysis/modulesettings.macos.json b/src/modules/SentimentAnalysis/modulesettings.macos.json new file mode 100644 index 00000000..a4682862 --- /dev/null +++ b/src/modules/SentimentAnalysis/modulesettings.macos.json @@ -0,0 +1,8 @@ +{ + "Modules": { + "SentimentAnalysis": { + "Runtime": "dotnet", + "FilePath": "SentimentAnalysis.dll" + } + } +} diff --git a/src/modules/SuperResolution/install.bat b/src/modules/SuperResolution/install.bat index cc7dbc2d..af6a5f93 100644 --- a/src/modules/SuperResolution/install.bat +++ b/src/modules/SuperResolution/install.bat @@ -34,6 +34,7 @@ if errorlevel 1 exit /b 1 :: os - "windows" :: architecture - "x86_64" or "arm64" :: platform - "windows" or "windows-arm64" +:: systemName - "Windows" :: verbosity - quiet, info or loud. Use this to determines the noise level of output. :: forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. :: GetFromServer will honour this value. Do it yourself for DownloadAndExtract diff --git a/src/modules/SuperResolution/install.sh b/src/modules/SuperResolution/install.sh index 099d7a19..48d45ce5 100644 --- a/src/modules/SuperResolution/install.sh +++ b/src/modules/SuperResolution/install.sh @@ -31,7 +31,7 @@ if [ $? -ne 0 ]; then quit 1; fi # Variables available: # # absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI) -# sdkScriptsPath - the path to the installation utility scripts ($rootPath/Installers) +# sdkScriptsPath - the path to the installation utility scripts ($rootPath/SDK/Scripts) # downloadPath - the path to where downloads will be stored ($sdkScriptsPath/downloads) # runtimesPath - the path to the installed runtimes ($rootPath/src/runtimes) # modulesPath - the path to all the AI modules ($rootPath/src/modules) @@ -40,6 +40,8 @@ if [ $? 
-ne 0 ]; then quit 1; fi # os - "linux" or "macos" # architecture - "x86_64" or "arm64" # platform - "linux", "linux-arm64", "macos" or "macos-arm64" +# systemName - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi" +# "Jetson" or "Docker" # verbosity - quiet, info or loud. Use this to determines the noise level of output. # forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. # getFromServer will honour this value. Do it yourself for downloadAndExtract diff --git a/src/modules/SuperResolution/modulesettings.json b/src/modules/SuperResolution/modulesettings.json index 6bf3fb5e..d77b999d 100644 --- a/src/modules/SuperResolution/modulesettings.json +++ b/src/modules/SuperResolution/modulesettings.json @@ -3,7 +3,7 @@ "SuperResolution": { "Name": "Super Resolution", - "Version": "1.3", + "Version": "1.5", // Publishing info "Description": "Increases the resolution of an image using AI", @@ -12,13 +12,15 @@ "LicenseUrl": "http://www.apache.org/licenses/", // Which server version is compatible with each version of this module. - "VersionCompatibililty": [ - { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.6.8" ], "ReleaseDate": "2022-03-01" }, + "ModuleReleases": [ + { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.6.8" ], "ReleaseDate": "2022-03-01" }, { "ModuleVersion": "1.1", "ServerVersionRange": [ "2.6.9", "2.0.8" ], "ReleaseDate": "2022-11-01" }, - { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "2.1" ], "ReleaseDate": "2023-03-20" }, - { "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-04-11" } + { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-20", "ReleaseNotes": "Updated for CodeProject.AI Server 2.1" }, + { "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-04-11", "ReleaseNotes": "Missing assets restored" }, + { "ModuleVersion": "1.4", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-04-11", "ReleaseNotes": "Corrected inferenceMs type" }, + { "ModuleVersion": "1.5", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-05-17", "ReleaseNotes": "Updated module settings", "Importance": "Minor" } ], - + // Launch instructions "AutoStart": true, "FilePath": "superres_adapter.py", diff --git a/src/modules/SuperResolution/requirements.linux.txt b/src/modules/SuperResolution/requirements.linux.txt index 139bfcf5..89ed4e30 100644 --- a/src/modules/SuperResolution/requirements.linux.txt +++ b/src/modules/SuperResolution/requirements.linux.txt @@ -3,7 +3,7 @@ ONNX # Installing ONNX, the Open Neural Network Exchange library ONNXRuntime # Installing ONNX runtime, the scoring engine for ONNX models python-resize-image # Installing resizeimage, which provides functions for easily resizing images -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library ## --extra-index-url https://download.pytorch.org/whl/cpu ## Torch==1.10.2+cpu # Installing Torch, for Tensor computation and Deep neural networks diff --git a/src/modules/SuperResolution/requirements.macos.arm64.txt b/src/modules/SuperResolution/requirements.macos.arm64.txt index 69f803e8..a2e82ed1 100644 --- a/src/modules/SuperResolution/requirements.macos.arm64.txt +++ b/src/modules/SuperResolution/requirements.macos.arm64.txt @@ -4,7 +4,7 @@ ONNX # Installing ONNX, the Open Neural Network Exchange library ONNXRuntime # Installing ONNX runtime, the scoring engine for ONNX models torch # 
Installing PyTorch, for Tensor computation and Deep neural networks python-resize-image # Installing resizeimage, which provides functions for easily resizing images -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library # We may need to rework this. See https://stackoverflow.com/a/66536896 numpy==1.22.4 # Installing NumPy, a package for scientific computing diff --git a/src/modules/SuperResolution/requirements.macos.txt b/src/modules/SuperResolution/requirements.macos.txt index 69f803e8..a2e82ed1 100644 --- a/src/modules/SuperResolution/requirements.macos.txt +++ b/src/modules/SuperResolution/requirements.macos.txt @@ -4,7 +4,7 @@ ONNX # Installing ONNX, the Open Neural Network Exchange library ONNXRuntime # Installing ONNX runtime, the scoring engine for ONNX models torch # Installing PyTorch, for Tensor computation and Deep neural networks python-resize-image # Installing resizeimage, which provides functions for easily resizing images -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library # We may need to rework this. See https://stackoverflow.com/a/66536896 numpy==1.22.4 # Installing NumPy, a package for scientific computing diff --git a/src/modules/SuperResolution/requirements.txt b/src/modules/SuperResolution/requirements.txt index dd62489a..634c805c 100644 --- a/src/modules/SuperResolution/requirements.txt +++ b/src/modules/SuperResolution/requirements.txt @@ -3,7 +3,7 @@ ONNX # Installing ONNX, the Open Neural Network Exchange library ONNXRuntime # Installing ONNX runtime, the scoring engine for ONNX models python-resize-image # Installing resizeimage, which provides functions for easily resizing images -Pillow # Installing Pillow, a Python Image Library +Pillow<10.0.0 # Installing Pillow, a Python Image Library # Sticking with a versioned install because in a shared python install we need this to # play well with torchvision (which will be 0.11.3 in the shared site packages folder) diff --git a/src/modules/SuperResolution/superres_adapter.py b/src/modules/SuperResolution/superres_adapter.py index 7054324b..5b6463db 100644 --- a/src/modules/SuperResolution/superres_adapter.py +++ b/src/modules/SuperResolution/superres_adapter.py @@ -11,6 +11,7 @@ from common import JSON from request_data import RequestData from module_runner import ModuleRunner +from threading import Lock # Import libraries needed from PIL import Image @@ -18,7 +19,6 @@ # Import the method of the module we're wrapping from superresolution import superresolution, load_pretrained_weights - class SuperRes_adapter(ModuleRunner): def initialise(self) -> None: @@ -33,11 +33,12 @@ def process(self, data: RequestData) -> JSON: img: Image = data.get_image(0) start_time = time.perf_counter() + (out_img, inferenceMs) = superresolution(img) return { "success": True, - "imageBase64": data.encode_image(out_img), + "imageBase64": RequestData.encode_image(out_img), "processMs" : int((time.perf_counter() - start_time) * 1000), "inferenceMs": inferenceMs } diff --git a/src/modules/TextSummary/install.bat b/src/modules/TextSummary/install.bat index d757297c..d171158d 100644 --- a/src/modules/TextSummary/install.bat +++ b/src/modules/TextSummary/install.bat @@ -35,6 +35,7 @@ if errorlevel 1 exit /b 1 :: os - "windows" :: architecture - "x86_64" or "arm64" :: platform - "windows" or "windows-arm64" +:: systemName - "Windows" :: verbosity - quiet, info or loud. Use this to determines the noise level of output. 
:: forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. :: GetFromServer will honour this value. Do it yourself for DownloadAndExtract diff --git a/src/modules/TextSummary/install.sh b/src/modules/TextSummary/install.sh index 609800d1..3104a10b 100644 --- a/src/modules/TextSummary/install.sh +++ b/src/modules/TextSummary/install.sh @@ -31,7 +31,7 @@ if [ $? -ne 0 ]; then quit 1; fi # Variables available: # # absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI) -# sdkScriptsPath - the path to the installation utility scripts ($rootPath/Installers) +# sdkScriptsPath - the path to the installation utility scripts ($rootPath/SDK/Scripts) # downloadPath - the path to where downloads will be stored ($sdkScriptsPath/downloads) # runtimesPath - the path to the installed runtimes ($rootPath/src/runtimes) # modulesPath - the path to all the AI modules ($rootPath/src/modules) @@ -40,6 +40,8 @@ if [ $? -ne 0 ]; then quit 1; fi # os - "linux" or "macos" # architecture - "x86_64" or "arm64" # platform - "linux", "linux-arm64", "macos" or "macos-arm64" +# systemName - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi" +# "Jetson" or "Docker" # verbosity - quiet, info or loud. Use this to determines the noise level of output. # forceOverwrite - if true then ensure you force a re-download and re-copy of downloads. # getFromServer will honour this value. Do it yourself for downloadAndExtract diff --git a/src/modules/TextSummary/modulesettings.json b/src/modules/TextSummary/modulesettings.json index eab54ca2..14f953f8 100644 --- a/src/modules/TextSummary/modulesettings.json +++ b/src/modules/TextSummary/modulesettings.json @@ -2,7 +2,7 @@ "Modules": { "TextSummary": { "Name": "Text Summary", - "Version": "1.2", + "Version": "1.3", // Publishing info "Description": "Summarizes text content by selecting a number of sentences that are most representitive of the content.", @@ -11,10 +11,11 @@ "LicenseUrl": "https://github.com/edubey/text-summarizer", // Which server version is compatible with each version of this module. 
-        "VersionCompatibililty": [
-            { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "1.6.8" ], "ReleaseDate": "2022-11-01" },
+        "ModuleReleases": [
+            { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "1.6.8" ], "ReleaseDate": "2022-11-01" },
             { "ModuleVersion": "1.1", "ServerVersionRange": [ "1.6.9", "2.0.8" ], "ReleaseDate": "2022-11-01" },
-            { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-20" }
+            { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "2.1.6" ], "ReleaseDate": "2023-03-20", "ReleaseNotes": "Updated for CodeProject.AI Server 2.1" },
+            { "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-05-17", "ReleaseNotes": "Updated module settings", "Importance": "Minor" }
         ],

         // Launch instructions
diff --git a/src/modules/TrainingYoloV5/TrainingYoloV5.py b/src/modules/TrainingYoloV5/TrainingYoloV5.py
new file mode 100644
index 00000000..af55d576
--- /dev/null
+++ b/src/modules/TrainingYoloV5/TrainingYoloV5.py
@@ -0,0 +1,1000 @@
+# print("============= Starting TrainingYolov5 module =============")
+
+# Import our general libraries
+import os
+import functools
+
+import asyncio
+from datetime import datetime, timedelta
+from enum import Enum
+import platform
+import random
+import psutil
+import shutil
+import sys
+from typing import List
+
+# Import the CodeProject.AI SDK. This will add to the PATH var for future imports
+sys.path.append("../../SDK/Python")
+from common import JSON, timedelta_format, get_folder_size
+from request_data import RequestData
+from module_runner import ModuleRunner
+from module_logging import LogMethod
+from module_options import ModuleOptions
+
+# Import libraries specific to training
+import tqdm
+import yaml
+
+from yolov5.train import parse_opt
+from yolov5.train import main as train_main
+from yolov5.utils.callbacks import Callbacks
+from yolov5.utils.plots import plot_results
+
+
+# HACK: ========================================================================
+# Monkey Patch tqdm so that all instances are disabled. This stops the training
+# from filling the log with tons of stuff written to the console. This must be
+# after all the imports that directly or indirectly import tqdm.
+# Note that we only do this for modules launched by the server.
Modules launched +# from the debugger or otherwise separately won't have their stdout/stderr +# captured and so should continue to use the console for output + +if ModuleOptions.launched_by_server: + original_tqdm_init = tqdm.tqdm.__init__ + def new_init(self, iterable=None, desc=None, total=None, leave=True, file=None, + ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None, + ascii=None, disable=False, unit='it', unit_scale=False, + dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, + position=None, postfix=None, unit_divisor=1000, write_bytes=False, + lock_args=None, nrows=None, colour=None, delay=0, gui=False, + **kwargs): + original_tqdm_init(self, iterable=iterable, desc=desc, total=total, + leave=leave, file=file, ncols=ncols, mininterval=mininterval, + maxinterval=maxinterval, miniters=miniters, ascii=ascii, + disable=True, unit=unit, unit_scale=unit_scale, + dynamic_ncols=dynamic_ncols, smoothing=smoothing, + bar_format=bar_format, initial=initial, position=position, + postfix=postfix, unit_divisor=unit_divisor, + write_bytes=write_bytes, lock_args=lock_args, nrows=nrows, + colour=colour, delay=delay, gui=gui, **kwargs) + + tqdm.tqdm.__init__ = new_init + + +# Enums ------------------------------------------------------------------------ + +# Actions are the actions that can be executed for the long running background +# tasks. +class Actions(Enum): + Idle = 0 # The module has restarted and nothing is happening. + InvalidCommand = 1 # an invalid Action was requested. + TrainModel = 2 # Training a model + ResumeTrainModel = 3 # Resuming training a model + CreateDataset = 4 # Create a dataset + +# ActionStates are the states that the background tasks can be in. +class ActionStates(Enum): + Idle = 0 # Nothing is happening + Initializing = 1 # the Action is Initializing + Running = 2 # the Action is Running + Completed = 3 # the Action successfully completed + Cancelling = 4 # a request to cancel the Action was received + Cancelled = 5 # the Action was Cancelled + Failed = 6 # the Action Failed due to an Error + +# A simple progress handler ---------------------------------------------------- + +class ProgressHandler: + def __init__(self): + self.progress_max = 100 + self.progress_value = 0 + + @property + def max(self): + return self.progress_max + + @max.setter + def max(self, max_value:int) -> None: + self.progress_max = max(1, max_value) + + @property + def value(self) -> int: + return self.progress_value + + @value.setter + def value(self, val: int) -> None: + self.progress_value = max(0, min(val, self.progress_max)) + + @property + def percent_done(self) -> float: + return self.progress_value * 100 / self.progress_max # progress_max is always >= 1 + + +# the ModuleRunner ------------------------------------------------------------ + +class YoloV5Trainer_adaptor(ModuleRunner): + + def initialise(self): + """ Initialises this module """ + + # Process settings + self.parallelism = 2 # One for background task + # one to process other requests + + # determine the device to use during training + self.default_device = "cpu" + if ModuleOptions.support_GPU: + if self.hasTorchCuda: + self.default_device = "0" + self.execution_provider = "CUDA" + if self.half_precision == 'enable' and not self.hasTorchHalfPrecision: + self.half_precision = 'disable' + elif self.hasTorchMPS: + self.default_device = "mps" + self.execution_provider = "MPS" + + # Global Settings + self.datasets_dirname = ModuleOptions.getEnvVariable("YOLO_DATASETS_DIRNAME", "datasets") + 
self.training_dirname = ModuleOptions.getEnvVariable("YOLO_TRAINING_DIRNAME", "train") + self.models_dirname = ModuleOptions.getEnvVariable("YOLO_MODELS_DIRNAME", "assets") + self.weights_dirname = ModuleOptions.getEnvVariable("YOLO_WEIGHTS_DIRNAME", "weights") + self.zoo_dirname = ModuleOptions.getEnvVariable("YOLO_DATASET_ZOO_DIRNAME", "zoo") + + # Training Settings + self.model_name = None + self.dataset_name = None + self.num_epochs = 0 + + self.current_action = Actions.Idle + self.action_state = ActionStates.Idle + self.worker_thread = None + self.worker_thread_aborted = False + self.action_message = "" + self.cancel_requested = False + + self.progress = ProgressHandler() + + self.init_fiftyone() + self.init_custom_callbacks() + + + @property + def is_busy(self) -> bool: + """ Returns True if we're currently running any major process """ + + # Since only one background action is allowed at a time, we are busy + # if the worker_thread exists and is not done. + if not self.worker_thread: + return False + + return not self.worker_thread.done() + + + async def process(self, data: RequestData) -> JSON: + """ + Processes a request from the server. Gets the command from the request + and dispatches to the appropriate function. + """ + + if not data or not hasattr(data, "command"): + return {"success": False, "error": "Request data has no command."} + + # Map of the available commands + available_actions = { + "create_dataset": self.start_create_dataset_action, + "train_model": self.start_train_model_action, + "resume_training": self.start_resume_train_model_action, + "list-classes": self.list_classes, + # "status": self.get_status, + "model_info": self.get_model_info, + "dataset_info": self.get_dataset_info, + "cancel": self.cancel_current_action + } + + # Get the command + requested_action = available_actions.get(data.command, self.handle_invalid_action) + + # Execute the command + return requested_action(data) + + + def status(self, data: RequestData = None) -> JSON: + """ + Called when this module has been asked to provide its current status. + """ + # print("Getting status for training") + return self.get_status(data) + + + def selftest(self, data: RequestData = None) -> JSON: + """ + Called to run general tests against this module to ensure it's in good + working order + """ + print("Running self test for training module") + + + # COMMAND SWITCHBOARD ------------------------------------------------------ + + def start_action(self, action, **kwargs): + + """ Sets things up and calls the model training routine """ + + # Initialize the settings + self.model_name = kwargs.get('model_name') + self.dataset_name = kwargs.get('dataset_name') + + # Initialise the state + self.current_action = action + self.action_state = ActionStates.Initializing + self.training_start_time = datetime.now() + self.progress.value = 0 + self.cancel_requested = False + self.custom_callbacks.stop_training = False + + # NOTE: We've observed, possibly hallucinated, thread/task abort issues + # where methods just fail and return without throwing exceptions. We need + # to ensure we only set 'success' if the methods actually return True. + # if this is not cleared by the finally code then the thread was aborted. 
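+        # (Sentinel pattern: the flag is set before the work starts and cleared
+        # in the 'finally' block. If get_status later finds the worker thread
+        # finished while the flag is still set, 'finally' never ran and the
+        # thread was aborted mid-flight.)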
+        self.worker_thread_aborted = True
+
+        try:
+            if action == Actions.CreateDataset:
+                self.check_memory()
+                success = self.create_dataset(**kwargs)
+
+            elif action == Actions.TrainModel:
+                self.check_memory()
+                success = self.train_model(**kwargs)
+
+            elif action == Actions.ResumeTrainModel:
+                self.check_memory()
+                # set the progress value to non-zero so that the graphs will display
+                self.progress.value = 1
+                success = self.resume_train_model(**kwargs)
+
+            else:
+                self.action_state   = ActionStates.Failed
+                self.action_message = f"I don't know how to do {action}"
+                return
+
+            # NOTE: on a task/thread abort, we won't get here and
+            # self.worker_thread_aborted will not be cleared in the finally clause.
+
+            if self.cancel_requested:
+                self.action_state   = ActionStates.Cancelled
+                self.action_message = "Operation was cancelled"
+            else:
+                self.action_state = ActionStates.Completed
+
+        except MemoryError as me:
+            self.report_error(me, __file__, str(me))
+            self.action_state   = ActionStates.Failed
+            self.action_message = "Memory: " + str(me)
+
+        except Exception as e:
+            self.report_error(e, __file__)
+            self.action_state   = ActionStates.Failed
+            self.action_message = str(e)
+
+        finally:
+            self.worker_thread_aborted = False
+
+
+    def cancel_current_action(self, data: RequestData) -> any:
+        if self.is_busy:
+            self.action_state     = ActionStates.Cancelling
+            self.cancel_requested = True
+            return { "success": True }
+
+        return {"success": False, "error": "No Action is running to cancel."}
+
+    def handle_invalid_action(self, data: RequestData) -> any:
+        self.current_action = Actions.InvalidCommand
+        self.action_state   = ActionStates.Failed
+        self.report_error(None, __file__, f"Unknown command {data.command}")
+        return {"success": False, "error": f"Unknown command {data.command}"}
+
+
+    # DATASET CREATION METHODS -------------------------------------------------
+
+    def start_create_dataset_action(self, data: RequestData) -> any:
+
+        # there can only be one background Action running at a time.
+        if self.is_busy:
+            return { "success": False, "error": "Action in Progress" }
+
+        # Get parameters
+        dataset_name = data.get_value("dataset_name")
+        if not dataset_name:
+            return { "success": False, "error": "Dataset name is required." }
+
+        classes = data.get_value("classes")
+        if not classes:
+            return { "success": False, "error": "Classes are required." }
+
+        classes = classes.split(",")
+        for idx, item in enumerate(classes):
+            classes[idx] = item.strip()
+
+        num_images = data.get_int("num_images", 100)
+        num_images = data.clamp(num_images, 10, 10000)
+
+        loop = asyncio.get_running_loop()
+
+        self.worker_thread = loop.run_in_executor(None, functools.partial(
+                                                  self.start_action,
+                                                  Actions.CreateDataset,
+                                                  dataset_name = dataset_name,
+                                                  classes      = classes,
+                                                  num_images   = num_images)
+                                                 )
+
+        return { "success": True, "message": f"Starting to create dataset {dataset_name}." }
+
+    def create_dataset(self, **kwargs) -> bool:
+        """ Downloads a dataset """
+        dataset_name = kwargs.get('dataset_name')
+        classes      = kwargs.get('classes')
+        num_images   = kwargs.get('num_images')
+        # Already imported, so these won't do any database setup (hopefully),
+        # but we need to 'import' again to get access to the namespace
+        import fiftyone as fo
+        import fiftyone.zoo as foz
+        import fiftyone.utils.openimages as fouo
+
+        self.action_state = ActionStates.Running
+
+        download_splits = ['train', 'validation', 'test']
+        export_splits   = ['train', 'val', 'test']
+        # Export the Dataset
+        export_dir = f'{self.datasets_dirname}/{dataset_name}'
+
+        if os.path.exists(export_dir):
+            shutil.rmtree(export_dir)
+
+        label_types = ["detections"]
+
+        # This will throw on invalid class name.
+        normalized_classes = self.normalize_classlist(classes)
+        num_classes = len(normalized_classes)
+        # 1 init, 5 for each class/split (4 loading, 1 exporting). 'units' are arbitrary here
+        self.progress.max   = 1 + num_classes * 5 * len(export_splits)
+        self.action_message = "Acquiring training data"
+
+        if fo.dataset_exists(dataset_name):
+            fo.delete_dataset(dataset_name)
+
+        self.progress.value += 1 # basic init done
+
+        if self.cancel_requested:
+            return False
+
+        class_index = 1
+        for current_class in normalized_classes:
+            for split in download_splits:
+                self.action_message = f"{class_index}/{num_classes}: Loading {split} split for '{current_class}' from Open Images"
+
+                # this results in a 60, 20, 20 split for train, validation, test
+                num_samples = num_images if split == 'train' else num_images // 3
+
+                dataset = foz.load_zoo_dataset('open-images-v7',
+                                               splits=split,
+                                               label_types=label_types,
+                                               classes = current_class,
+                                               #only_matching = True,
+                                               max_samples=num_samples,
+                                               #seed=42,
+                                               shuffle=True,
+                                               dataset_name=dataset_name)
+
+                self.progress.value += 4 # This is a really long step, so boost it
+
+                if self.cancel_requested:
+                    return False
+
+                self.action_message = f"Export {split} split for '{current_class}' to '{export_dir}'"
+
+                dataset.export(export_dir   = export_dir,
+                               dataset_type = fo.types.YOLOv5Dataset,
+                               label_field  = 'ground_truth',
+                               split        = 'val' if split == 'validation' else split,
+                               classes      = normalized_classes)
+
+                fo.delete_dataset(dataset_name)
+
+                self.progress.value += 1 # +1 for each export, 3 in total
+
+                if self.cancel_requested:
+                    return False
+
+            class_index += 1
+
+        self.action_state   = ActionStates.Completed
+        self.action_message = "Dataset successfully created"
+
+        # Here would be the place to write a marker or info file that would
+        # indicate that the dataset is complete
+        return True
+
+
+    # TRAINING METHODS ---------------------------------------------------------
+
+    def start_train_model_action(self, data: RequestData) -> any:
+
+        # there can only be one background Action running at a time.
+        if self.is_busy:
+            return { "success": False, "error": "Action in Progress" }
+
+        # Get parameters
+        model_name = data.get_value("model_name")
+        if not model_name:
+            return { "success": False, "error": "Model name is required." }
+
+        dataset_name = data.get_value("dataset_name")
+        if not dataset_name:
+            return { "success": False, "error": "Dataset name is required."
} + + model_size = data.get_value("model_size", "small").lower() + model_size = data.restrict(model_size, [ "tiny", "small", "medium", "large" ], "small") + + # TODO: add min,max to data.get_* methods to have clamp done in same op + num_epochs = data.get_int("num_epochs", 100) + num_epochs = data.clamp(num_epochs, 10, 1000) + + # -1 = autosize + batch_size = data.get_int("batch", 8) + batch_size = data.clamp(batch_size, -1, 256) + + freeze = data.get_int("freeze", 10) + freeze = data.clamp(freeze, 0, 24) + + hyp_type = data.get_value("hyp", "fine") + hyp_type = data.restrict(hyp_type, [ "fine", "low", "medium", "high" ], "fine") + + patience = data.get_int("patience", 100) + patience = data.clamp(patience, 0, 1000) + + workers = data.get_int("workers", 8) + workers = data.clamp(workers, 1, 128) + + if not ModuleOptions.support_GPU: + device = "cpu" + elif self.hasTorchMPS: + device = "mps" + elif self.hasTorchCuda: + device = data.get_value("device", self.default_device) + else: + device = "cpu" + + loop = asyncio.get_running_loop() + + self.worker_thread = loop.run_in_executor(None, functools.partial( + self.start_action, + Actions.TrainModel, + model_name = model_name, + dataset_name = dataset_name, + model_size = model_size, + epochs = num_epochs, + batch_size = batch_size, + device = device, + freeze = freeze, + hyp_type = hyp_type, + patience = patience, + workers = workers) + ) + + # NOTE: The process we started is still running. From here on updates + # to progress are made via the status APIs + + return { "success": True, "message": F"Starting to train model {model_name}" } + + + def start_resume_train_model_action(self, data: RequestData) -> any: + + # there can only be one background Action running at a time. + if self.is_busy: + return { "success": False, "error": "Action in Progress" } + + # Get parameters + model_name = data.get_value("model_name") + if not model_name: + return { "success": False, "error": "Model name is required." } + + loop = asyncio.get_running_loop() + + self.worker_thread = loop.run_in_executor(None, functools.partial( + self.start_action, + Actions.ResumeTrainModel, + model_name = model_name) + ) + + # We won't wait for the task to end. We'll return now and let the + # (probably very long) task continue in the background. + # await self.task_executor + return { "success": True, "message": F"Resuming training for model '{model_name}'" } + + + # Callbacks for monitoring progress ---------------------------------------- + + def on_train_start(self): + self.action_message = f"Starting to train model '{self.model_name}'" + pass + + def on_train_epoch_start(self): + + self.epoch_start_time = datetime.now() + training_project_dir = f'{self.training_dirname}/{self.model_name}' + results_csv_path = os.path.join(training_project_dir, "results.csv") + + if os.path.exists(results_csv_path): + plot_results(results_csv_path) # plot 'results.csv' as 'results.png' + + self.check_for_cancel_requested() + + def on_fit_epoch_end(self, logvals, epoch, best_fitness, fi): + + epochs_processed = epoch + 1 + + self.progress.value = epochs_processed + + total_training_seconds = (datetime.now() - self.training_start_time).total_seconds() + current_epoch_seconds = (datetime.now() - self.epoch_start_time).total_seconds() + + # The time taken for each epoch changes. For best results we'll base time + # left on the latest epoch rather than the first, or the average of all + # epochs. We'll converge to a more accurate value faster. 
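+        # e.g. with 100 epochs, if epoch 40 just took 30s then the estimate is
+        # 60 remaining epochs * 30s = 30 minutes, regardless of how fast the
+        # early epochs were.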
+ seconds_left = (self.num_epochs - epochs_processed) * current_epoch_seconds + + time_spent = timedelta_format(timedelta(seconds=total_training_seconds)) + time_remaining = timedelta_format(timedelta(seconds=seconds_left)) + self.action_message = f"Epoch {epoch+1}/{self.num_epochs}. Duration: {time_spent} Remaining: {time_remaining}" + + def on_train_end(self, last, best, epoch, results): + self.progress.value = self.num_epochs + + def check_for_cancel_requested(self): + """ Checks to see if a request to cancel training has been received """ + if self.cancel_requested: + self.custom_callbacks.stop_training = True + + def init_custom_callbacks(self): + """ Sets up the callbacks for each training event """ + + self.custom_callbacks = Callbacks() + self.custom_callbacks.register_action("on_train_start", callback=self.check_for_cancel_requested) + self.custom_callbacks.register_action("on_train_epoch_start", callback=self.on_train_epoch_start) + self.custom_callbacks.register_action("on_train_batch_start", callback=self.check_for_cancel_requested) + self.custom_callbacks.register_action("on_val_start", callback=self.check_for_cancel_requested) + self.custom_callbacks.register_action("on_val_batch_start", callback=self.check_for_cancel_requested) + self.custom_callbacks.register_action("on_fit_epoch_end", callback=self.on_fit_epoch_end) + self.custom_callbacks.register_action("on_train_end", callback=self.on_train_end) + + + # The actual training ------------------------------------------------------ + + def train_model(self, **kwargs) -> bool: + """ Does the actual model training """ + model_name = kwargs.get('model_name') + num_epochs = kwargs.get('epochs') + model_size = kwargs.get('model_size') + dataset_name = kwargs.get('dataset_name') + hyp_type = kwargs.get('hyp_type') + + self.num_epochs = num_epochs + self.action_state = ActionStates.Initializing + self.action_message = f"Preparing to train model '{model_name}'" + + self.progress.max = num_epochs + + self.log(LogMethod.Info|LogMethod.Server, { + "message": f"Training the {model_name} model", + "loglevel": "information" + }) + + training_project_dir = f'{self.training_dirname}/{model_name}' + if os.path.exists(training_project_dir): + shutil.rmtree(training_project_dir) + + # NOTE: We're going to force model size and hyperparameter file type to + # be valid values even if the user inputs garbage. Our goal here + # is to teach and spread the love, and that sometimes means + # politely moving on rather than pointing out the user messed up. 
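+        # (A dict lookup with a fallback default would do the same job, e.g.
+        #    weights_filename = { "tiny":   "yolov5n.pt", "small": "yolov5s.pt",
+        #                         "medium": "yolov5m.pt", "large": "yolov5l.pt"
+        #                       }.get(model_size.lower(), "yolov5s.pt")
+        # The explicit chains below are kept for readability.)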
+
+        weights_filename = 'yolov5s.pt'
+        model_size = model_size.lower()
+        if model_size == "tiny":
+            weights_filename = 'yolov5n.pt'
+        elif model_size == "small":
+            weights_filename = 'yolov5s.pt'
+        elif model_size == "medium":
+            weights_filename = 'yolov5m.pt'
+        elif model_size == "large":
+            weights_filename = 'yolov5l.pt'
+
+        self.action_message = f"Using {model_size} model {weights_filename} for training"
+
+        hyp_name = "hyp.VOC.yaml"
+        hyp_type = hyp_type.lower()
+        if hyp_type == "fine":
+            hyp_name = "hyp.VOC.yaml"           # fine-tuned on the VOC dataset
+        elif hyp_type == "low":
+            hyp_name = "hyp.scratch-low.yaml"
+        elif hyp_type == "medium":
+            hyp_name = "hyp.scratch-med.yaml"
+        elif hyp_type == "high":
+            hyp_name = "hyp.scratch-high.yaml"
+
+        hyp_file_path = {
+            'Linux':   "bin/linux/python38/venv/lib/python3.8/site-packages/yolov5/data/hyps/",
+            'Darwin':  "bin/macos/python38/venv/lib/python3.8/site-packages/yolov5/data/hyps/",
+            'Windows': "bin/windows/python39/venv/Lib/site-packages/yolov5/data/hyps/"
+        }[platform.system()]
+
+        # try to use the dataset name as a full path to the dataset directory.
+        dataset_yaml_path = os.path.join(dataset_name, 'dataset.yaml')
+        if not os.path.exists(dataset_yaml_path):
+            dataset_yaml_path = os.path.join(self.datasets_dirname, dataset_name, 'dataset.yaml')
+
+        if not os.path.exists(dataset_yaml_path):
+            raise FileNotFoundError(f"The Dataset {dataset_name} does not exist.")
+
+        self.action_state = ActionStates.Running
+        kwargs['name']    = model_name
+        kwargs['weights'] = f"{self.models_dirname}/{weights_filename}"
+        kwargs['data']    = dataset_yaml_path
+        kwargs['project'] = self.training_dirname
+        kwargs['hyp']     = hyp_file_path + hyp_name
+
+        return self.train(**kwargs)
+
+    def resume_train_model(self, **kwargs) -> bool:
+        """ Resumes training of an existing model """
+        model_name = kwargs.get('model_name')
+        self.action_state   = ActionStates.Initializing
+        self.action_message = f"Preparing to resume training model '{model_name}'"
+        self.log(LogMethod.Info|LogMethod.Server, {
+            "message": f"Resume Training model '{model_name}'",
+            "loglevel": "information"
+        })
+
+        last_checkpoint = os.path.join(self.training_dirname, model_name, "weights", "last.pt")
+        if not os.path.exists(last_checkpoint):
+            raise FileNotFoundError(f"A checkpoint does not exist for {model_name}")
+
+        # read num_epochs from the opt.yaml file
+        opt_yaml_path = os.path.join(self.training_dirname, model_name, "opt.yaml")
+        if not os.path.exists(opt_yaml_path):
+            raise FileNotFoundError(f"An opt.yaml file does not exist for {model_name}")
+
+        with open(opt_yaml_path, errors='ignore') as f:
+            d = yaml.safe_load(f)
+
+        # Get the number of epochs for which the model is being trained.
+        num_epochs = d['epochs']
+        self.num_epochs = num_epochs
+        self.progress.max = num_epochs
+
+        # Get the name of the dataset on which the model is being trained.
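+        # e.g. data = 'datasets/fruit/dataset.yaml' gives parts[-2] == 'fruit'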
+ dataset_name = d['data'] + parts = dataset_name.split('/') + if len(parts) > 1: + dataset_name = parts[len(parts) - 2] + self.dataset_name = dataset_name + + self.action_state = ActionStates.Running + + # pass the resume parameter to the train method with the checkpoint + return self.train(resume = last_checkpoint) + + + def train(self, **kwargs) -> bool: + """ Does the call to train the model """ + + opt = parse_opt(True) + for k, v in kwargs.items(): + setattr(opt, k, v) + + if not self.cancel_requested: + try: + self.training_start_time = datetime.now() + + train_main(opt, callbacks=self.custom_callbacks) + + duration = (datetime.now() - self.training_start_time).total_seconds() + time_spent = timedelta_format(timedelta(seconds=duration)) + + if self.cancel_requested: + return False + + self.action_state = ActionStates.Completed + self.action_message = f"Model '{self.model_name}' training completed in {time_spent}" + return True + + except Exception as e: + self.report_error(e, __file__, str(e)) + return False + + + # STATUS METHODS ----------------------------------------------------------- + + def get_status(self, data: RequestData) -> any: + """ + Returns the current status of the last started Action. + """ + + is_training = (self.current_action == Actions.TrainModel or \ + self.current_action == Actions.ResumeTrainModel) + is_creating_dataset = (self.current_action == Actions.CreateDataset) + + progress = self.progress.percent_done + training_progress = progress if is_training else 0 + dataset_progress = progress if is_creating_dataset else 0 + + if self.current_action == Actions.Idle: + # TODO: Think of a message that relays the idea that either the module + # just started and is ready, or the module was restarted and is ready + # for you to do things like resume training. + # self.action_message = "This module has been restarted." + self.action_message = "Ready" + + elif not self.is_busy and self.worker_thread_aborted: + # the background_worker was aborted with prejudice. + self.action_state = ActionStates.Failed + self.action_message = f"{self.current_action.name} was Aborted." + + return { + "success": True, + "model_name": self.model_name, + "dataset_name": self.dataset_name, + "action": self.current_action.name, + "state": self.action_state.name, + "message": self.action_message, + "is_busy": self.is_busy, + "progress": progress, + } + + def get_model_info(self, data: RequestData) -> any: + """ Returns an object representing the current state of the model """ + + model_name = data.get_value("model_name") + if not model_name: + return { "success": False, "error": "Model Name not specified." } + + training_project_dir = os.path.join(self.module_path, self.training_dirname, + model_name) + if not os.path.exists(training_project_dir): + return { "success": False, "error": "Training was not started on this model." } + + model_path = os.path.join(training_project_dir, self.weights_dirname, "best.pt") + results_graph_path = os.path.join(training_project_dir, "results.png") + results_csv_path = os.path.join(training_project_dir, "results.csv") + pr_curve_path = os.path.join(training_project_dir, "PR_curve.png") + + model_size = 0 + if os.path.exists(model_path): + stats = os.stat(model_path) + model_size = round(stats.st_size / (1024 * 1000), 1) + + # Trim the root from this path. 
This may cause gnashing of teeth to those + # who want the full path, but we're going to have people posting screen + # shots of their window and so we have to remove the sensitive info + rootPrefix = "" # "<app>"; + display_model_path = model_path or "" + if display_model_path.startswith(self.server_root_path): + display_model_path = rootPrefix + display_model_path[len(self.server_root_path):] + + display_graph_path = results_graph_path or "" + if display_graph_path.startswith(self.server_root_path): + display_graph_path = rootPrefix + display_graph_path[len(self.server_root_path):] + + display_csv_path = results_csv_path or "" + if display_csv_path.startswith(self.server_root_path): + display_csv_path = rootPrefix + display_csv_path[len(self.server_root_path):] + + display_curve_path = pr_curve_path or "" + if display_curve_path.startswith(self.server_root_path): + display_curve_path = rootPrefix + display_curve_path[len(self.server_root_path):] + + # Don't return graph image data if action==[TrainingModel, ResumeTrainingModel] + # and worker_thread is running and progress.value == 0 as the information + # is not yet valid. + # Reason: there is a gap between when training starts and the system has + # information about the current model. Until then, there may be information + # from a previous training of the Model. If this is called when not + # training, then we want to attempt to get the information as it currently + # exists + model_info_valid = not ( \ + (self.current_action in [Actions.TrainModel, Actions.ResumeTrainModel]) \ + and self.is_busy and self.progress.value == 0) + + model_created = model_info_valid and os.path.exists(results_graph_path) + + results_csv_exists = os.path.exists(results_csv_path) and model_info_valid + return_pr_curve = os.path.exists(pr_curve_path) and model_info_valid + return_results_graph = os.path.exists(results_graph_path) and model_info_valid + + return { + "success": True, + "training_dir": training_project_dir, + "model_created": model_created, + + "results_graph_path": display_graph_path, + "results_graph_image": RequestData.encode_file_contents(results_graph_path) if return_results_graph else "", + + "pr_curve_path": display_curve_path, + "pr_curve_image": RequestData.encode_file_contents(pr_curve_path) if return_pr_curve else "", + + "results_csv_path": display_csv_path, + "results_csv_file": RequestData.encode_file_contents(results_csv_path) if results_csv_exists else "", + + "model_size": model_size, + "model_path": display_model_path, + # "model_file": RequestData.encode_file_contents(model_path), # This could be HUGE. + + # To have this model_file automatically downloaded in the browser we + # could do something like: + # + # let file = new File(model_file, `{model_name}.pt``, {type: "application/octet-stream"}); + # let downloadUrl = window.URL.createObjectURL(model_file); + # let link = document.createElement('a'); + # link.style = 'display:none'; + # link.href = downloadUrl; + # link.download = filename; + # link.click(); + # window.URL.revokeObjectURL(downloadUrl); + # + # However, we should be providing sensible means to use the model from + # the UI itself rather than asking users to download / upload themselves. 
+        }
+
+    def get_dataset_info(self, data: RequestData) -> any:
+        """ Returns an object representing the current state of the dataset """
+
+        # Already imported, so these won't do any database setup (hopefully),
+        # but we need to 'import' again to get access to the namespace
+        import fiftyone as fo
+        import fiftyone.zoo as foz
+        import fiftyone.utils.openimages as fouo
+
+        dataset_name = data.get_value("dataset_name")
+        if not dataset_name:
+            return { "success": False, "error": "Dataset name not specified." }
+
+        dataset_path = os.path.join(self.module_path, self.datasets_dirname, dataset_name)
+        if not os.path.exists(dataset_path):
+            return { "success": False, "error": "No dataset exists with this name." }
+
+        dataset_size    = get_folder_size(dataset_path)
+        dataset_created = fo.dataset_exists(dataset_name)
+        # dataset_created = dataset_size > 0
+
+        # Trim the root from this path. This may cause gnashing of teeth to those
+        # who want the full path, but we're going to have people posting screen
+        # shots of their window and so we have to remove the sensitive info
+        rootPrefix = "" # "<app>";
+        display_dataset_path = dataset_path or ""
+        if display_dataset_path.startswith(self.server_root_path):
+            display_dataset_path = rootPrefix + display_dataset_path[len(self.server_root_path):]
+
+        return {
+            "success":         True,
+            "training_dir":    dataset_path,
+            "dataset_created": dataset_created,
+            "dataset_size":    round(dataset_size / (1024 * 1000), 1),
+            "dataset_path":    display_dataset_path,
+        }
+
+    def list_classes(self, data: RequestData) -> any:
+        return {
+            "success": True,
+            "classes": self.available_classes
+        }
+
+
+    # UTILITY METHODS ----------------------------------------------------------
+
+    def check_memory(self) -> bool:
+        """ Check if we have enough memory, raises an error if not enough """
+
+        if self.required_MB:
+            available_MB = psutil.virtual_memory().available / (1024 * 1000)
+            if available_MB < self.required_MB:
+                raise MemoryError(f"Need {self.required_MB}Mb, only {round(available_MB,0)}Mb available")
+
+
+    def normalize_classlist(self, classes: List[str]) -> List[str]:
+        """
+        This method converts a list of classes to the normalized values used by
+        Open Images. Class names are case sensitive. If a class can not be found,
+        then an Exception is Raised to quickly abort the operation and report
+        the error to the user so that they can correct the mistake.
+        """
+
+        if not classes:
+            raise Exception("The list of class names is empty.")
+
+        # create the lookup if required.
+        if not self.available_classes:
+            # Already imported, so these won't do any database setup (hopefully),
+            # but we need to 'import' again to get access to the namespace
+            import fiftyone.utils.openimages as fouo
+            self.available_classes = fouo.get_classes()
+
+        if not self.available_classes_lower:
+            self.available_classes_lower = [class_name.lower() for class_name in self.available_classes]
+
+        # TODO: Rework this to use a dictionary keyed by class.lower()
+
+        classes_lower = [class_name.lower() for class_name in classes]
+        found_classes = []
+        for class_lower in classes_lower:
+            try:
+                idx = self.available_classes_lower.index(class_lower)
+                found_classes.append(self.available_classes[idx])
+            except ValueError:
+                raise Exception(f"Cannot find class {class_lower} in available classes.")
+
+        return found_classes
+
+
+    def init_fiftyone(self):
+
+        # This module is reloaded by spawn.py inside numpy. There's some
+        # processing we need to do to import fiftyone, so let's do this only
+        # when we're actually running the code, not each time we import this
+        # module
+
+        # We still need to import modules so we have access to the namespace,
+        # but once a module has been imported within a module, it's just accessed
+        # via a lookup, and doesn't actually go through all the init code.
+
+        # Keep things neat, and also attempt to mitigate permission issues with the
+        # fiftyone mongodb by having it all sit under the current module's folder
+        fiftyone_dirname = ModuleOptions.getEnvVariable("FIFTYONE_DATABASE_DIRNAME", "fiftyone")
+        fiftyone_path    = os.path.normpath(os.path.join(ModuleOptions.module_path, fiftyone_dirname))
+        os.environ["FIFTYONE_DATABASE_DIR"] = fiftyone_path
+
+        # We'll import and fail quickly if needed
+        try:
+            import fiftyone.zoo as foz
+        except Exception as zoo_ex:
+            # Clear the problem for next time
+            shutil.rmtree(fiftyone_path)
+            print("Unable to import and initialise the fiftyone.zoo package: " + str(zoo_ex))
+            quit(1)
+
+        try:
+            import fiftyone as fo
+        except Exception as ex:
+            if 'fiftyone.core.service.DatabaseService failed to bind to port' in str(ex):
+                print("Failed to connect to mongoDB server. Possibly it was left in a bad state")
+            else:
+                print("Unable to import and initialise the fiftyone package: " + str(ex))
+            quit(1)
+
+        import fiftyone.utils.openimages as fouo
+
+        # configure FiftyOne
+        fo.config.default_ml_backend = "torch"
+        fo.config.dataset_zoo_dir    = os.path.join(self.module_path, self.zoo_dirname)
+        fo.config.show_progress_bars = False
+        fo.config.do_not_track       = True
+        self.available_classes       = fouo.get_classes()
+        self.available_classes_lower = None
+
+        print("*** FiftyOne imported successfully")
+
+
+if __name__ == "__main__":
+    YoloV5Trainer_adaptor().start_loop()
\ No newline at end of file
diff --git a/src/modules/TrainingYoloV5/TrainingYoloV5.pyproj b/src/modules/TrainingYoloV5/TrainingYoloV5.pyproj
new file mode 100644
index 00000000..4bb6c1d3
--- /dev/null
+++ b/src/modules/TrainingYoloV5/TrainingYoloV5.pyproj
@@ -0,0 +1,62 @@
+
+    Debug
+    2.0
+    2dfda382-189b-45d1-94d5-3004d1aeb73c
+    .
+    TrainingYoloV5.py
+
+    .
+    .
+    TrainingYoloV5
+    TrainingYoloV5
+    MSBuild|venv|$(MSBuildProjectFullPath)
+    False
+    Standard Python launcher
+    False
+    CPAI_MODULE_PARALLELISM=2
+YOLOv5_VERBOSE=false
+    True
+
+    true
+    false
+
+    true
+    false
+
+    venv
+    3.9
+    venv
+    scripts\python.exe
+    scripts\pythonw.exe
+    PYTHONPATH
+    X64
+
\ No newline at end of file
diff --git a/src/modules/TrainingYoloV5/install.bat b/src/modules/TrainingYoloV5/install.bat
new file mode 100644
index 00000000..f8a85ce1
--- /dev/null
+++ b/src/modules/TrainingYoloV5/install.bat
@@ -0,0 +1,128 @@
+:: Development mode setup script ::::::::::::::::::::::::::::::::::::::::::::::
+::
+:: YOLO Object Detection Model Training
+::
+:: This script is only called from ..\..\setup.bat
+
+@if "%1" NEQ "install" (
+    echo This script is only called from ..\..\setup.bat
+    @pause
+    @goto:eof
+)
+
+:: set verbosity=loud
+
+:: Install python and the required dependencies
+call "%sdkScriptsPath%\utils.bat" SetupPython 3.9 "Local"
+if errorlevel 1 exit /b 1
+
+REM We need to work around a boto / urllib error. Pre-install urllib.
+
+rem if "a" == "a" (   REM Comment out if this is causing issues
+    :: -- Start -- To be provided in the SDK scripts for use everywhere in the future
+    ::
+    :: To be set at start of install scripts
+    set pythonVersion=3.9
+    set installLocation=Local
+    ::
+    :: Variables provided to install scripts
+    ::
+    set pythonName=python!pythonVersion:.=!
+    if /i "!installLocation!" == "Local" (
+        set virtualEnv=!modulePath!\bin\!os!\!pythonName!\venv
+    ) else (
+        set virtualEnv=!runtimesPath!\bin\!os!\!pythonName!\venv
+    )
+    set venvPythonPath=!virtualEnv!\Scripts\python
+    set packagesPath=!virtualEnv!\Lib\site-packages
+    ::
+    REM call "%sdkScriptsPath%\utils.bat" WriteLine "pythonVersion  !pythonVersion!"  %color_info%
+    REM call "%sdkScriptsPath%\utils.bat" WriteLine "pythonName     !pythonName!"     %color_info%
+    REM call "%sdkScriptsPath%\utils.bat" WriteLine "location       !installLocation!" %color_info%
+    REM call "%sdkScriptsPath%\utils.bat" WriteLine "virtualEnv     = !virtualEnv!"     %color_info%
+    REM call "%sdkScriptsPath%\utils.bat" WriteLine "venvPythonPath = !venvPythonPath!" %color_info%
+    REM call "%sdkScriptsPath%\utils.bat" WriteLine "packagesPath   = !packagesPath!"   %color_info%
+    ::
+    ::
+    :: InstallSinglePythonPackage to be provided to install scripts
+    ::
+    :: (HACK due to botocore. See https://github.com/boto/botocore/issues/2926)
+    set package="urllib3<1.27,>=1.25.4"
+    set packageDesc=urllib3, the HTTP client for Python
+    :: (to be called via: call "%sdkScriptsPath%\utils.bat" InstallSinglePythonPackage !package! !packageDesc!)
+    ::
+    call "%sdkScriptsPath%\utils.bat" WriteLine "Installing !packageDesc!..."
+    "!venvPythonPath!" -m pip install !package! --target "!packagesPath!" !pipFlags!
+    call "%sdkScriptsPath%\utils.bat" WriteLine "Success" %color_success%
+    ::
+    :: -- End --
+rem )
+
+call "%sdkScriptsPath%\utils.bat" InstallPythonPackages 3.9 "%modulePath%" "Local"
+if errorlevel 1 exit /b 1
+
+REM This makes no sense. None. But: install these packages one by one to help work
+REM around the boto/urllib error
+REM set oneStepPIP=true - except this doesn't help
+call "%sdkScriptsPath%\utils.bat" WriteLine "Ignore the 'urllib' error below. This issue is discussed at" %color_info%
+call "%sdkScriptsPath%\utils.bat" WriteLine "https://github.com/boto/botocore/issues/2926. It will not" %color_info%
+call "%sdkScriptsPath%\utils.bat" WriteLine "affect this module. We're all good!" %color_info%
+
+call "%sdkScriptsPath%\utils.bat" InstallPythonPackages 3.9 "%absoluteAppRootDir%\SDK\Python" "Local"
+if errorlevel 1 exit /b 1
+
+:: Download the YOLO models and custom models and store in /assets
+rem call "%sdkScriptsPath%\utils.bat" GetFromServer "models-yolo5-pt.zip" "assets" "Downloading Standard YOLO models..."
+rem if errorlevel 1 exit /b 1
+
+:: -- Install script cheatsheet --
+::
+:: Variables available:
+::
+::  absoluteAppRootDir - the root path of the app (eg: C:\Program Files\CodeProject\AI\)
+::  sdkScriptsPath     - the path to the installation utility scripts (%rootPath%\src\SDK\Scripts)
+::  downloadPath       - the path to where downloads will be stored (%rootPath%\src\downloads)
+::  runtimesPath       - the path to the installed runtimes (%rootPath%\src\runtimes)
+::  modulesPath        - the path to all the AI modules (%rootPath%\src\modules)
+::  moduleDir          - the name of the directory containing this module
+::  modulePath         - the path to this module (%modulesPath%\%moduleDir%)
+::  os                 - "windows"
+::  architecture       - "x86_64" or "arm64"
+::  platform           - "windows" or "windows-arm64"
+::  systemName         - "Windows"
+::  verbosity          - quiet, info or loud. Use this to determine the noise level of output.
+::  forceOverwrite     - if true then ensure you force a re-download and re-copy of downloads.
+::                       GetFromServer will honour this value. Do it yourself for DownloadAndExtract
+::
+:: Methods available (call by 'call %sdkScriptsPath%\utils.bat ')
+::
+::  Write     text [foreground [background]] (eg call %sdkScriptsPath%\utils.bat WriteLine "Hi" "green")
+::  WriteLine text [foreground [background]]
+::
+::  GetFromServer filename moduleAssetDir message
+::     filename       - Name of the compressed archive to be downloaded
+::     moduleAssetDir - Name of folder in the module's directory where archive will be extracted
+::     message        - Message to display during download
+::
+::  DownloadAndExtract storageUrl filename downloadPath dirNameToSave message
+::     storageUrl    - Url that holds the compressed archive to Download
+::     filename      - Name of the compressed archive to be downloaded
+::     downloadPath  - Path to where the downloaded compressed archive should be downloaded
+::     dirNameToSave - name of directory, relative to downloadPath, where contents of archive
+::                     will be extracted and saved
+::     message       - Message to display during download
+::
+::  SetupPython Version [install-location]
+::     Version - version number of python to setup. 3.7 and 3.9 currently supported. A virtual
+::               environment will be created in the module's local folder if install-location is
+::               "Local", otherwise in %runtimesPath%/bin/windows/python/venv.
+::     install-location - [optional] "Local" or "Shared" (see above)
+::
+::  InstallSinglePythonPackage Package
+::     Package - package name/version and flags as per Python requirements.txt
+::               eg InstallSinglePythonPackage "torch>=1.7.0 -f https://download.pytorch.org/whl/torch_stable.html"
+::
+::  InstallPythonPackages Version requirements-file-directory [install-location]
+::     Version - version number, as per SetupPython
+::     requirements-file-directory - directory containing the requirements.txt file
+::     install-location - [optional] "Local" (installed in the module's local folder) or
+::                        "Shared" (installed in the shared runtimes/bin directory)
diff --git a/src/modules/TrainingYoloV5/install.sh b/src/modules/TrainingYoloV5/install.sh
new file mode 100644
index 00000000..3b270593
--- /dev/null
+++ b/src/modules/TrainingYoloV5/install.sh
@@ -0,0 +1,165 @@
+# Development mode setup script ::::::::::::::::::::::::::::::::::::::::::::::
+#
+# YOLO Object Detection Model Training
+#
+# This script is called from the TrainingYoloV5 directory using:
+#
+#    bash ../../setup.sh
+#
+# The setup.sh script will find this install.sh file and execute it.
+
+if [ "$1" != "install" ]; then
+    read -t 3 -p "This script is only called from: bash ../../setup.sh"
+    echo
+    exit 1
+fi
+
+verbosity="info"
+
+location="Local"
+pythonVersion=3.8
+
+oneStepPIP="false"
+
+pythonName="python${pythonVersion/./}"
+if [ "$location" == "Local" ]; then
+    # echo "Setting up Training venv locally"
+    virtualEnv="${modulePath}/bin/${os}/${pythonName}/venv"
+else
+    virtualEnv="${runtimesPath}/bin/${os}/${pythonName}/venv"
+fi
+pythonCmd="${virtualEnv}/bin/python${pythonVersion}"
+packagesPath="${virtualEnv}/lib/python${pythonVersion}/site-packages/"
+
+
+# cuDNN needed for linux, but already installed in Docker
+if [ "$hasCUDA" == "true" ] && [ "$inDocker" != "true" ] && [ "$os" == "linux" ]; then
+    writeLine 'Installing nvidia-cudnn...'
+    sudo apt install nvidia-cudnn -y >/dev/null 2>/dev/null &
+    spin $!
+    writeLine "Done" "$color_success"
+fi
+
+# Install python and the required dependencies. If we find torch then assume it's all there
+setupPython $pythonVersion "$location"
+if [ $? -ne 0 ]; then quit 1; fi
+
+if [ "$os" == "linux" ]; then
+
+    # ensure libcurl4 is present
+    write 'Ensuring libcurl4 present...'
+    libcurl4_present=$(dpkg-query -W --showformat='${Status}\n' libcurl4|grep "install ok installed")
+    if [ "${libcurl4_present}" == "" ]; then
+        sudo apt install libcurl4 -y >/dev/null &
+        spin $!
+    fi
+    writeLine "Done" $color_success
+
+    # fiftyone on linux hardwires an ancient version of mongod that depends on the ancient
+    # libssl1.1. This is 2 major versions old. Well done.
+    if [ ! -L /usr/lib/libcrypto.so.1.1 ] && [ ! -f /usr/lib/libcrypto.so.1.1 ]; then
+
+        write 'Downloading ancient SSL libraries for ancient MongoDB...'
+        sudo wget http://security.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2.19_amd64.deb \
+             -O libssl1.1_1.1.1f-1ubuntu2.19_amd64.deb >/dev/null
+        sudo dpkg -i libssl1.1_1.1.1f-1ubuntu2.19_amd64.deb >/dev/null
+        sudo rm libssl1.1_1.1.1f-1ubuntu2.19_amd64.deb >/dev/null
+        writeLine "Done" $color_success
+
+        write 'Installing ancient SSL libraries for ancient MongoDB...'
+
+        # Install
+        if [ "${inDocker}" == "true" ]; then
+            sudo apt update && sudo apt install libssl1.1 -y >/dev/null
+        else
+            sudo apt update && sudo apt install libssl1.1 -y >/dev/null
+        fi
+
+        # Create symlinks
+        if [ ! -e /usr/lib/libcrypto.so.1.1 ]; then
+            # Add link at /usr/lib/libcrypto.so.1.1 that points to /lib/x86_64-linux-gnu
+            sudo ln -s /lib/x86_64-linux-gnu/libcrypto.so.1.1 /usr/lib/libcrypto.so.1.1 >/dev/null
+        fi
+        if [ ! -e /usr/lib/libssl.so.1.1 ]; then
+            # Add link at /usr/lib/libssl.so.1.1 that points to /lib/x86_64-linux-gnu
+            sudo ln -s /lib/x86_64-linux-gnu/libssl.so.1.1 /usr/lib/libssl.so.1.1 >/dev/null
+        fi
+        writeLine "Done" $color_success
+
+    fi
+
+    # https://docs.voxel51.com/getting_started/troubleshooting.html#database-exits
+    ulimit -n 64000
+fi
+
+# PyTorch-DirectML not working for this module
+# if [ "$hasCUDA" != "true" ] && [ "$os" == "linux" ]; then
+#    writeLine 'Installing PyTorch-DirectML...'
+#    "${pythonCmd}" -m pip install torch-directml --target "${packagesPath}"
+# fi
+
+installPythonPackages $pythonVersion "$modulePath" "$location"
+if [ $? -ne 0 ]; then quit 1; fi
+installPythonPackages $pythonVersion "${absoluteAppRootDir}/SDK/Python" "$location"
+if [ $? -ne 0 ]; then quit 1; fi
+
+# Download the models and store in /assets and /custom-models (already in place in docker)
+getFromServer "models-yolo5-pt.zip" "assets" "Downloading Standard YOLO models..."
+if [ $? -ne 0 ]; then quit 1; fi
+
+
+# -- Install script cheatsheet --
+#
+# Variables available:
+#
+#  absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI)
+#  sdkScriptsPath  - the path to the installation utility scripts ($rootPath/SDK/Scripts)
+#  downloadPath    - the path to where downloads will be stored ($sdkScriptsPath/downloads)
+#  runtimesPath    - the path to the installed runtimes ($rootPath/src/runtimes)
+#  modulesPath     - the path to all the AI modules ($rootPath/src/modules)
+#  moduleDir       - the name of the directory containing this module
+#  modulePath      - the path to this module ($modulesPath/$moduleDir)
+#  os              - "linux" or "macos"
+#  architecture    - "x86_64" or "arm64"
+#  platform        - "linux", "linux-arm64", "macos" or "macos-arm64"
+#  systemName      - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi"
+#                    "Jetson" or "Docker"
+#  verbosity       - quiet, info or loud. Use this to determine the noise level of output.
+#  forceOverwrite  - if true then ensure you force a re-download and re-copy of downloads.
+#                    getFromServer will honour this value. Do it yourself for downloadAndExtract
+#
+# Methods available
+#
+#  write     text [foreground [background]] (eg write "Hi" "green")
+#  writeLine text [foreground [background]]
+#  Download  storageUrl downloadPath filename dirNameToSave message
+#     storageUrl    - Url that holds the compressed archive to Download
+#     downloadPath  - Path to where the downloaded compressed archive should be downloaded
+#     filename      - Name of the compressed archive to be downloaded
+#     dirNameToSave - name of directory, relative to downloadPath, where contents of archive
+#                     will be extracted and saved
+#
+#  getFromServer filename moduleAssetDir message
+#     filename       - Name of the compressed archive to be downloaded
+#     moduleAssetDir - Name of folder in module's directory where archive will be extracted
+#     message        - Message to display during download
+#
+#  downloadAndExtract storageUrl filename downloadPath dirNameToSave message
+#     storageUrl    - Url that holds the compressed archive to Download
+#     filename      - Name of the compressed archive to be downloaded
+#     downloadPath  - Path to where the downloaded compressed archive should be downloaded
+#     dirNameToSave - name of directory, relative to downloadPath, where contents of archive
+#                     will be extracted and saved
+#     message       - Message to display during download
+#
+#  setupPython Version [install-location]
+#     Version - version number of python to setup. 3.8 and 3.9 currently supported. A virtual
+#               environment will be created in the module's local folder if install-location is
+#               "Local", otherwise in $runtimesPath/bin/$platform/python/venv.
+# install-location - [optional] "Local" or "Shared" (see above) +# +# installPythonPackages Version requirements-file-directory +# Version - version number, as per SetupPython +# requirements-file-directory - directory containing the requirements.txt file +# install-location - [optional] "Local" (installed in the module's local venv) or +# "Shared" (installed in the shared $runtimesPath/bin venv folder) \ No newline at end of file diff --git a/src/modules/TrainingYoloV5/modulesettings.json b/src/modules/TrainingYoloV5/modulesettings.json new file mode 100644 index 00000000..46a993d4 --- /dev/null +++ b/src/modules/TrainingYoloV5/modulesettings.json @@ -0,0 +1,349 @@ +{ + "Modules": { + + "TrainingYoloV5": { + "Name": "Training for YoloV5 6.2", + "Version": "1.2", + + // Publishing info + "Description": "Train custom models for YOLOv5 v6.2 with support for CPUs, CUDA enabled GPUs, and Apple Silicon.", + "Platforms": [ "all" ], + "License": "GPL-3.0", + "LicenseUrl": "https://opensource.org/licenses/GPL-3.0", + + // Which server version is compatible with each version of this module. + "ModuleReleases": [ + { "ModuleVersion": "1.0", "ServerVersionRange": [ "2.1.10", "" ], "ReleaseDate": "2022-08-02" }, + { "ModuleVersion": "1.1", "ServerVersionRange": [ "2.1.11", "" ], "ReleaseDate": "2023-08-12", "ReleaseNotes": "Added 'patience', 'workers' as parameters" }, + { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1.11", "" ], "ReleaseDate": "2023-08-17", "ReleaseNotes": "Bug fix" } + ], + + // Launch instructions + "AutoStart": true, + "FilePath": "TrainingYoloV5.py", + "Runtime": "python38", + "RuntimeLocation": "Local", // Can be Local or Shared. If Local, update install.* files. + + // These are all optional. Defaults are usually fine + "RequiredMb": 5000, // ~5Gb needed in Linux + "SupportGPU": true, + "AcceleratorDeviceName": null, // = default + "Parallelism": 2, // 0 = Default (number of CPUs - 1) + "HalfPrecision": "enable", // "Force", "Enable", "Disable": whether to force on, allow, or disable half-precision ops + "PostStartPauseSecs": 1, // 1 if using GPU, 0 for CPU + + "Queue": "trainingyolov5_queue", // default is lower(modulename) + "_queue" + + "EnvironmentVariables": { + "YOLOv5_AUTOINSTALL": "false", + "YOLOv5_VERBOSE": "false", + + "YOLO_DATASETS_DIRNAME": "datasets", + "YOLO_TRAINING_DIRNAME": "train", // compatible with tutorials and default + "YOLO_WEIGHTS_DIRNAME": "weights", + "YOLO_MODELS_DIRNAME": "assets", + "YOLO_DATASET_ZOO_DIRNAME": "zoo", + "FIFTYONE_DATABASE_DIRNAME": "fiftyone" + }, + + "RouteMaps": [ + { + "Name": "Create Custom Dataset", + "Path": "train/create_dataset", + "Method": "POST", + "Command": "create_dataset", + "Description": "Create a custom dataset from the Open Images repository.", + "Inputs": [ + { + "Name": "name", + "Type": "String", + "Description": "The name of the model." + }, + { + "Name": "classes", + "Type": "String", + "Description": "A comma delimited list of classes to include in the dataset." + }, + { + "Name": "num_images", + "Type": "Integer", + "Description": "The max number of images to include for each class. Default 100.", + "DefaultValue": 100, + "MinValue": 100, + "MaxValue": 10000 + } + ], + "Outputs": [ + { + "Name": "success", + "Type": "Boolean", + "Description": "True if training started." 
+                    }
+                ]
+            },
+
+            {
+                "Name": "Train Custom Model (YOLOv5 6.2)",
+                "Path": "train/train_model",
+                "Method": "POST",
+                "Command": "train_model",
+                "Description": "Create a custom model from a custom dataset.",
+                "Inputs": [
+                    {
+                        "Name": "name",
+                        "Type": "String",
+                        "Description": "The name of the model."
+                    },
+                    {
+                        "Name": "dataset",
+                        "Type": "String",
+                        "Description": "The name of the dataset."
+                    },
+                    {
+                        "Name": "num_epochs",
+                        "Type": "Integer",
+                        "Description": "The number of epochs for which to train the model. Default 100.",
+                        "DefaultValue": 100,
+                        "MinValue": 100,
+                        "MaxValue": 1000
+                    },
+                    {
+                        "Name": "device",
+                        "Type": "String",
+                        "Description": "None or 'cpu' or 0 or '0' or '0,1,2,3'. Default: ''",
+                        "DefaultValue": ""
+                    },
+                    {
+                        "Name": "batch",
+                        "Type": "Integer",
+                        "Description": "The batch size. Default: 8",
+                        "DefaultValue": 8,
+                        "MinValue": 1,
+                        "MaxValue": 64
+                    },
+                    {
+                        "Name": "freeze",
+                        "Type": "Integer",
+                        "Description": "The layers to freeze, 0-None, 10-Backbone, 24-All",
+                        "DefaultValue": 0,
+                        "MinValue": 0,
+                        "MaxValue": 24
+                    },
+                    {
+                        "Name": "hyp",
+                        "Type": "Integer",
+                        "Description": "Hyper-Parameters: 0-finetune (VOC), 1-scratch low, 2-scratch medium, 3-scratch high",
+                        "DefaultValue": 0,
+                        "MinValue": 0,
+                        "MaxValue": 3
+                    }
+                ],
+                "Outputs": [
+                    {
+                        "Name": "success",
+                        "Type": "Boolean",
+                        "Description": "True if training started."
+                    }
+                ]
+            },
+
+            {
+                "Name": "Training Status (YOLOv5 6.2)",
+                "Path": "train/status",
+                "Method": "POST",
+                "Command": "status",
+                "Description": "Gets the training status",
+                "Inputs": [
+                ],
+                "Outputs": [
+                    {
+                        "Name": "success",
+                        "Type": "Boolean",
+                        "Description": "True if successful."
+                    },
+                    {
+                        "Name": "model_name",
+                        "Type": "String",
+                        "Description": "The name of the model last trained or being trained."
+                    },
+                    {
+                        "Name": "dataset_name",
+                        "Type": "String",
+                        "Description": "The name of the dataset used."
+                    },
+                    // Idle - initial startup state
+                    // CreateDataset
+                    // TrainModel
+                    // ResumeTrainModel
+                    // other actions to be determined
+                    {
+                        "Name": "action",
+                        "Type": "String",
+                        "Description": "The current action."
+                    },
+                    // Initializing
+                    // Running
+                    // Completed
+                    // Cancelling
+                    // Cancelled
+                    // Failed
+                    {
+                        "Name": "state",
+                        "Type": "String",
+                        "Description": "The current state of the action being processed."
+                    },
+                    {
+                        "Name": "message",
+                        "Type": "String",
+                        "Description": "Any message, typically an error, to display to the user."
+                    },
+                    {
+                        "Name": "progress",
+                        "Type": "float",
+                        "Description": "The percentage completion of the current state."
+                    }
+                ]
+            },
+
+            {
+                "Name": "Cancel Dataset or Model creation",
+                "Path": "train/cancel",
+                "Method": "POST",
+                "Command": "cancel",
+                "Description": "Cancel the creation of a Model or Dataset.",
+                "Inputs": [
+                ],
+                "Outputs": [
+                    {
+                        "Name": "success",
+                        "Type": "Boolean",
+                        "Description": "True if successful."
+                    }
+                ]
+            },
+
+            {
+                "Name": "Resume Training Model",
+                "Path": "train/resume_training",
+                "Method": "POST",
+                "Command": "resume_training",
+                "Description": "Resume training of a model.",
+                "Inputs": [
+                    {
+                        "Name": "model_name",
+                        "Type": "String",
+                        "Description": "The name of the model."
+                    }
+                ],
+                "Outputs": [
+                    {
+                        "Name": "success",
+                        "Type": "Boolean",
+                        "Description": "True if successful."
+                    }
+                ]
+            },
+
+            {
+                "Name": "Get Model information (YOLOv5 6.2)",
+                "Path": "train/model_info",
+                "Method": "POST",
+                "Command": "model_info",
+                "Description": "Gets info about the model.",
+                "Inputs": [
+                    {
+                        "Name": "model_name",
+                        "Type": "String",
+                        "Description": "The name of the model."
+            }
+          ],
+          "Outputs": [
+            {
+              "Name": "success",
+              "Type": "Boolean",
+              "Description": "True if successful."
+            },
+            {
+              "Name": "model_name",
+              "Type": "String",
+              "Description": "The name of the model."
+            },
+            {
+              "Name": "complete",
+              "Type": "Boolean",
+              "Description": "True if the training was completed; training can be resumed if not."
+            },
+            {
+              "Name": "training_dir",
+              "Type": "String",
+              "Description": "The training directory containing the custom model file and the training results."
+            },
+            {
+              "Name": "model_path",
+              "Type": "String",
+              "Description": "The path to the best custom model file."
+            },
+            {
+              "Name": "results_graph_path",
+              "Type": "String",
+              "Description": "The path to the results.png file if it exists."
+            },
+            {
+              "Name": "results_csv_path",
+              "Type": "String",
+              "Description": "The path to the results.csv file if it exists."
+            },
+            {
+              "Name": "pr_curve_path",
+              "Type": "String",
+              "Description": "The path to the PR_curve.png file if it exists."
+            },
+            {
+              "Name": "results_graph_image",
+              "Type": "Base64ImageData",
+              "Description": "The base64 encoded image of the result graphs."
+            },
+            {
+              "Name": "pr_curve_image",
+              "Type": "Base64ImageData",
+              "Description": "The base64 encoded image of the PR Curve graph."
+            },
+            {
+              "Name": "results_csv_file",
+              "Type": "Base64ImageData",
+              "Description": "The base64 encoded data for the results.csv file."
+            }
+          ]
+        },
+
+        {
+          "Name": "Get Dataset information (YOLOv5 6.2)",
+          "Path": "train/dataset_info",
+          "Method": "POST",
+          "Command": "dataset_info",
+          "Description": "Gets info about the dataset.",
+          "Inputs": [
+            {
+              "Name": "dataset_name",
+              "Type": "String",
+              "Description": "The name of the dataset."
+            }
+          ],
+          "Outputs": [
+            {
+              "Name": "success",
+              "Type": "Boolean",
+              "Description": "True if successful."
+            },
+            {
+              "Name": "complete",
+              "Type": "Boolean",
+              "Description": "True if the dataset creation was completed; it can be restarted if not."
+            }
+            // other info TBD
+          ]
+        }
+      ]
+    }
+  }
+}
\ No newline at end of file
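As context for the route maps above, a client would drive this module over HTTP roughly as follows. A minimal sketch only: the host, port, and /v1 route prefix are assumptions about the server's defaults and are not stated in this diff.

    import time
    import requests  # third-party HTTP client, assumed available

    BASE = "http://localhost:32168/v1"  # assumed server address and route prefix

    # Build a dataset from Open Images ("Create Custom Dataset" above)
    r = requests.post(f"{BASE}/train/create_dataset",
                      data={"name": "my_dataset", "classes": "Dog,Cat", "num_images": 100})
    print(r.json())  # {"success": true} once dataset creation has started

    # Poll "Training Status" until the dataset build finishes
    while True:
        status = requests.post(f"{BASE}/train/status").json()
        print(status.get("action"), status.get("state"), status.get("progress"))
        if status.get("state") in ("Completed", "Failed", "Cancelled"):
            break
        time.sleep(10)

    # Then train a model against it ("Train Custom Model" above)
    r = requests.post(f"{BASE}/train/train_model",
                      data={"name": "my_model", "dataset": "my_dataset",
                            "num_epochs": 100, "batch": 8})
    print(r.json())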
diff --git a/src/modules/TrainingYoloV5/modulesettings.macos.json b/src/modules/TrainingYoloV5/modulesettings.macos.json
new file mode 100644
index 00000000..7353e0fb
--- /dev/null
+++ b/src/modules/TrainingYoloV5/modulesettings.macos.json
@@ -0,0 +1,7 @@
+{
+  "Modules": {
+    "TrainingYoloV5": {
+      "SupportGPU": false // https://github.com/ultralytics/yolov5/issues/11235
+    }
+  }
+}
diff --git a/src/modules/TrainingYoloV5/modulesettings.windows.json b/src/modules/TrainingYoloV5/modulesettings.windows.json
new file mode 100644
index 00000000..b013cbe3
--- /dev/null
+++ b/src/modules/TrainingYoloV5/modulesettings.windows.json
@@ -0,0 +1,8 @@
+{
+  "Modules": {
+    "TrainingYoloV5": {
+      "Runtime": "python39",
+      "RequiredMb": 6000 // ~6Gb needed in Windows
+    }
+  }
+}
diff --git a/src/modules/TrainingYoloV5/package.bat b/src/modules/TrainingYoloV5/package.bat
new file mode 100644
index 00000000..ac75fb69
--- /dev/null
+++ b/src/modules/TrainingYoloV5/package.bat
@@ -0,0 +1,8 @@
+@Echo off
+REM Module Packaging script. To be called from create_packages.bat
+
+set moduleId=%~1
+set version=%~2
+
+tar -caf %moduleId%-%version%.zip --exclude=__pycache__ --exclude=*.development.* --exclude=*.log ^
+    *.py modulesettings.* requirements.* install.sh install.bat
diff --git a/src/modules/TrainingYoloV5/requirements.cuda.txt b/src/modules/TrainingYoloV5/requirements.cuda.txt
new file mode 100644
index 00000000..903fed40
--- /dev/null
+++ b/src/modules/TrainingYoloV5/requirements.cuda.txt
@@ -0,0 +1,52 @@
+# YOLOv5 requirements
+# Usage: pip install -r requirements.txt
+
+# Base ----------------------------------------
+matplotlib>=3.2.2      # Installing matplotlib, the python plotting package
+numpy>=1.18.5          # Installing NumPy, a package for scientific computing
+opencv-python>=4.1.1   # Installing OpenCV, the Open source Computer Vision library
+Pillow<10.0.0          # Installing Pillow, a Python Image Library
+PyYAML>=5.3.1          # Installing PyYAML, a library for reading configuration files
+requests>=2.23.0       # Installing requests, the HTTP library
+scipy>=1.4.1           # Installing SciPy, a library for mathematics, science, and engineering
+--find-links https://download.pytorch.org/whl/cu117
+torch>=1.7.0           # Installing Torch, for Tensor computation and Deep neural networks
+torchvision>=0.8.1     # Installing TorchVision, for Computer Vision based AI
+tqdm>=4.64.0           # Installing tqdm, the Fast, Extensible Progress Meter
+
+# This is to fix an issue at https://github.com/ultralytics/yolov5/issues/8012
+protobuf<=3.20.1       # Installing protobuf, extensible mechanisms for serializing structured data
+
+# Logging -------------------------------------
+tensorboard>=2.4.1     # Installing tensorboard, a tool to let you watch Tensors Flow
+
+# wandb
+# clearml
+
+# Plotting ------------------------------------
+pandas>=1.1.4          # Installing Pandas, a data analysis / data manipulation tool
+seaborn>=0.11.0        # Installing Seaborn, a data visualization library based on matplotlib
+
+# Export --------------------------------------
+# coremltools>=5.2       # CoreML export
+# onnx>=1.9.0            # ONNX export
+# onnx-simplifier>=0.4.1 # ONNX simplifier
+# nvidia-pyindex         # TensorRT export
+# nvidia-tensorrt        # TensorRT export
+# scikit-learn==0.19.2   # CoreML quantization
+# tensorflow>=2.4.1      # TFLite export (or tensorflow-cpu, tensorflow-aarch64)
+# tensorflowjs>=3.9.0    # TF.js export
+# openvino-dev           # OpenVINO export
+
+# Extras --------------------------------------
+ipython                # Installing ipython, for interactive notebooks
+psutil                 # Installing psutil, a tool to check system utilization
+thop>=0.1.1            # Installing thop, a tool to count the FLOPs of a PyTorch model
+# albumentations>=1.0.3
+# pycocotools>=2.0       # COCO mAP
+# roboflow
+
+yolov5==6.2.3          # Installing the Ultralytics YoloV5 package for object detection in images
+fiftyone               # Installing fiftyone, for building datasets and computer vision models
+
+# last line left blank
\ No newline at end of file
diff --git a/src/modules/TrainingYoloV5/requirements.macos.txt b/src/modules/TrainingYoloV5/requirements.macos.txt
new file mode 100644
index 00000000..d9157c7d
--- /dev/null
+++ b/src/modules/TrainingYoloV5/requirements.macos.txt
@@ -0,0 +1,53 @@
+# YOLOv5 requirements
+# Usage: pip install -r requirements.txt
+
+# Base ----------------------------------------
+numpy>=1.18.5          # Installing NumPy, a package for scientific computing
+opencv-python>=4.1.1   # Installing OpenCV, the Open source Computer Vision library
+Pillow<10.0.0          # Installing Pillow, a Python Image Library
+PyYAML>=5.3.1          # Installing PyYAML, a library for reading configuration files
+requests>=2.23.0       # Installing requests, the HTTP library
+scipy>=1.4.1           # Installing SciPy, a library for mathematics, science, and engineering
+
+--index-url https://download.pytorch.org/whl/torch_stable.html
+torch>=1.7.0,<2.0      # Installing Torch, for Tensor computation and Deep neural networks
+torchvision>=0.8.1     # Installing TorchVision, for Computer Vision based AI
+tqdm>=4.64.0           # Installing tqdm, the Fast, Extensible Progress Meter
+
+# This is to fix an issue at https://github.com/ultralytics/yolov5/issues/8012
+protobuf<=3.20.1       # Installing protobuf, extensible mechanisms for serializing structured data
+
+# Logging -------------------------------------
+tensorboard>=2.4.1     # Installing tensorboard, a tool to let you watch Tensors Flow
+
+# wandb
+# clearml
+
+# Plotting ------------------------------------
+matplotlib>=3.2.2      # Installing matplotlib, the python plotting package
+pandas>=1.1.4          # Installing Pandas, a data analysis / data manipulation tool
+seaborn>=0.11.0        # Installing Seaborn, a data visualization library based on matplotlib
+
+# Export --------------------------------------
+# coremltools>=5.2       # CoreML export
+# onnx>=1.9.0            # ONNX export
+# onnx-simplifier>=0.4.1 # ONNX simplifier
+# nvidia-pyindex         # TensorRT export
+# nvidia-tensorrt        # TensorRT export
+# scikit-learn==0.19.2   # CoreML quantization
+# tensorflow>=2.4.1      # TFLite export (or tensorflow-cpu, tensorflow-aarch64)
+# tensorflowjs>=3.9.0    # TF.js export
+# openvino-dev           # OpenVINO export
+
+# Extras --------------------------------------
+ipython                # Installing ipython, for interactive notebooks
+psutil                 # Installing psutil, a tool to check system utilization
+thop>=0.1.1            # Installing thop, a tool to count the FLOPs of a PyTorch model
+# albumentations>=1.0.3
+# pycocotools>=2.0       # COCO mAP
+# roboflow
+
+yolov5==6.2.3          # Installing the Ultralytics YoloV5 package for object detection in images
+fiftyone               # Installing fiftyone, for building datasets and computer vision models
+
+# last line left blank
\ No newline at end of file
diff --git a/src/modules/TrainingYoloV5/requirements.txt b/src/modules/TrainingYoloV5/requirements.txt
new file mode 100644
index 00000000..8ddf7d6f
--- /dev/null
+++ b/src/modules/TrainingYoloV5/requirements.txt
@@ -0,0 +1,51 @@
+# YOLOv5 requirements
+# Usage: pip install -r requirements.txt
+
+# Base ----------------------------------------
+matplotlib>=3.2.2      # Installing matplotlib, the python plotting package
+numpy>=1.18.5          # Installing NumPy, a package for scientific computing
+opencv-python>=4.1.1   # Installing OpenCV, the Open source Computer Vision library
+Pillow<10.0.0          # Installing Pillow, a Python Image Library
+PyYAML>=5.3.1          # Installing PyYAML, a library for reading configuration files
+requests>=2.23.0       # Installing requests, the HTTP library
+scipy>=1.4.1           # Installing SciPy, a library for mathematics, science, and engineering
+--find-links https://download.pytorch.org/whl/torch_stable.html
+torch>=1.7.0           # Installing Torch, for Tensor computation and Deep neural networks
+torchvision>=0.8.1     # Installing TorchVision, for Computer Vision based AI
+tqdm>=4.64.0           # Installing tqdm, the Fast, Extensible Progress Meter
+
+# This is to fix an issue at https://github.com/ultralytics/yolov5/issues/8012
+protobuf<=3.20.1       # Installing protobuf, extensible mechanisms for serializing structured data
+
+# Logging -------------------------------------
+tensorboard>=2.4.1     # Installing tensorboard, a tool to let you watch Tensors Flow
+# wandb
+# clearml
+
+# Plotting ------------------------------------
+pandas>=1.1.4          # Installing Pandas, a data analysis / data manipulation tool
+seaborn>=0.11.0        # Installing Seaborn, a data visualization library based on matplotlib
+
+# Export --------------------------------------
+# coremltools>=5.2       # CoreML export
+# onnx>=1.9.0            # ONNX export
+# onnx-simplifier>=0.4.1 # ONNX simplifier
+# nvidia-pyindex         # TensorRT export
+# nvidia-tensorrt        # TensorRT export
+# scikit-learn==0.19.2   # CoreML quantization
+# tensorflow>=2.4.1      # TFLite export (or tensorflow-cpu, tensorflow-aarch64)
+# tensorflowjs>=3.9.0    # TF.js export
+# openvino-dev           # OpenVINO export
+
+# Extras --------------------------------------
+ipython                # Installing ipython, for interactive notebooks
+psutil                 # Installing psutil, a tool to check system utilization
+thop>=0.1.1            # Installing thop, a tool to count the FLOPs of a PyTorch model
+# albumentations>=1.0.3
+# pycocotools>=2.0       # COCO mAP
+# roboflow
+
+yolov5==6.2.3          # Installing the Ultralytics YoloV5 package for object detection in images
+fiftyone               # Installing fiftyone, for building datasets and computer vision models
+
+# last line left blank
\ No newline at end of file
diff --git a/src/modules/YOLOv5-3.1/detection.py b/src/modules/YOLOv5-3.1/detection.py
index de086c6f..6d850df4 100644
--- a/src/modules/YOLOv5-3.1/detection.py
+++ b/src/modules/YOLOv5-3.1/detection.py
@@ -125,7 +125,7 @@ def objectdetection(thread_name: str, delay: float):

         output = {
             "success": False,
-            "error": "error occured on the server"
+            "error": "error occurred on the server"
         }

     finally:
diff --git a/src/modules/YOLOv5-3.1/face.py b/src/modules/YOLOv5-3.1/face.py
index 2baaf575..17661e88 100644
--- a/src/modules/YOLOv5-3.1/face.py
+++ b/src/modules/YOLOv5-3.1/face.py
@@ -154,7 +154,7 @@ def face(thread_name, delay):
         print(err_trace, file=sys.stderr, flush=True)

         output = {
             "success": False,
-            "error": "error occured on the server"
+            "error": "error occurred on the server"
         }

     finally:
@@ -262,7 +262,7 @@ def face(thread_name, delay):

         output = {
             "success": False,
-            "error": "error occured on the server"
+            "error": "error occurred on the server"
         }

     finally:
@@ -440,7 +440,7 @@ def face(thread_name, delay):

         output = {
             "success": False,
-            "error": "error occured on the server"
+            "error": "error occurred on the server"
         }

     finally:
@@ -528,7 +528,7 @@ def face(thread_name, delay):

         output = {
             "success": False,
-            "error": "error occured on the server"
+            "error": "error occurred on the server"
         }

     finally:
diff --git a/src/modules/YOLOv5-3.1/install.bat b/src/modules/YOLOv5-3.1/install.bat
index 4db0ebd8..ed754273 100644
--- a/src/modules/YOLOv5-3.1/install.bat
+++ b/src/modules/YOLOv5-3.1/install.bat
@@ -53,6 +53,7 @@ call "%sdkScriptsPath%\utils.bat" GetFromServer "custom-models-yolo5-31-pt.zip"
::  os             - "windows"
::  architecture   - "x86_64" or "arm64"
::  platform       - "windows" or "windows-arm64"
+::  systemName     - "Windows"
::  verbosity      - quiet, info or loud. Use this to determines the noise level of output.
::  forceOverwrite - if true then ensure you force a re-download and re-copy of downloads.
::                   GetFromServer will honour this value. Do it yourself for DownloadAndExtract
diff --git a/src/modules/YOLOv5-3.1/install.sh b/src/modules/YOLOv5-3.1/install.sh
index 20b57d1d..a419023c 100644
--- a/src/modules/YOLOv5-3.1/install.sh
+++ b/src/modules/YOLOv5-3.1/install.sh
@@ -15,48 +15,15 @@ if [ "$1" != "install" ]; then
    exit 1
fi

-
-# *** IF YOU WISH TO USE GPU ON LINUX ***
-# Before you do anything you need to ensure CUDA is installed in Ubuntu.
-# These steps need to be done outside of our setup scripts
-
-message="
-*** IF YOU WISH TO USE GPU ON LINUX Please ensure you have CUDA installed ***
-# The steps are: (See https://chennima.github.io/cuda-gpu-setup-for-paddle-on-windows-wsl)
-
-sudo apt install libgomp1
-
-# Install CUDA
-
-sudo apt-key del 7fa2af80
-wget https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/cuda-wsl-ubuntu.pin
-sudo mv cuda-wsl-ubuntu.pin /etc/apt/preferences.d/cuda-repository-pin-600
-wget https://developer.download.nvidia.com/compute/cuda/11.7.0/local_installers/cuda-repo-wsl-ubuntu-11-7-local_11.7.0-1_amd64.deb
-sudo dpkg -i cuda-repo-wsl-ubuntu-11-7-local_11.7.0-1_amd64.deb
-
-sudo cp /var/cuda-repo-wsl-ubuntu-11-7-local/cuda-B81839D3-keyring.gpg /usr/share/keyrings/
-
-sudo apt-get update
-sudo apt-get -y install cuda
-
-# Now Install cuDNN
-
-sudo apt-get install zlib1g
-
-# => Go to https://developer.nvidia.com/cudnn, sign in / sign up, agree to terms
-# and download 'Local Installer for Linux x86_64 (Tar)'. This will download a
-# file similar to 'cudnn-linux-x86_64-8.4.1.50_cuda11.6-archive.tar.xz'
-#
-# In the downloads folder do:
-
-tar -xvf cudnn-linux-x86_64-8.4.1.50_cuda11.6-archive.tar.xz
-sudo cp cudnn-*-archive/include/cudnn*.h /usr/local/cuda/include
-sudo cp -P cudnn-*-archive/lib/libcudnn* /usr/local/cuda/lib64
-sudo chmod a+r /usr/local/cuda/include/cudnn*.h /usr/local/cuda/lib64/libcudnn*
-
-# and you'll be good to go"
-
-# print message
+# Ensure CUDA and cuDNN are installed. Note this is only for native Linux since
+# macOS no longer supports NVIDIA, WSL (Linux under Windows) uses the Windows
+# drivers, and docker images already contain the necessary SDKs and libraries
+if [ "$os" == "linux" ] && [ "$hasCUDA" == "true" ] && [ "${inDocker}" == "false" ] && \
+   [ "${systemName}" != "Jetson" ] && [ "${systemName}" != "Raspberry Pi" ] && \
+   [ "${systemName}" != "Orange Pi" ]; then
+    correctLineEndings "${sdkScriptsPath}/install_cuDNN.sh"
+    source "${sdkScriptsPath}/install_cuDNN.sh"
+fi

# Install python and the required dependencies.
setupPython 3.8 "Local"
@@ -68,7 +35,7 @@ if [ $? -ne 0 ]; then quit 1; fi

# Download the models and store in /assets and /custom-models
getFromServer "models-yolo5-31-pt.zip" "assets" "Downloading Standard YOLOv5 models..."
-if [ $? -ne 0 ]; then quit 1; fi
+# if [ $? -ne 0 ]; then quit 1; fi

getFromServer "custom-models-yolo5-31-pt.zip" "custom-models" "Downloading Custom YOLOv5 models..."

# Cleanup if you wish
@@ -80,7 +47,7 @@ getFromServer "custom-models-yolo5-31-pt.zip" "custom-models" "Downloading Custo
#  Variables available:
#
#  absoluteRootDir - the root path of the installation (eg: ~/CodeProject/AI)
-#  sdkScriptsPath  - the path to the installation utility scripts ($rootPath/Installers)
+#  sdkScriptsPath  - the path to the installation utility scripts ($rootPath/SDK/Scripts)
#  downloadPath    - the path to where downloads will be stored ($sdkScriptsPath/downloads)
#  runtimesPath    - the path to the installed runtimes ($rootPath/src/runtimes)
#  modulesPath     - the path to all the AI modules ($rootPath/src/modules)
@@ -89,6 +56,8 @@ getFromServer "custom-models-yolo5-31-pt.zip" "custom-models" "Downloading Custo
#  os              - "linux" or "macos"
#  architecture    - "x86_64" or "arm64"
#  platform        - "linux", "linux-arm64", "macos" or "macos-arm64"
+#  systemName      - General name for the system. "Linux", "macOS", "Raspberry Pi", "Orange Pi"
+#                    "Jetson" or "Docker"
#  verbosity       - quiet, info or loud. Use this to determines the noise level of output.
#  forceOverwrite  - if true then ensure you force a re-download and re-copy of downloads.
#                    getFromServer will honour this value. Do it yourself for downloadAndExtract
diff --git a/src/modules/YOLOv5-3.1/modulesettings.json b/src/modules/YOLOv5-3.1/modulesettings.json
index 3e59e818..29501b90 100644
--- a/src/modules/YOLOv5-3.1/modulesettings.json
+++ b/src/modules/YOLOv5-3.1/modulesettings.json
@@ -2,18 +2,19 @@
   "Modules": {
     "YOLOv5-3.1": {
       "Name": "Object Detection (YOLOv5 3.1)",
-      "Version": "1.2",
+      "Version": "1.3",

       // Publishing info
-      "Description": "The object detection module uses YOLO (You Only Look Once) to locate and classify the objects the models have been trained on. At this point there are 80 different types of objects that can be detected.",
+      "Description": "Provides Object Detection using YOLOv5 3.1 targeting CUDA 10.2/Torch 1.7 for older GPUs.",
       "Platforms": [ "windows", "linux", "linux-arm64", "macos" ], // macos-arm64 should use the YOLOv5 6.2
       "License": "GPL-3.0",
       "LicenseUrl": "https://opensource.org/licenses/GPL-3.0",

       // Which server version is compatible with each version of this module.
-      "VersionCompatibililty": [
+      "ModuleReleases": [
         { "ModuleVersion": "1.0", "ServerVersionRange": [ "1.0", "2.0.8" ], "ReleaseDate": "2022-11-01" },
-        { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-03-20" }
+        { "ModuleVersion": "1.2", "ServerVersionRange": [ "2.1", "2.1.6" ], "ReleaseDate": "2023-03-20", "ReleaseNotes": "Updated for CodeProject.AI Server 2.1" },
+        { "ModuleVersion": "1.3", "ServerVersionRange": [ "2.1", "" ], "ReleaseDate": "2023-05-17", "ReleaseNotes": "Updated module settings", "Importance": "Minor" }
       ],

       // Launch instructions
diff --git a/src/modules/YOLOv5-3.1/options.py b/src/modules/YOLOv5-3.1/options.py
index 1bfdffb4..6af05653 100644
--- a/src/modules/YOLOv5-3.1/options.py
+++ b/src/modules/YOLOv5-3.1/options.py
@@ -54,7 +54,7 @@ def __init__(self):

         # dump the important variables
         if self._show_env_variables:
-            print(f"APPDIR: {self.app_dir}")
-            print(f"MODEL_SIZE: {self.model_size}")
-            print(f"MODELS_DIR: {self.models_dir}")
+            print(f"Debug: APPDIR: {self.app_dir}")
+            print(f"Debug: MODEL_SIZE: {self.model_size}")
+            print(f"Debug: MODELS_DIR: {self.models_dir}")
diff --git a/src/modules/YOLOv5-3.1/process.py b/src/modules/YOLOv5-3.1/process.py
index 49066df6..05171cb2 100644
--- a/src/modules/YOLOv5-3.1/process.py
+++ b/src/modules/YOLOv5-3.1/process.py
@@ -55,10 +55,7 @@ def __init__(self, model_path: str, reso: int = 640, cuda: bool = False,
             print(f"GPU compute capability is {torch.cuda.get_device_capability()[0]}.{torch.cuda.get_device_capability()[1]}")

-            if half_precision == 'disable':
-                self.half = False
-            else:
-                self.half = half_precision == 'force' or torch.cuda.get_device_capability()[0] >= 6
+            self.half = half_precision != 'disable'

             if self.half:
                 print(f"Using half-precision for the device '{device_name}'")
diff --git a/src/modules/YOLOv5-3.1/requirements.linux.arm64.txt b/src/modules/YOLOv5-3.1/requirements.linux.arm64.txt
index 48ad1b1a..788a5b09 100644
--- a/src/modules/YOLOv5-3.1/requirements.linux.arm64.txt
+++ b/src/modules/YOLOv5-3.1/requirements.linux.arm64.txt
@@ -1,6 +1,6 @@
OpenCV-Python          # Installing OpenCV, the Open source Computer Vision library
Cython                 # Installing the Cython compiler for C extensions for the Python language.
-Pillow                 # Installing Pillow, a Python Image Library
+Pillow<10.0.0          # Installing Pillow, a Python Image Library
SciPy                  # Installing SciPy, a library for mathematics, science, and engineering
PyYAML                 # Installing PyYAML, a library for reading configuration files
diff --git a/src/modules/YOLOv5-3.1/requirements.linux.txt b/src/modules/YOLOv5-3.1/requirements.linux.txt
index cca86d35..739834da 100644
--- a/src/modules/YOLOv5-3.1/requirements.linux.txt
+++ b/src/modules/YOLOv5-3.1/requirements.linux.txt
@@ -11,7 +11,7 @@ torchvision==0.9.1     # Installing TorchVision, for Computer Vision based AI

OpenCV-Python          # Installing OpenCV, the Open source Computer Vision library
Cython                 # Installing the Cython compiler for C extensions for the Python language.
-Pillow                 # Installing Pillow, a Python Image Library
+Pillow<10.0.0          # Installing Pillow, a Python Image Library
SciPy                  # Installing SciPy, a library for mathematics, science, and engineering
PyYAML                 # Installing PyYAML, a library for reading configuration files
diff --git a/src/modules/YOLOv5-3.1/requirements.macos.arm64.txt b/src/modules/YOLOv5-3.1/requirements.macos.arm64.txt
index a1d7ded9..cd86a79a 100644
--- a/src/modules/YOLOv5-3.1/requirements.macos.arm64.txt
+++ b/src/modules/YOLOv5-3.1/requirements.macos.arm64.txt
@@ -5,7 +5,7 @@ Pandas                 # Installing Pandas, a data analysis / data manipulation tool
CoreMLTools            # Installing CoreMLTools, for working with .mlmodel format models

OpenCV-Python          # Installing OpenCV, the Open source Computer Vision library
-Pillow                 # Installing Pillow, a Python Image Library
+Pillow<10.0.0          # Installing Pillow, a Python Image Library
SciPy                  # Installing SciPy, a library for mathematics, science, and engineering
PyYAML                 # Installing PyYAML, a library for reading configuration files
diff --git a/src/modules/YOLOv5-3.1/requirements.macos.txt b/src/modules/YOLOv5-3.1/requirements.macos.txt
index dfc1111a..9ff07f16 100644
--- a/src/modules/YOLOv5-3.1/requirements.macos.txt
+++ b/src/modules/YOLOv5-3.1/requirements.macos.txt
@@ -1,6 +1,6 @@
OpenCV-Python          # Installing OpenCV, the Open source Computer Vision library
Cython                 # Installing the Cython compiler for C extensions for the Python language.
-Pillow                 # Installing Pillow, a Python Image Library
+Pillow<10.0.0          # Installing Pillow, a Python Image Library
SciPy                  # Installing SciPy, a library for mathematics, science, and engineering
PyYAML                 # Installing PyYAML, a library for reading configuration files
diff --git a/src/modules/YOLOv5-3.1/requirements.txt b/src/modules/YOLOv5-3.1/requirements.txt
index c4f8964f..1b81b022 100644
--- a/src/modules/YOLOv5-3.1/requirements.txt
+++ b/src/modules/YOLOv5-3.1/requirements.txt
@@ -1,6 +1,6 @@
OpenCV-Python          # Installing OpenCV, the Open source Computer Vision library
Cython                 # Installing the Cython compiler for C extensions for the Python language.
-Pillow                 # Installing Pillow, a Python Image Library
+Pillow<10.0.0          # Installing Pillow, a Python Image Library
SciPy                  # Installing SciPy, a library for mathematics, science, and engineering
PyYAML                 # Installing PyYAML, a library for reading configuration files
diff --git a/src/modules/YOLOv5-3.1/requirements.windows.cuda.txt b/src/modules/YOLOv5-3.1/requirements.windows.cuda.txt
index f542af63..6bcc0505 100644
--- a/src/modules/YOLOv5-3.1/requirements.windows.cuda.txt
+++ b/src/modules/YOLOv5-3.1/requirements.windows.cuda.txt
@@ -1,6 +1,6 @@
OpenCV-Python          # Installing OpenCV, the Open source Computer Vision library
Cython                 # Installing the Cython compiler for C extensions for the Python language.
-Pillow                 # Installing Pillow, a Python Image Library
+Pillow<10.0.0          # Installing Pillow, a Python Image Library
SciPy                  # Installing SciPy, a library for mathematics, science, and engineering
PyYAML                 # Installing PyYAML, a library for reading configuration files
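A note on the recurring Pillow<10.0.0 pin in these files: the diff itself gives no rationale, but the likely reason (an assumption, not stated anywhere in this change) is that Pillow 10.0 removed long-deprecated constants such as Image.ANTIALIAS that older YOLOv5 code paths still reference:

    from PIL import Image

    img = Image.new("RGB", (64, 64))
    # Fine on Pillow 9.x; raises AttributeError on Pillow >= 10.0, where the
    # deprecated constant was removed in favour of Image.Resampling.LANCZOS
    img = img.resize((32, 32), Image.ANTIALIAS)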
diff --git a/src/modules/YOLOv5-3.1/yolo_adapter.py b/src/modules/YOLOv5-3.1/yolo_adapter.py
index 9bf03bef..3e9b89ef 100644
--- a/src/modules/YOLOv5-3.1/yolo_adapter.py
+++ b/src/modules/YOLOv5-3.1/yolo_adapter.py
@@ -47,6 +47,9 @@ def initialise(self):
         elif self.opts.use_MPS:
             self.execution_provider = "MPS"

+        if self.opts.use_CUDA and self.half_precision == 'enable' and not self.hasTorchHalfPrecision:
+            self.half_precision = 'disable'
+
     def process(self, data: RequestData) -> JSON:
@@ -242,7 +245,7 @@ def do_detection(self, models_dir: str, model_name: str, resolution: int,

         except Exception as ex:
             self.report_error(ex, __file__)
-            return { "success": False, "error": "Error occured on the server" }
+            return { "success": False, "error": "Error occurred on the server" }

 if __name__ == "__main__":
diff --git a/src/setup.bat b/src/setup.bat
index 1c0e7165..5fe5b8d7 100644
--- a/src/setup.bat
+++ b/src/setup.bat
@@ -178,10 +178,15 @@ popd
set absoluteAppRootDir=!installerScriptsPath!

-:: Platform can define where things are located :::::::::::::::::::::::::::::::
+:: Helper vars for OS, Platform (see note below), and system name. systemName is
+:: a no-op here because nothing exciting happens on Windows. In the corresponding
+:: .sh setup files, systemName can be docker, Raspberry Pi, WSL - all sorts of fun
+:: things. It's here to just make switching between .bat and .sh scripts consistent

set os=windows
set platform=windows
+set systemName=Windows
+
:: This can be x86 (32-bit), AMD64 (Intel/AMD 64bit), ARM64 (Arm 64bit)
set architecture=%PROCESSOR_ARCHITECTURE%
@@ -208,6 +213,12 @@ if /i "!enableGPU!" == "true" (
)
if /i "!hasCUDA!" == "false" set supportCUDA=false

+set hasROCm=false
+if /i "!enableGPU!" == "true" (
+    where rocm-smi >nul 2>nul
+    if !errorlevel! EQU 0 set hasROCm=true
+)
+
:: The location of directories relative to the root of the solution directory
set runtimesPath=!absoluteAppRootDir!!runtimesDir!
@@ -249,7 +260,9 @@ if /i "%verbosity%" neq "quiet" (

:: Checks on GPU ability
+call "!sdkScriptsPath!\utils.bat" WriteLine "Checking GPU support" "White" "Blue" !lineWidth!
call "!sdkScriptsPath!\utils.bat" WriteLine ""
+
call "!sdkScriptsPath!\utils.bat" Write "CUDA Present..."
if /i "%hasCUDA%" == "true" (
    call "!sdkScriptsPath!\utils.bat" WriteLine "True" !color_success!
diff --git a/src/setup.sh b/src/setup.sh
index bfba25d6..a211e7ce 100644
--- a/src/setup.sh
+++ b/src/setup.sh
@@ -74,15 +74,6 @@ lineWidth=70
# shared area
allowSharedPythonInstallsForModules="true"

-# We can't do shared installs in Docker. They won't persist
-if [ "$DOTNET_RUNNING_IN_CONTAINER" == "true" ]; then
-    echo
-    echo "Hi Docker! We will disable shared python installs for downloaded modules"
-    echo
-    allowSharedPythonInstallsForModules="false";
-fi
-
-
# Debug flags for downloads and installs

# If files are already present, then don't overwrite if this is false
@@ -114,9 +105,12 @@ installerScriptsPath="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
# The location of large packages that need to be downloaded (eg an AWS S3 bucket name)
storageUrl='https://codeproject-ai.s3.ca-central-1.amazonaws.com/sense/installer/dev/'

-# The name of the source directory
+# The name of the source directory (in development)
srcDir='src'

+# The name of the app directory (in docker)
+appDir='app'
+
# The name of the dir, within the current directory, where install assets will
# be downloaded
downloadDir='downloads'
@@ -146,6 +140,20 @@ done
# If offline then force the system to use pre-downloaded files
if [ "$offlineInstall" == "true" ]; then forceOverwrite="false"; fi

+# We can't do shared installs in Docker. They won't persist
+inDocker="false"
+if [ "$DOTNET_RUNNING_IN_CONTAINER" == "true" ]; then
+
+    inDocker="true"
+
+    echo
+    echo "Hi Docker! We will disable shared python installs for downloaded modules"
+    echo
+    allowSharedPythonInstallsForModules="false";
+fi
+
+diskSpace=$(df $PWD | awk '/[0-9]%/{print $(NF-2)}')
+echo "${diskSpace} available"

# Execution environment, setup mode and Paths ::::::::::::::::::::::::::::::::
@@ -162,11 +170,18 @@ if [ "$currentDirName" == "$srcDir" ]; then setupMode='SetupDevEnvironment'; fi
# containing this script and check the name of the parent folder to see if
# we're in dev or production.
pushd "$installerScriptsPath" >/dev/null
-currentDirName="$(basename ${installerScriptsPath})"
-currentDirName=${currentDirName:-/} # correct for the case where pwd=/
+installScriptDirName="$(basename ${installerScriptsPath})"
+installScriptDirName=${installScriptDirName:-/} # correct for the case where pwd=/
popd >/dev/null
executionEnvironment='Production'
-if [ "$currentDirName" == "$srcDir" ]; then executionEnvironment='Development'; fi
+if [ "$installScriptDirName" == "$srcDir" ]; then executionEnvironment='Development'; fi
+
+# For Docker
+if [ "$inDocker" == "true" ]; then
+    # Yes, this is a little contradictory. Maybe "SetupDevEnvironment" should be "SetupAllModules"
+    if [ "$currentDirName" == "$appDir" ]; then setupMode='SetupDevEnvironment'; fi
+    executionEnvironment='Production'
+fi

# The absolute path to the installer script and the root directory. Note that
# this script (and the SDK folder) is either in the /src dir or the root dir
@@ -205,11 +220,15 @@ correctLineEndings ${sdkScriptsPath}/utils.sh
source ${sdkScriptsPath}/utils.sh

# Test for CUDA drivers and adjust supportCUDA if needed
-hasCUDA="false"
+hasCUDA='false'
if [ "$os" == "macos" ]; then
    supportCUDA="false"
-else
+else if [ "$supportCUDA" == "true" ]; then
+    # https://stackoverflow.com/a/66486390
+    cp /usr/lib/wsl/lib/nvidia-smi /usr/bin/nvidia-smi > /dev/null 2>&1
+    chmod ogu+x /usr/bin/nvidia-smi > /dev/null 2>&1
+
    if [ -x "$(command -v nvidia-smi)" ]; then
        nvidia=$(nvidia-smi | grep -i -E 'CUDA Version: [0-9]+.[0-9]+') > /dev/null 2>&1
        if [[ ${nvidia} == *'CUDA Version: '* ]]; then hasCUDA='true'; fi
@@ -217,6 +236,21 @@ else
    fi
fi

+# Test for AMD ROCm drivers
+hasROCm='false'
+if [ "$os" == "linux" ]; then
+    if [ ! -x "$(command -v rocminfo)" ]; then
+        write "Checking for ROCm support..." $color_primary
+        sudo apt install rocminfo -y > /dev/null 2>&1 &
+        spin $!
+ writeLine "Done" $color_success + fi + if [ -x "$(command -v rocminfo)" ]; then + amdinfo=$(rocminfo | grep -i -E 'AMD ROCm System Management Interface') > /dev/null 2>&1 + if [[ ${amdinfo} == *'AMD ROCm System Management Interface'* ]]; then hasROCm='true'; fi + fi +fi + # The location of directories relative to the root of the solution directory runtimesPath="${absoluteAppRootDir}/${runtimesDir}" @@ -282,7 +316,7 @@ fi # - For Windows, oneStep is necessary otherwise FaceProcessing fails. # - For Mac and Linux, oneStep will NOT work # - For Docker, which is Linux, it DOES work. Sometimes. Or maybe not. -if [ "$DOTNET_RUNNING_IN_CONTAINER" == "true" ]; then +if [ "$inDocker" == "true" ]; then oneStepPIP="true" elif [ "$os" == "linux" ] || [ "$os" == "macos" ]; then oneStepPIP="false" @@ -333,6 +367,9 @@ writeLine "" # ============================================================================ # Checks on GPU ability +writeLine "Checking GPU support" "White" "Blue" $lineWidth +writeLine + write "CUDA Present..." if [ "$hasCUDA" == "true" ]; then writeLine "Yes" $color_success; else writeLine "No" $color_warn; fi write "Allowing GPU Support: " @@ -352,7 +389,9 @@ writeLine write "Creating Directories..." $color_primary # For downloading assets -mkdir -p "${downloadPath}" +if [ ! -d "${downloadPath}" ]; then + mkdir -p "${downloadPath}" +fi if [ "$os" == "macos" ]; then if [[ ! -w "${downloadPath}" ]]; then sudo chmod 777 "${downloadPath}" @@ -361,14 +400,19 @@ fi # For persisting settings if [ "$os" == "linux" ]; then - sudo mkdir -p "${commonDataDir}" + if [ ! -d "${commonDataDir}" ]; then + sudo mkdir -p "${commonDataDir}" + fi if [[ ! -w "${commonDataDir}" ]]; then sudo chmod 777 "${commonDataDir}" fi fi # for the runtimes -mkdir -p "${runtimesPath}" +if [ ! -d "${runtimesPath}" ]; then + sudo mkdir -p "${runtimesPath}" +fi +sudo chmod a+w "${runtimesPath}" writeLine "Done" $color_success @@ -390,26 +434,23 @@ if [ "$setupMode" == 'SetupDevEnvironment' ]; then # dirname=${moduleDir,,} # requires bash 4.X, which isn't on macOS by default dirname=$(echo $moduleDir | tr '[:upper:]' '[:lower:]') - if [ "${dirname}" != 'bin' ]; then - if [ -f "${modulePath}/install.sh" ]; then + if [ -f "${modulePath}/install.sh" ]; then - writeLine - writeLine "Processing side-loaded module ${moduleDir}" "White" "Blue" $lineWidth - writeLine + writeLine + writeLine "Processing side-loaded module ${moduleDir}" "White" "Blue" $lineWidth + writeLine - correctLineEndings "${modulePath}/install.sh" - source "${modulePath}/install.sh" "install" + correctLineEndings "${modulePath}/install.sh" + source "${modulePath}/install.sh" "install" - if [ $? -ne 0 ]; then success='false'; fi - fi + # if [ $? -ne 0 ]; then success='false'; fi fi done writeLine writeLine "Modules setup Complete" $color_success - # Now do SDK moduleDir="SDK" modulePath="${absoluteAppRootDir}/${moduleDir}" @@ -418,7 +459,7 @@ if [ "$setupMode" == 'SetupDevEnvironment' ]; then writeLine correctLineEndings "${modulePath}/install.sh" source "${modulePath}/install.sh" "install" - if [ $? -ne 0 ]; then success='false'; fi + # if [ $? -ne 0 ]; then success='false'; fi # And Demos moduleDir="demos" @@ -428,9 +469,10 @@ if [ "$setupMode" == 'SetupDevEnvironment' ]; then writeLine correctLineEndings "${modulePath}/install.sh" source "${modulePath}/install.sh" "install" - if [ $? -ne 0 ]; then success='false'; fi + # if [ $? 

    # And finally, supporting library packages
+    # TODO: Move this into the SDK install.sh script

    # libfontconfig1 is required for SkiaSharp, libgdplus is required for System.Drawing
    if [ "${verbosity}" == "quiet" ]; then
@@ -509,7 +551,7 @@ else
        correctLineEndings "${modulePath}/install.sh"
        source "${modulePath}/install.sh" "install"

-        if [ $? -ne 0 ]; then success='false'; fi
+        # if [ $? -ne 0 ]; then success='false'; fi
    fi

# ============================================================================
diff --git a/tests/QueueServiceTests/QueueProcessing.cs b/tests/QueueServiceTests/QueueProcessing.cs
index 5cc23986..3514e8c7 100644
--- a/tests/QueueServiceTests/QueueProcessing.cs
+++ b/tests/QueueServiceTests/QueueProcessing.cs
@@ -46,10 +46,11 @@ public TestOptions(QueueProcessingOptions options)
                                 new NullLogger());

    [Fact]
-    public async void RequestTimesOutIfNotHandled()
+    public async Task RequestTimesOutIfNotHandled()
    {
        var request = new TestQueuedRequest { image_name = "Bob.jpg" };
-        var result = await _queueServices.SendRequestAsync(QueueName, request);
+        var result = await _queueServices.SendRequestAsync(QueueName, request)
+                                         .ConfigureAwait(false);

        Assert.NotNull(result);
        Assert.IsType(result);
@@ -65,7 +66,8 @@ public async Task CanPullRequestFromQueue()
    {
        var request = new TestQueuedRequest { image_name = "Bob.jpg" };
        var requestTask = _queueServices.SendRequestAsync(QueueName, request);
-        BackendRequestBase? result = await _queueServices.DequeueRequestAsync(QueueName);
+        BackendRequestBase? result = await _queueServices.DequeueRequestAsync(QueueName)
+                                                         .ConfigureAwait(false);

        Assert.NotNull(result);
        Assert.IsType(result);
@@ -73,11 +75,12 @@ public async Task CanPullRequestFromQueue()
    }

    [Fact]
-    public async void CanPullRequestFromQueuAsynce()
+    public async Task CanPullRequestFromQueueAsync()
    {
        var request = new TestQueuedRequest { image_name = "Bob.jpg" };
        var requestTask = _queueServices.SendRequestAsync(QueueName, request);
-        BackendRequestBase? result = await _queueServices.DequeueRequestAsync(QueueName);
+        BackendRequestBase? result = await _queueServices.DequeueRequestAsync(QueueName)
+                                                         .ConfigureAwait(false);

        Assert.NotNull(result);
        Assert.IsType(result);
@@ -96,23 +99,24 @@ public void CantPullRequestFronWrongQueue()
    }

    [Fact]
-    public async void CantPullRequestFronWrongQueueAsync()
+    public async Task CantPullRequestFromWrongQueueAsync()
    {
        var request = new TestQueuedRequest { image_name = "Bob.jpg" };
        var requestTask = _queueServices.SendRequestAsync(QueueName, request);
-        BackendRequestBase? result = await _queueServices.DequeueRequestAsync(QueueName + "_Wrong");
+        BackendRequestBase? result = await _queueServices.DequeueRequestAsync(QueueName + "_Wrong").ConfigureAwait(false);

        Assert.Null(result);
    }

    [Fact]
-    public async void CanCancelPullRequestQueueAsync()
+    public async Task CanCancelPullRequestQueueAsync()
    {
        // make sure the queue exists
        var request = new TestQueuedRequest { image_name = "Bob.jpg" };
        var requestTask = _queueServices.SendRequestAsync(QueueName, request);
-        BackendRequestBase? result = await _queueServices.DequeueRequestAsync(QueueName);
+        BackendRequestBase? result = await _queueServices.DequeueRequestAsync(QueueName)
+                                                         .ConfigureAwait(false);

        using CancellationTokenSource cancellationSource = new();
@@ -121,13 +125,13 @@ public async void CanCancelPullRequestQueueAsync()
        var task = _queueServices.DequeueRequestAsync(QueueName, token);
        cancellationSource.Cancel();

-        result = await task;
+        result = await task.ConfigureAwait(false);

        Assert.Null(result);
    }

    [Fact]
-    public async void CanGetResponse()
+    public async Task CanGetResponse()
    {
        var request = new TestQueuedRequest { image_name = "Bob.jpg" };
        var testResponse = new TestQueuedResponse() { success = true, label = "Bob" };
@@ -139,18 +143,18 @@ public async void CanGetResponse()
        bool success = _queueServices.SetResult(pulledRequest!.reqid, testResponseString);
        Assert.True(success);

-        var result = await requestTask;
+        var result = await requestTask.ConfigureAwait(false);
        Assert.NotNull(result);
        Assert.IsType(result);
    }

    [Fact]
-    public async void CantAddSameRequestTwice()
+    public async Task CantAddSameRequestTwice()
    {
        var request = new TestQueuedRequest { image_name = "Bob.jpg" };
        var firstrequestTask = _queueServices.SendRequestAsync(QueueName, request);
        var secondRequestTask = _queueServices.SendRequestAsync(QueueName, request);
-        var secondResult = await secondRequestTask;
+        var secondResult = await secondRequestTask.ConfigureAwait(false);

        Assert.NotNull(secondResult);
        Assert.IsType(secondResult);
@@ -161,7 +165,7 @@ public async void CantAddSameRequestTwice()
    }

    [Fact]
-    public async void NullResponseReturnsError()
+    public async Task NullResponseReturnsError()
    {
        var request = new TestQueuedRequest { image_name = "Bob.jpg" };
        string? testResponseString = null;
@@ -172,7 +176,7 @@ public async void NullResponseReturnsError()
        bool success = _queueServices.SetResult(request.reqid, testResponseString);
        Assert.True(success);

-        var result = await requestTask;
+        var result = await requestTask.ConfigureAwait(false);
        Assert.NotNull(result);
        Assert.IsType(result);
        var errorResult = result as BackendErrorResponse;
@@ -182,7 +186,7 @@ public async void NullResponseReturnsError()
    }

    [Fact]
-    public async void BadResponseReturnsError()
+    public async Task BadResponseReturnsError()
    {
        var request = new TestQueuedRequest { image_name = "Bob.jpg" };
        string testResponseString = "This is not JSON";
@@ -193,7 +197,7 @@ public async void BadResponseReturnsError()
        bool success = _queueServices.SetResult(request.reqid, testResponseString);
        Assert.True(success);

-        var result = await requestTask;
+        var result = await requestTask.ConfigureAwait(false);

        Assert.NotNull(result);
        Assert.IsType(result);
@@ -204,7 +208,7 @@ public async void BadResponseReturnsError()
    }

    [Fact]
-    public async void EmptyResponseReturnsError()
+    public async Task EmptyResponseReturnsError()
    {
        var request = new TestQueuedRequest { image_name = "Bob.jpg" };
        string testResponseString = "null";
@@ -215,7 +219,7 @@ public async void EmptyResponseReturnsError()
        bool success = _queueServices.SetResult(request.reqid, testResponseString);
        Assert.True(success);

-        var result = await requestTask;
+        var result = await requestTask.ConfigureAwait(false);

        Assert.NotNull(result);
        Assert.IsType(result);
@@ -232,10 +236,12 @@ public async Task TimeoutRequestDoesntClogQueue()
        var request2 = new TestQueuedRequest { image_name = "Alf.jpg" };

        var request1Task = _queueServices.SendRequestAsync(QueueName, request1);
-        await Task.Delay(queueOptions.ResponseTimeout + TimeSpan.FromSeconds(5));
+        await Task.Delay(queueOptions.ResponseTimeout + TimeSpan.FromSeconds(5))
+                  .ConfigureAwait(false);

        var request2Task = _queueServices.SendRequestAsync(QueueName, request2);
-        BackendRequestBase? result = await _queueServices.DequeueRequestAsync(QueueName);
+        BackendRequestBase? result = await _queueServices.DequeueRequestAsync(QueueName)
+                                                         .ConfigureAwait(false);
        Assert.NotNull(result);
        Assert.IsType(result);
        Assert.Equal(request2, result);
@@ -255,7 +261,7 @@ public async Task RequestQueueHasLimit()
            var request2Task = _queueServices.SendRequestAsync(QueueName, request2);
            tasks.Add(request2Task.AsTask());

-            await Task.WhenAll(tasks);
+            await Task.WhenAll(tasks).ConfigureAwait(false);

            var lastTask = tasks[queueOptions.MaxQueueLength];
            Assert.True(lastTask.IsCompletedSuccessfully);
@@ -290,7 +296,7 @@ public async Task RequestCanBeCanceled()
            var requestTask = _queueServices.SendRequestAsync(QueueName, request, cts.Token);
            cts.Cancel();

-            var result = await requestTask;
+            var result = await requestTask.ConfigureAwait(false);

            Assert.NotNull(result);
            Assert.IsType(result);