diff --git a/test/LearningTest/cmd/python/README.md b/test/LearningTest/cmd/python/README.md index 2aa68223d..d6c5e07f3 100644 --- a/test/LearningTest/cmd/python/README.md +++ b/test/LearningTest/cmd/python/README.md @@ -1,93 +1,216 @@ -Test de Khiops -============== - -LearningTest: creation en mars 2009 -Automatisation des tests du logiciel Khiops -Versions synchronisees avec les versions majeures de Khiops -Historisation des versions par la commande MakeLearningTestVersion - - -Procédure pour effectuer les tests de Khiops sur un autre environement ----------------------------------------------------------------------- -Installation de LearningTest sur une nouvelle machine -- copier l'arborescence LearningTest -- personnalisation de l'environnement par un fichier de config learning_test.config dans le répertoire LearningTest\cmd\python - - voir LearningTest\cmd\python\learning_test_env.py pour la documentation sur le contenun de ce fichier de config -- Installer python -- mettre python dans le path - -Utilisation de LearningTest -- ouvrir un shell -- lancer une commande se trouvant dans learningTest\cmd -- lancer les tests, par exemple - - TestKhiops r Standard Adult - - TestKhiops r Standard - - TestAll r -- analyser les résultats, par exemple - - ApplyCommand errors TestKhiops\Standard, pour avoir une synthése des erreurs/warning sur les tests de TestKhiops\Standard - - ApplyCommandAll errors, pour la même commande sur tous les tests - -Principales commandes - - helpOptions: doc sur différentes options paramètrables par variable d'environnement - - testKhiops [version] [testName] ([subTestName]): lance un test sur un répertoire ou une arborescence de test, sous le directory TestKhiops - - version peut être: - - "nul" pour ne faire que les comparaisons de résultats - - "d" ou "r" pour la version de debug ou release de l'environnement de développement - - un exe se trouvant dans LearningTest\cmd\modl\.[.exe] - - un exe dont la path complet est - - 
TestCoclustering: idem pour Coclustering - - TestKNI: idem pour KNI - - testAll: pour lancer tous les tests - - applyCommand [command] [root_path] ([dir_name]): pour exécuter une commande (un service) sur un ou plusieurs jeux de tests - - applyCommandAll: pour exécuter une commande sur tous les jeux de test -Les commande lancées sans argument sont auto-documentées - - -Procédure de test ------------------ -Repertoires: - - doc : documentation - - cmd : fichier de commandes pour la gestion des tests - - datasets : un repertoire par jeu de données, comportant une fichier dictionnaire .kdic et un fichier de donnéee .txt - - MTdatasets : un repertoire par jeu de données multi-table, comportant une fichier dictionnaire .kdic et un fichier de donnéee .txt par table - - TextDatasets : un repertoire par jeu de données, comportant une fichier dictionnaire .kdic et un fichier de donnéee .txt - - TestKhiops - - Standard: fonctionnalités de base - - Classification: tests de classification - - Regression: tests de regression - - SideEffects: tests d'effets de bord - - ... - - TestCoclustering - - Standard: fonctionnalités de base - - Bugs: jeux de tests éaborés pour reproduire des bugs et vérifier leur correction - - TestKNITransfer: - - Standard: fonctionnalités de base - - MultiTables: test multi-tables - - ... - -Les sous-repertoires préfixés par y_ ou z_ (ex: z_Work) sont des répertoire de test temporaires. - -Dans chaque répertoire de test (par exemple: Classification/Iris) - - un fichier de scénario (test.prm) - - un sous-répertoire results, contenant les fi chiers produits par le scenario - - un sous-répertoire results.ref, contenant la version de référence de ces fichiers - -Les fichiers de scénario test.prm doivent étre écrits de façon indépendante de la localisation de LearningTest, en modifiant les paths des fichiers concernés, qui doivent étre relatifs é l'arborescence LearningTest, avec un syntaxe de type linux. 
-Exemple: - - "./SNB_Modeling.kdic" pour accéder à un dictionnaire spécifique local au répertoire de test - - hormis les jeux de données définis dans les arborescences racines de type LearningTest/dataset, les jeux de données peuvent avoir des dictionnaires ou des données spécifique par répertoire de test - - "../../../datasets/Adult/Adult.txt" pour accéder à un fichier de données d'un dataset - - "./results/T_Adult.txt" pour un résultat dans sous-répertoire des résultats - -Ceci est automatisé par les fichiers de commandes se trouvant dans le répertoire cmd. -- testKhiops lance un test sur un répertoire ou une arborescence de test, sous le directory TestKhiops - - testKhiops [version] [Test tree dir] -- testCoclustering lance un test sur un répertoire ou une arborescence de test, sous le directory TestCoclustering -- applyCommand lance des commandes un répertoire ou une arborescence de test - - utititaire generique: cf script python appele - - lancer sans argument pour avoir la liste des commandes possibles - - principales commandes: errors (synthése des erreur et warning), logs (logs détaillés des erreurs) -- testAll lance les tests sur toutes les arborescences de test -- testAll64bits lance les tests sur toutes les arborescences de test en mode 64 bits -- applyCommandAll lance des commandes toutes les arborescences de test +# Khiops tests suite: LearningTest + +LearningTest +- created in March 2009 +- automated testing of Khiops software +- versions synchronized with delivered Khiops versions + - one version per delivered Khiops version, with same tag + - one version for the current branch under development + +Non-regression tests consist of over 600 test sets organized into around 40 families, mainly for Khiops, but also for Khiops coclustering and the KNI DLL. +They collectively occupy around 11 Gb, including 5 Gb for the databases and 3.5 Gb for the test scripts with the reference results. 
+ +## LearningTest directory contents + +### Main directories + +Directories of LearningTest: +- doc: documentation +- cmd: command file for test management under windows + - cmd/python: python scripts for running tests and managing test results + - cmd/modl: directory that contains specific versions of exe files to test +- datasets: standard datasets +- MTdatasets: multi-table datasets +- TextDatasets: text datasets +- UnusedDatasets: datasets not currently in use +- TestKhiops: tests for Khiops +- TestCoclustering: tests for Coclustering +- TestKNITransfer: test for KNI + +Each test directory tree, TestKhiops, TestCoclustering, TestKNITransfer is a two-level tree: +- Level 1: one directory per test family +- Level 2: one directory per test set + +Subdirectories prefixed with y_ or z_ (e.g. z_Work) are temporary test directories. + + +### Test set directories + +Each test directory is organized as follows: +- test.prm: scenario file to run the test +- ... : test-specific local data files, if any +- results: sub-directory containing the results of running the tool with the script +- results.ref: sub-directory containing reference results +- comparisonResults.log: test results obtained by comparing results and results.ref + +The test.prm scenario files must be written independently of the LearningTest localization, by modifying the paths of the files concerned, which must be relative to the LearningTest tree, with linux-like syntax. 
+For example: +- `./SNB_Modeling.kdic` to access a specific dictionary local to the test directory + - except for datasets defined in LearningTest/dataset root trees, + datasets can have specific dictionaries or data per test directory +- `../../../datasets/Adult/Adult.txt` to access a dataset data file +- `./results/T_Adult.txt` for a result in the results sub-directory + + +## Running Khiops tests + +Installing LearningTest on a new machine +- copy the LearningTest directory tree +- Install python +- put python in the path + +### Personalization if necessary +- modify learning_test.config file in directory LearningTest/cmd/python +~~~~ +# The config file learning_test.config must be in directory LearningTest\cmd\python +# It is optional, in which case all keys are set to empty +# It contains the following key=value pairs that allows a personnalisation of the environment: +# - path: additional path (eg: to access to java runtime) +# - classpath: additional classpath for java libraries +# - learningtest_root: alternative root dir to use where LearningTest is located +# - learning_release_dir: dir where the release developement binaries are located (to enable the 'r' alias') +# - learning_debug_dir: dir where the debug developement binaries are located (to enable the 'd' alias') +~~~~ + +## Using LearningTest on any platform + +The commands are available using python scripts located in directory LearningTest/cmd/python (alias [ScriptPath]). +~~~~ +python [ScriptPath]/[script].py +~~~~ + +Commands launched without arguments are self-documented + +#### help_options.py +Show the status of environment variables used by the scripts. 
+Main environment variables are: +- KhiopsMPIProcessNumber: Number of MPI processes in parallel mode +- KhiopsCompleteTests: perform all tests, even the longest ones (more than one day overall) +- KhiopsMinTestTime: run only tests where run time (in file time.log) is beyond a threshold +- KhiopsMaxTestTime: run only tests where run time (in file time.log) is below a threshold + +#### test_khiops.py +Example: +- python [ScriptPath]/test_khiops.py Khiops r Standard +- python [ScriptPath]/test_khiops.py Khiops r Standard IrisLight +- python [ScriptPath]/test_khiops.py Coclustering r Standard Iris +~~~~ +test [toolName] [version] [testName] ([subTestName]) + run tests of one of the Khiops tools + tool_name: name of the tool, among Khiops, Coclustering, KNI + version: version of the tool, one of the following options + : full path of the executable + d: debug version in developpement environnement + r: release version in developpement environnement + ver: ..exe in directory LearningTest\cmd\modl + nul: for comparison with the test results only + testName: name of the tool test directory (Standard, MultiTables...) + subTestName: optional, name of the tool test sub-directory (Adult,Iris...) 
+~~~~ + +#### test_khiops_all.py +Example +- python [ScriptPath]/test_khiops_all.py r +- python [ScriptPath]/test_khiops_all.py [MODL_PATH] Khiops +~~~~ +testAll [version] + run all tests for all Khiops tools + version: version of the tool + d: debug version in developpement environnement + r: release version in developpement environnement + ver: ..exe in directory LearningTest\cmd\modl + nul: for comparison with the test results only + full exe path, if parameter is used + tool: all tools if not specified, one specified tool otherwise + Khiops + Coclustering + KNI +~~~~ + +#### apply_command.py +Example: +- python [ScriptPath]/apply_command.py errors TestKhiops/Standard + +~~~~ +apply_command [command] [root_path] ([dir_name]) + apply command on a directory structure + command: name of the command + rootPath is the path of the root directory + dirName is the name of one specific sub-directory + or all (default) for executing on all sub-directories + example: applyCommand list TestKhiops\Standard + example: applyCommand list TestKhiops\Standard Adult + + List of available standard commands (* for all commands): + list: list of sub-directories + errors: report errors and warnings + logs: detailed report errors and warnings + compareTimes: compare time with ref time and report warnings only + compareTimesVerbose: compare time with ref time and report all + performance: report SNB test accuracy + performanceRef: report ref SNB test accuracy + clean: delete results files + cleanref: delete results.ref files + makeref: copy results files to results.ref + copyref: copy results.ref files to results + checkHDFS: check if parameter files are compliant with HDFS + transformHDFS: transform parameter files to be compliant with HDFS + transformHDFSresults: transform results files to be compliant with HDFS +~~~~ + +#### apply_command_all.py +Example: +- python [ScriptPath]/apply_command_all.py errors + +~~~~ +applyCommandAll [command] <*> + apply command on all test 
sub-directories + command: name of the command + *: to include 'unofficial' sub-directories, such as z_work + Type applyCommand to see available commands +~~~~ + + +### Using LearningTest under Windows + +Commands are available using command files (.cmd) located in directory LearningTest/cmd, that are simply wrappers to the python scripts: +- helpOptions +- testKhiops +- testCoclustering +- testKNI +- testAll +- applyCommand +- applyCommandAll + +Typical use +- open a shell +- run a command found in learningTest/cmd +- run the tests, for example +~~~ + TestKhiops r Standard Adult + TestKhiops r Standard + TestAll r +~~~ +- analyze results, for example +~~~ + ApplyCommand errors TestKhiops/Standard + ApplyCommandAll errors +~~~ + + +## Test methodology + +### Test hierarchy + +The set of non-regression tests is voluminous. In practice, the tests are run in stages: +- elementary: TestKhiops Standard IrisLight, less than one second +- standard: TestKhiops Standard, less than one minute +- all : TestAll, less than two hours +- complete: TestAll in KhiopsCompleteTests mode (see help_options), more than one day +- release: the multiplication of test conditions reinforces the tool's robustness + - TestAll under different platforms + - TestAll in sequential or parallel mode (cf. 
KhiopMPIProcessNumber) + - Test in debug mode for short test runs (cf KhiopsMinTestTime, KhiopsMaxTestTime) + + diff --git a/test/LearningTest/cmd/python/apply_command.py b/test/LearningTest/cmd/python/apply_command.py index 15714be0e..6a407dc18 100644 --- a/test/LearningTest/cmd/python/apply_command.py +++ b/test/LearningTest/cmd/python/apply_command.py @@ -131,109 +131,96 @@ def apply_command_list(work_dir): print(work_dir) -def merge_tests_results(work_dir) -> object: - # list test directories with errors or warnings - # report log errors and warnings +def merge_tests_results(work_dir): + """Analyse test directories for warning, errors or fatal errors + Returns: + - warning number + - erreur number + - fatal error (boolean) + - message related to file extensions (optional) + - specific message (optional) + """ + + def extract_number(message): + assert message != "" + fields = message.split() + assert fields[0].isdigit() + number = int(fields[0]) + return number + + # Traitement des erreurs memorisee dans le log log_file_name = os.path.join(work_dir, "comparisonResults.log") - error_message = "" - warning_message = "" + error_number = 0 + warning_number = 0 + fatal_error = False + message_extension = "" + specific_message = "" if os.path.isfile(log_file_name): log_file = open(log_file_name, "r", errors="ignore") - - error_message_err = "" - error_message_kdic = "" - error_message_txt = "" - - begin_file_err = 0 - begin_file_kdic = 0 - begin_file_txt = 0 - for s in log_file: - s = s.replace("\n", "") - # on traite de facon particuliere les messages du fichier de log err.txt - if s.find("file ") == 0 or s == "": - if s.find("results\\err.txt") >= 0: - begin_file_err = 1 - else: - begin_file_err = 0 - if begin_file_err == 1: - if s.find("error") >= 0 and s.find("0 error") != 0: - error_message_err = s - # on traite de facon particuliere les messagess du fichier Modeling.Kdic - if s.find("file ") == 0 or s == "": - if s.find("results\\Modeling.kdic") >= 0: - 
begin_file_kdic = 1 - else: - begin_file_kdic = 0 - if begin_file_kdic == 1: - if s.find("error") >= 0 and s.find("0 error") != 0: - error_message_kdic = s - if begin_file_kdic == 1: - if s.find("error") >= 0 and s.find("0 error") != 0: - error_message_kdic = s - # on traite de facon particuliere les messages des fichiers .txt, hors err.txt - if s.find("file ") == 0 or s == "": - if ( - s.find("results\\") >= 0 - and s.find(".txt") >= 0 - and s.find("results\\err.txt") == -1 - ): - begin_file_txt = 1 - else: - begin_file_txt = 0 - if begin_file_txt == 1: - if s.find("error") >= 0 and s.find("0 error") != 0: - error_message_txt = s - # on cherche ici a reperer les erreurs globales agregees - if s.find("error") >= 0 and s.find("0 error") != 0: - error_message = s - if s.find("warning") >= 0 and s.find("0 warning") != 0: - warning_message = s - - if error_message_err == error_message and error_message != "": - error_message = error_message + " (all in err.txt)" - if error_message_kdic == error_message and error_message != "": - error_message = error_message + " (all in Modeling.Kdic)" - if error_message_txt == error_message and error_message != "": - error_message = error_message + " (all in deployed .txt files)" + begin_summary = False + for line in log_file: + line = line.strip() + # Recherche du debug de la synthese + if line == "SUMMARY": + begin_summary = True + + # Analyse de la synthese + if begin_summary: + if line.find("warning(s)") >= 0 and line.find("0 warning(s)") != 0: + warning_number = extract_number(line) + if line.find("error(s)") >= 0 and line.find("0 error(s)") != 0: + error_number = extract_number(line) + if line == "FATAL ERROR": + fatal_error = True + if line.find("Error files: ") == 0: + message_extension = line + if line.find("Note: ") == 0: + specific_message = line + + # Fermeture du fichier log_file.close() else: - error_message = "The test has not been launched" - return error_message, warning_message + error_number = 1 + specific_message = 
"The test has not been launched" + return ( + warning_number, + error_number, + fatal_error, + message_extension, + specific_message, + ) def apply_command_errors(work_dir): # list test directories with errors or warnings # outpout in standard output stream - dir_name = os.path.basename(work_dir) - root_name = os.path.basename(os.path.dirname(work_dir)) - - def print_log_error(message): - print(root_name + " " + dir_name + ": " + message) - - error_message, warning_message = merge_tests_results(work_dir) - if error_message != "": - print_log_error(error_message) - if warning_message != "": - print_log_error(warning_message) - - -def merge_errors_and_warnings(work_dir): - # list test directories with errors or warnings - # output return in string - dir_name = os.path.basename(work_dir) - # output=open(os.path.join(work_dir,work_dir+".compareResults"),'w') - - # def write_log_error(file,message): - # file.write(root_name + " " + dir_name + ": "+message) - output = "" - error_message, warning_message = merge_tests_results(work_dir) - if error_message != "": - output = output + dir_name + " : " + error_message + "\n" - if warning_message != "": - output = output + dir_name + " : " + warning_message + "\n" - if error_message != "" or warning_message != "": - output = output + "\n" - return output + test_dir_name = os.path.basename(work_dir) + family_dir_name = os.path.basename(os.path.dirname(work_dir)) + tool_name = os.path.basename(os.path.dirname(os.path.dirname(work_dir))) + ( + warning_number, + error_number, + fatal_error, + message_extension, + specific_message, + ) = merge_tests_results(work_dir) + if warning_number != 0 or error_number != 0 or fatal_error: + message = "\t" + tool_name + "\t" + message += family_dir_name + "\t" + message += test_dir_name + "\t" + if warning_number > 0: + message += "warnings\t" + str(warning_number) + "\t" + else: + message += "\t\t" + if error_number > 0: + message += "errors\t" + str(error_number) + "\t" + else: + message += 
"\t\t" +        if fatal_error: +            message += "FATAL ERROR" +        message += "\t" + message_extension +        message += "\t" + specific_message +        print(message) def apply_command_logs(work_dir): @@ -241,8 +228,14 @@ def apply_command_logs(work_dir): # outpout in standard output stream dir_name = os.path.basename(work_dir) root_name = os.path.basename(os.path.dirname(work_dir)) -    error_message, warning_message = merge_tests_results(work_dir) -    if error_message != "" or warning_message != "": +    ( +        warning_number, +        error_number, +        fatal_error, +        message_extension, +        specific_message, +    ) = merge_tests_results(work_dir) +    if warning_number != 0 or error_number != 0 or fatal_error: log_file_name = os.path.join(work_dir, "comparisonResults.log") if os.path.isfile(log_file_name): print("==================================================================") @@ -1236,6 +1229,8 @@ def execute_command( if len(test_list) == 0: print("error: no sub-directory is available in " + root_path) exit(0) +    # Sort test list +    test_list.sort() # Execution de la commande (command_function, command_label) = available_commands[command_id] for name in test_list: @@ -1253,10 +1248,11 @@ def execute_command( os.chdir(work_dir) command_function(work_dir) os.chdir(root_path) -    # Message de fin -    base_name = os.path.basename(root_path) -    test_dir_name = os.path.dirname(root_path) -    print("DONE: " + os.path.basename(test_dir_name) + " " + base_name) +    # Message synthetique de fin si famille de jeu de tests +    family_dir_name = os.path.basename(root_path) +    tool_name = os.path.basename(os.path.dirname(root_path)) +    if test_dir_name is None: +        print("DONE\t" + tool_name + "\t" + family_dir_name) def register_all_commands(): diff --git a/test/LearningTest/cmd/python/apply_command_all.py b/test/LearningTest/cmd/python/apply_command_all.py index 9f938d758..20fdc93f4 100644 --- a/test/LearningTest/cmd/python/apply_command_all.py +++ b/test/LearningTest/cmd/python/apply_command_all.py @@ -3,6 +3,7 @@ import 
learning_test_env import apply_command import test_khiops +import test_families if __name__ == "__main__": all_commands, standard_command_number = apply_command.register_all_commands() @@ -29,9 +30,25 @@ tool_test_path = os.path.join( learning_test_env.learning_test_root, "LearningTest", test_sub_dir ) -    for dir_name in os.listdir(tool_test_path): +    # Get standard families to initialize directories to use +    test_family = test_families.get_test_family(khiops_tool_name) +    used_dir_names = test_family.copy() +    # Add unofficial test directories if requested +    if include_unofficial_sub_dirs: +        # Sort all actual directories by name to ensure stability across platforms +        all_dir_names = os.listdir(tool_test_path) +        all_dir_names.sort() +        for dir_name in all_dir_names: +            if not dir_name in test_family: +                # Unofficial directories are with an '_' in second char (e.g. z_work) +                if dir_name.find("_") == 1: +                    root_path = os.path.join(tool_test_path, dir_name) +                    if os.path.isdir(root_path): +                        used_dir_names.append(dir_name) +    # Execute command on all used directories +    for dir_name in used_dir_names: root_path = os.path.join(tool_test_path, dir_name) if os.path.isdir(root_path): -        # Skip unofficial test directories -        if include_unofficial_sub_dirs or dir_name.find("_") != 1: -            apply_command.execute_command(all_commands, command, root_path) +        apply_command.execute_command(all_commands, command, root_path) +        else: +            print("BUG directory not found: " + root_path) diff --git a/test/LearningTest/cmd/python/check_results.py b/test/LearningTest/cmd/python/check_results.py index 67a3c6e98..9f5c5444f 100644 --- a/test/LearningTest/cmd/python/check_results.py +++ b/test/LearningTest/cmd/python/check_results.py @@ -54,7 +54,6 @@ def initialize_parsers(): def check_results(test): # compare les fichiers 2 a 2 et ecrit les resultat dans le fichier comparisonResults.log -    print("--Comparing results...") ref_dir = os.path.join(os.getcwd(), test, "results.ref") if not os.path.isdir(ref_dir): 
print("reference directory (" + ref_dir + ") not available") @@ -67,11 +66,14 @@ def check_results(test): number_errors = 0 number_warnings = 0 number_files = 0 + missing_result_files = False log_file = open(os.path.join(os.getcwd(), test, "comparisonResults.log"), "w") write_message(log_file, test.upper() + " comparison\n") # Initialisation des parsers initialize_parsers() - # test des fichiers 2 a 2 + # test des fichiers 2 a 2 en memorisant les erreurs par extension + number_errors_in_err_txt = 0 + number_errors_per_extension = {} for file_name in os.listdir(ref_dir): [errors, warnings] = check_file( log_file, @@ -81,6 +83,15 @@ def check_results(test): number_files = number_files + 1 number_errors = number_errors + errors number_warnings = number_warnings + warnings + # Memorisation des statistiques par extension + if errors > 0: + if file_name == "err.txt": + number_errors_in_err_txt += errors + else: + _, file_extension = os.path.splitext(file_name) + number_errors_per_extension[file_extension] = ( + number_errors_per_extension.get(file_extension, 0) + errors + ) # recherche des erreurs fatales fatal_error_files = [ "stdout_error.log", @@ -91,20 +102,21 @@ def check_results(test): if file_name in fatal_error_files: number_fatal_errors = number_fatal_errors + 1 # comparaison du nombre de fichiers - if len(os.listdir(ref_dir)) == 0: + ref_result_file_number = len(os.listdir(ref_dir)) + test_result_file_number = len(os.listdir(test_dir)) + if ref_result_file_number == 0: print_message(log_file, "no comparison: missing reference result files") number_errors = number_errors + 1 - if len(os.listdir(ref_dir)) > 0 and len(os.listdir(ref_dir)) != len( - os.listdir(test_dir) - ): + if ref_result_file_number > 0 and ref_result_file_number != test_result_file_number: print_message( log_file, - "number of results files (" - + str(len(os.listdir(test_dir))) + "\nerror: number of results files (" + + str(test_result_file_number) + ") should be " - + 
str(len(os.listdir(ref_dir))), + + str(ref_result_file_number), ) number_errors = number_errors + 1 + missing_result_files = test_result_file_number < ref_result_file_number # report errors in err.txt file; if no ref file if len(os.listdir(ref_dir)) == 0: err_file_name = os.path.join(test_dir, "err.txt") @@ -130,10 +142,54 @@ def check_results(test): ) number_errors = number_errors + 1 err_file.close() - print_message(log_file, "\n" + str(number_warnings) + " warning(s)") - print_message(log_file, str(number_errors) + " error(s)") + # Write summary + write_message(log_file, "\nSUMMARY") + write_message(log_file, str(number_warnings) + " warning(s)") + write_message(log_file, str(number_errors) + " error(s)") if number_fatal_errors > 0: - print_message(log_file, "FATAL ERROR") + write_message(log_file, "FATAL ERROR") + # Write additional info related to error per file extension + if number_errors > 0: + # Sort file extensions + file_extensions = [] + for file_extension in number_errors_per_extension: + file_extensions.append(file_extension) + file_extensions.sort() + # Build messages + message_extension = "" + specific_message = "" + if number_errors_in_err_txt > 0: + message_extension += "err.txt" + if len(file_extensions) > 0: + message_extension += ", " + if number_errors_in_err_txt == number_errors: + specific_message = "errors only in err.txt" + if len(file_extensions) > 0: + for i, file_extension in enumerate(file_extensions): + if i > 0: + message_extension += ", " + message_extension += file_extension + if number_errors_per_extension[file_extension] == number_errors: + specific_message = "errors only in " + file_extension + " files" + # Build specific message if number or errors only in err.txt and report files + if specific_message == "": + number_errors_in_report_files = number_errors_per_extension.get( + ".khj", 0 + ) + number_errors_per_extension.get(".khcj", 0) + if ( + number_errors_in_err_txt == number_errors_in_report_files + and 
number_errors_in_err_txt + number_errors_in_report_files + == number_errors + ): + specific_message = "all errors in err.txt and in json report files with the same number" + # Build specific message in case of missing files + if specific_message == "" and missing_result_files: + specific_message = "Missing result files" + # Write additional messages + if message_extension != "": + write_message(log_file, "Error files: " + message_extension) + if specific_message != "": + write_message(log_file, "Note: " + specific_message) log_file.close() print( @@ -144,7 +200,7 @@ def check_results(test): + " error(s), " + str(number_warnings) + " warning(s)" - + ("\nFATAL ERROR" if number_fatal_errors > 0 else "") + + (", FATAL ERROR" if number_fatal_errors > 0 else "") ) print( "log writed in " @@ -518,10 +574,12 @@ def filter_khiops_temp_dir(value): write_message(log_file, str(warning) + " warning(s) (epsilon difference)") if error == 0: write_message(log_file, "OK") - elif max_threshold > 0: - write_message(log_file, "max relative difference: " + str(max_threshold)) if error > 0: - write_message(log_file, str(error) + " error(s)") + message = str(error) + " error(s)" + if max_threshold > 0: + message += " (max relative difference: " + str(max_threshold) + ")" + write_message(log_file, message) + return [error, warning] @@ -546,7 +604,7 @@ def check_value(val1, val2): # - 1 si les cellules sont identiques # - 2 si les la difference relative est toleree # - 0 si les cellules sont differentes - # - threshold: differe,ce relative si result = 2 + # - threshold: difference relative si result = 2 # Ok si valeurs egales if val1 == val2: return [1, 0] @@ -572,9 +630,9 @@ def check_cell(cell1, cell2): # renvoie deux valeur: # - result: # - 1 si les cellules sont identiques - # - 2 si les la difference relative est toleree - # - 0 si les cellules sont differentes - # - threshold: differe,ce relative si result = 2 + # - 2 si les la difference relative est toleree (warning) + # - 0 si les 
cellules sont differentes (error) + # - threshold: difference relative liee au cas erreur ou warning if cell1 == cell2: return [1, 0] @@ -653,16 +711,26 @@ def check_cell(cell1, cell2): else: i = 0 length = len(substrings1) - full_eval = 1 - full_threshold = 0 + warnings = 0 + errors = 0 + max_warning_threshold = 0 + max_error_threshold = 0 while i < length: [eval_result, threshold_result] = check_value( substrings1[i], substrings2[i] ) + # Traitement des erreurs if eval_result == 0: - return [0, 0] + errors += 1 + max_error_threshold = max(threshold_result, max_error_threshold) + # Traitement des warnings if eval_result == 2: - full_eval = 2 - full_threshold = max(threshold_result, full_threshold) + warnings += 1 + max_warning_threshold = max(threshold_result, max_warning_threshold) i = i + 1 - return [full_eval, full_threshold] + if errors > 0: + return [0, max_error_threshold] + elif warnings > 0: + return [2, max_warning_threshold] + else: + return [1, 0] diff --git a/test/LearningTest/cmd/python/test_families.py b/test/LearningTest/cmd/python/test_families.py new file mode 100644 index 000000000..0a98c4b10 --- /dev/null +++ b/test/LearningTest/cmd/python/test_families.py @@ -0,0 +1,49 @@ +import os + + +def get_test_family(tool): + """Return list of tes families per tool + Account for 'KhiopsCompleteTests' env var for extended families""" + # Khiops tool + if tool == "Khiops": + test_family = [ + "Standard", + "SideEffects", + "Rules", + "MissingValues", + "Advanced", + "Bugs", + "BugsMultiTables", + "MultipleTargets", + "MultiTables", + "DeployCoclustering", + "SparseData", + "SparseModeling", + "ParallelTask", + "NewPriorV9", + "DTClassification", + "VariableConstruction", + "NewV10", + "KIInterpretation", + "CrashTests", + "SmallInstability", + ] + # V11 "Histograms", + # V11 "HistogramsLimits", + # V11 "TextVariables", + # Following tests are very long, unstable and not useful: + if os.getenv("KhiopsCompleteTests") == "true": + 
test_family.append("Classification") +        test_family.append("MTClassification") +        test_family.append("Regression") +        test_family.append("ChallengeAutoML") +        # V11 test_family.append("TextClassification") + +    # Coclustering tool +    if tool == "Coclustering": +        test_family = ["Standard", "Bugs", "NewPriorV9", "SmallInstability"] + +    # KNI tool +    if tool == "KNI": +        test_family = ["Standard", "MultiTables", "SmallInstability"] +    return test_family diff --git a/test/LearningTest/cmd/python/test_khiops.py b/test/LearningTest/cmd/python/test_khiops.py index 9e2cf7f53..80ceee131 100644 --- a/test/LearningTest/cmd/python/test_khiops.py +++ b/test/LearningTest/cmd/python/test_khiops.py @@ -1,3 +1,4 @@ +import platform import os.path import sys import stat @@ -283,10 +284,13 @@ def filter_empty_lines(lines): khiops_params.append(mpiExecPath) if os.name == "nt": khiops_params.append("-l") +        if platform.system() == "Darwin": +            khiops_params.append("-host") +            khiops_params.append("localhost") khiops_params.append("-n") khiops_params.append(khiops_mpi_process_number) khiops_params.append(modl_path) -    if os.getenv("KhiopsBatchMode") != "false": +    if os.getenv("KhiopsBatchMode") != "false": khiops_params.append("-b") khiops_params.append("-i") khiops_params.append(os.path.join(os.getcwd(), "test.prm")) diff --git a/test/LearningTest/cmd/python/test_khiops_all.py b/test/LearningTest/cmd/python/test_khiops_all.py index 9eaf7dd5e..f539339c3 100644 --- a/test/LearningTest/cmd/python/test_khiops_all.py +++ b/test/LearningTest/cmd/python/test_khiops_all.py @@ -1,8 +1,8 @@ import learning_test_env import test_khiops +import test_families import os.path import sys - import stat @@ -86,50 +86,18 @@ def test_khiops_tool(tool_name, tool_version, tool_test_dirs): tool = sys.argv[2] # Khiops tool -    khiops_tests = [ -        "Standard", -        "SideEffects", -        "Rules", -        "MissingValues", -        "Advanced", -        "Bugs", -        "BugsMultiTables", -        "MultipleTargets", -        "MultiTables", -        "DeployCoclustering", -        "SparseData", -
"SparseModeling", - "ParallelTask", - "NewPriorV9", - "DTClassification", - "VariableConstruction", - "NewV10", - "KIInterpretation", - "CrashTests", - "SmallInstability", - ] - # V11 "Histograms", - # V11 "HistogramsLimits", - # V11 "TextVariables", - - # Following tests are very long, instable and not usefull: - if os.getenv("KhiopsCompleteTests") == "true": - khiops_tests.append("Classification") - khiops_tests.append("TextClassification") - khiops_tests.append("MTClassification") - khiops_tests.append("Regression") - khiops_tests.append("ChallengeAutoML") if tool == "" or tool == "Khiops": + khiops_tests = test_families.get_test_family("Khiops") test_khiops_tool("Khiops", version, khiops_tests) # Coclustering tool - coclustering_tests = ["Standard", "Bugs", "NewPriorV9", "SmallInstability"] if tool == "" or tool == "Coclustering": + coclustering_tests = test_families.get_test_family("Coclustering") test_khiops_tool("Coclustering", version, coclustering_tests) # KNI tool - KNI_tests = ["Standard", "MultiTables", "SmallInstability"] if tool == "" or tool == "KNI": + KNI_tests = test_families.get_test_family("KNI") test_khiops_tool("KNI", version, KNI_tests) print("all tests are done")