From ad0b3d2ecdba2796847d3d12689b061b384ff4ec Mon Sep 17 00:00:00 2001
From: Corentin Allaire
Date: Tue, 24 Oct 2023 18:38:18 +0200
Subject: [PATCH] spell check

---
 .../seed_filter_full_chain.py             | 20 ++++++++++----------
 .../seed_solver_network.py                |  2 +-
 .../train_seed_solver.py                  |  6 +++---
 .../Mlpack/SeedFilterDBScanClustering.hpp |  4 ++--
 .../Acts/Plugins/Onnx/SeedClassifier.hpp  |  4 ++--
 5 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/seed_filter_full_chain.py b/Examples/Scripts/Python/MLAmbiguityResolution/seed_filter_full_chain.py
index 49eebd03e49..f40ac455ed3 100644
--- a/Examples/Scripts/Python/MLAmbiguityResolution/seed_filter_full_chain.py
+++ b/Examples/Scripts/Python/MLAmbiguityResolution/seed_filter_full_chain.py
@@ -63,7 +63,7 @@ def clusterSeed(
     @param[in] event: input DataFrame that contain all track in one event
     @param[in] DBSCAN_eps: minimum radius used by the DBSCAN to cluster track together
     @param[in] DBSCAN_min_samples: minimum number of tracks needed for DBSCAN to create a cluster
-    @return: DataFrame identical to the output with an added collumn with the cluster
+    @return: DataFrame identical to the output with an added column with the cluster
     """
     # Perform the DBSCAN clustering and sort the Db by cluster ID
     trackDir = event[["eta", "phi", "vertexZ", "pT"]].to_numpy()
@@ -152,7 +152,7 @@ def renameCluster(clusterarray: np.ndarray) -> np.ndarray:
     plt.clf()


-# Create historgram filled with the number of seed per cluster
+# Create histogram filled with the number of seed per cluster
 for event in plotData:
     event["nb_seed"] = 0
     event["nb_fake"] = 0
@@ -169,7 +169,7 @@ def renameCluster(clusterarray: np.ndarray) -> np.ndarray:
     plt.ylabel("nb cluster")
     plt.savefig("nb_seed.png")
     plt.clf()
-    # Create historgram filled with the number of fake seed per cluster
+    # Create histogram filled with the number of fake seed per cluster
     event.loc[event["good/duplicate/fake"] == "fake", "nb_fake"] = (
         event.loc[event["good/duplicate/fake"] == "fake"]
         .groupby(["cluster"])["cluster"]
@@ -180,7 +180,7 @@ def renameCluster(clusterarray: np.ndarray) -> np.ndarray:
     plt.ylabel("nb cluster")
     plt.savefig("nb_fake.png")
     plt.clf()
-    # Create historgram filled with the number of duplicate seed per cluster
+    # Create histogram filled with the number of duplicate seed per cluster
     event.loc[event["good/duplicate/fake"] == "duplicate", "nb_duplicate"] = (
         event.loc[event["good/duplicate/fake"] == "duplicate"]
         .groupby(["cluster"])["cluster"]
@@ -191,7 +191,7 @@ def renameCluster(clusterarray: np.ndarray) -> np.ndarray:
     plt.ylabel("nb cluster")
     plt.savefig("nb_duplicate.png")
     plt.clf()
-    # Create historgram filled with the number of good seed per cluster
+    # Create histogram filled with the number of good seed per cluster
     event.loc[event["good/duplicate/fake"] == "good", "nb_good"] = (
         event.loc[event["good/duplicate/fake"] == "good"]
         .groupby(["cluster"])["cluster"]
@@ -202,14 +202,14 @@ def renameCluster(clusterarray: np.ndarray) -> np.ndarray:
     plt.ylabel("nb cluster")
     plt.savefig("nb_good.png")
     plt.clf()
-    # Create historgram filled with the number of truth particle per cluster
+    # Create histogram filled with the number of truth particle per cluster
     event["nb_truth"] = event.groupby(["cluster"])["particleId"].transform("nunique")
     event["nb_truth"].hist(bins=10, range=[0, 10])
     plt.xlabel("nb truth")
     plt.ylabel("nb cluster")
     plt.savefig("nb_truth.png")
     plt.clf()
-    # Create historgram filled with the number of cluser per truth particle
+    # Create histogram filled with the number of cluster per truth particle
     event["nb_cluster"] = event.groupby(event.index)["cluster"].transform("nunique")
     event["nb_cluster"].hist(bins=30, weights=1 / event["nb_seed"], range=[0, 30])
     plt.xlabel("nb cluster")
@@ -217,7 +217,7 @@ def renameCluster(clusterarray: np.ndarray) -> np.ndarray:
     plt.savefig("nb_cluster.png")
     plt.clf()

-    # Create historgram filled with the number of good cluser with more than one
+    # Create histogram filled with the number of good cluster with more than one
     event["nb_good"].hist(bins=10, weights=(event["nb_seed"] > 1) / event["nb_seed"])
     plt.xlabel("nb good cluster with more than 1 seed")
     plt.ylabel("nb cluster")
@@ -234,7 +234,7 @@ def renameCluster(clusterarray: np.ndarray) -> np.ndarray:
     x = torch.tensor(x_test, dtype=torch.float32)
     output_predict = duplicateClassifier(x).detach().numpy()

-    # creat an array of random value between 0 and 1 of the same size as the output
+    # Create an array of random value between 0 and 1 of the same size as the output
     # output_predict = np.random.rand(len(x_test))

     clusteredEvent["score"] = output_predict
@@ -244,7 +244,7 @@ def renameCluster(clusterarray: np.ndarray) -> np.ndarray:
     )
     cleanedEvent = clusteredEvent[idx]

-    # For each cluster only keep the track with the higest score
+    # For each cluster only keep the track with the highest score
     idx = (
         cleanedEvent.groupby(["cluster"])["score"].transform(max)
         == cleanedEvent["score"]
diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/seed_solver_network.py b/Examples/Scripts/Python/MLAmbiguityResolution/seed_solver_network.py
index 1ef18525aa8..bd68f87a5e1 100644
--- a/Examples/Scripts/Python/MLAmbiguityResolution/seed_solver_network.py
+++ b/Examples/Scripts/Python/MLAmbiguityResolution/seed_solver_network.py
@@ -12,7 +12,7 @@ def prepareDataSet(data: pd.DataFrame) -> pd.DataFrame:
     """Format the dataset that have been written from the Csv file"""
     """
     @param[in] data: input DataFrame containing 1 event
-    @return: Formated DataFrame
+    @return: Formatted DataFrame
     """
     data = data
     data = data.sort_values("good/duplicate/fake", ascending=False)
diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/train_seed_solver.py b/Examples/Scripts/Python/MLAmbiguityResolution/train_seed_solver.py
index 513ca0f96dc..7644c0e52b9 100644
--- a/Examples/Scripts/Python/MLAmbiguityResolution/train_seed_solver.py
+++ b/Examples/Scripts/Python/MLAmbiguityResolution/train_seed_solver.py
@@ -166,7 +166,7 @@ def scoringBatch(batch: list[pd.DataFrame], Optimiser=0) -> tuple[int, int, floa
     max_match = 1
     # loop over all the batch
     for b_data in batch:
-        # ID of the current particule
+        # ID of the current particle
         pid = b_data[0][0]
         # loss for the current batch
         batch_loss = 0
@@ -184,7 +184,7 @@ def scoringBatch(batch: list[pd.DataFrame], Optimiser=0) -> tuple[int, int, floa
         prediction = duplicateClassifier(input)
         # loop over all the seed in the batch
         for index, pred, truth in zip(b_data[0], prediction, b_data[2]):
-            # If we are changing particle uptade the loss
+            # If we are changing particle update the loss
             if index != pid:
                 # Starting a new particles, compute the loss for the previous one
                 if max_match == 0 or max_match == 2:
@@ -237,7 +237,7 @@ def scoringBatch(batch: list[pd.DataFrame], Optimiser=0) -> tuple[int, int, floa
         # Normalise the loss to the batch size
         batch_loss = batch_loss / len(b_data[0])
         loss += batch_loss
-        # Perform the gradient decend if an optimiser was specified
+        # Perform the gradient descent if an optimiser was specified
         if Optimiser:
             batch_loss.backward()
             Optimiser.step()
diff --git a/Plugins/Mlpack/include/Acts/Plugins/Mlpack/SeedFilterDBScanClustering.hpp b/Plugins/Mlpack/include/Acts/Plugins/Mlpack/SeedFilterDBScanClustering.hpp
index 3ff8661d38a..b07c222c5fc 100644
--- a/Plugins/Mlpack/include/Acts/Plugins/Mlpack/SeedFilterDBScanClustering.hpp
+++ b/Plugins/Mlpack/include/Acts/Plugins/Mlpack/SeedFilterDBScanClustering.hpp
@@ -16,7 +16,7 @@

 namespace Acts {

-/// Clusterise seed based on their Z position, their direction and their
+/// Clusters seed based on their Z position, their direction and their
 /// momentum using DBScan
 ///
 /// @param input : Input parameters for the clustering (phi, eta, z, Pt/10)
@@ -26,7 +26,7 @@ namespace Acts {
 std::vector<std::vector<std::size_t>> dbscanSeedClustering(
     const std::vector<std::vector<double>>& input, float epsilon = 0.07,
     int minPoints = 2) {
-  // DBSCAN algoritm from MLpack used in the seed clustering
+  // DBSCAN algorithm from MLpack used in the seed clustering
   mlpack::DBSCAN dbscan(epsilon, minPoints);

   // Compute the space dimension of the input
diff --git a/Plugins/Onnx/include/Acts/Plugins/Onnx/SeedClassifier.hpp b/Plugins/Onnx/include/Acts/Plugins/Onnx/SeedClassifier.hpp
index bd2878d49d4..f8d493f07f1 100644
--- a/Plugins/Onnx/include/Acts/Plugins/Onnx/SeedClassifier.hpp
+++ b/Plugins/Onnx/include/Acts/Plugins/Onnx/SeedClassifier.hpp
@@ -70,7 +70,7 @@ class SeedClassifier {
   /// @param clusters is a map of clusters, each cluster correspond to a vector of seed ID
   /// @param networkInput input of the network
   /// @return a vector of seedID corresponding the the good seeds
-  std::vector<std::size_t> solveAmbuguity(std::vector<std::vector<std::size_t>>& clusters,
+  std::vector<std::size_t> solveAmbiguity(std::vector<std::vector<std::size_t>>& clusters,
                                           Acts::NetworkBatchInput& networkInput) const {
     std::vector<std::vector<float>> outputTensor = inferScores(networkInput);
     std::vector<std::size_t> goodSeeds = seedSelection(clusters, outputTensor);
@@ -78,7 +78,7 @@ class SeedClassifier {
   }

  private:
-  // ONNX environement
+  // ONNX environment
   Ort::Env m_env;
   // ONNX model for the duplicate neural network
   Acts::OnnxRuntimeBase m_duplicateClassifier;
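
A minimal sketch of the clustering and seed-selection steps the
seed_filter_full_chain.py hunks touch, assuming scikit-learn's DBSCAN as the
clustering backend and an event DataFrame that already carries a "score"
column (the function name is illustrative, not from the patched script):

import pandas as pd
from sklearn.cluster import DBSCAN


def cluster_and_select(
    event: pd.DataFrame, eps: float = 0.07, min_samples: int = 2
) -> pd.DataFrame:
    """Cluster seeds by direction, then keep the best seed of each cluster."""
    # Same feature layout as the trackDir context line: eta, phi, vertex z, pT
    track_dir = event[["eta", "phi", "vertexZ", "pT"]].to_numpy()
    # Label every seed with a cluster ID (-1 means no cluster was found)
    event = event.copy()
    event["cluster"] = DBSCAN(eps=eps, min_samples=min_samples).fit(track_dir).labels_
    # "For each cluster only keep the track with the highest score"
    best = event.groupby("cluster")["score"].transform("max") == event["score"]
    return event[best].sort_values("cluster")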
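
And a similarly minimal sketch of the optimisation step around the
train_seed_solver.py hunks, assuming duplicateClassifier is a torch module
whose output is a per-seed score in [0, 1]; the real script accumulates a
per-particle loss rather than the plain binary cross-entropy used here:

import torch


def training_step(duplicateClassifier, optimiser, x, target):
    """One gradient-descent step over a batch of seed features."""
    optimiser.zero_grad()
    # One score per seed, squeezed to a flat tensor
    scores = duplicateClassifier(x).squeeze(-1)
    loss = torch.nn.functional.binary_cross_entropy(scores, target)
    loss.backward()   # same pattern as batch_loss.backward() in the patch
    optimiser.step()  # and as Optimiser.step()
    return loss.item()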