diff --git a/README.md b/README.md
index 28faabf..5754785 100644
--- a/README.md
+++ b/README.md
@@ -17,6 +17,12 @@ python utils/correctness.py --alg brute --csv tiny
 module load python
 python utils/visualizer.py tiny build/brute.out
 
+# For Timing Comparison Graph
+
+module load python
+python utils/comparisons.py (tries all implementations)
+python utils/comparisons.py greedy dp (only compares greedy and dp)
+
 
 # References
 https://www.kaggle.com/datasets/mexwell/traveling-salesman-problem/data
\ No newline at end of file
diff --git a/algorithms/greedy.cpp b/algorithms/greedy.cpp
index e69de29..8a9154f 100644
--- a/algorithms/greedy.cpp
+++ b/algorithms/greedy.cpp
@@ -0,0 +1,59 @@
+#include <bits/stdc++.h>
+#include "../common/algorithms.hpp"
+
+using namespace std;
+
+// Helper function to calculate Euclidean distance
+double distance(const std::pair<double, double>& p1, const std::pair<double, double>& p2) {
+    return std::sqrt(std::pow(p1.first - p2.first, 2) + std::pow(p1.second - p2.second, 2));
+}
+
+TSPResult solve(const std::vector<std::pair<double, double>>& coordinates) {
+    int n = coordinates.size();
+
+    // Compute distance matrix
+    vector<vector<double>> distances(n, vector<double>(n));
+    for (int i = 0; i < n; i++) {
+        for (int j = 0; j < n; j++) {
+            distances[i][j] = distance(coordinates[i], coordinates[j]);
+        }
+    }
+
+    // Initialize variables for greedy algorithm
+    vector<bool> visited(n, false);
+    vector<int> path;
+    double total_cost = 0.0;
+
+    // Start from city 0
+    int current_city = 0;
+    path.push_back(current_city);
+    visited[current_city] = true;
+
+    // Visit remaining n-1 cities
+    while (path.size() < n) {
+        double min_distance = numeric_limits<double>::infinity();
+        int next_city = -1;
+
+        // Find nearest unvisited city
+        for (int j = 0; j < n; j++) {
+            if (!visited[j] && distances[current_city][j] < min_distance) {
+                min_distance = distances[current_city][j];
+                next_city = j;
+            }
+        }
+
+        // Add nearest city to path
+        current_city = next_city;
+        path.push_back(current_city);
+        visited[current_city] = true;
+        total_cost += min_distance;
+    }
+
+    // Add cost of returning to start
+    total_cost += distances[path.back()][path[0]];
+
+    TSPResult result;
+    result.cost = total_cost;
+    result.path = path;
+    return result;
+}
\ No newline at end of file
diff --git a/utils/comparisons.py b/utils/comparisons.py
new file mode 100644
index 0000000..3032e20
--- /dev/null
+++ b/utils/comparisons.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python3
+import subprocess
+import time
+import argparse
+import matplotlib.pyplot as plt
+
+def run_implementation(exec_path: str, dataset: str) -> float:
+    """Run implementation with dataset and return execution time"""
+    try:
+        start = time.time()
+        result = subprocess.run([exec_path, '--csv', f'data/{dataset}.csv'],
+                                stdout=subprocess.PIPE,
+                                stderr=subprocess.PIPE)
+        end = time.time()
+        return end - start
+    except Exception as e:
+        print(f"Error running {exec_path} with {dataset}: {e}")
+        return -1
+
+def plot_results(results):
+    """Plot timing results for all implementations"""
+    plt.figure(figsize=(12, 8))
+
+    colors = {
+        'brute': 'b',
+        'greedy': 'g',
+        'genetic': 'r',
+        'dp': 'purple',
+        'greedy_omp': 'y'
+    }
+
+    datasets = ['tiny', 'small', 'medium', 'large','huge','gigantic']
+    x_pos = range(len(datasets))
+
+    for impl, timings in results.items():
+        if timings:  # Only plot if we have data
+            plt.plot(x_pos, [timings[d] for d in datasets],
+                     f'{colors[impl]}-o',
+                     label=f'{impl.capitalize()} Implementation')
+
+    plt.xticks(x_pos, datasets)
+    plt.xlabel('Dataset Size')
+    plt.ylabel('Execution Time (seconds)')
+    plt.title('TSP Implementation Performance Comparison')
+    plt.legend()
+    plt.grid(True)
+    plt.yscale('log')  # Log scale for better visibility
+
+    plt.savefig('comparison_results.png')
+    print("Plot saved as comparison_results.png")
+
+    try:
+        plt.show()
+    except Exception as e:
+        print(f"Could not display plot: {e}")
+
+if __name__ == "__main__":
+    all_implementations = ['brute', 'greedy', 'genetic', 'dp', 'greedy_omp']
+
+    parser = argparse.ArgumentParser(description='Compare TSP implementations')
+    parser.add_argument('implementations',
+                        nargs='*',
+                        choices=all_implementations,
+                        help='Implementations to compare. If none specified, runs all.')
+
+    args = parser.parse_args()
+
+    # Select implementations to run
+    implementations = args.implementations if args.implementations else all_implementations
+    datasets = ['tiny', 'small', 'medium', 'large','huge','gigantic']
+
+    results = {}
+
+    # Run tests for each implementation and dataset
+    for impl in implementations:
+        results[impl] = {}
+        exec_path = f'./build/{impl}'
+
+        for dataset in datasets:
+            print(f"Running {impl} on {dataset} dataset...")
+            execution_time = run_implementation(exec_path, dataset)
+
+            if execution_time >= 0:
+                results[impl][dataset] = execution_time
+                print(f"{impl.capitalize()}, {dataset}: {execution_time:.6f} seconds")
+
+    # Plot results
+    plot_results(results)
\ No newline at end of file