Skip to content

Commit

Permalink
gh-158: add bfs example app (with running valid cpu & ref impls)
Browse files Browse the repository at this point in the history
  • Loading branch information
EgorOrachyov committed Oct 13, 2022
1 parent fd12440 commit c800d05
Show file tree
Hide file tree
Showing 21 changed files with 273 additions and 46 deletions.
72 changes: 72 additions & 0 deletions examples/bfs.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,12 @@
/* SOFTWARE. */
/**********************************************************************************/

#include "common.hpp"
#include "options.hpp"

#include <spla/spla.hpp>


int main(int argc, const char* const* argv) {
std::shared_ptr<cxxopts::Options> options = make_options("bfs", "bfs (breadth first search) algorithm with spla library");
cxxopts::ParseResult args;
Expand All @@ -39,12 +41,82 @@ int main(int argc, const char* const* argv) {
return ret;
}

spla::Timer timer;
spla::Timer timer_cpu;
spla::Timer timer_gpu;
spla::Timer timer_ref;
spla::MtxLoader loader;

timer.start();

if (!loader.load(args[OPT_MTXPATH].as<std::string>())) {
std::cerr << "failed to load graph";
return 1;
}

const spla::uint N = loader.get_n_rows();
const spla::uint s = args[OPT_SOURCE].as<int>();
spla::ref_ptr<spla::Vector> v = spla::make_vector(N, spla::INT);
spla::ref_ptr<spla::Matrix> A = spla::make_matrix(N, N, spla::INT);

const auto& Ai = loader.get_Ai();
const auto& Aj = loader.get_Aj();

for (std::size_t k = 0; k < loader.get_n_values(); ++k) {
A->set_int(Ai[k], Aj[k], 1);
}

const int n_iters = args[OPT_NITERS].as<int>();

if (args[OPT_RUN_CPU].as<bool>()) {
for (int i = 0; i < n_iters; ++i) {
v->clear();

timer_cpu.lap_begin();
spla::bfs(v, A, s, spla::ref_ptr<spla::Descriptor>());
timer_cpu.lap_end();
}
}

if (args[OPT_RUN_GPU].as<bool>()) {
for (int i = 0; i < n_iters; ++i) {
v->clear();

timer_gpu.lap_begin();
spla::bfs(v, A, s, spla::ref_ptr<spla::Descriptor>());
timer_gpu.lap_end();
}
}

if (args[OPT_RUN_REF].as<bool>()) {
std::vector<int> ref_v(N);
std::vector<std::vector<spla::uint>> ref_A(N, std::vector<spla::uint>());

for (std::size_t k = 0; k < loader.get_n_values(); ++k) {
ref_A[Ai[k]].push_back(Aj[k]);
}

timer_ref.lap_begin();
spla::bfs_naive(ref_v, ref_A, s, spla::ref_ptr<spla::Descriptor>());
timer_ref.lap_end();

verify_exact(v, ref_v);
}

spla::get_library()->finalize();

timer.stop();

std::cout << "total(ms): " << timer.get_elapsed_ms() << std::endl;
std::cout << "cpu(ms): ";
output_time(timer_cpu);
std::cout << std::endl;
std::cout << "gpu(ms): ";
output_time(timer_gpu);
std::cout << std::endl;
std::cout << "ref(ms): ";
output_time(timer_ref);
std::cout << std::endl;

return 0;
}
57 changes: 57 additions & 0 deletions examples/common.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
/**********************************************************************************/
/* This file is part of spla project */
/* https://github.com/JetBrains-Research/spla */
/**********************************************************************************/
/* MIT License */
/* */
/* Copyright (c) 2021 JetBrains-Research */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining a copy */
/* of this software and associated documentation files (the "Software"), to deal */
/* in the Software without restriction, including without limitation the rights */
/* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell */
/* copies of the Software, and to permit persons to whom the Software is */
/* furnished to do so, subject to the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be included in all */
/* copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR */
/* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, */
/* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE */
/* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER */
/* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, */
/* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE */
/* SOFTWARE. */
/**********************************************************************************/

#ifndef SPLA_COMMON_HPP
#define SPLA_COMMON_HPP

#include <spla/spla.hpp>

#include <cassert>
#include <iostream>
#include <vector>

/**
 * @brief Element-wise comparison of a spla vector against a reference std::vector
 *
 * Reads each entry of @p a via get_int and compares it with the corresponding
 * entry of @p b. On mismatch a diagnostic is printed to std::cerr before the
 * assert fires, so the failing values are visible in debug builds.
 *
 * @param a spla int vector produced by the library algorithm
 * @param b reference values computed by the naive implementation
 */
inline void verify_exact(const spla::ref_ptr<spla::Vector>& a, const std::vector<int>& b) {
    const auto N = a->get_n_rows();
    for (spla::uint i = 0; i < N; i++) {
        const int expected = b[i];
        int actual = 0;// initialized: get_int may leave it untouched on failure
        a->get_int(i, actual);

        if (expected != actual) {
            // Print the diagnostic first so it is not lost when assert aborts.
            std::cerr << " VERIFY: expected " << expected << " actual " << actual << std::endl;
        }

        assert(expected == actual);
    }
}

/**
 * @brief Print all recorded lap times of a timer as comma-separated milliseconds
 *
 * Output goes to std::cout with a trailing comma after each value and no
 * newline; the caller is expected to terminate the line.
 *
 * @param timer timer whose laps to print
 */
inline void output_time(const spla::Timer& timer) {
    // `inline`: this helper lives in a header, so it must not produce
    // multiple strong definitions when included from several sources.
    for (auto t : timer.get_laps_ms()) {
        std::cout << t << ",";
    }
}

#endif//SPLA_COMMON_HPP
10 changes: 9 additions & 1 deletion examples/options.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -32,13 +32,21 @@

#define OPT_MTXPATH "mtxpath"
#define OPT_NITERS "niters"
#define OPT_SOURCE "source"
#define OPT_RUN_REF "run-ref"
#define OPT_RUN_CPU "run-cpu"
#define OPT_RUN_GPU "run-gpu"


std::shared_ptr<cxxopts::Options> make_options(const std::string& name, const std::string& desc) {
std::shared_ptr<cxxopts::Options> options = std::make_shared<cxxopts::Options>(name, desc);
options->add_option("", cxxopts::Option("h,help", "display help info", cxxopts::value<bool>()->default_value("false")));
options->add_option("", cxxopts::Option(OPT_MTXPATH, "path to matrix file", cxxopts::value<std::string>()));
options->add_option("", cxxopts::Option(OPT_NITERS, "number of iterations to run", cxxopts::value<int>()->default_value("4")));
options->add_option("", cxxopts::Option("source", "source vertex to run", cxxopts::value<int>()->default_value("0")));
options->add_option("", cxxopts::Option(OPT_SOURCE, "source vertex to run", cxxopts::value<int>()->default_value("0")));
options->add_option("", cxxopts::Option(OPT_RUN_REF, "check validity running naive version", cxxopts::value<bool>()->default_value("true")));
options->add_option("", cxxopts::Option(OPT_RUN_CPU, "run algo with cpu backend", cxxopts::value<bool>()->default_value("true")));
options->add_option("", cxxopts::Option(OPT_RUN_GPU, "run algo with gpu (acc) backend", cxxopts::value<bool>()->default_value("true")));
options->add_option("", cxxopts::Option("undirected", "force graph to be undirected", cxxopts::value<bool>()->default_value("false")));
options->add_option("", cxxopts::Option("platform", "id of platform to run", cxxopts::value<int>()->default_value("0")));
options->add_option("", cxxopts::Option("devices", "id of device to run", cxxopts::value<int>()->default_value("0")));
Expand Down
15 changes: 15 additions & 0 deletions include/spla/algorithm.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,21 @@ namespace spla {
uint s,
const ref_ptr<Descriptor>& descriptor);

/**
 * @brief Naive reference breadth-first search algorithm (single-threaded CPU)
 *
 * Intended as a ground-truth implementation to validate the spla-based bfs.
 *
 * @param v int vector to store reached depths; the start vertex gets depth 1
 * @param A graph adjacency lists: A[i] holds the ids of vertices j such that
 *          an edge from i to j exists
 * @param s start vertex id to search from
 * @param descriptor optional descriptor for algorithm (currently unused)
 *
 * @return ok on success
 */
SPLA_API Status bfs_naive(std::vector<int>& v,
std::vector<std::vector<spla::uint>>& A,
uint s,
const ref_ptr<Descriptor>& descriptor);

/**
* @}
*/
Expand Down
3 changes: 3 additions & 0 deletions include/spla/op.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,9 @@ namespace spla {
SPLA_API extern ref_ptr<OpBinary> MAX_UINT;
SPLA_API extern ref_ptr<OpBinary> MAX_FLOAT;

SPLA_API extern ref_ptr<OpBinary> OR_INT;
SPLA_API extern ref_ptr<OpBinary> OR_UINT;

SPLA_API extern ref_ptr<OpSelect> GZERO_INT;
SPLA_API extern ref_ptr<OpSelect> GZERO_UINT;
SPLA_API extern ref_ptr<OpSelect> GZERO_FLOAT;
Expand Down
2 changes: 2 additions & 0 deletions include/spla/schedule.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,7 @@ namespace spla {
* @param v
* @param op_multiply
* @param op_add
* @param init
* @param opt_complement
* @param desc Scheduled task descriptor; default is null
*
Expand All @@ -112,6 +113,7 @@ namespace spla {
ref_ptr<Vector> v,
ref_ptr<OpBinary> op_multiply,
ref_ptr<OpBinary> op_add,
ref_ptr<Scalar> init,
bool opt_complement,
ref_ptr<Descriptor> desc = ref_ptr<Descriptor>());

Expand Down
3 changes: 2 additions & 1 deletion include/spla/timer.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,8 @@ namespace spla {

SPLA_API void start();
SPLA_API void stop();
SPLA_API void lap();
SPLA_API void lap_begin();
SPLA_API void lap_end();
[[nodiscard]] SPLA_API double get_elapsed_ms() const;
[[nodiscard]] SPLA_API double get_elapsed_lap_ms() const;
[[nodiscard]] SPLA_API const std::vector<double>& get_laps_ms() const;
Expand Down
58 changes: 47 additions & 11 deletions src/algorithm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,9 @@
#include <spla/op.hpp>
#include <spla/schedule.hpp>

#include <algorithm>
#include <cassert>
#include <queue>

namespace spla {

Expand All @@ -40,36 +42,70 @@ namespace spla {
assert(v);
assert(A);

auto N = v->get_n_rows();
const auto N = v->get_n_rows();

ref_ptr<Vector> frontier = make_vector(N, INT);
ref_ptr<Vector> frontier_prev = make_vector(N, INT);
ref_ptr<Vector> frontier_new = make_vector(N, INT);
ref_ptr<Scalar> frontier_size = make_int(1);
ref_ptr<Scalar> depth = make_int(1);
ref_ptr<Scalar> zero = make_int(0);
int current_level = 1;
bool frontier_empty = false;
bool complement = true;

ref_ptr<Schedule> bfs_body = make_schedule();
ref_ptr<ScheduleTask> bfs_assign = make_sched_v_assign_masked(v, frontier, depth, SECOND_INT);
ref_ptr<ScheduleTask> bfs_step = make_sched_mxv_masked(frontier, v, A, frontier, MULT_INT, PLUS_INT, complement);
ref_ptr<ScheduleTask> bfs_check = make_sched_v_reduce(frontier_size, zero, frontier, PLUS_INT);
bfs_body->step_task(bfs_assign);
bfs_body->step_task(bfs_step);
bfs_body->step_task(bfs_check);

frontier->set_int(s, 1);
frontier_prev->set_int(s, 1);

while (!frontier_empty) {
depth->set_int(current_level);

ref_ptr<Schedule> bfs_body = make_schedule();
ref_ptr<ScheduleTask> bfs_assign = make_sched_v_assign_masked(v, frontier_prev, depth, SECOND_INT);
ref_ptr<ScheduleTask> bfs_step = make_sched_mxv_masked(frontier_new, v, A, frontier_prev, MULT_INT, OR_INT, zero, complement);
ref_ptr<ScheduleTask> bfs_check = make_sched_v_reduce(frontier_size, zero, frontier_new, PLUS_INT);
bfs_body->step_task(bfs_assign);
bfs_body->step_task(bfs_step);
bfs_body->step_task(bfs_check);
bfs_body->submit();

int observed_vertices;
frontier_size->get_int(observed_vertices);

frontier_empty = observed_vertices == 0;
current_level += 1;

std::swap(frontier_prev, frontier_new);
}

return Status::Ok;
}

// Reference BFS over adjacency lists using a plain std::queue.
// Depths are 1-based (start vertex gets 1) to match the spla bfs variant,
// which assigns the start vertex depth 1 on its first iteration.
// Unreached vertices keep the value 0. `descriptor` is accepted for API
// symmetry with spla::bfs but is not used here.
Status bfs_naive(std::vector<int>& v,
std::vector<std::vector<spla::uint>>& A,
uint s,
const ref_ptr<Descriptor>& descriptor) {

const auto N = v.size();

// FIFO frontier of discovered-but-unexpanded vertices.
std::queue<uint> front;
std::vector<bool> visited(N, false);

// Reset output: 0 marks "not reached".
std::fill(v.begin(), v.end(), 0);

front.push(s);
visited[s] = true;
v[s] = 1;

while (!front.empty()) {
auto i = front.front();
front.pop();

// Expand all out-neighbors of i; each newly seen vertex is one level deeper.
for (auto j : A[i]) {
if (!visited[j]) {
visited[j] = true;
v[j] = v[i] + 1;
front.push(j);
}
}
}

return Status::Ok;
Expand Down
2 changes: 0 additions & 2 deletions src/core/dispatcher.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,6 @@ namespace spla {
algo = g_reg->find(key_acc);

if (algo) {
LOG_MSG(Status::Ok, "found acc algo " << algo->get_name());
return algo->execute(ctx);
}
}
Expand All @@ -54,7 +53,6 @@ namespace spla {
algo = g_reg->find(key_cpu);

if (algo) {
LOG_MSG(Status::Ok, "found cpu algo " << algo->get_name());
return algo->execute(ctx);
}

Expand Down
8 changes: 4 additions & 4 deletions src/core/tscalar.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -106,25 +106,25 @@ namespace spla {

template<typename T>
Status TScalar<T>::get_byte(std::int8_t& value) {
    // Cast to the destination type, not T: casting to T left the value
    // unconverted and the duplicate dead store served no purpose.
    value = static_cast<std::int8_t>(m_value);
    return Status::Ok;
}

template<typename T>
Status TScalar<T>::get_int(std::int32_t& value) {
    // Single conversion to the destination type; the previous redundant
    // assignment through static_cast<T> was a dead store.
    value = static_cast<std::int32_t>(m_value);
    return Status::Ok;
}

template<typename T>
Status TScalar<T>::get_uint(std::uint32_t& value) {
    // Convert directly to uint32; the stale static_cast<T> line was dead code.
    value = static_cast<std::uint32_t>(m_value);
    return Status::Ok;
}

template<typename T>
Status TScalar<T>::get_float(float& value) {
    // Convert directly to float; the stale static_cast<T> line was dead code.
    value = static_cast<float>(m_value);
    return Status::Ok;
}

Expand Down
Loading

0 comments on commit c800d05

Please sign in to comment.