From 8173bb775d49bb8a7687cf6dee07659da5cacf66 Mon Sep 17 00:00:00 2001
From: Leo Auri
Date: Mon, 11 Mar 2024 16:28:07 +1030
Subject: [PATCH 1/3] Catch exceptions around input tensor size

---
 src/backend/backend.cpp | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/src/backend/backend.cpp b/src/backend/backend.cpp
index f243bdb..2a4ceb8 100644
--- a/src/backend/backend.cpp
+++ b/src/backend/backend.cpp
@@ -41,13 +41,18 @@ void Backend::perform(std::vector<float *> in_buffer,
   }
 
   auto cat_tensor_in = torch::cat(tensor_in, 1);
-  cat_tensor_in = cat_tensor_in.reshape({in_dim, n_batches, -1, in_ratio});
-  cat_tensor_in = cat_tensor_in.select(-1, -1);
-  cat_tensor_in = cat_tensor_in.permute({1, 0, 2});
-  // std::cout << cat_tensor_in.size(0) << ";" << cat_tensor_in.size(1) << ";" << cat_tensor_in.size(2) << std::endl;
-  // for (int i = 0; i < cat_tensor_in.size(1); i++ )
-  //   std::cout << cat_tensor_in[0][i][0] << ";";
-  // std::cout << std::endl;
+  try {
+    cat_tensor_in = cat_tensor_in.reshape({in_dim, n_batches, -1, in_ratio});
+    cat_tensor_in = cat_tensor_in.select(-1, -1);
+    cat_tensor_in = cat_tensor_in.permute({1, 0, 2});
+    // std::cout << cat_tensor_in.size(0) << ";" << cat_tensor_in.size(1) << ";" << cat_tensor_in.size(2) << std::endl;
+    // for (int i = 0; i < cat_tensor_in.size(1); i++ )
+    //   std::cout << cat_tensor_in[0][i][0] << ";";
+    // std::cout << std::endl;
+  } catch (const std::exception &e) {
+    std::cout << e.what() << std::endl;
+    return;
+  }
 
   // SEND TENSOR TO DEVICE
   std::unique_lock<std::mutex> model_lock(m_model_mutex);

From 44940ae60fe0cda9010d9278de10abee9649d847 Mon Sep 17 00:00:00 2001
From: Leo Auri
Date: Mon, 18 Mar 2024 15:40:19 +1030
Subject: [PATCH 2/3] Handle ratios that are not a factor of the audio buffer

---
 .gitignore                                |  3 +-
 src/backend/backend.cpp                   | 88 ++++++++++++++++++++---
 src/backend/backend.h                     |  5 ++
 src/frontend/maxmsp/nn_tilde/nn_tilde.cpp |  2 +
 4 files changed, 86 insertions(+), 12 deletions(-)

diff --git a/.gitignore b/.gitignore
index 853d1c7..87ec03d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,4 +8,5 @@ src/tests
 src/docs
 *.ts
 dist/
-*.egg-info*
\ No newline at end of file
+*.egg-info*
+tags
\ No newline at end of file

diff --git a/src/backend/backend.cpp b/src/backend/backend.cpp
index 2a4ceb8..91dfae4 100644
--- a/src/backend/backend.cpp
+++ b/src/backend/backend.cpp
@@ -8,7 +8,13 @@
 #define CUDA torch::kCUDA
 #define MPS torch::kMPS
 
-Backend::Backend() : m_loaded(0), m_device(CPU), m_use_gpu(false) {
+namespace F = torch::nn::functional;
+namespace TI = torch::indexing;
+
+Backend::Backend()
+    : m_loaded(0), m_device(CPU), m_use_gpu(false), in_cursor(0),
+      out_cursor(0) {
+
   at::init_num_threads();
 }
 
@@ -41,9 +47,34 @@ void Backend::perform(std::vector<float *> in_buffer,
   }
 
   auto cat_tensor_in = torch::cat(tensor_in, 1);
+
+  // pad tensor so its length is divisible by in_ratio
+  int tensor_in_size = cat_tensor_in.size(-1);
+  int ceil_mult = ((tensor_in_size + in_ratio - 1) / in_ratio) * in_ratio;
+  int pad = ceil_mult - tensor_in_size;
+  // std::cout << pad << std::endl;
   try {
+    cat_tensor_in = F::pad(cat_tensor_in, F::PadFuncOptions({0, pad}));
+
     cat_tensor_in = cat_tensor_in.reshape({in_dim, n_batches, -1, in_ratio});
-    cat_tensor_in = cat_tensor_in.select(-1, -1);
+
+    // select slice for input
+    cat_tensor_in = cat_tensor_in.select(-1, in_cursor);
+
+    // trim end when padding included
+    cat_tensor_in = cat_tensor_in.index(
+      {
+        "...",
+        TI::Slice(0, (tensor_in_size - in_cursor + in_ratio - 1) / in_ratio)
+      }
+    );
+    std::cout << "Tensor in size: " << cat_tensor_in.size(-1) << std::endl;
+
+    // move cursor
+    in_cursor += ceil_mult - tensor_in_size;
+    in_cursor %= in_ratio;
+    std::cout << "In cursor: " << in_cursor << std::endl;
+
     cat_tensor_in = cat_tensor_in.permute({1, 0, 2});
     // std::cout << cat_tensor_in.size(0) << ";" << cat_tensor_in.size(1) << ";" << cat_tensor_in.size(2) << std::endl;
     // for (int i = 0; i < cat_tensor_in.size(1); i++ )
@@ -87,19 +118,45 @@ void Backend::perform(std::vector<float *> in_buffer,
     return;
   }
 
-  if (out_n_vec != n_vec) {
-    std::cout << "model output size is not consistent, expected " << n_vec
-              << " samples, got " << out_n_vec << "!\n";
-    return;
-  }
+  // if (out_n_vec != n_vec) {
+  //   std::cout << "model output size is not consistent, expected " << n_vec
+  //             << " samples, got " << out_n_vec << "!\n";
+  //   return;
+  // }
+  std::cout << "Tensor out size: " << out_n_vec << ", output vector size: "
+            << n_vec << std::endl;
 
   tensor_out = tensor_out.to(CPU);
   tensor_out = tensor_out.reshape({out_batches * out_channels, -1});
-  auto out_ptr = tensor_out.contiguous().data_ptr<float>();
-
-  for (int i(0); i < out_buffer.size(); i++) {
-    memcpy(out_buffer[i], out_ptr + i * n_vec, n_vec * sizeof(float));
-  }
+
+  // split tensor into current and future buffer parts
+  // copy future part to new tensor
+  auto tensor_future = tensor_out.index({TI::Slice(n_vec - out_cursor, TI::None)});
+  tensor_out = tensor_out.index({TI::Slice(0, n_vec - out_cursor)});
+
+  // Copy data from future buffer into output buffer
+  for (int i(0); i < out_buffer.size(); i++)
+    memcpy(out_buffer[i], future_buffer[i].get(), out_cursor * sizeof(float));
+
+  // Fill rest of output buffer with tensor values
+  auto out_ptr = tensor_out.contiguous().data_ptr<float>();
+  for (int i(0); i < out_buffer.size(); i++)
+    memcpy(
+      out_buffer[i] + out_cursor,
+      out_ptr + i * (n_vec - out_cursor),
+      (n_vec - out_cursor) * sizeof(float)
+    );
+
+  // Copy remaining tensor values to future buffer and set out cursor
+  auto fut_ptr = tensor_future.contiguous().data_ptr<float>();
+  out_cursor += out_n_vec - n_vec;
+  for (int i(0); i < out_buffer.size(); i++)
+    memcpy(
+      future_buffer[i].get(),
+      fut_ptr + i * out_cursor,
+      out_cursor * sizeof(float)
+    );
+  std::cout << "Out cursor: " << out_cursor << std::endl;
 }
 
 int Backend::load(std::string path) {
@@ -127,6 +184,15 @@ int Backend::reload() {
   return return_code;
 }
 
+void Backend::prepare(int chans, std::string method) {
+  // future buffer should be preallocated with out_dim * batches arrays
+  // of size in_ratio * out_ratio
+  future_buffer.clear();
+  auto params = get_method_params(method);
+  for (; chans > 0; chans--)
+    future_buffer.push_back(std::make_unique<float[]>(params[1] * params[3]));
+}
+
 bool Backend::has_method(std::string method_name) {
   std::unique_lock<std::mutex> model_lock(m_model_mutex);
   for (const auto &m : m_model.get_methods()) {

diff --git a/src/backend/backend.h b/src/backend/backend.h
index 52905eb..ce4bfcc 100644
--- a/src/backend/backend.h
+++ b/src/backend/backend.h
@@ -15,6 +15,10 @@ class Backend {
   c10::DeviceType m_device;
   bool m_use_gpu;
 
+  size_t in_cursor;
+  size_t out_cursor;
+  std::vector<std::unique_ptr<float[]>> future_buffer;
+
 public:
   Backend();
   void perform(std::vector<float *> in_buffer, std::vector<float *> out_buffer,
@@ -33,6 +37,7 @@ class Backend {
   int get_higher_ratio();
   int load(std::string path);
   int reload();
+  void prepare(int chans, std::string method);
   bool is_loaded();
   torch::jit::script::Module get_model() { return m_model; }
   void use_gpu(bool value);

diff --git a/src/frontend/maxmsp/nn_tilde/nn_tilde.cpp b/src/frontend/maxmsp/nn_tilde/nn_tilde.cpp
index a091e29..23390a0 100644
--- a/src/frontend/maxmsp/nn_tilde/nn_tilde.cpp
+++ b/src/frontend/maxmsp/nn_tilde/nn_tilde.cpp
@@ -288,6 +288,8 @@ nn::nn(const atoms &args)
     m_out_model.push_back(std::make_unique<float[]>(m_buffer_size));
   }
 
+  m_model->prepare(m_out_dim, m_method);
+
   if (m_use_thread)
     m_compute_thread = std::make_unique<std::thread>(model_perform_loop, this);
 }

From 7829787d6c86437ddd0071f6013903a6d3152ab5 Mon Sep 17 00:00:00 2001
From: Leo Auri
Date: Mon, 18 Mar 2024 21:50:55 +1030
Subject: [PATCH 3/3] Fix tensor slicing

---
 src/backend/backend.cpp | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/src/backend/backend.cpp b/src/backend/backend.cpp
index 91dfae4..97a3a8d 100644
--- a/src/backend/backend.cpp
+++ b/src/backend/backend.cpp
@@ -68,12 +68,12 @@ void Backend::perform(std::vector<float *> in_buffer,
         TI::Slice(0, (tensor_in_size - in_cursor + in_ratio - 1) / in_ratio)
       }
     );
-    std::cout << "Tensor in size: " << cat_tensor_in.size(-1) << std::endl;
+    // std::cout << "Tensor in size: " << cat_tensor_in.size(-1) << std::endl;
 
     // move cursor
     in_cursor += ceil_mult - tensor_in_size;
     in_cursor %= in_ratio;
-    std::cout << "In cursor: " << in_cursor << std::endl;
+    // std::cout << "In cursor: " << in_cursor << std::endl;
 
     cat_tensor_in = cat_tensor_in.permute({1, 0, 2});
     // std::cout << cat_tensor_in.size(0) << ";" << cat_tensor_in.size(1) << ";" << cat_tensor_in.size(2) << std::endl;
@@ -123,16 +123,23 @@ void Backend::perform(std::vector<float *> in_buffer,
   //             << " samples, got " << out_n_vec << "!\n";
   //   return;
   // }
-  std::cout << "Tensor out size: " << out_n_vec << ", output vector size: "
-            << n_vec << std::endl;
+
+  // std::cout << "Tensor out size: " << out_n_vec << ", output vector size: "
+  //           << n_vec << std::endl;
 
   tensor_out = tensor_out.to(CPU);
   tensor_out = tensor_out.reshape({out_batches * out_channels, -1});
 
   // split tensor into current and future buffer parts
   // copy future part to new tensor
-  auto tensor_future = tensor_out.index({TI::Slice(n_vec - out_cursor, TI::None)});
-  tensor_out = tensor_out.index({TI::Slice(0, n_vec - out_cursor)});
+  auto tensor_future = tensor_out.index({
+    "...", TI::Slice(n_vec - out_cursor, TI::None)
+  });
+  tensor_out = tensor_out.index({
+    "...", TI::Slice(0, n_vec - out_cursor)
+  });
+  // std::cout << "Out tensor shape: " << tensor_out.sizes()
+  //           << ", future tensor shape: " << tensor_future.sizes() << std::endl;
 
   // Copy data from future buffer into output buffer
   for (int i(0); i < out_buffer.size(); i++)
@@ -156,7 +163,7 @@ void Backend::perform(std::vector<float *> in_buffer,
       fut_ptr + i * out_cursor,
      out_cursor * sizeof(float)
    );
-  std::cout << "Out cursor: " << out_cursor << std::endl;
+  // std::cout << "Out cursor: " << out_cursor << std::endl;
 }
 
 int Backend::load(std::string path) {
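
Note on the cursor arithmetic (commentary, not part of the patches): the series makes perform() correct when in_ratio does not divide the host buffer size. Input is padded up to the next multiple of in_ratio, one model-rate sample is taken per block at offset in_cursor, blocks that contain only padding are trimmed, and the pad amount is folded into in_cursor modulo in_ratio so the next call resumes at the right phase. The following is a minimal standalone sketch of just that bookkeeping, outside of torch, using hypothetical values for n_vec and in_ratio:

#include <cstdio>

int main() {
  const int n_vec = 10;   // host buffer size; deliberately not a multiple of in_ratio
  const int in_ratio = 4; // model input ratio
  int in_cursor = 0;      // offset of the next model-rate sample within an in_ratio block

  for (int call = 0; call < 5; call++) {
    // pad up to the next multiple of in_ratio, as F::pad does in the patch
    int ceil_mult = ((n_vec + in_ratio - 1) / in_ratio) * in_ratio;
    int pad = ceil_mult - n_vec;

    // model-rate samples drawn from real (unpadded) input: one per block at
    // offset in_cursor; mirrors select(-1, in_cursor) plus the end trim
    int model_samples = (n_vec - in_cursor + in_ratio - 1) / in_ratio;

    // carry the phase into the next call, as the patch does after slicing
    in_cursor = (in_cursor + pad) % in_ratio;

    printf("call %d: pad=%d, model samples=%d, next in_cursor=%d\n",
           call, pad, model_samples, in_cursor);
  }
  return 0;
}

With n_vec = 10 and in_ratio = 4 this consumes model samples at global positions 0, 4, 8, 12, 16, 20, ... across successive calls, which is the invariant the patches maintain. The output side mirrors it: each call first drains out_cursor carried samples from future_buffer, then copies fresh model output, then stores the out_n_vec - n_vec surplus for the next call.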