diff --git a/CMakeLists.txt b/CMakeLists.txt index 016e74354d..5bc32da5b0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -12,25 +12,25 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -O3") set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -march=native") set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -march=native") -# Check C++11 or C++0x support +# Check C++14 or C++0x support include(CheckCXXCompilerFlag) -CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11) +CHECK_CXX_COMPILER_FLAG("-std=c++14" COMPILER_SUPPORTS_CXX14) CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X) -if(COMPILER_SUPPORTS_CXX11) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") - add_definitions(-DCOMPILEDWITHC11) - message(STATUS "Using flag -std=c++11.") +if(COMPILER_SUPPORTS_CXX14) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") + add_definitions(-DCOMPILEDWITHC14) + message(STATUS "Using flag -std=c++14.") elseif(COMPILER_SUPPORTS_CXX0X) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x") add_definitions(-DCOMPILEDWITHC0X) message(STATUS "Using flag -std=c++0x.") else() - message(FATAL_ERROR "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.") + message(FATAL_ERROR "The compiler ${CMAKE_CXX_COMPILER} has no C++14 support. 
Please use a different C++ compiler.") endif() LIST(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules) -find_package(OpenCV 4.4) +find_package(OpenCV 4.4) if(NOT OpenCV_FOUND) message(FATAL_ERROR "OpenCV > 4.4 not found.") endif() @@ -119,7 +119,7 @@ target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBS} ${EIGEN3_LIBS} ${Pangolin_LIBRARIES} -${PROJECT_SOURCE_DIR}/Thirdparty/DBoW2/lib/libDBoW2.so +${PROJECT_SOURCE_DIR}/Thirdparty/DBoW2/lib/libDBoW3.so ${PROJECT_SOURCE_DIR}/Thirdparty/g2o/lib/libg2o.so -lboost_serialization -lcrypto diff --git a/Examples/Monocular-Inertial/mono_inertial_euroc.cc b/Examples/Monocular-Inertial/mono_inertial_euroc.cc index b9f320fd76..c389d09f30 100644 --- a/Examples/Monocular-Inertial/mono_inertial_euroc.cc +++ b/Examples/Monocular-Inertial/mono_inertial_euroc.cc @@ -148,7 +148,7 @@ int main(int argc, char *argv[]) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -158,7 +158,7 @@ int main(int argc, char *argv[]) int height = im.rows * imageScale; cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -184,7 +184,7 @@ int main(int argc, char *argv[]) } } - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -194,7 +194,7 @@ int main(int argc, char *argv[]) // cout << "tframe = " << tframe << endl; SLAM.TrackMonocular(im,tframe,vImuMeas); // TODO change to monocular_inertial - #ifdef 
COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples/Monocular-Inertial/mono_inertial_realsense_D435i.cc b/Examples/Monocular-Inertial/mono_inertial_realsense_D435i.cc index c8bf1ff589..9c8b245b12 100644 --- a/Examples/Monocular-Inertial/mono_inertial_realsense_D435i.cc +++ b/Examples/Monocular-Inertial/mono_inertial_realsense_D435i.cc @@ -314,7 +314,7 @@ int main(int argc, char **argv) { if(!image_ready) cond_image_rec.wait(lk); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point time_Start_Process = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point time_Start_Process = std::chrono::monotonic_clock::now(); @@ -365,7 +365,7 @@ int main(int argc, char **argv) { if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -375,7 +375,7 @@ int main(int argc, char **argv) { int height = im.rows * imageScale; cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -386,7 +386,7 @@ int main(int argc, char **argv) { } #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -395,7 +395,7 @@ int main(int argc, char **argv) { // Pass the image to the SLAM system 
SLAM.TrackMonocular(im, timestamp, vImuMeas); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples/Monocular-Inertial/mono_inertial_realsense_t265.cc b/Examples/Monocular-Inertial/mono_inertial_realsense_t265.cc index b4575b6acb..71fae5dc58 100644 --- a/Examples/Monocular-Inertial/mono_inertial_realsense_t265.cc +++ b/Examples/Monocular-Inertial/mono_inertial_realsense_t265.cc @@ -230,7 +230,7 @@ int main(int argc, char **argv) while(!image_ready) cond_image_rec.wait(lk); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point time_Start_Process = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point time_Start_Process = std::chrono::monotonic_clock::now(); @@ -257,7 +257,7 @@ int main(int argc, char **argv) else { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -267,7 +267,7 @@ int main(int argc, char **argv) int height = imCV.rows * imageScale; cv::resize(imCV, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -308,7 +308,7 @@ int main(int argc, char **argv) } } -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -316,7 +316,7 @@ int main(int argc, char 
**argv) // Pass the image to the SLAM system SLAM.TrackMonocular(im, timestamp, vImuMeas); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples/Monocular-Inertial/mono_inertial_tum_vi.cc b/Examples/Monocular-Inertial/mono_inertial_tum_vi.cc index bd84ce0012..f3352d4b7a 100644 --- a/Examples/Monocular-Inertial/mono_inertial_tum_vi.cc +++ b/Examples/Monocular-Inertial/mono_inertial_tum_vi.cc @@ -172,7 +172,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -182,7 +182,7 @@ int main(int argc, char **argv) int height = im.rows * imageScale; cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -195,7 +195,7 @@ int main(int argc, char **argv) // cout << "first imu: " << first_imu[seq] << endl; /*cout << "first imu time: " << fixed << vTimestampsImu[first_imu] << endl; cout << "size vImu: " << vImuMeas.size() << endl;*/ - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -205,7 +205,7 @@ int main(int argc, char **argv) // cout << "tframe = " << tframe << endl; SLAM.TrackMonocular(im,tframe,vImuMeas); // TODO change to monocular_inertial - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 
std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples/Monocular/mono_euroc.cc b/Examples/Monocular/mono_euroc.cc index 3a233129be..7f54f5b68c 100644 --- a/Examples/Monocular/mono_euroc.cc +++ b/Examples/Monocular/mono_euroc.cc @@ -80,7 +80,7 @@ int main(int argc, char **argv) int fps = 20; float dT = 1.f/fps; // Create SLAM system. It initializes all system threads and gets ready to process frames. - ORB_SLAM3::System SLAM(argv[1],argv[2],ORB_SLAM3::System::MONOCULAR, false); + ORB_SLAM3::System SLAM(argv[1],argv[2],ORB_SLAM3::System::MONOCULAR, true); float imageScale = SLAM.GetImageScale(); double t_resize = 0.f; @@ -109,7 +109,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -119,7 +119,7 @@ int main(int argc, char **argv) int height = im.rows * imageScale; cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -129,7 +129,7 @@ int main(int argc, char **argv) #endif } - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -139,7 +139,7 @@ int main(int argc, char **argv) // cout << "tframe = " << tframe << endl; SLAM.TrackMonocular(im,tframe); // TODO change to monocular_inertial - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 
std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples/Monocular/mono_kitti.cc b/Examples/Monocular/mono_kitti.cc index 404c8242c8..d1a4ac612d 100644 --- a/Examples/Monocular/mono_kitti.cc +++ b/Examples/Monocular/mono_kitti.cc @@ -78,7 +78,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -88,7 +88,7 @@ int main(int argc, char **argv) int height = im.rows * imageScale; cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -98,7 +98,7 @@ int main(int argc, char **argv) #endif } -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -107,7 +107,7 @@ int main(int argc, char **argv) // Pass the image to the SLAM system SLAM.TrackMonocular(im,tframe,vector(), vstrImageFilenames[ni]); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples/Monocular/mono_realsense_D435i.cc b/Examples/Monocular/mono_realsense_D435i.cc index 11cbc035d2..ea4e3de89c 100644 --- a/Examples/Monocular/mono_realsense_D435i.cc +++ b/Examples/Monocular/mono_realsense_D435i.cc @@ -228,7 +228,7 @@ int main(int argc, 
char **argv) { if(!image_ready) cond_image_rec.wait(lk); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point time_Start_Process = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point time_Start_Process = std::chrono::monotonic_clock::now(); @@ -247,7 +247,7 @@ int main(int argc, char **argv) { if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -258,7 +258,7 @@ int main(int argc, char **argv) { cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -269,7 +269,7 @@ int main(int argc, char **argv) { } #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -278,7 +278,7 @@ int main(int argc, char **argv) { // Stereo images are already rectified. 
SLAM.TrackMonocular(im, timestamp); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples/Monocular/mono_realsense_t265.cc b/Examples/Monocular/mono_realsense_t265.cc index 9895eef8ca..576ed184f0 100644 --- a/Examples/Monocular/mono_realsense_t265.cc +++ b/Examples/Monocular/mono_realsense_t265.cc @@ -116,7 +116,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -126,7 +126,7 @@ int main(int argc, char **argv) int height = imCV.rows * imageScale; cv::resize(imCV, imCV, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -141,7 +141,7 @@ int main(int argc, char **argv) //clahe->apply(imRight,imRight); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -152,7 +152,7 @@ int main(int argc, char **argv) SLAM.TrackMonocular(imCV, timestamp_ms); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples/Monocular/mono_tum.cc b/Examples/Monocular/mono_tum.cc index 
0e7d922c37..97c91cf52a 100644 --- a/Examples/Monocular/mono_tum.cc +++ b/Examples/Monocular/mono_tum.cc @@ -79,7 +79,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -89,7 +89,7 @@ int main(int argc, char **argv) int height = im.rows * imageScale; cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -99,7 +99,7 @@ int main(int argc, char **argv) #endif } -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -108,7 +108,7 @@ int main(int argc, char **argv) // Pass the image to the SLAM system SLAM.TrackMonocular(im,tframe); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples/Monocular/mono_tum_vi.cc b/Examples/Monocular/mono_tum_vi.cc index 2805c92522..02d2c78eec 100644 --- a/Examples/Monocular/mono_tum_vi.cc +++ b/Examples/Monocular/mono_tum_vi.cc @@ -112,7 +112,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -122,7 +122,7 @@ int 
main(int argc, char **argv) int height = im.rows * imageScale; cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -145,7 +145,7 @@ int main(int argc, char **argv) << vstrImageFilenames[seq][ni] << endl; return 1; } -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -154,7 +154,7 @@ int main(int argc, char **argv) // Pass the image to the SLAM system SLAM.TrackMonocular(im,tframe); // TODO change to monocular_inertial -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples/RGB-D-Inertial/rgbd_inertial_realsense_D435i.cc b/Examples/RGB-D-Inertial/rgbd_inertial_realsense_D435i.cc index 03970c511a..392e0088a8 100644 --- a/Examples/RGB-D-Inertial/rgbd_inertial_realsense_D435i.cc +++ b/Examples/RGB-D-Inertial/rgbd_inertial_realsense_D435i.cc @@ -345,7 +345,7 @@ int main(int argc, char **argv) { if(!image_ready) cond_image_rec.wait(lk); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point time_Start_Process = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point time_Start_Process = std::chrono::monotonic_clock::now(); @@ -412,7 +412,7 @@ int main(int argc, char **argv) { if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = 
std::chrono::monotonic_clock::now(); @@ -424,7 +424,7 @@ int main(int argc, char **argv) { cv::resize(depth, depth, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -435,7 +435,7 @@ int main(int argc, char **argv) { } #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -445,7 +445,7 @@ int main(int argc, char **argv) { SLAM.TrackRGBD(im, depth, timestamp, vImuMeas); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples/RGB-D/rgbd_realsense_D435i.cc b/Examples/RGB-D/rgbd_realsense_D435i.cc index 8ff6131804..c9e1ab9060 100644 --- a/Examples/RGB-D/rgbd_realsense_D435i.cc +++ b/Examples/RGB-D/rgbd_realsense_D435i.cc @@ -323,7 +323,7 @@ int main(int argc, char **argv) { if(!image_ready) cond_image_rec.wait(lk); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point time_Start_Process = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point time_Start_Process = std::chrono::monotonic_clock::now(); @@ -359,7 +359,7 @@ int main(int argc, char **argv) { if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -371,7 +371,7 @@ int 
main(int argc, char **argv) { cv::resize(depth, depth, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -382,7 +382,7 @@ int main(int argc, char **argv) { } #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -392,7 +392,7 @@ int main(int argc, char **argv) { SLAM.TrackRGBD(im, depth, timestamp); //, vImuMeas); depthCV #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples/RGB-D/rgbd_tum.cc b/Examples/RGB-D/rgbd_tum.cc index d420da52c3..b120130625 100644 --- a/Examples/RGB-D/rgbd_tum.cc +++ b/Examples/RGB-D/rgbd_tum.cc @@ -94,7 +94,7 @@ int main(int argc, char **argv) cv::resize(imD, imD, cv::Size(width, height)); } -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -103,7 +103,7 @@ int main(int argc, char **argv) // Pass the image to the SLAM system SLAM.TrackRGBD(imRGB,imD,tframe); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples/Stereo-Inertial/stereo_inertial_euroc.cc b/Examples/Stereo-Inertial/stereo_inertial_euroc.cc index bb52223542..8d897502b1 100644 
--- a/Examples/Stereo-Inertial/stereo_inertial_euroc.cc +++ b/Examples/Stereo-Inertial/stereo_inertial_euroc.cc @@ -175,7 +175,7 @@ int main(int argc, char **argv) first_imu[seq]++; } - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -184,7 +184,7 @@ int main(int argc, char **argv) // Pass the images to the SLAM system SLAM.TrackStereo(imLeft,imRight,tframe,vImuMeas); - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples/Stereo-Inertial/stereo_inertial_realsense_D435i.cc b/Examples/Stereo-Inertial/stereo_inertial_realsense_D435i.cc index d3a03bf005..3b5e01e66a 100644 --- a/Examples/Stereo-Inertial/stereo_inertial_realsense_D435i.cc +++ b/Examples/Stereo-Inertial/stereo_inertial_realsense_D435i.cc @@ -344,7 +344,7 @@ int main(int argc, char **argv) { if(!image_ready) cond_image_rec.wait(lk); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point time_Start_Process = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point time_Start_Process = std::chrono::monotonic_clock::now(); @@ -396,7 +396,7 @@ int main(int argc, char **argv) { if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -408,7 +408,7 @@ int main(int argc, char **argv) { cv::resize(imRight, imRight, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = 
std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -419,7 +419,7 @@ int main(int argc, char **argv) { } #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -428,7 +428,7 @@ int main(int argc, char **argv) { // Stereo images are already rectified. SLAM.TrackStereo(im, imRight, timestamp, vImuMeas); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples/Stereo-Inertial/stereo_inertial_realsense_t265.cc b/Examples/Stereo-Inertial/stereo_inertial_realsense_t265.cc index 9fcd77588c..66f2f84155 100644 --- a/Examples/Stereo-Inertial/stereo_inertial_realsense_t265.cc +++ b/Examples/Stereo-Inertial/stereo_inertial_realsense_t265.cc @@ -243,7 +243,7 @@ int main(int argc, char **argv) else { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -254,7 +254,7 @@ int main(int argc, char **argv) cv::resize(imCV, im_left, cv::Size(width, height)); cv::resize(imCV_right, im_right, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -294,7 +294,7 @@ int main(int argc, char **argv) } #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + 
#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -303,7 +303,7 @@ int main(int argc, char **argv) // Pass the image to the SLAM system SLAM.TrackStereo(im_left, im_right, timestamp, vImuMeas); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples/Stereo-Inertial/stereo_inertial_tum_vi.cc b/Examples/Stereo-Inertial/stereo_inertial_tum_vi.cc index 2abe61472b..c551835dcf 100644 --- a/Examples/Stereo-Inertial/stereo_inertial_tum_vi.cc +++ b/Examples/Stereo-Inertial/stereo_inertial_tum_vi.cc @@ -144,7 +144,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -155,7 +155,7 @@ int main(int argc, char **argv) cv::resize(imLeft, imLeft, cv::Size(width, height)); cv::resize(imRight, imRight, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -201,7 +201,7 @@ int main(int argc, char **argv) cout << "first imu time: " << fixed << vTimestampsImu[seq][0] << endl; cout << "size vImu: " << vImuMeas.size() << endl;*/ - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = 
std::chrono::monotonic_clock::now(); @@ -210,7 +210,7 @@ int main(int argc, char **argv) // Pass the image to the SLAM system SLAM.TrackStereo(imLeft,imRight,tframe,vImuMeas); - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples/Stereo/stereo_euroc.cc b/Examples/Stereo/stereo_euroc.cc index 1250f2ee3c..87149302ec 100644 --- a/Examples/Stereo/stereo_euroc.cc +++ b/Examples/Stereo/stereo_euroc.cc @@ -122,7 +122,7 @@ int main(int argc, char **argv) double tframe = vTimestampsCam[seq][ni]; - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -131,7 +131,7 @@ int main(int argc, char **argv) // Pass the images to the SLAM system SLAM.TrackStereo(imLeft,imRight,tframe, vector(), vstrImageLeft[seq][ni]); - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples/Stereo/stereo_kitti.cc b/Examples/Stereo/stereo_kitti.cc index fdc8ef0415..4d96898d46 100644 --- a/Examples/Stereo/stereo_kitti.cc +++ b/Examples/Stereo/stereo_kitti.cc @@ -81,7 +81,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -92,7 +92,7 @@ int main(int argc, char **argv) cv::resize(imLeft, imLeft, cv::Size(width, height)); cv::resize(imRight, imRight, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef 
COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -102,7 +102,7 @@ int main(int argc, char **argv) #endif } -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -111,7 +111,7 @@ int main(int argc, char **argv) // Pass the images to the SLAM system SLAM.TrackStereo(imLeft,imRight,tframe); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples/Stereo/stereo_realsense_D435i.cc b/Examples/Stereo/stereo_realsense_D435i.cc index f1a45710b6..a8735ff683 100644 --- a/Examples/Stereo/stereo_realsense_D435i.cc +++ b/Examples/Stereo/stereo_realsense_D435i.cc @@ -259,7 +259,7 @@ int main(int argc, char **argv) { if(!image_ready) cond_image_rec.wait(lk); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point time_Start_Process = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point time_Start_Process = std::chrono::monotonic_clock::now(); @@ -279,7 +279,7 @@ int main(int argc, char **argv) { if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -291,7 +291,7 @@ int main(int argc, char **argv) { cv::resize(imRight, imRight, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = 
std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -302,7 +302,7 @@ int main(int argc, char **argv) { } #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -311,7 +311,7 @@ int main(int argc, char **argv) { // Stereo images are already rectified. SLAM.TrackStereo(im, imRight, timestamp); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples/Stereo/stereo_realsense_t265.cc b/Examples/Stereo/stereo_realsense_t265.cc index f96d80af35..3a6c7bdeef 100644 --- a/Examples/Stereo/stereo_realsense_t265.cc +++ b/Examples/Stereo/stereo_realsense_t265.cc @@ -126,7 +126,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -137,7 +137,7 @@ int main(int argc, char **argv) cv::resize(imLeft, imLeft, cv::Size(width, height)); cv::resize(imRight, imRight, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -151,7 +151,7 @@ int main(int argc, char **argv) //clahe->apply(imLeft,imLeft); //clahe->apply(imRight,imRight); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef 
COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -162,7 +162,7 @@ int main(int argc, char **argv) SLAM.TrackStereo(imLeft, imRight, timestamp); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples/Stereo/stereo_tum_vi.cc b/Examples/Stereo/stereo_tum_vi.cc index 20a3ecf72d..1fd9c32b02 100644 --- a/Examples/Stereo/stereo_tum_vi.cc +++ b/Examples/Stereo/stereo_tum_vi.cc @@ -117,7 +117,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -128,7 +128,7 @@ int main(int argc, char **argv) cv::resize(imLeft, imLeft, cv::Size(width, height)); cv::resize(imRight, imRight, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -151,7 +151,7 @@ int main(int argc, char **argv) return 1; } - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -160,7 +160,7 @@ int main(int argc, char **argv) // Pass the image to the SLAM system SLAM.TrackStereo(imLeft,imRight,tframe); - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = 
std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples/euroc_examples.sh b/Examples/euroc_examples.sh new file mode 100755 index 0000000000..6dc36bd225 --- /dev/null +++ b/Examples/euroc_examples.sh @@ -0,0 +1,7 @@ +#!/bin/bash +pathDatasetEuroc='/' #Example, it is necessary to change it to the dataset path +
+#------------------------------------ +# Monocular Examples +echo "Launching MH04 with Monocular sensor" +./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/MH04 ./Monocular/EuRoC_TimeStamps/MH04.txt dataset-MH04_mono \ No newline at end of file diff --git a/Examples_old/Monocular-Inertial/mono_inertial_euroc.cc b/Examples_old/Monocular-Inertial/mono_inertial_euroc.cc index ea1dceb805..6573e16f6b 100644 --- a/Examples_old/Monocular-Inertial/mono_inertial_euroc.cc +++ b/Examples_old/Monocular-Inertial/mono_inertial_euroc.cc @@ -147,7 +147,7 @@ int main(int argc, char *argv[]) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -157,7 +157,7 @@ int main(int argc, char *argv[]) int height = im.rows * imageScale; cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -183,7 +183,7 @@ int main(int argc, char *argv[]) } } - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -193,7 +193,7 @@ int
main(int argc, char *argv[]) // cout << "tframe = " << tframe << endl; SLAM.TrackMonocular(im,tframe,vImuMeas); // TODO change to monocular_inertial - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Monocular-Inertial/mono_inertial_realsense_D435i.cc b/Examples_old/Monocular-Inertial/mono_inertial_realsense_D435i.cc index c8bf1ff589..9c8b245b12 100644 --- a/Examples_old/Monocular-Inertial/mono_inertial_realsense_D435i.cc +++ b/Examples_old/Monocular-Inertial/mono_inertial_realsense_D435i.cc @@ -314,7 +314,7 @@ int main(int argc, char **argv) { if(!image_ready) cond_image_rec.wait(lk); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point time_Start_Process = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point time_Start_Process = std::chrono::monotonic_clock::now(); @@ -365,7 +365,7 @@ int main(int argc, char **argv) { if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -375,7 +375,7 @@ int main(int argc, char **argv) { int height = im.rows * imageScale; cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -386,7 +386,7 @@ int main(int argc, char **argv) { } #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else 
std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -395,7 +395,7 @@ int main(int argc, char **argv) { // Pass the image to the SLAM system SLAM.TrackMonocular(im, timestamp, vImuMeas); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Monocular-Inertial/mono_inertial_realsense_t265.cc b/Examples_old/Monocular-Inertial/mono_inertial_realsense_t265.cc index b4575b6acb..71fae5dc58 100644 --- a/Examples_old/Monocular-Inertial/mono_inertial_realsense_t265.cc +++ b/Examples_old/Monocular-Inertial/mono_inertial_realsense_t265.cc @@ -230,7 +230,7 @@ int main(int argc, char **argv) while(!image_ready) cond_image_rec.wait(lk); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point time_Start_Process = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point time_Start_Process = std::chrono::monotonic_clock::now(); @@ -257,7 +257,7 @@ int main(int argc, char **argv) else { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -267,7 +267,7 @@ int main(int argc, char **argv) int height = imCV.rows * imageScale; cv::resize(imCV, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -308,7 +308,7 @@ int main(int argc, char **argv) } } -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 
std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -316,7 +316,7 @@ int main(int argc, char **argv) // Pass the image to the SLAM system SLAM.TrackMonocular(im, timestamp, vImuMeas); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Monocular-Inertial/mono_inertial_tum_vi.cc b/Examples_old/Monocular-Inertial/mono_inertial_tum_vi.cc index bd84ce0012..f3352d4b7a 100644 --- a/Examples_old/Monocular-Inertial/mono_inertial_tum_vi.cc +++ b/Examples_old/Monocular-Inertial/mono_inertial_tum_vi.cc @@ -172,7 +172,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -182,7 +182,7 @@ int main(int argc, char **argv) int height = im.rows * imageScale; cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -195,7 +195,7 @@ int main(int argc, char **argv) // cout << "first imu: " << first_imu[seq] << endl; /*cout << "first imu time: " << fixed << vTimestampsImu[first_imu] << endl; cout << "size vImu: " << vImuMeas.size() << endl;*/ - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = 
std::chrono::monotonic_clock::now(); @@ -205,7 +205,7 @@ int main(int argc, char **argv) // cout << "tframe = " << tframe << endl; SLAM.TrackMonocular(im,tframe,vImuMeas); // TODO change to monocular_inertial - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Monocular/mono_euroc.cc b/Examples_old/Monocular/mono_euroc.cc index 3a233129be..fc5ed0dc91 100644 --- a/Examples_old/Monocular/mono_euroc.cc +++ b/Examples_old/Monocular/mono_euroc.cc @@ -109,7 +109,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -119,7 +119,7 @@ int main(int argc, char **argv) int height = im.rows * imageScale; cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -129,7 +129,7 @@ int main(int argc, char **argv) #endif } - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -139,7 +139,7 @@ int main(int argc, char **argv) // cout << "tframe = " << tframe << endl; SLAM.TrackMonocular(im,tframe); // TODO change to monocular_inertial - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = 
std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Monocular/mono_kitti.cc b/Examples_old/Monocular/mono_kitti.cc index 404c8242c8..d1a4ac612d 100644 --- a/Examples_old/Monocular/mono_kitti.cc +++ b/Examples_old/Monocular/mono_kitti.cc @@ -78,7 +78,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -88,7 +88,7 @@ int main(int argc, char **argv) int height = im.rows * imageScale; cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -98,7 +98,7 @@ int main(int argc, char **argv) #endif } -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -107,7 +107,7 @@ int main(int argc, char **argv) // Pass the image to the SLAM system SLAM.TrackMonocular(im,tframe,vector(), vstrImageFilenames[ni]); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Monocular/mono_realsense_D435i.cc b/Examples_old/Monocular/mono_realsense_D435i.cc index 11cbc035d2..ea4e3de89c 100644 --- a/Examples_old/Monocular/mono_realsense_D435i.cc +++ b/Examples_old/Monocular/mono_realsense_D435i.cc @@ -228,7 +228,7 @@ int main(int argc, char **argv) { if(!image_ready) cond_image_rec.wait(lk); -#ifdef COMPILEDWITHC11 +#ifdef 
COMPILEDWITHC14 std::chrono::steady_clock::time_point time_Start_Process = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point time_Start_Process = std::chrono::monotonic_clock::now(); @@ -247,7 +247,7 @@ int main(int argc, char **argv) { if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -258,7 +258,7 @@ int main(int argc, char **argv) { cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -269,7 +269,7 @@ int main(int argc, char **argv) { } #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -278,7 +278,7 @@ int main(int argc, char **argv) { // Stereo images are already rectified. 
SLAM.TrackMonocular(im, timestamp); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Monocular/mono_realsense_t265.cc b/Examples_old/Monocular/mono_realsense_t265.cc index 9895eef8ca..576ed184f0 100644 --- a/Examples_old/Monocular/mono_realsense_t265.cc +++ b/Examples_old/Monocular/mono_realsense_t265.cc @@ -116,7 +116,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -126,7 +126,7 @@ int main(int argc, char **argv) int height = imCV.rows * imageScale; cv::resize(imCV, imCV, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -141,7 +141,7 @@ int main(int argc, char **argv) //clahe->apply(imRight,imRight); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -152,7 +152,7 @@ int main(int argc, char **argv) SLAM.TrackMonocular(imCV, timestamp_ms); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Monocular/mono_tum.cc 
b/Examples_old/Monocular/mono_tum.cc index 0e7d922c37..97c91cf52a 100644 --- a/Examples_old/Monocular/mono_tum.cc +++ b/Examples_old/Monocular/mono_tum.cc @@ -79,7 +79,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -89,7 +89,7 @@ int main(int argc, char **argv) int height = im.rows * imageScale; cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -99,7 +99,7 @@ int main(int argc, char **argv) #endif } -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -108,7 +108,7 @@ int main(int argc, char **argv) // Pass the image to the SLAM system SLAM.TrackMonocular(im,tframe); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Monocular/mono_tum_vi.cc b/Examples_old/Monocular/mono_tum_vi.cc index 2805c92522..02d2c78eec 100644 --- a/Examples_old/Monocular/mono_tum_vi.cc +++ b/Examples_old/Monocular/mono_tum_vi.cc @@ -112,7 +112,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point 
t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -122,7 +122,7 @@ int main(int argc, char **argv) int height = im.rows * imageScale; cv::resize(im, im, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -145,7 +145,7 @@ int main(int argc, char **argv) << vstrImageFilenames[seq][ni] << endl; return 1; } -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -154,7 +154,7 @@ int main(int argc, char **argv) // Pass the image to the SLAM system SLAM.TrackMonocular(im,tframe); // TODO change to monocular_inertial -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/RGB-D-Inertial/rgbd_inertial_realsense_D435i.cc b/Examples_old/RGB-D-Inertial/rgbd_inertial_realsense_D435i.cc index 03970c511a..392e0088a8 100644 --- a/Examples_old/RGB-D-Inertial/rgbd_inertial_realsense_D435i.cc +++ b/Examples_old/RGB-D-Inertial/rgbd_inertial_realsense_D435i.cc @@ -345,7 +345,7 @@ int main(int argc, char **argv) { if(!image_ready) cond_image_rec.wait(lk); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point time_Start_Process = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point time_Start_Process = std::chrono::monotonic_clock::now(); @@ -412,7 +412,7 @@ int main(int argc, char **argv) { if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = 
std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -424,7 +424,7 @@ int main(int argc, char **argv) { cv::resize(depth, depth, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -435,7 +435,7 @@ int main(int argc, char **argv) { } #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -445,7 +445,7 @@ int main(int argc, char **argv) { SLAM.TrackRGBD(im, depth, timestamp, vImuMeas); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/RGB-D/rgbd_realsense_D435i.cc b/Examples_old/RGB-D/rgbd_realsense_D435i.cc index 8ff6131804..c9e1ab9060 100644 --- a/Examples_old/RGB-D/rgbd_realsense_D435i.cc +++ b/Examples_old/RGB-D/rgbd_realsense_D435i.cc @@ -323,7 +323,7 @@ int main(int argc, char **argv) { if(!image_ready) cond_image_rec.wait(lk); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point time_Start_Process = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point time_Start_Process = std::chrono::monotonic_clock::now(); @@ -359,7 +359,7 @@ int main(int argc, char **argv) { if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else 
std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -371,7 +371,7 @@ int main(int argc, char **argv) { cv::resize(depth, depth, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -382,7 +382,7 @@ int main(int argc, char **argv) { } #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -392,7 +392,7 @@ int main(int argc, char **argv) { SLAM.TrackRGBD(im, depth, timestamp); //, vImuMeas); depthCV #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/RGB-D/rgbd_tum.cc b/Examples_old/RGB-D/rgbd_tum.cc index d420da52c3..b120130625 100644 --- a/Examples_old/RGB-D/rgbd_tum.cc +++ b/Examples_old/RGB-D/rgbd_tum.cc @@ -94,7 +94,7 @@ int main(int argc, char **argv) cv::resize(imD, imD, cv::Size(width, height)); } -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -103,7 +103,7 @@ int main(int argc, char **argv) // Pass the image to the SLAM system SLAM.TrackRGBD(imRGB,imD,tframe); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git 
a/Examples_old/ROS/ORB_SLAM3/CMakeLists.txt b/Examples_old/ROS/ORB_SLAM3/CMakeLists.txt index 12792fe44b..9c652f17f9 100644 --- a/Examples_old/ROS/ORB_SLAM3/CMakeLists.txt +++ b/Examples_old/ROS/ORB_SLAM3/CMakeLists.txt @@ -18,7 +18,7 @@ CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11) CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X) if(COMPILER_SUPPORTS_CXX11) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") - add_definitions(-DCOMPILEDWITHC11) + add_definitions(-DCOMPILEDWITHC14) message(STATUS "Using flag -std=c++11.") elseif(COMPILER_SUPPORTS_CXX0X) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x") diff --git a/Examples_old/ROS/ORB_SLAM3/src/AR/ViewerAR.cc b/Examples_old/ROS/ORB_SLAM3/src/AR/ViewerAR.cc index dc9f18b7e3..f32c5ffe4b 100644 --- a/Examples_old/ROS/ORB_SLAM3/src/AR/ViewerAR.cc +++ b/Examples_old/ROS/ORB_SLAM3/src/AR/ViewerAR.cc @@ -17,6 +17,7 @@ */ #include "ViewerAR.h" +#include "Thirdparty/DBoW2/DUtils/Random.h" #include diff --git a/Examples_old/Stereo-Inertial/stereo_inertial_euroc.cc b/Examples_old/Stereo-Inertial/stereo_inertial_euroc.cc index 25dbf3cf2b..c14fd7d869 100644 --- a/Examples_old/Stereo-Inertial/stereo_inertial_euroc.cc +++ b/Examples_old/Stereo-Inertial/stereo_inertial_euroc.cc @@ -194,7 +194,7 @@ int main(int argc, char **argv) #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Rect = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Rect = std::chrono::monotonic_clock::now(); @@ -204,7 +204,7 @@ int main(int argc, char **argv) cv::remap(imRight,imRightRect,M1r,M2r,cv::INTER_LINEAR); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Rect = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Rect = std::chrono::monotonic_clock::now(); @@ -217,7 +217,7 @@ int main(int argc, char **argv) 
if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -228,7 +228,7 @@ int main(int argc, char **argv) cv::resize(imLeftRect, imLeftRect, cv::Size(width, height)); cv::resize(imRightRect, imRightRect, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -252,7 +252,7 @@ int main(int argc, char **argv) first_imu[seq]++; } - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -261,7 +261,7 @@ int main(int argc, char **argv) // Pass the images to the SLAM system SLAM.TrackStereo(imLeftRect,imRightRect,tframe,vImuMeas); - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Stereo-Inertial/stereo_inertial_realsense_D435i.cc b/Examples_old/Stereo-Inertial/stereo_inertial_realsense_D435i.cc index d3a03bf005..3b5e01e66a 100644 --- a/Examples_old/Stereo-Inertial/stereo_inertial_realsense_D435i.cc +++ b/Examples_old/Stereo-Inertial/stereo_inertial_realsense_D435i.cc @@ -344,7 +344,7 @@ int main(int argc, char **argv) { if(!image_ready) cond_image_rec.wait(lk); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point time_Start_Process = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point time_Start_Process = 
std::chrono::monotonic_clock::now(); @@ -396,7 +396,7 @@ int main(int argc, char **argv) { if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -408,7 +408,7 @@ int main(int argc, char **argv) { cv::resize(imRight, imRight, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -419,7 +419,7 @@ int main(int argc, char **argv) { } #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -428,7 +428,7 @@ int main(int argc, char **argv) { // Stereo images are already rectified. 
SLAM.TrackStereo(im, imRight, timestamp, vImuMeas); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Stereo-Inertial/stereo_inertial_realsense_t265.cc b/Examples_old/Stereo-Inertial/stereo_inertial_realsense_t265.cc index 9fcd77588c..66f2f84155 100644 --- a/Examples_old/Stereo-Inertial/stereo_inertial_realsense_t265.cc +++ b/Examples_old/Stereo-Inertial/stereo_inertial_realsense_t265.cc @@ -243,7 +243,7 @@ int main(int argc, char **argv) else { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -254,7 +254,7 @@ int main(int argc, char **argv) cv::resize(imCV, im_left, cv::Size(width, height)); cv::resize(imCV_right, im_right, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -294,7 +294,7 @@ int main(int argc, char **argv) } #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -303,7 +303,7 @@ int main(int argc, char **argv) // Pass the image to the SLAM system SLAM.TrackStereo(im_left, im_right, timestamp, vImuMeas); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else 
std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Stereo-Inertial/stereo_inertial_tum_vi.cc b/Examples_old/Stereo-Inertial/stereo_inertial_tum_vi.cc index 2abe61472b..c551835dcf 100644 --- a/Examples_old/Stereo-Inertial/stereo_inertial_tum_vi.cc +++ b/Examples_old/Stereo-Inertial/stereo_inertial_tum_vi.cc @@ -144,7 +144,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -155,7 +155,7 @@ int main(int argc, char **argv) cv::resize(imLeft, imLeft, cv::Size(width, height)); cv::resize(imRight, imRight, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -201,7 +201,7 @@ int main(int argc, char **argv) cout << "first imu time: " << fixed << vTimestampsImu[seq][0] << endl; cout << "size vImu: " << vImuMeas.size() << endl;*/ - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -210,7 +210,7 @@ int main(int argc, char **argv) // Pass the image to the SLAM system SLAM.TrackStereo(imLeft,imRight,tframe,vImuMeas); - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Stereo/stereo_euroc.cc b/Examples_old/Stereo/stereo_euroc.cc index 08a59e4272..c8f00f5dad 100644 
--- a/Examples_old/Stereo/stereo_euroc.cc +++ b/Examples_old/Stereo/stereo_euroc.cc @@ -160,7 +160,7 @@ int main(int argc, char **argv) } #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Rect = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Rect = std::chrono::monotonic_clock::now(); @@ -170,7 +170,7 @@ int main(int argc, char **argv) cv::remap(imRight,imRightRect,M1r,M2r,cv::INTER_LINEAR); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Rect = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Rect = std::chrono::monotonic_clock::now(); @@ -183,7 +183,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -194,7 +194,7 @@ int main(int argc, char **argv) cv::resize(imLeftRect, imLeftRect, cv::Size(width, height)); cv::resize(imRightRect, imRightRect, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -204,7 +204,7 @@ int main(int argc, char **argv) #endif } - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -213,7 +213,7 @@ int main(int argc, char **argv) // Pass the images to the SLAM system SLAM.TrackStereo(imLeftRect,imRightRect,tframe, vector(), vstrImageLeft[seq][ni]); - #ifdef 
COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Stereo/stereo_kitti.cc b/Examples_old/Stereo/stereo_kitti.cc index fdc8ef0415..4d96898d46 100644 --- a/Examples_old/Stereo/stereo_kitti.cc +++ b/Examples_old/Stereo/stereo_kitti.cc @@ -81,7 +81,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -92,7 +92,7 @@ int main(int argc, char **argv) cv::resize(imLeft, imLeft, cv::Size(width, height)); cv::resize(imRight, imRight, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -102,7 +102,7 @@ int main(int argc, char **argv) #endif } -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -111,7 +111,7 @@ int main(int argc, char **argv) // Pass the images to the SLAM system SLAM.TrackStereo(imLeft,imRight,tframe); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Stereo/stereo_realsense_D435i.cc b/Examples_old/Stereo/stereo_realsense_D435i.cc index f1a45710b6..a8735ff683 100644 --- a/Examples_old/Stereo/stereo_realsense_D435i.cc +++ 
b/Examples_old/Stereo/stereo_realsense_D435i.cc @@ -259,7 +259,7 @@ int main(int argc, char **argv) { if(!image_ready) cond_image_rec.wait(lk); -#ifdef COMPILEDWITHC11 +#ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point time_Start_Process = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point time_Start_Process = std::chrono::monotonic_clock::now(); @@ -279,7 +279,7 @@ int main(int argc, char **argv) { if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -291,7 +291,7 @@ int main(int argc, char **argv) { cv::resize(imRight, imRight, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -302,7 +302,7 @@ int main(int argc, char **argv) { } #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now(); @@ -311,7 +311,7 @@ int main(int argc, char **argv) { // Stereo images are already rectified. 
SLAM.TrackStereo(im, imRight, timestamp); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now(); diff --git a/Examples_old/Stereo/stereo_realsense_t265.cc b/Examples_old/Stereo/stereo_realsense_t265.cc index f96d80af35..3a6c7bdeef 100644 --- a/Examples_old/Stereo/stereo_realsense_t265.cc +++ b/Examples_old/Stereo/stereo_realsense_t265.cc @@ -126,7 +126,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -137,7 +137,7 @@ int main(int argc, char **argv) cv::resize(imLeft, imLeft, cv::Size(width, height)); cv::resize(imRight, imRight, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -151,7 +151,7 @@ int main(int argc, char **argv) //clahe->apply(imLeft,imLeft); //clahe->apply(imRight,imRight); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -162,7 +162,7 @@ int main(int argc, char **argv) SLAM.TrackStereo(imLeft, imRight, timestamp); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git 
a/Examples_old/Stereo/stereo_tum_vi.cc b/Examples_old/Stereo/stereo_tum_vi.cc index 3e296f6d21..51a8397780 100644 --- a/Examples_old/Stereo/stereo_tum_vi.cc +++ b/Examples_old/Stereo/stereo_tum_vi.cc @@ -118,7 +118,7 @@ int main(int argc, char **argv) if(imageScale != 1.f) { #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now(); @@ -129,7 +129,7 @@ int main(int argc, char **argv) cv::resize(imLeft, imLeft, cv::Size(width, height)); cv::resize(imRight, imRight, cv::Size(width, height)); #ifdef REGISTER_TIMES - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now(); @@ -152,7 +152,7 @@ int main(int argc, char **argv) return 1; } - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now(); @@ -161,7 +161,7 @@ int main(int argc, char **argv) // Pass the image to the SLAM system SLAM.TrackStereo(imLeft,imRight,tframe); - #ifdef COMPILEDWITHC11 + #ifdef COMPILEDWITHC14 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); #else std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now(); diff --git a/TheIdea.md b/TheIdea.md new file mode 100644 index 0000000000..a084e7f7fa --- /dev/null +++ b/TheIdea.md @@ -0,0 +1,3 @@ +# Summary of changes + +## The task : BOW \ No newline at end of file diff --git a/Thirdparty/DBoW2/CMakeLists.txt b/Thirdparty/DBoW2/CMakeLists.txt index c312b255a8..343ea95b1f 100644 --- a/Thirdparty/DBoW2/CMakeLists.txt +++ b/Thirdparty/DBoW2/CMakeLists.txt @@ -1,4 +1,6 @@ 
cmake_minimum_required(VERSION 2.8) +set(CMAKE_CXX_STANDARD 14) +set(CMAKE_CXX_STANDARD_REQUIRED ON) project(DBoW2) if(NOT CMAKE_BUILD_TYPE) @@ -8,18 +10,27 @@ endif() set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -O3 -march=native ") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -O3 -march=native") -set(HDRS_DBOW2 - DBoW2/BowVector.h - DBoW2/FORB.h - DBoW2/FClass.h - DBoW2/FeatureVector.h - DBoW2/ScoringObject.h - DBoW2/TemplatedVocabulary.h) -set(SRCS_DBOW2 - DBoW2/BowVector.cpp - DBoW2/FORB.cpp - DBoW2/FeatureVector.cpp - DBoW2/ScoringObject.cpp) +set(HDRS_DBOW3 + DBoW3/src/BowVector.h + DBoW3/src/Database.h + DBoW3/src/DBoW3.h + DBoW3/src/DescManip.h + DBoW3/src/exports.h + DBoW3/src/FeatureVector.h + DBoW3/src/QueryResults.h + DBoW3/src/quicklz.h + DBoW3/src/ScoringObject.h + DBoW3/src/Vocabulary.h + DBoW3/src/timers.h) +set(SRCS_DBOW3 + DBoW3/src/BowVector.cpp + DBoW3/src/Database.cpp + DBoW3/src/DescManip.cpp + DBoW3/src/FeatureVector.cpp + DBoW3/src/QueryResults.cpp + DBoW3/src/quicklz.c + DBoW3/src/ScoringObject.cpp + DBoW3/src/Vocabulary.cpp) set(HDRS_DUTILS DUtils/Random.h @@ -40,6 +51,6 @@ endif() set(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/lib) include_directories(${OpenCV_INCLUDE_DIRS}) -add_library(DBoW2 SHARED ${SRCS_DBOW2} ${SRCS_DUTILS}) -target_link_libraries(DBoW2 ${OpenCV_LIBS}) +add_library(DBoW3 SHARED ${SRCS_DBOW3} ${SRCS_DUTILS}) +target_link_libraries(DBoW3 ${OpenCV_LIBS}) diff --git a/Thirdparty/DBoW2/DBoW2/FClass.h b/Thirdparty/DBoW2/DBoW2/FClass.h deleted file mode 100644 index 13be53d753..0000000000 --- a/Thirdparty/DBoW2/DBoW2/FClass.h +++ /dev/null @@ -1,71 +0,0 @@ -/** - * File: FClass.h - * Date: November 2011 - * Author: Dorian Galvez-Lopez - * Description: generic FClass to instantiate templated classes - * License: see the LICENSE.txt file - * - */ - -#ifndef __D_T_FCLASS__ -#define __D_T_FCLASS__ - -#include -#include -#include - -namespace DBoW2 { - -/// Generic class to encapsulate functions to manage descriptors. 
-/** - * This class must be inherited. Derived classes can be used as the - * parameter F when creating Templated structures - * (TemplatedVocabulary, TemplatedDatabase, ...) - */ -class FClass -{ - class TDescriptor; - typedef const TDescriptor *pDescriptor; - - /** - * Calculates the mean value of a set of descriptors - * @param descriptors - * @param mean mean descriptor - */ - virtual void meanValue(const std::vector &descriptors, - TDescriptor &mean) = 0; - - /** - * Calculates the distance between two descriptors - * @param a - * @param b - * @return distance - */ - static double distance(const TDescriptor &a, const TDescriptor &b); - - /** - * Returns a string version of the descriptor - * @param a descriptor - * @return string version - */ - static std::string toString(const TDescriptor &a); - - /** - * Returns a descriptor from a string - * @param a descriptor - * @param s string version - */ - static void fromString(TDescriptor &a, const std::string &s); - - /** - * Returns a mat with the descriptors in float format - * @param descriptors - * @param mat (out) NxL 32F matrix - */ - static void toMat32F(const std::vector &descriptors, - cv::Mat &mat); -}; - -} // namespace DBoW2 - -#endif diff --git a/Thirdparty/DBoW2/DBoW2/FORB.cpp b/Thirdparty/DBoW2/DBoW2/FORB.cpp deleted file mode 100644 index 1f1990c2f7..0000000000 --- a/Thirdparty/DBoW2/DBoW2/FORB.cpp +++ /dev/null @@ -1,193 +0,0 @@ -/** - * File: FORB.cpp - * Date: June 2012 - * Author: Dorian Galvez-Lopez - * Description: functions for ORB descriptors - * License: see the LICENSE.txt file - * - * Distance function has been modified - * - */ - - -#include -#include -#include -#include - -#include "FORB.h" - -using namespace std; - -namespace DBoW2 { - -// -------------------------------------------------------------------------- - -const int FORB::L=32; - -void FORB::meanValue(const std::vector &descriptors, - FORB::TDescriptor &mean) -{ - if(descriptors.empty()) - { - mean.release(); - return; - } - 
else if(descriptors.size() == 1) - { - mean = descriptors[0]->clone(); - } - else - { - vector sum(FORB::L * 8, 0); - - for(size_t i = 0; i < descriptors.size(); ++i) - { - const cv::Mat &d = *descriptors[i]; - const unsigned char *p = d.ptr(); - - for(int j = 0; j < d.cols; ++j, ++p) - { - if(*p & (1 << 7)) ++sum[ j*8 ]; - if(*p & (1 << 6)) ++sum[ j*8 + 1 ]; - if(*p & (1 << 5)) ++sum[ j*8 + 2 ]; - if(*p & (1 << 4)) ++sum[ j*8 + 3 ]; - if(*p & (1 << 3)) ++sum[ j*8 + 4 ]; - if(*p & (1 << 2)) ++sum[ j*8 + 5 ]; - if(*p & (1 << 1)) ++sum[ j*8 + 6 ]; - if(*p & (1)) ++sum[ j*8 + 7 ]; - } - } - - mean = cv::Mat::zeros(1, FORB::L, CV_8U); - unsigned char *p = mean.ptr(); - - const int N2 = (int)descriptors.size() / 2 + descriptors.size() % 2; - for(size_t i = 0; i < sum.size(); ++i) - { - if(sum[i] >= N2) - { - // set bit - *p |= 1 << (7 - (i % 8)); - } - - if(i % 8 == 7) ++p; - } - } -} - -// -------------------------------------------------------------------------- - -int FORB::distance(const FORB::TDescriptor &a, - const FORB::TDescriptor &b) -{ - // Bit set count operation from - // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel - - const int *pa = a.ptr(); - const int *pb = b.ptr(); - - int dist=0; - - for(int i=0; i<8; i++, pa++, pb++) - { - unsigned int v = *pa ^ *pb; - v = v - ((v >> 1) & 0x55555555); - v = (v & 0x33333333) + ((v >> 2) & 0x33333333); - dist += (((v + (v >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; - } - - return dist; -} - -// -------------------------------------------------------------------------- - -std::string FORB::toString(const FORB::TDescriptor &a) -{ - stringstream ss; - const unsigned char *p = a.ptr(); - - for(int i = 0; i < a.cols; ++i, ++p) - { - ss << (int)*p << " "; - } - - return ss.str(); -} - -// -------------------------------------------------------------------------- - -void FORB::fromString(FORB::TDescriptor &a, const std::string &s) -{ - a.create(1, FORB::L, CV_8U); - unsigned char *p = a.ptr(); - - 
stringstream ss(s); - for(int i = 0; i < FORB::L; ++i, ++p) - { - int n; - ss >> n; - - if(!ss.fail()) - *p = (unsigned char)n; - } - -} - -// -------------------------------------------------------------------------- - -void FORB::toMat32F(const std::vector &descriptors, - cv::Mat &mat) -{ - if(descriptors.empty()) - { - mat.release(); - return; - } - - const size_t N = descriptors.size(); - - mat.create(N, FORB::L*8, CV_32F); - float *p = mat.ptr(); - - for(size_t i = 0; i < N; ++i) - { - const int C = descriptors[i].cols; - const unsigned char *desc = descriptors[i].ptr(); - - for(int j = 0; j < C; ++j, p += 8) - { - p[0] = (desc[j] & (1 << 7) ? 1 : 0); - p[1] = (desc[j] & (1 << 6) ? 1 : 0); - p[2] = (desc[j] & (1 << 5) ? 1 : 0); - p[3] = (desc[j] & (1 << 4) ? 1 : 0); - p[4] = (desc[j] & (1 << 3) ? 1 : 0); - p[5] = (desc[j] & (1 << 2) ? 1 : 0); - p[6] = (desc[j] & (1 << 1) ? 1 : 0); - p[7] = desc[j] & (1); - } - } -} - -// -------------------------------------------------------------------------- - -void FORB::toMat8U(const std::vector &descriptors, - cv::Mat &mat) -{ - mat.create(descriptors.size(), 32, CV_8U); - - unsigned char *p = mat.ptr(); - - for(size_t i = 0; i < descriptors.size(); ++i, p += 32) - { - const unsigned char *d = descriptors[i].ptr(); - std::copy(d, d+32, p); - } - -} - -// -------------------------------------------------------------------------- - -} // namespace DBoW2 - - diff --git a/Thirdparty/DBoW2/DBoW2/FORB.h b/Thirdparty/DBoW2/DBoW2/FORB.h deleted file mode 100644 index a39599f20e..0000000000 --- a/Thirdparty/DBoW2/DBoW2/FORB.h +++ /dev/null @@ -1,79 +0,0 @@ -/** - * File: FORB.h - * Date: June 2012 - * Author: Dorian Galvez-Lopez - * Description: functions for ORB descriptors - * License: see the LICENSE.txt file - * - */ - -#ifndef __D_T_F_ORB__ -#define __D_T_F_ORB__ - -#include -#include -#include - -#include "FClass.h" - -namespace DBoW2 { - -/// Functions to manipulate ORB descriptors -class FORB: protected FClass -{ -public: 
- - /// Descriptor type - typedef cv::Mat TDescriptor; // CV_8U - /// Pointer to a single descriptor - typedef const TDescriptor *pDescriptor; - /// Descriptor length (in bytes) - static const int L; - - /** - * Calculates the mean value of a set of descriptors - * @param descriptors - * @param mean mean descriptor - */ - static void meanValue(const std::vector &descriptors, - TDescriptor &mean); - - /** - * Calculates the distance between two descriptors - * @param a - * @param b - * @return distance - */ - static int distance(const TDescriptor &a, const TDescriptor &b); - - /** - * Returns a string version of the descriptor - * @param a descriptor - * @return string version - */ - static std::string toString(const TDescriptor &a); - - /** - * Returns a descriptor from a string - * @param a descriptor - * @param s string version - */ - static void fromString(TDescriptor &a, const std::string &s); - - /** - * Returns a mat with the descriptors in float format - * @param descriptors - * @param mat (out) NxL 32F matrix - */ - static void toMat32F(const std::vector &descriptors, - cv::Mat &mat); - - static void toMat8U(const std::vector &descriptors, - cv::Mat &mat); - -}; - -} // namespace DBoW2 - -#endif - diff --git a/Thirdparty/DBoW2/DBoW2/TemplatedVocabulary.h b/Thirdparty/DBoW2/DBoW2/TemplatedVocabulary.h deleted file mode 100644 index 01959344ed..0000000000 --- a/Thirdparty/DBoW2/DBoW2/TemplatedVocabulary.h +++ /dev/null @@ -1,1665 +0,0 @@ -/** - * This is a modified version of TemplatedVocabulary.h from DBoW2 (see below). - * Added functions: Save and Load from text files without using cv::FileStorage. 
- * Date: August 2015 - * Raúl Mur-Artal - */ - -/** - * File: TemplatedVocabulary.h - * Date: February 2011 - * Author: Dorian Galvez-Lopez - * Description: templated vocabulary - * License: see the LICENSE.txt file - * - */ - -#ifndef __D_T_TEMPLATED_VOCABULARY__ -#define __D_T_TEMPLATED_VOCABULARY__ - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "FeatureVector.h" -#include "BowVector.h" -#include "ScoringObject.h" - -#include "../DUtils/Random.h" - -using namespace std; - -namespace DBoW2 { - -/// @param TDescriptor class of descriptor -/// @param F class of descriptor functions -template -/// Generic Vocabulary -class TemplatedVocabulary -{ -public: - - /** - * Initiates an empty vocabulary - * @param k branching factor - * @param L depth levels - * @param weighting weighting type - * @param scoring scoring type - */ - TemplatedVocabulary(int k = 10, int L = 5, - WeightingType weighting = TF_IDF, ScoringType scoring = L1_NORM); - - /** - * Creates the vocabulary by loading a file - * @param filename - */ - TemplatedVocabulary(const std::string &filename); - - /** - * Creates the vocabulary by loading a file - * @param filename - */ - TemplatedVocabulary(const char *filename); - - /** - * Copy constructor - * @param voc - */ - TemplatedVocabulary(const TemplatedVocabulary &voc); - - /** - * Destructor - */ - virtual ~TemplatedVocabulary(); - - /** - * Assigns the given vocabulary to this by copying its data and removing - * all the data contained by this vocabulary before - * @param voc - * @return reference to this vocabulary - */ - TemplatedVocabulary& operator=( - const TemplatedVocabulary &voc); - - /** - * Creates a vocabulary from the training features with the already - * defined parameters - * @param training_features - */ - virtual void create - (const std::vector > &training_features); - - /** - * Creates a vocabulary from the training features, setting the branching - * factor and the depth levels of 
the tree - * @param training_features - * @param k branching factor - * @param L depth levels - */ - virtual void create - (const std::vector > &training_features, - int k, int L); - - /** - * Creates a vocabulary from the training features, setting the branching - * factor nad the depth levels of the tree, and the weighting and scoring - * schemes - */ - virtual void create - (const std::vector > &training_features, - int k, int L, WeightingType weighting, ScoringType scoring); - - /** - * Returns the number of words in the vocabulary - * @return number of words - */ - virtual inline unsigned int size() const; - - /** - * Returns whether the vocabulary is empty (i.e. it has not been trained) - * @return true iff the vocabulary is empty - */ - virtual inline bool empty() const; - - /** - * Transforms a set of descriptores into a bow vector - * @param features - * @param v (out) bow vector of weighted words - */ - virtual void transform(const std::vector& features, BowVector &v) - const; - - /** - * Transform a set of descriptors into a bow vector and a feature vector - * @param features - * @param v (out) bow vector - * @param fv (out) feature vector of nodes and feature indexes - * @param levelsup levels to go up the vocabulary tree to get the node index - */ - virtual void transform(const std::vector& features, - BowVector &v, FeatureVector &fv, int levelsup) const; - - /** - * Transforms a single feature into a word (without weight) - * @param feature - * @return word id - */ - virtual WordId transform(const TDescriptor& feature) const; - - /** - * Returns the score of two vectors - * @param a vector - * @param b vector - * @return score between vectors - * @note the vectors must be already sorted and normalized if necessary - */ - inline double score(const BowVector &a, const BowVector &b) const; - - /** - * Returns the id of the node that is "levelsup" levels from the word given - * @param wid word id - * @param levelsup 0..L - * @return node id. 
if levelsup is 0, returns the node id associated to the - * word id - */ - virtual NodeId getParentNode(WordId wid, int levelsup) const; - - /** - * Returns the ids of all the words that are under the given node id, - * by traversing any of the branches that goes down from the node - * @param nid starting node id - * @param words ids of words - */ - void getWordsFromNode(NodeId nid, std::vector &words) const; - - /** - * Returns the branching factor of the tree (k) - * @return k - */ - inline int getBranchingFactor() const { return m_k; } - - /** - * Returns the depth levels of the tree (L) - * @return L - */ - inline int getDepthLevels() const { return m_L; } - - /** - * Returns the real depth levels of the tree on average - * @return average of depth levels of leaves - */ - float getEffectiveLevels() const; - - /** - * Returns the descriptor of a word - * @param wid word id - * @return descriptor - */ - virtual inline TDescriptor getWord(WordId wid) const; - - /** - * Returns the weight of a word - * @param wid word id - * @return weight - */ - virtual inline WordValue getWordWeight(WordId wid) const; - - /** - * Returns the weighting method - * @return weighting method - */ - inline WeightingType getWeightingType() const { return m_weighting; } - - /** - * Returns the scoring method - * @return scoring method - */ - inline ScoringType getScoringType() const { return m_scoring; } - - /** - * Changes the weighting method - * @param type new weighting type - */ - inline void setWeightingType(WeightingType type); - - /** - * Changes the scoring method - * @param type new scoring type - */ - void setScoringType(ScoringType type); - - /** - * Loads the vocabulary from a text file - * @param filename - */ - bool loadFromTextFile(const std::string &filename); - - /** - * Saves the vocabulary into a text file - * @param filename - */ - void saveToTextFile(const std::string &filename) const; - - /** - * Saves the vocabulary into a file - * @param filename - */ - void 
save(const std::string &filename) const; - - /** - * Loads the vocabulary from a file - * @param filename - */ - void load(const std::string &filename); - - /** - * Saves the vocabulary to a file storage structure - * @param fn node in file storage - */ - virtual void save(cv::FileStorage &fs, - const std::string &name = "vocabulary") const; - - /** - * Loads the vocabulary from a file storage node - * @param fn first node - * @param subname name of the child node of fn where the tree is stored. - * If not given, the fn node is used instead - */ - virtual void load(const cv::FileStorage &fs, - const std::string &name = "vocabulary"); - - /** - * Stops those words whose weight is below minWeight. - * Words are stopped by setting their weight to 0. There are not returned - * later when transforming image features into vectors. - * Note that when using IDF or TF_IDF, the weight is the idf part, which - * is equivalent to -log(f), where f is the frequency of the word - * (f = Ni/N, Ni: number of training images where the word is present, - * N: number of training images). - * Note that the old weight is forgotten, and subsequent calls to this - * function with a lower minWeight have no effect. 
- * @return number of words stopped now - */ - virtual int stopWords(double minWeight); - -protected: - - /// Pointer to descriptor - typedef const TDescriptor *pDescriptor; - - /// Tree node - struct Node - { - /// Node id - NodeId id; - /// Weight if the node is a word - WordValue weight; - /// Children - vector children; - /// Parent node (undefined in case of root) - NodeId parent; - /// Node descriptor - TDescriptor descriptor; - - /// Word id if the node is a word - WordId word_id; - - /** - * Empty constructor - */ - Node(): id(0), weight(0), parent(0), word_id(0){} - - /** - * Constructor - * @param _id node id - */ - Node(NodeId _id): id(_id), weight(0), parent(0), word_id(0){} - - /** - * Returns whether the node is a leaf node - * @return true iff the node is a leaf - */ - inline bool isLeaf() const { return children.empty(); } - }; - -protected: - - /** - * Creates an instance of the scoring object accoring to m_scoring - */ - void createScoringObject(); - - /** - * Returns a set of pointers to descriptores - * @param training_features all the features - * @param features (out) pointers to the training features - */ - void getFeatures( - const vector > &training_features, - vector &features) const; - - /** - * Returns the word id associated to a feature - * @param feature - * @param id (out) word id - * @param weight (out) word weight - * @param nid (out) if given, id of the node "levelsup" levels up - * @param levelsup - */ - virtual void transform(const TDescriptor &feature, - WordId &id, WordValue &weight, NodeId* nid = NULL, int levelsup = 0) const; - - /** - * Returns the word id associated to a feature - * @param feature - * @param id (out) word id - */ - virtual void transform(const TDescriptor &feature, WordId &id) const; - - /** - * Creates a level in the tree, under the parent, by running kmeans with - * a descriptor set, and recursively creates the subsequent levels too - * @param parent_id id of parent node - * @param descriptors descriptors 
to run the kmeans on - * @param current_level current level in the tree - */ - void HKmeansStep(NodeId parent_id, const vector &descriptors, - int current_level); - - /** - * Creates k clusters from the given descriptors with some seeding algorithm. - * @note In this class, kmeans++ is used, but this function should be - * overriden by inherited classes. - */ - virtual void initiateClusters(const vector &descriptors, - vector &clusters) const; - - /** - * Creates k clusters from the given descriptor sets by running the - * initial step of kmeans++ - * @param descriptors - * @param clusters resulting clusters - */ - void initiateClustersKMpp(const vector &descriptors, - vector &clusters) const; - - /** - * Create the words of the vocabulary once the tree has been built - */ - void createWords(); - - /** - * Sets the weights of the nodes of tree according to the given features. - * Before calling this function, the nodes and the words must be already - * created (by calling HKmeansStep and createWords) - * @param features - */ - void setNodeWeights(const vector > &features); - -protected: - - /// Branching factor - int m_k; - - /// Depth levels - int m_L; - - /// Weighting method - WeightingType m_weighting; - - /// Scoring method - ScoringType m_scoring; - - /// Object for computing scores - GeneralScoring* m_scoring_object; - - /// Tree nodes - std::vector m_nodes; - - /// Words of the vocabulary (tree leaves) - /// this condition holds: m_words[wid]->word_id == wid - std::vector m_words; - -}; - -// -------------------------------------------------------------------------- - -template -TemplatedVocabulary::TemplatedVocabulary - (int k, int L, WeightingType weighting, ScoringType scoring) - : m_k(k), m_L(L), m_weighting(weighting), m_scoring(scoring), - m_scoring_object(NULL) -{ - createScoringObject(); -} - -// -------------------------------------------------------------------------- - -template -TemplatedVocabulary::TemplatedVocabulary - (const std::string 
&filename): m_scoring_object(NULL) -{ - load(filename); -} - -// -------------------------------------------------------------------------- - -template -TemplatedVocabulary::TemplatedVocabulary - (const char *filename): m_scoring_object(NULL) -{ - load(filename); -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::createScoringObject() -{ - delete m_scoring_object; - m_scoring_object = NULL; - - switch(m_scoring) - { - case L1_NORM: - m_scoring_object = new L1Scoring; - break; - - case L2_NORM: - m_scoring_object = new L2Scoring; - break; - - case CHI_SQUARE: - m_scoring_object = new ChiSquareScoring; - break; - - case KL: - m_scoring_object = new KLScoring; - break; - - case BHATTACHARYYA: - m_scoring_object = new BhattacharyyaScoring; - break; - - case DOT_PRODUCT: - m_scoring_object = new DotProductScoring; - break; - - } -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::setScoringType(ScoringType type) -{ - m_scoring = type; - createScoringObject(); -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::setWeightingType(WeightingType type) -{ - this->m_weighting = type; -} - -// -------------------------------------------------------------------------- - -template -TemplatedVocabulary::TemplatedVocabulary( - const TemplatedVocabulary &voc) - : m_scoring_object(NULL) -{ - *this = voc; -} - -// -------------------------------------------------------------------------- - -template -TemplatedVocabulary::~TemplatedVocabulary() -{ - delete m_scoring_object; -} - -// -------------------------------------------------------------------------- - -template -TemplatedVocabulary& -TemplatedVocabulary::operator= - (const TemplatedVocabulary &voc) -{ - this->m_k = voc.m_k; - this->m_L = voc.m_L; - this->m_scoring = voc.m_scoring; - this->m_weighting = voc.m_weighting; 
- - this->createScoringObject(); - - this->m_nodes.clear(); - this->m_words.clear(); - - this->m_nodes = voc.m_nodes; - this->createWords(); - - return *this; -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::create( - const std::vector > &training_features) -{ - m_nodes.clear(); - m_words.clear(); - - // expected_nodes = Sum_{i=0..L} ( k^i ) - int expected_nodes = - (int)((pow((double)m_k, (double)m_L + 1) - 1)/(m_k - 1)); - - m_nodes.reserve(expected_nodes); // avoid allocations when creating the tree - - - vector features; - getFeatures(training_features, features); - - - // create root - m_nodes.push_back(Node(0)); // root - - // create the tree - HKmeansStep(0, features, 1); - - // create the words - createWords(); - - // and set the weight of each node of the tree - setNodeWeights(training_features); - -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::create( - const std::vector > &training_features, - int k, int L) -{ - m_k = k; - m_L = L; - - create(training_features); -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::create( - const std::vector > &training_features, - int k, int L, WeightingType weighting, ScoringType scoring) -{ - m_k = k; - m_L = L; - m_weighting = weighting; - m_scoring = scoring; - createScoringObject(); - - create(training_features); -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::getFeatures( - const vector > &training_features, - vector &features) const -{ - features.resize(0); - - typename vector >::const_iterator vvit; - typename vector::const_iterator vit; - for(vvit = training_features.begin(); vvit != training_features.end(); ++vvit) - { - features.reserve(features.size() + vvit->size()); - for(vit = vvit->begin(); vit != vvit->end(); 
++vit) - { - features.push_back(&(*vit)); - } - } -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::HKmeansStep(NodeId parent_id, - const vector &descriptors, int current_level) -{ - if(descriptors.empty()) return; - - // features associated to each cluster - vector clusters; - vector > groups; // groups[i] = [j1, j2, ...] - // j1, j2, ... indices of descriptors associated to cluster i - - clusters.reserve(m_k); - groups.reserve(m_k); - - //const int msizes[] = { m_k, descriptors.size() }; - //cv::SparseMat assoc(2, msizes, CV_8U); - //cv::SparseMat last_assoc(2, msizes, CV_8U); - //// assoc.row(cluster_idx).col(descriptor_idx) = 1 iif associated - - if((int)descriptors.size() <= m_k) - { - // trivial case: one cluster per feature - groups.resize(descriptors.size()); - - for(unsigned int i = 0; i < descriptors.size(); i++) - { - groups[i].push_back(i); - clusters.push_back(*descriptors[i]); - } - } - else - { - // select clusters and groups with kmeans - - bool first_time = true; - bool goon = true; - - // to check if clusters move after iterations - vector last_association, current_association; - - while(goon) - { - // 1. Calculate clusters - - if(first_time) - { - // random sample - initiateClusters(descriptors, clusters); - } - else - { - // calculate cluster centres - - for(unsigned int c = 0; c < clusters.size(); ++c) - { - vector cluster_descriptors; - cluster_descriptors.reserve(groups[c].size()); - - /* - for(unsigned int d = 0; d < descriptors.size(); ++d) - { - if( assoc.find(c, d) ) - { - cluster_descriptors.push_back(descriptors[d]); - } - } - */ - - vector::const_iterator vit; - for(vit = groups[c].begin(); vit != groups[c].end(); ++vit) - { - cluster_descriptors.push_back(descriptors[*vit]); - } - - - F::meanValue(cluster_descriptors, clusters[c]); - } - - } // if(!first_time) - - // 2. 
Associate features with clusters - - // calculate distances to cluster centers - groups.clear(); - groups.resize(clusters.size(), vector()); - current_association.resize(descriptors.size()); - - //assoc.clear(); - - typename vector::const_iterator fit; - //unsigned int d = 0; - for(fit = descriptors.begin(); fit != descriptors.end(); ++fit)//, ++d) - { - double best_dist = F::distance(*(*fit), clusters[0]); - unsigned int icluster = 0; - - for(unsigned int c = 1; c < clusters.size(); ++c) - { - double dist = F::distance(*(*fit), clusters[c]); - if(dist < best_dist) - { - best_dist = dist; - icluster = c; - } - } - - //assoc.ref(icluster, d) = 1; - - groups[icluster].push_back(fit - descriptors.begin()); - current_association[ fit - descriptors.begin() ] = icluster; - } - - // kmeans++ ensures all the clusters has any feature associated with them - - // 3. check convergence - if(first_time) - { - first_time = false; - } - else - { - //goon = !eqUChar(last_assoc, assoc); - - goon = false; - for(unsigned int i = 0; i < current_association.size(); i++) - { - if(current_association[i] != last_association[i]){ - goon = true; - break; - } - } - } - - if(goon) - { - // copy last feature-cluster association - last_association = current_association; - //last_assoc = assoc.clone(); - } - - } // while(goon) - - } // if must run kmeans - - // create nodes - for(unsigned int i = 0; i < clusters.size(); ++i) - { - NodeId id = m_nodes.size(); - m_nodes.push_back(Node(id)); - m_nodes.back().descriptor = clusters[i]; - m_nodes.back().parent = parent_id; - m_nodes[parent_id].children.push_back(id); - } - - // go on with the next level - if(current_level < m_L) - { - // iterate again with the resulting clusters - const vector &children_ids = m_nodes[parent_id].children; - for(unsigned int i = 0; i < clusters.size(); ++i) - { - NodeId id = children_ids[i]; - - vector child_features; - child_features.reserve(groups[i].size()); - - vector::const_iterator vit; - for(vit = 
groups[i].begin(); vit != groups[i].end(); ++vit) - { - child_features.push_back(descriptors[*vit]); - } - - if(child_features.size() > 1) - { - HKmeansStep(id, child_features, current_level + 1); - } - } - } -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::initiateClusters - (const vector &descriptors, vector &clusters) const -{ - initiateClustersKMpp(descriptors, clusters); -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::initiateClustersKMpp( - const vector &pfeatures, vector &clusters) const -{ - // Implements kmeans++ seeding algorithm - // Algorithm: - // 1. Choose one center uniformly at random from among the data points. - // 2. For each data point x, compute D(x), the distance between x and the nearest - // center that has already been chosen. - // 3. Add one new data point as a center. Each point x is chosen with probability - // proportional to D(x)^2. - // 4. Repeat Steps 2 and 3 until k centers have been chosen. - // 5. Now that the initial centers have been chosen, proceed using standard k-means - // clustering. - - DUtils::Random::SeedRandOnce(); - - clusters.resize(0); - clusters.reserve(m_k); - vector min_dists(pfeatures.size(), std::numeric_limits::max()); - - // 1. - - int ifeature = DUtils::Random::RandomInt(0, pfeatures.size()-1); - - // create first cluster - clusters.push_back(*pfeatures[ifeature]); - - // compute the initial distances - typename vector::const_iterator fit; - vector::iterator dit; - dit = min_dists.begin(); - for(fit = pfeatures.begin(); fit != pfeatures.end(); ++fit, ++dit) - { - *dit = F::distance(*(*fit), clusters.back()); - } - - while((int)clusters.size() < m_k) - { - // 2. 
- dit = min_dists.begin(); - for(fit = pfeatures.begin(); fit != pfeatures.end(); ++fit, ++dit) - { - if(*dit > 0) - { - double dist = F::distance(*(*fit), clusters.back()); - if(dist < *dit) *dit = dist; - } - } - - // 3. - double dist_sum = std::accumulate(min_dists.begin(), min_dists.end(), 0.0); - - if(dist_sum > 0) - { - double cut_d; - do - { - cut_d = DUtils::Random::RandomValue(0, dist_sum); - } while(cut_d == 0.0); - - double d_up_now = 0; - for(dit = min_dists.begin(); dit != min_dists.end(); ++dit) - { - d_up_now += *dit; - if(d_up_now >= cut_d) break; - } - - if(dit == min_dists.end()) - ifeature = pfeatures.size()-1; - else - ifeature = dit - min_dists.begin(); - - clusters.push_back(*pfeatures[ifeature]); - - } // if dist_sum > 0 - else - break; - - } // while(used_clusters < m_k) - -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::createWords() -{ - m_words.resize(0); - - if(!m_nodes.empty()) - { - m_words.reserve( (int)pow((double)m_k, (double)m_L) ); - - typename vector::iterator nit; - - nit = m_nodes.begin(); // ignore root - for(++nit; nit != m_nodes.end(); ++nit) - { - if(nit->isLeaf()) - { - nit->word_id = m_words.size(); - m_words.push_back( &(*nit) ); - } - } - } -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::setNodeWeights - (const vector > &training_features) -{ - const unsigned int NWords = m_words.size(); - const unsigned int NDocs = training_features.size(); - - if(m_weighting == TF || m_weighting == BINARY) - { - // idf part must be 1 always - for(unsigned int i = 0; i < NWords; i++) - m_words[i]->weight = 1; - } - else if(m_weighting == IDF || m_weighting == TF_IDF) - { - // IDF and TF-IDF: we calculte the idf path now - - // Note: this actually calculates the idf part of the tf-idf score. 
- // The complete tf-idf score is calculated in ::transform - - vector Ni(NWords, 0); - vector counted(NWords, false); - - typename vector >::const_iterator mit; - typename vector::const_iterator fit; - - for(mit = training_features.begin(); mit != training_features.end(); ++mit) - { - fill(counted.begin(), counted.end(), false); - - for(fit = mit->begin(); fit < mit->end(); ++fit) - { - WordId word_id; - transform(*fit, word_id); - - if(!counted[word_id]) - { - Ni[word_id]++; - counted[word_id] = true; - } - } - } - - // set ln(N/Ni) - for(unsigned int i = 0; i < NWords; i++) - { - if(Ni[i] > 0) - { - m_words[i]->weight = log((double)NDocs / (double)Ni[i]); - }// else // This cannot occur if using kmeans++ - } - - } - -} - -// -------------------------------------------------------------------------- - -template -inline unsigned int TemplatedVocabulary::size() const -{ - return m_words.size(); -} - -// -------------------------------------------------------------------------- - -template -inline bool TemplatedVocabulary::empty() const -{ - return m_words.empty(); -} - -// -------------------------------------------------------------------------- - -template -float TemplatedVocabulary::getEffectiveLevels() const -{ - long sum = 0; - typename std::vector::const_iterator wit; - for(wit = m_words.begin(); wit != m_words.end(); ++wit) - { - const Node *p = *wit; - - for(; p->id != 0; sum++) p = &m_nodes[p->parent]; - } - - return (float)((double)sum / (double)m_words.size()); -} - -// -------------------------------------------------------------------------- - -template -TDescriptor TemplatedVocabulary::getWord(WordId wid) const -{ - return m_words[wid]->descriptor; -} - -// -------------------------------------------------------------------------- - -template -WordValue TemplatedVocabulary::getWordWeight(WordId wid) const -{ - return m_words[wid]->weight; -} - -// -------------------------------------------------------------------------- - -template -WordId 
TemplatedVocabulary::transform - (const TDescriptor& feature) const -{ - if(empty()) - { - return 0; - } - - WordId wid; - transform(feature, wid); - return wid; -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::transform( - const std::vector& features, BowVector &v) const -{ - v.clear(); - - if(empty()) - { - return; - } - - // normalize - LNorm norm; - bool must = m_scoring_object->mustNormalize(norm); - - typename vector::const_iterator fit; - - if(m_weighting == TF || m_weighting == TF_IDF) - { - for(fit = features.begin(); fit < features.end(); ++fit) - { - WordId id; - WordValue w; - // w is the idf value if TF_IDF, 1 if TF - - transform(*fit, id, w); - - // not stopped - if(w > 0) v.addWeight(id, w); - } - - if(!v.empty() && !must) - { - // unnecessary when normalizing - const double nd = v.size(); - for(BowVector::iterator vit = v.begin(); vit != v.end(); vit++) - vit->second /= nd; - } - - } - else // IDF || BINARY - { - for(fit = features.begin(); fit < features.end(); ++fit) - { - WordId id; - WordValue w; - // w is idf if IDF, or 1 if BINARY - - transform(*fit, id, w); - - // not stopped - if(w > 0) v.addIfNotExist(id, w); - - } // if add_features - } // if m_weighting == ... 
- - if(must) v.normalize(norm); -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::transform( - const std::vector& features, - BowVector &v, FeatureVector &fv, int levelsup) const -{ - v.clear(); - fv.clear(); - - if(empty()) // safe for subclasses - { - return; - } - - // normalize - LNorm norm; - bool must = m_scoring_object->mustNormalize(norm); - - typename vector::const_iterator fit; - - if(m_weighting == TF || m_weighting == TF_IDF) - { - unsigned int i_feature = 0; - for(fit = features.begin(); fit < features.end(); ++fit, ++i_feature) - { - WordId id; - NodeId nid; - WordValue w; - // w is the idf value if TF_IDF, 1 if TF - - transform(*fit, id, w, &nid, levelsup); - - if(w > 0) // not stopped - { - v.addWeight(id, w); - fv.addFeature(nid, i_feature); - } - } - - if(!v.empty() && !must) - { - // unnecessary when normalizing - const double nd = v.size(); - for(BowVector::iterator vit = v.begin(); vit != v.end(); vit++) - vit->second /= nd; - } - - } - else // IDF || BINARY - { - unsigned int i_feature = 0; - for(fit = features.begin(); fit < features.end(); ++fit, ++i_feature) - { - WordId id; - NodeId nid; - WordValue w; - // w is idf if IDF, or 1 if BINARY - - transform(*fit, id, w, &nid, levelsup); - - if(w > 0) // not stopped - { - v.addIfNotExist(id, w); - fv.addFeature(nid, i_feature); - } - } - } // if m_weighting == ... 
- - if(must) v.normalize(norm); -} - -// -------------------------------------------------------------------------- - -template -inline double TemplatedVocabulary::score - (const BowVector &v1, const BowVector &v2) const -{ - return m_scoring_object->score(v1, v2); -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::transform - (const TDescriptor &feature, WordId &id) const -{ - WordValue weight; - transform(feature, id, weight); -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::transform(const TDescriptor &feature, - WordId &word_id, WordValue &weight, NodeId *nid, int levelsup) const -{ - // propagate the feature down the tree - vector nodes; - typename vector::const_iterator nit; - - // level at which the node must be stored in nid, if given - const int nid_level = m_L - levelsup; - if(nid_level <= 0 && nid != NULL) *nid = 0; // root - - NodeId final_id = 0; // root - int current_level = 0; - - do - { - ++current_level; - nodes = m_nodes[final_id].children; - final_id = nodes[0]; - - double best_d = F::distance(feature, m_nodes[final_id].descriptor); - - for(nit = nodes.begin() + 1; nit != nodes.end(); ++nit) - { - NodeId id = *nit; - double d = F::distance(feature, m_nodes[id].descriptor); - if(d < best_d) - { - best_d = d; - final_id = id; - } - } - - if(nid != NULL && current_level == nid_level) - *nid = final_id; - - } while( !m_nodes[final_id].isLeaf() ); - - // turn node id into word id - word_id = m_nodes[final_id].word_id; - weight = m_nodes[final_id].weight; -} - -// -------------------------------------------------------------------------- - -template -NodeId TemplatedVocabulary::getParentNode - (WordId wid, int levelsup) const -{ - NodeId ret = m_words[wid]->id; // node id - while(levelsup > 0 && ret != 0) // ret == 0 --> root - { - --levelsup; - ret = m_nodes[ret].parent; - } - return ret; -} - -// 
-------------------------------------------------------------------------- - -template -void TemplatedVocabulary::getWordsFromNode - (NodeId nid, std::vector &words) const -{ - words.clear(); - - if(m_nodes[nid].isLeaf()) - { - words.push_back(m_nodes[nid].word_id); - } - else - { - words.reserve(m_k); // ^1, ^2, ... - - vector parents; - parents.push_back(nid); - - while(!parents.empty()) - { - NodeId parentid = parents.back(); - parents.pop_back(); - - const vector &child_ids = m_nodes[parentid].children; - vector::const_iterator cit; - - for(cit = child_ids.begin(); cit != child_ids.end(); ++cit) - { - const Node &child_node = m_nodes[*cit]; - - if(child_node.isLeaf()) - words.push_back(child_node.word_id); - else - parents.push_back(*cit); - - } // for each child - } // while !parents.empty - } -} - -// -------------------------------------------------------------------------- - -template -int TemplatedVocabulary::stopWords(double minWeight) -{ - int c = 0; - typename vector::iterator wit; - for(wit = m_words.begin(); wit != m_words.end(); ++wit) - { - if((*wit)->weight < minWeight) - { - ++c; - (*wit)->weight = 0; - } - } - return c; -} - -// -------------------------------------------------------------------------- - -template -bool TemplatedVocabulary::loadFromTextFile(const std::string &filename) -{ - ifstream f; - f.open(filename.c_str()); - - if(f.eof()) - return false; - - m_words.clear(); - m_nodes.clear(); - - string s; - getline(f,s); - stringstream ss; - ss << s; - ss >> m_k; - ss >> m_L; - int n1, n2; - ss >> n1; - ss >> n2; - - if(m_k<0 || m_k>20 || m_L<1 || m_L>10 || n1<0 || n1>5 || n2<0 || n2>3) - { - std::cerr << "Vocabulary loading failure: This is not a correct text file!" 
<< endl; - return false; - } - - m_scoring = (ScoringType)n1; - m_weighting = (WeightingType)n2; - createScoringObject(); - - // nodes - int expected_nodes = - (int)((pow((double)m_k, (double)m_L + 1) - 1)/(m_k - 1)); - m_nodes.reserve(expected_nodes); - - m_words.reserve(pow((double)m_k, (double)m_L + 1)); - - m_nodes.resize(1); - m_nodes[0].id = 0; - while(!f.eof()) - { - string snode; - getline(f,snode); - stringstream ssnode; - ssnode << snode; - - int nid = m_nodes.size(); - m_nodes.resize(m_nodes.size()+1); - m_nodes[nid].id = nid; - - int pid ; - ssnode >> pid; - m_nodes[nid].parent = pid; - m_nodes[pid].children.push_back(nid); - - int nIsLeaf; - ssnode >> nIsLeaf; - - stringstream ssd; - for(int iD=0;iD> sElement; - ssd << sElement << " "; - } - F::fromString(m_nodes[nid].descriptor, ssd.str()); - - ssnode >> m_nodes[nid].weight; - - if(nIsLeaf>0) - { - int wid = m_words.size(); - m_words.resize(wid+1); - - m_nodes[nid].word_id = wid; - m_words[wid] = &m_nodes[nid]; - } - else - { - m_nodes[nid].children.reserve(m_k); - } - } - - return true; - -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::saveToTextFile(const std::string &filename) const -{ - fstream f; - f.open(filename.c_str(),ios_base::out); - f << m_k << " " << m_L << " " << " " << m_scoring << " " << m_weighting << endl; - - for(size_t i=1; i -void TemplatedVocabulary::save(const std::string &filename) const -{ - cv::FileStorage fs(filename.c_str(), cv::FileStorage::WRITE); - if(!fs.isOpened()) throw string("Could not open file ") + filename; - - save(fs); -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::load(const std::string &filename) -{ - cv::FileStorage fs(filename.c_str(), cv::FileStorage::READ); - if(!fs.isOpened()) throw string("Could not open file ") + filename; - - this->load(fs); -} - -// 
-------------------------------------------------------------------------- - -template -void TemplatedVocabulary::save(cv::FileStorage &f, - const std::string &name) const -{ - // Format YAML: - // vocabulary - // { - // k: - // L: - // scoringType: - // weightingType: - // nodes - // [ - // { - // nodeId: - // parentId: - // weight: - // descriptor: - // } - // ] - // words - // [ - // { - // wordId: - // nodeId: - // } - // ] - // } - // - // The root node (index 0) is not included in the node vector - // - - f << name << "{"; - - f << "k" << m_k; - f << "L" << m_L; - f << "scoringType" << m_scoring; - f << "weightingType" << m_weighting; - - // tree - f << "nodes" << "["; - vector parents, children; - vector::const_iterator pit; - - parents.push_back(0); // root - - while(!parents.empty()) - { - NodeId pid = parents.back(); - parents.pop_back(); - - const Node& parent = m_nodes[pid]; - children = parent.children; - - for(pit = children.begin(); pit != children.end(); pit++) - { - const Node& child = m_nodes[*pit]; - - // save node data - f << "{:"; - f << "nodeId" << (int)child.id; - f << "parentId" << (int)pid; - f << "weight" << (double)child.weight; - f << "descriptor" << F::toString(child.descriptor); - f << "}"; - - // add to parent list - if(!child.isLeaf()) - { - parents.push_back(*pit); - } - } - } - - f << "]"; // nodes - - // words - f << "words" << "["; - - typename vector::const_iterator wit; - for(wit = m_words.begin(); wit != m_words.end(); wit++) - { - WordId id = wit - m_words.begin(); - f << "{:"; - f << "wordId" << (int)id; - f << "nodeId" << (int)(*wit)->id; - f << "}"; - } - - f << "]"; // words - - f << "}"; - -} - -// -------------------------------------------------------------------------- - -template -void TemplatedVocabulary::load(const cv::FileStorage &fs, - const std::string &name) -{ - m_words.clear(); - m_nodes.clear(); - - cv::FileNode fvoc = fs[name]; - - m_k = (int)fvoc["k"]; - m_L = (int)fvoc["L"]; - m_scoring = 
(ScoringType)((int)fvoc["scoringType"]); - m_weighting = (WeightingType)((int)fvoc["weightingType"]); - - createScoringObject(); - - // nodes - cv::FileNode fn = fvoc["nodes"]; - - m_nodes.resize(fn.size() + 1); // +1 to include root - m_nodes[0].id = 0; - - for(unsigned int i = 0; i < fn.size(); ++i) - { - NodeId nid = (int)fn[i]["nodeId"]; - NodeId pid = (int)fn[i]["parentId"]; - WordValue weight = (WordValue)fn[i]["weight"]; - string d = (string)fn[i]["descriptor"]; - - m_nodes[nid].id = nid; - m_nodes[nid].parent = pid; - m_nodes[nid].weight = weight; - m_nodes[pid].children.push_back(nid); - - F::fromString(m_nodes[nid].descriptor, d); - } - - // words - fn = fvoc["words"]; - - m_words.resize(fn.size()); - - for(unsigned int i = 0; i < fn.size(); ++i) - { - NodeId wid = (int)fn[i]["wordId"]; - NodeId nid = (int)fn[i]["nodeId"]; - - m_nodes[nid].word_id = wid; - m_words[wid] = &m_nodes[nid]; - } -} - -// -------------------------------------------------------------------------- - -/** - * Writes printable information of the vocabulary - * @param os stream to write to - * @param voc - */ -template -std::ostream& operator<<(std::ostream &os, - const TemplatedVocabulary &voc) -{ - os << "Vocabulary: k = " << voc.getBranchingFactor() - << ", L = " << voc.getDepthLevels() - << ", Weighting = "; - - switch(voc.getWeightingType()) - { - case TF_IDF: os << "tf-idf"; break; - case TF: os << "tf"; break; - case IDF: os << "idf"; break; - case BINARY: os << "binary"; break; - } - - os << ", Scoring = "; - switch(voc.getScoringType()) - { - case L1_NORM: os << "L1-norm"; break; - case L2_NORM: os << "L2-norm"; break; - case CHI_SQUARE: os << "Chi square distance"; break; - case KL: os << "KL-divergence"; break; - case BHATTACHARYYA: os << "Bhattacharyya coefficient"; break; - case DOT_PRODUCT: os << "Dot product"; break; - } - - os << ", Number of words = " << voc.size(); - - return os; -} - -} // namespace DBoW2 - -#endif diff --git 
a/Thirdparty/DBoW2/DBoW3/.vscode/settings.json b/Thirdparty/DBoW2/DBoW3/.vscode/settings.json new file mode 100644 index 0000000000..72ef32ae61 --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/.vscode/settings.json @@ -0,0 +1,11 @@ +{ + "cmake.debugConfig": { + "args": [ + "orb", + "/home/codebind/DBow3/utils/images/image0.png", + "/home/codebind/DBow3/utils/images/image1.png", + "/home/codebind/DBow3/utils/images/image2.png", + "/home/codebind/DBow3/utils/images/image3.png" + ] + } +} \ No newline at end of file diff --git a/Thirdparty/DBoW2/DBoW3/CMakeLists.txt b/Thirdparty/DBoW2/DBoW3/CMakeLists.txt new file mode 100644 index 0000000000..225329a32e --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/CMakeLists.txt @@ -0,0 +1,270 @@ +# ---------------------------------------------------------------------------- +# Basic Configuration +# ---------------------------------------------------------------------------- +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) + +# set(CMAKE_CXX_STANDARD 11) +set(CMAKE_CXX_STANDARD 14) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +PROJECT(DBoW3) +set(PROJECT_VERSION "0.0.1") +string(REGEX MATCHALL "[0-9]" PROJECT_VERSION_PARTS "${PROJECT_VERSION}") +list(GET PROJECT_VERSION_PARTS 0 PROJECT_VERSION_MAJOR) +list(GET PROJECT_VERSION_PARTS 1 PROJECT_VERSION_MINOR) +list(GET PROJECT_VERSION_PARTS 2 PROJECT_VERSION_PATCH) +set(PROJECT_SOVERSION "${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}") + +message("LIB_INSTALL_DIR: ${LIB_INSTALL_DIR}") + +#------------------------------------------------------ +# Build type +#------------------------------------------------------ + +IF(NOT CMAKE_BUILD_TYPE ) + SET( CMAKE_BUILD_TYPE "Release" ) +ENDIF() + +#------------------------------------------------------ +# Lib Names and Dirs +#------------------------------------------------------ + +if(WIN32) + # Postfix of DLLs: + SET(PROJECT_DLLVERSION "${PROJECT_VERSION_MAJOR}${PROJECT_VERSION_MINOR}${PROJECT_VERSION_PATCH}") + SET(RUNTIME_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin 
CACHE PATH "Directory for dlls and binaries") + SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin CACHE PATH "Directory for binaries") + SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin CACHE PATH "Directory for dlls") +else() + # Postfix of so's: + set(PROJECT_DLLVERSION) + set(LIB_INSTALL_DIR lib CACHE STRING "Install location of libraries (e.g. lib32 or lib64 for multilib installations)") + SET(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_INSTALL_PREFIX}/${LIB_INSTALL_DIR}/cmake/ /usr/${LIB_INSTALL_DIR}/cmake ) +endif() + + + +# +OPTION(BUILD_UTILS "Set to OFF to not build utils" ON) +OPTION(USE_CONTRIB "Set to ON if contrib are installed" OFF) +OPTION(BUILD_SHARED_LIBS "Set to OFF to build static libraries" ON) + +# ---------------------------------------------------------------------------- +# Find Dependencies +# ---------------------------------------------------------------------------- +find_package(OpenCV REQUIRED) +IF(USE_CONTRIB) +add_definitions(-DUSE_CONTRIB) +ENDIF() +if(NOT OpenCV_VERSION VERSION_LESS "3.0") + ADD_DEFINITIONS(-DOPENCV_VERSION_3) + SET(OPENCV_VERSION_3 ON) +ELSE() + SET(OPENCV_VERSION_3 OFF) +ENDIF() + +include_directories(${OpenCV_INCLUDE_DIRS}) + +SET(REQUIRED_LIBRARIES ${REQUIRED_LIBRARIES} ${OpenCV_LIBS}) + +# ---------------------------------------------------------------------------- +# PROJECT CONFIGURATION +# force some variables that could be defined in the command line to be written to cache +# ---------------------------------------------------------------------------- +OPTION(INSTALL_DOC "Set to ON to build/install Documentation" OFF) +IF (INSTALL_DOC) + FIND_PACKAGE(Doxygen REQUIRED) + MESSAGE( STATUS "INSTALL_DOC: ${INSTALL_DOC} ") + INCLUDE("${PROJECT_SOURCE_DIR}/generateDoc.cmake") + GENERATE_DOCUMENTATION(${PROJECT_SOURCE_DIR}/dox.in) +ENDIF() + +# ---------------------------------------------------------------------------- +# Uninstall target, for "make uninstall" +# 
---------------------------------------------------------------------------- +CONFIGURE_FILE( "${CMAKE_CURRENT_SOURCE_DIR}/cmake_uninstall.cmake.in" "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" IMMEDIATE @ONLY) +ADD_CUSTOM_TARGET(uninstall "${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake") + +# ---------------------------------------------------------------------------- +# create configuration file from .in file (If you use windows take care with paths) +# ---------------------------------------------------------------------------- + +CONFIGURE_FILE("${PROJECT_SOURCE_DIR}/config.cmake.in" "${PROJECT_BINARY_DIR}/Find${PROJECT_NAME}.cmake") +CONFIGURE_FILE("${PROJECT_SOURCE_DIR}/config.cmake.in" "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake") +INSTALL(FILES "${PROJECT_BINARY_DIR}/Find${PROJECT_NAME}.cmake" DESTINATION ${LIB_INSTALL_DIR}/cmake/ ) +INSTALL(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" DESTINATION ${LIB_INSTALL_DIR}/cmake/${PROJECT_NAME} ) + + + + +# ---------------------------------------------------------------------------- +# Program Optimization and debug (Extracted from OpenCV) +# ---------------------------------------------------------------------------- +set(WARNINGS_ARE_ERRORS OFF CACHE BOOL "Treat warnings as errors") +set(WHOLE_PROGRAM_OPTIMIZATION OFF CACHE BOOL "Flags for whole program optimization.") + +set(EXTRA_C_FLAGS "") +set(EXTRA_C_FLAGS_RELEASE "") +set(EXTRA_C_FLAGS_DEBUG "") +set(EXTRA_EXE_LINKER_FLAGS "") +set(EXTRA_EXE_LINKER_FLAGS_RELEASE "") +set(EXTRA_EXE_LINKER_FLAGS_DEBUG "") + +IF(CMAKE_COMPILER_IS_GNUCXX OR MINGW) + set(ENABLE_PROFILING OFF CACHE BOOL "Enable profiling in the GCC compiler (Add flags: -g -pg)") + set(USE_OMIT_FRAME_POINTER ON CACHE BOOL "Enable -fomit-frame-pointer for GCC") + if(${CMAKE_SYSTEM_PROCESSOR} MATCHES arm*) # We can use only -O2 because the -O3 causes gcc crash + set(USE_O2 ON CACHE BOOL "Enable -O2 for GCC") + set(USE_FAST_MATH OFF CACHE BOOL 
"Enable -ffast-math for GCC") + endif() + if(${CMAKE_SYSTEM_PROCESSOR} MATCHES powerpc*) + set(USE_O3 ON CACHE BOOL "Enable -O3 for GCC") + set(USE_POWERPC ON CACHE BOOL "Enable PowerPC for GCC") + endif () + if(${CMAKE_SYSTEM_PROCESSOR} MATCHES amd64* OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES x86_64*) + set(USE_O3 ON CACHE BOOL "Enable -O3 for GCC") + set(USE_FAST_MATH OFF CACHE BOOL "Enable -ffast-math for GCC") + set(USE_MMX ON CACHE BOOL "Enable MMX for GCC") + set(USE_SSE ON CACHE BOOL "Enable SSE for GCC") + set(USE_SSE2 ON CACHE BOOL "Enable SSE2 for GCC") + set(USE_SSE3 ON CACHE BOOL "Enable SSE3 for GCC") + endif() + if(${CMAKE_SYSTEM_PROCESSOR} MATCHES i686* OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES x86) + set(USE_O3 ON CACHE BOOL "Enable -O3 for GCC") + set(USE_FAST_MATH OFF CACHE BOOL "Enable -ffast-math for GCC") + set(USE_MMX ON CACHE BOOL "Enable MMX for GCC") + set(USE_SSE OFF CACHE BOOL "Enable SSE for GCC") + set(USE_SSE2 OFF CACHE BOOL "Enable SSE2 for GCC") + set(USE_SSE3 OFF CACHE BOOL "Enable SSE3 for GCC") + endif () + + set(EXTRA_C_FLAGS "${EXTRA_C_FLAGS} -Wall") + + if(WARNINGS_ARE_ERRORS) + set(EXTRA_C_FLAGS "${EXTRA_C_FLAGS} -Werror") + endif() + + # The -Wno-long-long is required in 64bit systems when including system headers. 
+ if(${CMAKE_SYSTEM_PROCESSOR} MATCHES x86_64* OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES amd64*) + set(EXTRA_C_FLAGS "${EXTRA_C_FLAGS} -Wno-long-long") + endif() + + # Whole program optimization + if(WHOLE_PROGRAM_OPTIMIZATION) + set(EXTRA_C_FLAGS_RELEASE "${EXTRA_C_FLAGS_RELEASE} -fwhole-program --combine") + endif() + + # Other optimizations + if(USE_OMIT_FRAME_POINTER) + set(EXTRA_C_FLAGS_RELEASE "${EXTRA_C_FLAGS_RELEASE} -fomit-frame-pointer") + endif() + if(USE_O2) + set(EXTRA_C_FLAGS_RELEASE "${EXTRA_C_FLAGS_RELEASE} -O2") + endif() + if(USE_O3) + set(EXTRA_C_FLAGS_RELEASE "${EXTRA_C_FLAGS_RELEASE} -O3") + endif() + if(USE_FAST_MATH) + set(EXTRA_C_FLAGS_RELEASE "${EXTRA_C_FLAGS_RELEASE} -ffast-math") + endif() + if(USE_POWERPC) + set(EXTRA_C_FLAGS_RELEASE "${EXTRA_C_FLAGS_RELEASE} -mcpu=G3 -mtune=G5") + endif() + if(USE_MMX) + set(EXTRA_C_FLAGS_RELEASE "${EXTRA_C_FLAGS_RELEASE} -mmmx") + endif() + if(USE_SSE) + set(EXTRA_C_FLAGS_RELEASE "${EXTRA_C_FLAGS_RELEASE} -msse") + endif() + if(USE_SSE2) + set(EXTRA_C_FLAGS_RELEASE "${EXTRA_C_FLAGS_RELEASE} -msse2") + endif() + if(USE_SSE3 AND NOT MINGW) # SSE3 should be disabled under MingW because it generates compiler errors + set(EXTRA_C_FLAGS_RELEASE "${EXTRA_C_FLAGS_RELEASE} -msse3") + endif() + + if(ENABLE_PROFILING) + set(EXTRA_C_FLAGS_RELEASE "${EXTRA_C_FLAGS_RELEASE} -pg -g") + else() + if(NOT APPLE) + set(EXTRA_C_FLAGS "${EXTRA_C_FLAGS} -ffunction-sections") + endif() + endif() + + + set(EXTRA_C_FLAGS_RELEASE "${EXTRA_C_FLAGS_RELEASE} -DNDEBUG ") + set(EXTRA_C_FLAGS_DEBUG "-g3 -O0 -DDEBUG -D_DEBUG -W -Wextra -Wno-return-type ") + + MESSAGE( STATUS "-------------------------------------------------------------------------------" ) + message( STATUS "GNU COMPILER") + MESSAGE( STATUS "-------------------------------------------------------------------------------" ) + + + + +ELSE() # MSVC + + +ENDIF()#END OF COMPILER SPECIFIC OPTIONS +SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS} ${EXTRA_C_FLAGS_RELEASE}") 
+SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS} ${EXTRA_C_FLAGS_DEBUG}") +SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -std=c++14") +SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -std=c++14") +set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELEASE} ${CMAKE_C_FLAGS_DEBUG}") +set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_DEBUG}") +SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${EXTRA_EXE_LINKER_FLAGS}") +SET(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} ${EXTRA_EXE_LINKER_FLAGS_RELEASE}") +SET(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} ${EXTRA_EXE_LINKER_FLAGS_DEBUG}") + + + +#------------------------------------------------ +# DIRS +#------------------------------------------------ +ADD_SUBDIRECTORY(src) +IF (BUILD_UTILS) +ADD_SUBDIRECTORY(utils) +ENDIF() + +IF (BUILD_TESTS) +ADD_SUBDIRECTORY(tests) +ENDIF() + + +# ---------------------------------------------------------------------------- +# display status message for important variables +# ---------------------------------------------------------------------------- +message( STATUS ) +MESSAGE( STATUS "-------------------------------------------------------------------------------" ) +message( STATUS "General configuration for ${PROJECT_NAME} ${PROJECT_VERSION}") +MESSAGE( STATUS "-------------------------------------------------------------------------------" ) +message(" Built as dynamic libs?:" ${BUILD_SHARED_LIBS}) +message(" Compiler:" "${CMAKE_COMPILER}" "${CMAKE_CXX_COMPILER}") + +message( STATUS "Build Type: ${CMAKE_BUILD_TYPE}") +message( STATUS "C++ flags (Release): ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_RELEASE}") +message( STATUS "C++ flags (Debug): ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_DEBUG}") +message( STATUS "C++ flags (Release+Debug): ${CMAKE_CXX_FLAGS_RELWITHDEBINFO}") + +message( STATUS "CMAKE_CXX_FLAGS: ${CMAKE_CXX_FLAGS}") +message( STATUS "CMAKE_BINARY_DIR: ${CMAKE_BINARY_DIR}") + +MESSAGE( STATUS ) 
+MESSAGE( STATUS "CMAKE_SYSTEM_PROCESSOR = ${CMAKE_SYSTEM_PROCESSOR}" ) +MESSAGE( STATUS "CMAKE_INSTALL_PREFIX = ${CMAKE_INSTALL_PREFIX}" ) +MESSAGE( STATUS "CMAKE_BUILD_TYPE = ${CMAKE_BUILD_TYPE}" ) +MESSAGE( STATUS "CMAKE_MODULE_PATH = ${CMAKE_MODULE_PATH}" ) +MESSAGE( STATUS "BUILD_UTILS= ${BUILD_UTILS}" ) +MESSAGE( STATUS "BUILD_TESTS= ${BUILD_TESTS}" ) +MESSAGE( STATUS "OPENCV_DIR= ${OpenCV_DIR} VERSION=${OpenCV_VERSION}" ) + +MESSAGE( STATUS "USE_CONTRIB= ${USE_CONTRIB}" ) + +MESSAGE( STATUS ) +MESSAGE( STATUS "OpenCV_LIB_DIR=${OpenCV_LIB_DIR}") +MESSAGE( STATUS "CMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR}") + +MESSAGE( STATUS ) +MESSAGE( STATUS ) +MESSAGE( STATUS "Change a value with: cmake -D<Variable>=<Value>" ) +MESSAGE( STATUS ) diff --git a/Thirdparty/DBoW2/DBoW3/LICENSE.txt b/Thirdparty/DBoW2/DBoW3/LICENSE.txt new file mode 100644 index 0000000000..7a63fa5940 --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/LICENSE.txt @@ -0,0 +1,33 @@ +DBoW3: bag-of-words library for C++ with generic descriptors + +Copyright (c) 2016 Rafael Muñoz-Salinas +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The original author of the work must be notified of any + redistribution of source code or in binary form. +4. Neither the name of copyright holders nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + + + diff --git a/Thirdparty/DBoW2/DBoW3/README.md b/Thirdparty/DBoW2/DBoW3/README.md new file mode 100644 index 0000000000..d994fcfa2f --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/README.md @@ -0,0 +1,54 @@ + + +DBoW3 +===== + +## +## For an improved version of this project, please see FBOW https://github.com/rmsalinas/fbow. + + + + +DBoW3 is an improved version of the DBow2 library, an open source C++ library for indexing and converting images into a bag-of-words representation. It implements a hierarchical tree for approximating nearest neighbours in the image feature space and creating a visual vocabulary. DBoW3 also implements an image database with inverted and direct files to index images and enable quick queries and feature comparisons. The main differences with the previous DBow2 library are: + + * DBoW3 only requires OpenCV. DBoW2's dependency on DLIB has been removed. + * DBoW3 is able to use both binary and floating point descriptors out of the box. No need to reimplement any class for any descriptor. + * DBoW3 compiles both in Linux and Windows. + * Some pieces of code have been rewritten to optimize speed. The interface of DBoW3 has been simplified. + * Possibility of using binary files. 
Binary files are 4-5 times faster to load/save than yml. Also, they can be compressed. + * Compatible with DBoW2 yml files + +## +## Citing + +If you use this software in an academic work, please cite: +```@online{DBoW3, author = {Rafael Muñoz-Salinas}, + title = {{DBoW3} DBoW3}, + year = 2017, + url = {https://github.com/rmsalinas/DBow3}, + urldate = {2017-02-17} + } +``` + + +## Installation notes + +DBoW3 requires OpenCV only. + +For compiling the utils/demo_general.cpp you must compile against OpenCV 3. If you have installed the contrib_modules, use cmake option -DUSE_CONTRIB=ON to enable SURF. + +## How to use + +Check utils/demo_general.cpp + +### Classes + +DBoW3 has two main classes: `Vocabulary` and `Database`. These implement the visual vocabulary to convert images into bag-of-words vectors and the database to index images. +See utils/demo_general.cpp for an example + +### Load/Store Vocabulary + +The file orbvoc.dbow3 is the ORB vocabulary in ORBSLAM2 but in binary format of DBoW3: https://github.com/raulmur/ORB_SLAM2/tree/master/Vocabulary + + + diff --git a/Thirdparty/DBoW2/DBoW3/cmake_uninstall.cmake.in b/Thirdparty/DBoW2/DBoW3/cmake_uninstall.cmake.in new file mode 100644 index 0000000000..81482da1ac --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/cmake_uninstall.cmake.in @@ -0,0 +1,28 @@ +# ----------------------------------------------- +# File that provides "make uninstall" target +# We use the file 'install_manifest.txt' +# ----------------------------------------------- +IF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") + MESSAGE(FATAL_ERROR "Cannot find install manifest: \"@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt\"") +ENDIF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") + +FILE(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files) +STRING(REGEX REPLACE "\n" ";" files "${files}") +FOREACH(file ${files}) + MESSAGE(STATUS "Uninstalling \"$ENV{DESTDIR}${file}\"") +# IF(EXISTS "$ENV{DESTDIR}${file}") +# 
EXEC_PROGRAM( +# "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" +# OUTPUT_VARIABLE rm_out +# RETURN_VALUE rm_retval +# ) + EXECUTE_PROCESS(COMMAND rm $ENV{DESTDIR}${file}) +# IF(NOT "${rm_retval}" STREQUAL 0) +# MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"") +# ENDIF(NOT "${rm_retval}" STREQUAL 0) +# ELSE(EXISTS "$ENV{DESTDIR}${file}") +# MESSAGE(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.") +# ENDIF(EXISTS "$ENV{DESTDIR}${file}") +ENDFOREACH(file) + + diff --git a/Thirdparty/DBoW2/DBoW3/config.cmake.in b/Thirdparty/DBoW2/DBoW3/config.cmake.in new file mode 100644 index 0000000000..9d15feb95d --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/config.cmake.in @@ -0,0 +1,36 @@ +# =================================================================================== +# @PROJECT_NAME@ CMake configuration file +# +# ** File generated automatically, do not modify ** +# +# Usage from an external project: +# In your CMakeLists.txt, add these lines: +# +# FIND_PACKAGE(@PROJECT_NAME@ REQUIRED ) +# TARGET_LINK_LIBRARIES(MY_TARGET_NAME ${@PROJECT_NAME@_LIBS}) +# +# This file will define the following variables: +# - @PROJECT_NAME@_LIBS : The list of libraries to links against. +# - @PROJECT_NAME@_LIB_DIR : The directory where lib files are. Calling LINK_DIRECTORIES +# with this path is NOT needed. +# - @PROJECT_NAME@_VERSION : The version of this PROJECT_NAME build. Example: "1.2.0" +# - @PROJECT_NAME@_VERSION_MAJOR : Major version part of VERSION. Example: "1" +# - @PROJECT_NAME@_VERSION_MINOR : Minor version part of VERSION. Example: "2" +# - @PROJECT_NAME@_VERSION_PATCH : Patch version part of VERSION. 
Example: "0" +# +# =================================================================================== +INCLUDE_DIRECTORIES("@CMAKE_INSTALL_PREFIX@/include") +SET(@PROJECT_NAME@_INCLUDE_DIRS "@CMAKE_INSTALL_PREFIX@/include") + +LINK_DIRECTORIES("@CMAKE_INSTALL_PREFIX@/@LIB_INSTALL_DIR@") +SET(@PROJECT_NAME@_LIB_DIR "@CMAKE_INSTALL_PREFIX@/@LIB_INSTALL_DIR@") + +SET(@PROJECT_NAME@_LIBS @REQUIRED_LIBRARIES@ @PROJECT_NAME@@PROJECT_DLLVERSION@) +SET(@PROJECT_NAME@_LIBRARIES @REQUIRED_LIBRARIES@ @PROJECT_NAME@@PROJECT_DLLVERSION@) + +SET(@PROJECT_NAME@_FOUND YES) +SET(@PROJECT_NAME@_FOUND "YES") +SET(@PROJECT_NAME@_VERSION @PROJECT_VERSION@) +SET(@PROJECT_NAME@_VERSION_MAJOR @PROJECT_VERSION_MAJOR@) +SET(@PROJECT_NAME@_VERSION_MINOR @PROJECT_VERSION_MINOR@) +SET(@PROJECT_NAME@_VERSION_PATCH @PROJECT_VERSION_PATCH@) diff --git a/Thirdparty/DBoW2/DBoW3/orbvoc.dbow3 b/Thirdparty/DBoW2/DBoW3/orbvoc.dbow3 new file mode 100644 index 0000000000..04c01796f5 Binary files /dev/null and b/Thirdparty/DBoW2/DBoW3/orbvoc.dbow3 differ diff --git a/Thirdparty/DBoW2/DBoW2/BowVector.cpp b/Thirdparty/DBoW2/DBoW3/src/BowVector.cpp similarity index 74% rename from Thirdparty/DBoW2/DBoW2/BowVector.cpp rename to Thirdparty/DBoW2/DBoW3/src/BowVector.cpp index 1337fa3e5b..d132213df7 100644 --- a/Thirdparty/DBoW2/DBoW2/BowVector.cpp +++ b/Thirdparty/DBoW2/DBoW3/src/BowVector.cpp @@ -15,7 +15,7 @@ #include "BowVector.h" -namespace DBoW2 { +namespace DBoW3 { // -------------------------------------------------------------------------- @@ -64,7 +64,7 @@ void BowVector::normalize(LNorm norm_type) double norm = 0.0; BowVector::iterator it; - if(norm_type == DBoW2::L1) + if(norm_type == DBoW3::L1) { for(it = begin(); it != end(); ++it) norm += fabs(it->second); @@ -90,7 +90,7 @@ std::ostream& operator<< (std::ostream &out, const BowVector &v) BowVector::const_iterator vit; std::vector::const_iterator iit; unsigned int i = 0; - const unsigned int N = v.size(); + const size_t N = v.size(); for(vit = 
v.begin(); vit != v.end(); ++vit, ++i) { out << "<" << vit->first << ", " << vit->second << ">"; @@ -123,8 +123,41 @@ void BowVector::saveM(const std::string &filename, size_t W) const f.close(); } +// -------------------------------------------------------------------------- + +void BowVector::toStream(std::ostream &str)const{ + uint32_t s=size(); + str.write((char*)&s,sizeof(s)); + for(auto d:*this){ + str.write((char*)&d.first,sizeof(d.first)); + str.write((char*)&d.second,sizeof(d.second)); + } +} +// -------------------------------------------------------------------------- + +void BowVector::fromStream(std::istream &str){ +clear(); +uint32_t s; + +str.read((char*)&s,sizeof(s)); +for(int i=0;i #include #include - +#include + #include +#include "exports.h" #include #include - -namespace DBoW2 { +#if _WIN32 +#include +#endif +namespace DBoW3 { /// Id of words typedef unsigned int WordId; @@ -25,7 +28,7 @@ typedef unsigned int WordId; /// Value of a word typedef double WordValue; -/// Id of nodes in the vocabulary treee +/// Id of nodes in the vocabulary tree typedef unsigned int NodeId; /// L-norms for normalization @@ -52,20 +55,19 @@ enum ScoringType CHI_SQUARE, KL, BHATTACHARYYA, - DOT_PRODUCT, + DOT_PRODUCT }; /// Vector of words to represent images -class BowVector: +class DBOW_API BowVector: public std::map { - friend class boost::serialization::access; + friend class boost::serialization::access; template void serialize(Archive& ar, const int version) { ar & boost::serialization::base_object >(*this); } - public: /** @@ -112,8 +114,14 @@ class BowVector: * @param W number of words in the vocabulary */ void saveM(const std::string &filename, size_t W) const; + + //returns a unique number from the configuration + uint64_t getSignature()const; + //serialization + void toStream(std::ostream &str)const; + void fromStream(std::istream &str); }; -} // namespace DBoW2 +} // namespace DBoW3 #endif diff --git a/Thirdparty/DBoW2/DBoW3/src/CMakeLists.txt 
b/Thirdparty/DBoW2/DBoW3/src/CMakeLists.txt new file mode 100644 index 0000000000..a9c52b4620 --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/src/CMakeLists.txt @@ -0,0 +1,35 @@ +INCLUDE_DIRECTORIES(. ./utils/) + +FILE(GLOB hdrs_base "*.h" ) +FILE(GLOB srcs_base "*.c*") + +FILE(GLOB hdrs ${hdrs_base} ) +FILE(GLOB srcs ${srcs_base} ) + + +ADD_LIBRARY(${PROJECT_NAME} ${srcs} ${hdrs}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR} ) + +SET_TARGET_PROPERTIES(${PROJECT_NAME} PROPERTIES # create *nix style library versions + symbolic links + DEFINE_SYMBOL DBOW_DSO_EXPORTS + VERSION ${PROJECT_VERSION} + SOVERSION ${PROJECT_SOVERSION} + CLEAN_DIRECT_OUTPUT 1 # allow creating static and shared libs without conflicts + OUTPUT_NAME "${PROJECT_NAME}${PROJECT_DLLVERSION}" # avoid conflicts between library and binary target names +) + +TARGET_LINK_LIBRARIES(${PROJECT_NAME} ${REQUIRED_LIBRARIES} ) + +INSTALL(TARGETS ${PROJECT_NAME} + RUNTIME DESTINATION bin COMPONENT main # Install the dll file in bin directory + LIBRARY DESTINATION ${LIB_INSTALL_DIR} PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE COMPONENT main + ARCHIVE DESTINATION ${LIB_INSTALL_DIR} COMPONENT main) # Install the dll.a file in lib directory + + + +INSTALL(FILES ${hdrs_base} + DESTINATION include/${PROJECT_NAME} + COMPONENT main) + + + diff --git a/Thirdparty/DBoW2/DBoW3/src/DBoW3.h b/Thirdparty/DBoW2/DBoW3/src/DBoW3.h new file mode 100644 index 0000000000..c8bd762541 --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/src/DBoW3.h @@ -0,0 +1,68 @@ +/* + * File: DBoW2.h + * Date: November 2011 + * Author: Dorian Galvez-Lopez + * Description: Generic include file for the DBoW2 classes and + * the specialized vocabularies and databases + * License: see the LICENSE.txt file + * + */ + +/*! \mainpage DBoW3 Library + * + * DBoW3 library for C++: + * Bag-of-word image database for image retrieval. 
+ * + * Written by Rafael Muñoz Salinas, + * University of Cordoba (Spain) + * + * + * \section requirements Requirements + * This library requires the OpenCV libraries, + * as well as the boost::dynamic_bitset class. + * + * \section citation Citation + * If you use this software in academic works, please cite: +
+   @@ARTICLE{GalvezTRO12,
+    author={Galvez-Lopez, Dorian and Tardos, J. D.}, 
+    journal={IEEE Transactions on Robotics},
+    title={Bags of Binary Words for Fast Place Recognition in Image Sequences},
+    year={2012},
+    month={October},
+    volume={28},
+    number={5},
+    pages={1188--1197},
+    doi={10.1109/TRO.2012.2197158},
+    ISSN={1552-3098}
+  }
+ 
+ * + * \section license License + * This file is licensed under a Creative Commons + * Attribution-NonCommercial-ShareAlike 3.0 license. + * This file can be freely used and users can use, download and edit this file + * provided that credit is attributed to the original author. No users are + * permitted to use this file for commercial purposes unless explicit permission + * is given by the original author. Derivative works must be licensed using the + * same or similar license. + * Check http://creativecommons.org/licenses/by-nc-sa/3.0/ to obtain further + * details. + * + */ + +#ifndef __D_T_DBOW3__ +#define __D_T_DBOW3__ + +/// Includes all the data structures to manage vocabularies and image databases + +#include "Vocabulary.h" +#include "Database.h" +#include "BowVector.h" +#include "FeatureVector.h" +#include "QueryResults.h" + + + +#endif + diff --git a/Thirdparty/DBoW2/DBoW3/src/Database.cpp b/Thirdparty/DBoW2/DBoW3/src/Database.cpp new file mode 100644 index 0000000000..d8a1b8160e --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/src/Database.cpp @@ -0,0 +1,1004 @@ +#include "Database.h" + +namespace DBoW3{ + +// -------------------------------------------------------------------------- + + +Database::Database + (bool use_di, int di_levels) + : m_voc(NULL), m_use_di(use_di), m_dilevels(di_levels), m_nentries(0) +{ +} + +// -------------------------------------------------------------------------- + +Database::Database + (const Vocabulary &voc, bool use_di, int di_levels) + : m_voc(NULL), m_use_di(use_di), m_dilevels(di_levels) +{ + setVocabulary(voc); + clear(); +} + +// -------------------------------------------------------------------------- + + +Database::Database + (const Database &db) + : m_voc(NULL) +{ + *this = db; +} + +// -------------------------------------------------------------------------- + + +Database::Database + (const std::string &filename) + : m_voc(NULL) +{ + load(filename); +} + +// 
-------------------------------------------------------------------------- + + +Database::Database + (const char *filename) + : m_voc(NULL) +{ + load(filename); +} + +// -------------------------------------------------------------------------- + + +Database::~Database(void) +{ + delete m_voc; +} + +// -------------------------------------------------------------------------- + + +Database& Database::operator= + (const Database &db) +{ + if(this != &db) + { + m_dfile = db.m_dfile; + m_dilevels = db.m_dilevels; + m_ifile = db.m_ifile; + m_nentries = db.m_nentries; + m_use_di = db.m_use_di; + if (db.m_voc!=0) setVocabulary(*db.m_voc); + } + return *this; +} + +// -------------------------------------------------------------------------- + +EntryId Database::add( + const cv::Mat &features, + BowVector *bowvec, FeatureVector *fvec) +{ + std::vector vf(features.rows); + for(int r=0;r &features, + BowVector *bowvec, FeatureVector *fvec) +{ + BowVector aux; + BowVector& v = (bowvec ? *bowvec : aux); + + if(m_use_di && fvec != NULL) + { + m_voc->transform(features, v, *fvec, m_dilevels); // with features + return add(v, *fvec); + } + else if(m_use_di) + { + FeatureVector fv; + m_voc->transform(features, v, fv, m_dilevels); // with features + return add(v, fv); + } + else if(fvec != NULL) + { + m_voc->transform(features, v, *fvec, m_dilevels); // with features + return add(v); + } + else + { + m_voc->transform(features, v); // with features + return add(v); + } +} + +// --------------------------------------------------------------------------- + + +EntryId Database::add(const BowVector &v, + const FeatureVector &fv) +{ + EntryId entry_id = m_nentries++; + + BowVector::const_iterator vit; + std::vector::const_iterator iit; + + if(m_use_di) + { + // update direct file + if(entry_id == m_dfile.size()) + { + m_dfile.push_back(fv); + } + else + { + m_dfile[entry_id] = fv; + } + } + + // update inverted file + for(vit = v.begin(); vit != v.end(); ++vit) + { + const WordId& 
word_id = vit->first; + const WordValue& word_weight = vit->second; + + IFRow& ifrow = m_ifile[word_id]; + ifrow.push_back(IFPair(entry_id, word_weight)); + } + + return entry_id; +} + +// -------------------------------------------------------------------------- + + + void Database::setVocabulary + (const Vocabulary& voc) +{ + delete m_voc; + m_voc = new Vocabulary(voc); + clear(); +} + +// -------------------------------------------------------------------------- + + + void Database::setVocabulary + (const Vocabulary& voc, bool use_di, int di_levels) +{ + m_use_di = use_di; + m_dilevels = di_levels; + delete m_voc; + m_voc = new Vocabulary(voc); + clear(); +} + +// -------------------------------------------------------------------------- + + + const Vocabulary* +Database::getVocabulary() const +{ + return m_voc; +} + +// -------------------------------------------------------------------------- + + + void Database::clear() +{ + // resize vectors + m_ifile.resize(0); + m_ifile.resize(m_voc->size()); + m_dfile.resize(0); + m_nentries = 0; +} + +// -------------------------------------------------------------------------- + + +void Database::allocate(int nd, int ni) +{ + // m_ifile already contains |words| items + if(ni > 0) + { + for(auto rit = m_ifile.begin(); rit != m_ifile.end(); ++rit) + { + int n = (int)rit->size(); + if(ni > n) + { + rit->resize(ni); + rit->resize(n); + } + } + } + + if(m_use_di && (int)m_dfile.size() < nd) + { + m_dfile.resize(nd); + } +} + + + +// -------------------------------------------------------------------------- + +void Database::query( + const cv::Mat &features, + QueryResults &ret, int max_results, int max_id) const +{ + + std::vector vf(features.rows); + for(int r=0;r &features, + QueryResults &ret, int max_results, int max_id) const +{ + BowVector vec; + m_voc->transform(features, vec); + query(vec, ret, max_results, max_id); +} + +// -------------------------------------------------------------------------- + + +void 
Database::query( + const BowVector &vec, + QueryResults &ret, int max_results, int max_id) const +{ + ret.resize(0); + + switch(m_voc->getScoringType()) + { + case L1_NORM: + queryL1(vec, ret, max_results, max_id); + break; + + case L2_NORM: + queryL2(vec, ret, max_results, max_id); + break; + + case CHI_SQUARE: + queryChiSquare(vec, ret, max_results, max_id); + break; + + case KL: + queryKL(vec, ret, max_results, max_id); + break; + + case BHATTACHARYYA: + queryBhattacharyya(vec, ret, max_results, max_id); + break; + + case DOT_PRODUCT: + queryDotProduct(vec, ret, max_results, max_id); + break; + } +} + +// -------------------------------------------------------------------------- + + +void Database::queryL1(const BowVector &vec, + QueryResults &ret, int max_results, int max_id) const +{ + BowVector::const_iterator vit; + + std::map pairs; + std::map::iterator pit; + + for(vit = vec.begin(); vit != vec.end(); ++vit) + { + const WordId word_id = vit->first; + const WordValue& qvalue = vit->second; + + const IFRow& row = m_ifile[word_id]; + + // IFRows are sorted in ascending entry_id order + + for(auto rit = row.begin(); rit != row.end(); ++rit) + { + const EntryId entry_id = rit->entry_id; + const WordValue& dvalue = rit->word_weight; + + if((int)entry_id < max_id || max_id == -1) + { + double value = fabs(qvalue - dvalue) - fabs(qvalue) - fabs(dvalue); + + pit = pairs.lower_bound(entry_id); + if(pit != pairs.end() && !(pairs.key_comp()(entry_id, pit->first))) + { + pit->second += value; + } + else + { + pairs.insert(pit, + std::map::value_type(entry_id, value)); + } + } + + } // for each inverted row + } // for each query word + + // move to vector + ret.reserve(pairs.size()); + for(pit = pairs.begin(); pit != pairs.end(); ++pit) + { + ret.push_back(Result(pit->first, pit->second)); + } + + // resulting "scores" are now in [-2 best .. 
0 worst] + + // sort vector in ascending order of score + std::sort(ret.begin(), ret.end()); + // (ret is inverted now --the lower the better--) + + // cut vector + if(max_results > 0 && (int)ret.size() > max_results) + ret.resize(max_results); + + // complete and scale score to [0 worst .. 1 best] + // ||v - w||_{L1} = 2 + Sum(|v_i - w_i| - |v_i| - |w_i|) + // for all i | v_i != 0 and w_i != 0 + // (Nister, 2006) + // scaled_||v - w||_{L1} = 1 - 0.5 * ||v - w||_{L1} + QueryResults::iterator qit; + for(qit = ret.begin(); qit != ret.end(); qit++) + qit->Score = -qit->Score/2.0; +} + +// -------------------------------------------------------------------------- + + +void Database::queryL2(const BowVector &vec, + QueryResults &ret, int max_results, int max_id) const +{ + BowVector::const_iterator vit; + + std::map pairs; + std::map::iterator pit; + + //map counters; + //map::iterator cit; + + for(vit = vec.begin(); vit != vec.end(); ++vit) + { + const WordId word_id = vit->first; + const WordValue& qvalue = vit->second; + + const IFRow& row = m_ifile[word_id]; + + // IFRows are sorted in ascending entry_id order + + for(auto rit = row.begin(); rit != row.end(); ++rit) + { + const EntryId entry_id = rit->entry_id; + const WordValue& dvalue = rit->word_weight; + + if((int)entry_id < max_id || max_id == -1) + { + double value = - qvalue * dvalue; // minus sign for sorting trick + + pit = pairs.lower_bound(entry_id); + //cit = counters.lower_bound(entry_id); + if(pit != pairs.end() && !(pairs.key_comp()(entry_id, pit->first))) + { + pit->second += value; + //cit->second += 1; + } + else + { + pairs.insert(pit, + std::map::value_type(entry_id, value)); + + //counters.insert(cit, + // map::value_type(entry_id, 1)); + } + } + + } // for each inverted row + } // for each query word + + // move to vector + ret.reserve(pairs.size()); + //cit = counters.begin(); + for(pit = pairs.begin(); pit != pairs.end(); ++pit)//, ++cit) + { + ret.push_back(Result(pit->first, 
pit->second));// / cit->second)); + } + + // resulting "scores" are now in [-1 best .. 0 worst] + + // sort vector in ascending order of score + std::sort(ret.begin(), ret.end()); + // (ret is inverted now --the lower the better--) + + // cut vector + if(max_results > 0 && (int)ret.size() > max_results) + ret.resize(max_results); + + // complete and scale score to [0 worst .. 1 best] + // ||v - w||_{L2} = sqrt( 2 - 2 * Sum(v_i * w_i) + // for all i | v_i != 0 and w_i != 0 ) + // (Nister, 2006) + QueryResults::iterator qit; + for(qit = ret.begin(); qit != ret.end(); qit++) + { + if(qit->Score <= -1.0) // rounding error + qit->Score = 1.0; + else + qit->Score = 1.0 - sqrt(1.0 + qit->Score); // [0..1] + // the + sign is ok, it is due to - sign in + // value = - qvalue * dvalue + } + +} + +// -------------------------------------------------------------------------- + + +void Database::queryChiSquare(const BowVector &vec, + QueryResults &ret, int max_results, int max_id) const +{ + BowVector::const_iterator vit; + + std::map > pairs; + std::map >::iterator pit; + + std::map > sums; // < sum vi, sum wi > + std::map >::iterator sit; + + // In the current implementation, we suppose vec is not normalized + + //map expected; + //map::iterator eit; + + for(vit = vec.begin(); vit != vec.end(); ++vit) + { + const WordId word_id = vit->first; + const WordValue& qvalue = vit->second; + + const IFRow& row = m_ifile[word_id]; + + // IFRows are sorted in ascending entry_id order + + for(auto rit = row.begin(); rit != row.end(); ++rit) + { + const EntryId entry_id = rit->entry_id; + const WordValue& dvalue = rit->word_weight; + + if((int)entry_id < max_id || max_id == -1) + { + // (v-w)^2/(v+w) - v - w = -4 vw/(v+w) + // we move the 4 out + double value = 0; + if(qvalue + dvalue != 0.0) // words may have weight zero + value = - qvalue * dvalue / (qvalue + dvalue); + + pit = pairs.lower_bound(entry_id); + sit = sums.lower_bound(entry_id); + //eit = expected.lower_bound(entry_id); + 
if(pit != pairs.end() && !(pairs.key_comp()(entry_id, pit->first))) + { + pit->second.first += value; + pit->second.second += 1; + //eit->second += dvalue; + sit->second.first += qvalue; + sit->second.second += dvalue; + } + else + { + pairs.insert(pit, + std::map >::value_type(entry_id, + std::make_pair(value, 1) )); + //expected.insert(eit, + // map::value_type(entry_id, dvalue)); + + sums.insert(sit, + std::map >::value_type(entry_id, + std::make_pair(qvalue, dvalue) )); + } + } + + } // for each inverted row + } // for each query word + + // move to vector + ret.reserve(pairs.size()); + sit = sums.begin(); + for(pit = pairs.begin(); pit != pairs.end(); ++pit, ++sit) + { + if(pit->second.second >= MIN_COMMON_WORDS) + { + ret.push_back(Result(pit->first, pit->second.first)); + ret.back().nWords = pit->second.second; + ret.back().sumCommonVi = sit->second.first; + ret.back().sumCommonWi = sit->second.second; + ret.back().expectedChiScore = + 2 * sit->second.second / (1 + sit->second.second); + } + + //ret.push_back(Result(pit->first, pit->second)); + } + + // resulting "scores" are now in [-2 best .. 0 worst] + // we have to add +2 to the scores to obtain the chi square score + + // sort vector in ascending order of score + std::sort(ret.begin(), ret.end()); + // (ret is inverted now --the lower the better--) + + // cut vector + if(max_results > 0 && (int)ret.size() > max_results) + ret.resize(max_results); + + // complete and scale score to [0 worst .. 1 best] + QueryResults::iterator qit; + for(qit = ret.begin(); qit != ret.end(); qit++) + { + // this takes the 4 into account + qit->Score = - 2. 
* qit->Score; // [0..1] + + qit->chiScore = qit->Score; + } + +} + +// -------------------------------------------------------------------------- + + +void Database::queryKL(const BowVector &vec, + QueryResults &ret, int max_results, int max_id) const +{ + BowVector::const_iterator vit; + + std::map pairs; + std::map::iterator pit; + + for(vit = vec.begin(); vit != vec.end(); ++vit) + { + const WordId word_id = vit->first; + const WordValue& vi = vit->second; + + const IFRow& row = m_ifile[word_id]; + + // IFRows are sorted in ascending entry_id order + + for(auto rit = row.begin(); rit != row.end(); ++rit) + { + const EntryId entry_id = rit->entry_id; + const WordValue& wi = rit->word_weight; + + if((int)entry_id < max_id || max_id == -1) + { + double value = 0; + if(vi != 0 && wi != 0) value = vi * log(vi/wi); + + pit = pairs.lower_bound(entry_id); + if(pit != pairs.end() && !(pairs.key_comp()(entry_id, pit->first))) + { + pit->second += value; + } + else + { + pairs.insert(pit, + std::map::value_type(entry_id, value)); + } + } + + } // for each inverted row + } // for each query word + + // resulting "scores" are now in [-X worst .. 0 best .. X worst] + // but we cannot make sure which ones are better without calculating + // the complete score + + // complete scores and move to vector + ret.reserve(pairs.size()); + for(pit = pairs.begin(); pit != pairs.end(); ++pit) + { + EntryId eid = pit->first; + double value = 0.0; + + for(vit = vec.begin(); vit != vec.end(); ++vit) + { + const WordValue &vi = vit->second; + const IFRow& row = m_ifile[vit->first]; + + if(vi != 0) + { + if(row.end() == find(row.begin(), row.end(), eid )) + { + value += vi * (log(vi) - GeneralScoring::LOG_EPS); + } + } + } + + pit->second += value; + + // to vector + ret.push_back(Result(pit->first, pit->second)); + } + + // real scores are now in [0 best .. 
X worst] + + // sort vector in ascending order + // (scores are inverted now --the lower the better--) + std::sort(ret.begin(), ret.end()); + + // cut vector + if(max_results > 0 && (int)ret.size() > max_results) + ret.resize(max_results); + + // cannot scale scores + +} + +// -------------------------------------------------------------------------- + + +void Database::queryBhattacharyya( + const BowVector &vec, QueryResults &ret, int max_results, int max_id) const +{ + BowVector::const_iterator vit; + + //map pairs; + //map::iterator pit; + + std::map > pairs; // > + std::map >::iterator pit; + + for(vit = vec.begin(); vit != vec.end(); ++vit) + { + const WordId word_id = vit->first; + const WordValue& qvalue = vit->second; + + const IFRow& row = m_ifile[word_id]; + + // IFRows are sorted in ascending entry_id order + + for(auto rit = row.begin(); rit != row.end(); ++rit) + { + const EntryId entry_id = rit->entry_id; + const WordValue& dvalue = rit->word_weight; + + if((int)entry_id < max_id || max_id == -1) + { + double value = sqrt(qvalue * dvalue); + + pit = pairs.lower_bound(entry_id); + if(pit != pairs.end() && !(pairs.key_comp()(entry_id, pit->first))) + { + pit->second.first += value; + pit->second.second += 1; + } + else + { + pairs.insert(pit, + std::map >::value_type(entry_id, + std::make_pair(value, 1))); + } + } + + } // for each inverted row + } // for each query word + + // move to vector + ret.reserve(pairs.size()); + for(pit = pairs.begin(); pit != pairs.end(); ++pit) + { + if(pit->second.second >= MIN_COMMON_WORDS) + { + ret.push_back(Result(pit->first, pit->second.first)); + ret.back().nWords = pit->second.second; + ret.back().bhatScore = pit->second.first; + } + } + + // scores are already in [0..1] + + // sort vector in descending order + std::sort(ret.begin(), ret.end(), Result::gt); + + // cut vector + if(max_results > 0 && (int)ret.size() > max_results) + ret.resize(max_results); + +} + +// 
--------------------------------------------------------------------------- + + +void Database::queryDotProduct( + const BowVector &vec, QueryResults &ret, int max_results, int max_id) const +{ + BowVector::const_iterator vit; + + std::map pairs; + std::map::iterator pit; + + for(vit = vec.begin(); vit != vec.end(); ++vit) + { + const WordId word_id = vit->first; + const WordValue& qvalue = vit->second; + + const IFRow& row = m_ifile[word_id]; + + // IFRows are sorted in ascending entry_id order + + for(auto rit = row.begin(); rit != row.end(); ++rit) + { + const EntryId entry_id = rit->entry_id; + const WordValue& dvalue = rit->word_weight; + + if((int)entry_id < max_id || max_id == -1) + { + double value; + if(this->m_voc->getWeightingType() == BINARY) + value = 1; + else + value = qvalue * dvalue; + + pit = pairs.lower_bound(entry_id); + if(pit != pairs.end() && !(pairs.key_comp()(entry_id, pit->first))) + { + pit->second += value; + } + else + { + pairs.insert(pit, + std::map::value_type(entry_id, value)); + } + } + + } // for each inverted row + } // for each query word + + // move to vector + ret.reserve(pairs.size()); + for(pit = pairs.begin(); pit != pairs.end(); ++pit) + { + ret.push_back(Result(pit->first, pit->second)); + } + + // scores are the greater the better + + // sort vector in descending order + std::sort(ret.begin(), ret.end(), Result::gt); + + // cut vector + if(max_results > 0 && (int)ret.size() > max_results) + ret.resize(max_results); + + // these scores cannot be scaled +} + +// --------------------------------------------------------------------------- + + +const FeatureVector& Database::retrieveFeatures + (EntryId id) const +{ + assert(id < size()); + return m_dfile[id]; +} + +// -------------------------------------------------------------------------- + + +void Database::save(const std::string &filename) const +{ + cv::FileStorage fs(filename.c_str(), cv::FileStorage::WRITE); + if(!fs.isOpened()) throw std::string("Could not open file 
") + filename; + + save(fs); +} + +// -------------------------------------------------------------------------- + + +void Database::save(cv::FileStorage &fs, + const std::string &name) const +{ + // Format YAML: + // vocabulary { ... see TemplatedVocabulary::save } + // database + // { + // nEntries: + // usingDI: + // diLevels: + // invertedIndex + // [ + // [ + // { + // imageId: + // weight: + // } + // ] + // ] + // directIndex + // [ + // [ + // { + // nodeId: + // features: [ ] + // } + // ] + // ] + + // invertedIndex[i] is for the i-th word + // directIndex[i] is for the i-th entry + // directIndex may be empty if not using direct index + // + // imageId's and nodeId's must be stored in ascending order + // (according to the construction of the indexes) + + m_voc->save(fs); + + fs << name << "{"; + + fs << "nEntries" << m_nentries; + fs << "usingDI" << (m_use_di ? 1 : 0); + fs << "diLevels" << m_dilevels; + + fs << "invertedIndex" << "["; + + for(auto iit = m_ifile.begin(); iit != m_ifile.end(); ++iit) + { + fs << "["; // word of IF + for(auto irit = iit->begin(); irit != iit->end(); ++irit) + { + fs << "{:" + << "imageId" << (int)irit->entry_id + << "weight" << irit->word_weight + << "}"; + } + fs << "]"; // word of IF + } + + fs << "]"; // invertedIndex + + fs << "directIndex" << "["; + + for(auto dit = m_dfile.begin(); dit != m_dfile.end(); ++dit) + { + fs << "["; // entry of DF + + for(auto drit = dit->begin(); drit != dit->end(); ++drit) + { + NodeId nid = drit->first; + const std::vector& features = drit->second; + + // save info of last_nid + fs << "{"; + fs << "nodeId" << (int)nid; + // msvc++ 2010 with opencv 2.3.1 does not allow FileStorage::operator<< + // with vectors of unsigned int + fs << "features" << "[" + << *(const std::vector*)(&features) << "]"; + fs << "}"; + } + + fs << "]"; // entry of DF + } + + fs << "]"; // directIndex + + fs << "}"; // database +} + +// -------------------------------------------------------------------------- + 
+ +void Database::load(const std::string &filename) +{ + cv::FileStorage fs(filename.c_str(), cv::FileStorage::READ); + if(!fs.isOpened()) throw std::string("Could not open file ") + filename; + + load(fs); +} + +// -------------------------------------------------------------------------- + + +void Database::load(const cv::FileStorage &fs, + const std::string &name) +{ + // load voc first + // subclasses must instantiate m_voc before calling this ::load + if(!m_voc) m_voc = new Vocabulary; + + m_voc->load(fs); + + // load database now + clear(); // resizes inverted file + + cv::FileNode fdb = fs[name]; + + m_nentries = (int)fdb["nEntries"]; + m_use_di = (int)fdb["usingDI"] != 0; + m_dilevels = (int)fdb["diLevels"]; + + cv::FileNode fn = fdb["invertedIndex"]; + for(WordId wid = 0; wid < fn.size(); ++wid) + { + cv::FileNode fw = fn[wid]; + + for(unsigned int i = 0; i < fw.size(); ++i) + { + EntryId eid = (int)fw[i]["imageId"]; + WordValue v = fw[i]["weight"]; + + m_ifile[wid].push_back(IFPair(eid, v)); + } + } + + if(m_use_di) + { + fn = fdb["directIndex"]; + + m_dfile.resize(fn.size()); + assert(m_nentries == (int)fn.size()); + + FeatureVector::iterator dit; + for(EntryId eid = 0; eid < fn.size(); ++eid) + { + cv::FileNode fe = fn[eid]; + + m_dfile[eid].clear(); + for(unsigned int i = 0; i < fe.size(); ++i) + { + NodeId nid = (int)fe[i]["nodeId"]; + + dit = m_dfile[eid].insert(m_dfile[eid].end(), + make_pair(nid, std::vector() )); + + // this failed to compile with some opencv versions (2.3.1) + //fe[i]["features"] >> dit->second; + + // this was ok until OpenCV 2.4.1 + //std::vector aux; + //fe[i]["features"] >> aux; // OpenCV < 2.4.1 + //dit->second.resize(aux.size()); + //std::copy(aux.begin(), aux.end(), dit->second.begin()); + + cv::FileNode ff = fe[i]["features"][0]; + dit->second.reserve(ff.size()); + + cv::FileNodeIterator ffit; + for(ffit = ff.begin(); ffit != ff.end(); ++ffit) + { + dit->second.push_back((int)*ffit); + } + } + } // for each entry + } // 
if use_id + +} + + +std::ostream& operator<<(std::ostream &os, + const Database &db) +{ + os << "Database: Entries = " << db.size() << ", " + "Using direct index = " << (db.usingDirectIndex() ? "yes" : "no"); + + if(db.usingDirectIndex()) + os << ", Direct index levels = " << db.getDirectIndexLevels(); + + os << ". " << *db.getVocabulary(); + return os; +} + +} diff --git a/Thirdparty/DBoW2/DBoW3/src/Database.h b/Thirdparty/DBoW2/DBoW3/src/Database.h new file mode 100644 index 0000000000..f548c9df28 --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/src/Database.h @@ -0,0 +1,356 @@ +/** + * File: Database.h + * Date: March 2011 + * Modified By Rafael Muñoz in 2016 + * Author: Dorian Galvez-Lopez + * Description: database of images + * License: see the LICENSE.txt file + * + */ + +#ifndef __D_T_DATABASE__ +#define __D_T_DATABASE__ + +#include +#include +#include +#include +#include +#include + +#include "Vocabulary.h" +#include "QueryResults.h" +#include "ScoringObject.h" +#include "BowVector.h" +#include "FeatureVector.h" +#include "exports.h" + +namespace DBoW3 { + +// For query functions +static int MIN_COMMON_WORDS = 5; + + /// Database +class DBOW_API Database +{ +public: + + /** + * Creates an empty database without vocabulary + * @param use_di a direct index is used to store feature indexes + * @param di_levels levels to go up the vocabulary tree to select the + * node id to store in the direct index when adding images + */ + explicit Database(bool use_di = true, int di_levels = 0); + + /** + * Creates a database with the given vocabulary + * @param T class inherited from Vocabulary + * @param voc vocabulary + * @param use_di a direct index is used to store feature indexes + * @param di_levels levels to go up the vocabulary tree to select the + * node id to store in the direct index when adding images + */ + + explicit Database(const Vocabulary &voc, bool use_di = true, + int di_levels = 0); + + /** + * Copy constructor. 
Copies the vocabulary too + * @param db object to copy + */ + Database(const Database &db); + + /** + * Creates the database from a file + * @param filename + */ + Database(const std::string &filename); + + /** + * Creates the database from a file + * @param filename + */ + Database(const char *filename); + + /** + * Destructor + */ + virtual ~Database(void); + + /** + * Copies the given database and its vocabulary + * @param db database to copy + */ + Database& operator=( + const Database &db); + + /** + * Sets the vocabulary to use and clears the content of the database. + * @param T class inherited from Vocabulary + * @param voc vocabulary to copy + */ + void setVocabulary(const Vocabulary &voc); + + /** + * Sets the vocabulary to use and the direct index parameters, and clears + * the content of the database + * @param T class inherited from Vocabulary + * @param voc vocabulary to copy + * @param use_di a direct index is used to store feature indexes + * @param di_levels levels to go up the vocabulary tree to select the + * node id to store in the direct index when adding images + */ + + void setVocabulary(const Vocabulary& voc, bool use_di, int di_levels = 0); + + /** + * Returns a pointer to the vocabulary used + * @return vocabulary + */ + const Vocabulary* getVocabulary() const; + + /** + * Allocates some memory for the direct and inverted indexes + * @param nd number of expected image entries in the database + * @param ni number of expected words per image + * @note Use 0 to ignore a parameter + */ + void allocate(int nd = 0, int ni = 0); + + /** + * Adds an entry to the database and returns its index + * @param features features of the new entry + * @param bowvec if given, the bow vector of these features is returned + * @param fvec if given, the vector of nodes and feature indexes is returned + * @return id of new entry + */ + EntryId add(const std::vector &features, + BowVector *bowvec = NULL, FeatureVector *fvec = NULL); + /** + * Adds an entry to the 
database and returns its index + * @param features features of the new entry, one per row + * @param bowvec if given, the bow vector of these features is returned + * @param fvec if given, the vector of nodes and feature indexes is returned + * @return id of new entry + */ + EntryId add(const cv::Mat &features, + BowVector *bowvec = NULL, FeatureVector *fvec = NULL); + + /** + * Adss an entry to the database and returns its index + * @param vec bow vector + * @param fec feature vector to add the entry. Only necessary if using the + * direct index + * @return id of new entry + */ + EntryId add(const BowVector &vec, + const FeatureVector &fec = FeatureVector() ); + + /** + * Empties the database + */ + void clear(); + + /** + * Returns the number of entries in the database + * @return number of entries in the database + */ + unsigned int size() const{ return m_nentries;} + + + /** + * Checks if the direct index is being used + * @return true iff using direct index + */ + bool usingDirectIndex() const{ return m_use_di;} + + /** + * Returns the di levels when using direct index + * @return di levels + */ + int getDirectIndexLevels() const{ return m_dilevels;} + + /** + * Queries the database with some features + * @param features query features + * @param ret (out) query results + * @param max_results number of results to return. <= 0 means all + * @param max_id only entries with id <= max_id are returned in ret. + * < 0 means all + */ + void query(const std::vector &features, QueryResults &ret, + int max_results = 1, int max_id = -1) const; + /** + * Queries the database with some features + * @param features query features,one per row + * @param ret (out) query results + * @param max_results number of results to return. <= 0 means all + * @param max_id only entries with id <= max_id are returned in ret. 
+ * < 0 means all + */ + void query(const cv::Mat &features, QueryResults &ret, + int max_results = 1, int max_id = -1) const; + + /** + * Queries the database with a vector + * @param vec bow vector already normalized + * @param ret results + * @param max_results number of results to return. <= 0 means all + * @param max_id only entries with id <= max_id are returned in ret. + * < 0 means all + */ + void query(const BowVector &vec, QueryResults &ret, + int max_results = 1, int max_id = -1) const; + + /** + * Returns the a feature vector associated with a database entry + * @param id entry id (must be < size()) + * @return const reference to map of nodes and their associated features in + * the given entry + */ + const FeatureVector& retrieveFeatures(EntryId id) const; + + /** + * Stores the database in a file + * @param filename + */ + void save(const std::string &filename) const; + + /** + * Loads the database from a file + * @param filename + */ + void load(const std::string &filename); + + /** + * Stores the database in the given file storage structure + * @param fs + * @param name node name + */ + virtual void save(cv::FileStorage &fs, + const std::string &name = "database") const; + + /** + * Loads the database from the given file storage structure + * @param fs + * @param name node name + */ + virtual void load(const cv::FileStorage &fs, + const std::string &name = "database"); + + // -------------------------------------------------------------------------- + + /** + * Writes printable information of the database + * @param os stream to write to + * @param db + */ + DBOW_API friend std::ostream& operator<<(std::ostream &os, + const Database &db); + + + +protected: + + /// Query with L1 scoring + void queryL1(const BowVector &vec, QueryResults &ret, + int max_results, int max_id) const; + + /// Query with L2 scoring + void queryL2(const BowVector &vec, QueryResults &ret, + int max_results, int max_id) const; + + /// Query with Chi square scoring + void 
queryChiSquare(const BowVector &vec, QueryResults &ret, + int max_results, int max_id) const; + + /// Query with Bhattacharyya scoring + void queryBhattacharyya(const BowVector &vec, QueryResults &ret, + int max_results, int max_id) const; + + /// Query with KL divergence scoring + void queryKL(const BowVector &vec, QueryResults &ret, + int max_results, int max_id) const; + + /// Query with dot product scoring + void queryDotProduct(const BowVector &vec, QueryResults &ret, + int max_results, int max_id) const; + +protected: + + /* Inverted file declaration */ + + /// Item of IFRow + struct IFPair + { + /// Entry id + EntryId entry_id; + + /// Word weight in this entry + WordValue word_weight; + + /** + * Creates an empty pair + */ + IFPair(){} + + /** + * Creates an inverted file pair + * @param eid entry id + * @param wv word weight + */ + IFPair(EntryId eid, WordValue wv): entry_id(eid), word_weight(wv) {} + + /** + * Compares the entry ids + * @param eid + * @return true iff this entry id is the same as eid + */ + inline bool operator==(EntryId eid) const { return entry_id == eid; } + }; + + /// Row of InvertedFile + typedef std::list IFRow; + // IFRows are sorted in ascending entry_id order + + /// Inverted index + typedef std::vector InvertedFile; + // InvertedFile[word_id] --> inverted file of that word + + /* Direct file declaration */ + + /// Direct index + typedef std::vector DirectFile; + // DirectFile[entry_id] --> [ directentry, ... 
] + +protected: + + /// Associated vocabulary + Vocabulary *m_voc; + + /// Flag to use direct index + bool m_use_di; + + /// Levels to go up the vocabulary tree to select nodes to store + /// in the direct index + int m_dilevels; + + /// Inverted file (must have size() == |words|) + InvertedFile m_ifile; + + /// Direct file (resized for allocation) + DirectFile m_dfile; + + /// Number of valid entries in m_dfile + int m_nentries; + +}; + + + +// -------------------------------------------------------------------------- + +} // namespace DBoW3 + +#endif diff --git a/Thirdparty/DBoW2/DBoW3/src/DescManip.cpp b/Thirdparty/DBoW2/DBoW3/src/DescManip.cpp new file mode 100644 index 0000000000..5baab7bdb5 --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/src/DescManip.cpp @@ -0,0 +1,286 @@ +/** + * File: DescManip.cpp + * Date: June 2012 + * Author: Dorian Galvez-Lopez + * Description: functions for ORB descriptors + * License: see the LICENSE.txt file + * + */ + +#include +#include +#include +#include +#include +#include + +#include "DescManip.h" + +using namespace std; + +namespace DBoW3 { + +// -------------------------------------------------------------------------- + +void DescManip::meanValue(const std::vector &descriptors, + cv::Mat &mean) +{ + + if(descriptors.empty()) return; + + if(descriptors.size() == 1) + { + mean = descriptors[0].clone(); + return; + } + //binary descriptor + if (descriptors[0].type()==CV_8U ){ + //determine number of bytes of the binary descriptor + int L= getDescSizeBytes( descriptors[0]); + vector sum( L * 8, 0); + + for(size_t i = 0; i < descriptors.size(); ++i) + { + const cv::Mat &d = descriptors[i]; + const unsigned char *p = d.ptr(); + + for(int j = 0; j < d.cols; ++j, ++p) + { + if(*p & (1 << 7)) ++sum[ j*8 ]; + if(*p & (1 << 6)) ++sum[ j*8 + 1 ]; + if(*p & (1 << 5)) ++sum[ j*8 + 2 ]; + if(*p & (1 << 4)) ++sum[ j*8 + 3 ]; + if(*p & (1 << 3)) ++sum[ j*8 + 4 ]; + if(*p & (1 << 2)) ++sum[ j*8 + 5 ]; + if(*p & (1 << 1)) ++sum[ j*8 + 6 ]; + 
if(*p & (1)) ++sum[ j*8 + 7 ]; + } + } + + mean = cv::Mat::zeros(1, L, CV_8U); + unsigned char *p = mean.ptr(); + + const int N2 = (int)descriptors.size() / 2 + descriptors.size() % 2; + for(size_t i = 0; i < sum.size(); ++i) + { + if(sum[i] >= N2) + { + // set bit + *p |= 1 << (7 - (i % 8)); + } + + if(i % 8 == 7) ++p; + } + } + //non binary descriptor + else{ + assert(descriptors[0].type()==CV_32F );//ensure it is float + + mean.create(1, descriptors[0].cols,descriptors[0].type()); + mean.setTo(cv::Scalar::all(0)); + float inv_s =1./double( descriptors.size()); + for(size_t i=0;i(); // a & b are actually CV_8U + pb = b.ptr(); + + uint64_t v, ret = 0; + for(size_t i = 0; i < a.cols / sizeof(uint64_t); ++i, ++pa, ++pb) + { + v = *pa ^ *pb; + v = v - ((v >> 1) & (uint64_t)~(uint64_t)0/3); + v = (v & (uint64_t)~(uint64_t)0/15*3) + ((v >> 2) & + (uint64_t)~(uint64_t)0/15*3); + v = (v + (v >> 4)) & (uint64_t)~(uint64_t)0/255*15; + ret += (uint64_t)(v * ((uint64_t)~(uint64_t)0/255)) >> + (sizeof(uint64_t) - 1) * CHAR_BIT; + } + + return ret; + } + else{ + double sqd = 0.; + assert(a.type()==CV_32F); + assert(a.rows==1); + const float *a_ptr=a.ptr(0); + const float *b_ptr=b.ptr(0); + for(int i = 0; i < a.cols; i ++) + sqd += (a_ptr[i ] - b_ptr[i ])*(a_ptr[i ] - b_ptr[i ]); + return sqd; + } +} + + + + +// -------------------------------------------------------------------------- + +std::string DescManip::toString(const cv::Mat &a) +{ + stringstream ss; + //introduce a magic value to distinguish from DBOw2 + ss<<"dbw3 "; + //save size and type + + + ss <(); + for(int i = 0; i < a.cols; ++i, ++p) + ss << (int)*p << " "; + }else{ + + const float *p = a.ptr(); + for(int i = 0; i < a.cols; ++i, ++p) + ss << *p << " "; + + } + + return ss.str(); +} + +// -------------------------------------------------------------------------- + +void DescManip::fromString(cv::Mat &a, const std::string &s) +{ + + //check if the dbow3 is present + string ss_aux;ss_aux.reserve(10); + for(size_t 
i=0;i<10 && i data;data.reserve(100); + while( ss>>val) data.push_back(val); + //copy to a + a.create(1,data.size(),CV_8UC1); + memcpy(a.ptr(0),&data[0],data.size()); + } + else { + char szSign[10]; + int type,cols; + stringstream ss(s); + ss >> szSign >> type >> cols; + a.create(1, cols, type); + if(type==CV_8UC1){ + unsigned char *p = a.ptr(); + int n; + for(int i = 0; i < a.cols; ++i, ++p) + if ( ss >> n) *p = (unsigned char)n; + } + else{ + float *p = a.ptr(); + for(int i = 0; i < a.cols; ++i, ++p) + if ( !(ss >> *p))cerr<<"Error reading. Unexpected EOF. DescManip::fromString"< &descriptors, + cv::Mat &mat) +{ + if(descriptors.empty()) + { + mat.release(); + return; + } + + if(descriptors[0].type()==CV_8UC1){ + + const size_t N = descriptors.size(); + int L=getDescSizeBytes(descriptors[0]); + mat.create(N, L*8, CV_32F); + float *p = mat.ptr(); + + for(size_t i = 0; i < N; ++i) + { + const int C = descriptors[i].cols; + const unsigned char *desc = descriptors[i].ptr(); + + for(int j = 0; j < C; ++j, p += 8) + { + p[0] = (desc[j] & (1 << 7) ? 1 : 0); + p[1] = (desc[j] & (1 << 6) ? 1 : 0); + p[2] = (desc[j] & (1 << 5) ? 1 : 0); + p[3] = (desc[j] & (1 << 4) ? 1 : 0); + p[4] = (desc[j] & (1 << 3) ? 1 : 0); + p[5] = (desc[j] & (1 << 2) ? 1 : 0); + p[6] = (desc[j] & (1 << 1) ? 
1 : 0); + p[7] = desc[j] & (1); + } + } + } + else{ + assert(descriptors[0].type()==CV_32F); + const int N = descriptors.size(); + int L=descriptors[0].cols; + mat.create(N, L, CV_32F); + for(int i = 0; i < N; ++i) + memcpy(mat.ptr(i),descriptors[i].ptr(0),sizeof(float)*L); + } +} + +void DescManip::toStream(const cv::Mat &m,std::ostream &str){ + assert(m.rows==1 || m.isContinuous()); + int type=m.type(); + int cols=m.cols; + int rows=m.rows; + str.write((char*)&cols,sizeof(cols)); + str.write((char*)&rows,sizeof(rows)); + str.write((char*)&type,sizeof(type)); + str.write((char*)m.ptr(0),m.elemSize()*m.cols); +} + +void DescManip::fromStream(cv::Mat &m,std::istream &str){ + int type,cols,rows; + str.read((char*)&cols,sizeof(cols)); + str.read((char*)&rows,sizeof(rows)); + str.read((char*)&type,sizeof(type)); + m.create(rows,cols,type); + str.read((char*)m.ptr(0),m.elemSize()*m.cols); +} + +std::string DescManip::LegacytoString(const cv::Mat &a) +{ + stringstream ss; + const unsigned char *p = a.ptr(); + + for(int i = 0; i < a.cols; ++i, ++p) + { + ss << (int)*p << " "; + } + + return ss.str(); +} + + +// -------------------------------------------------------------------------- + +} // namespace DBoW3 + diff --git a/Thirdparty/DBoW2/DBoW3/src/DescManip.h b/Thirdparty/DBoW2/DBoW3/src/DescManip.h new file mode 100644 index 0000000000..cb6e664506 --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/src/DescManip.h @@ -0,0 +1,101 @@ +/** + * File: FClass.h + * Date: November 2011 + * Author: Dorian Galvez-Lopez + * Description: generic FClass to instantiate templated classes + * License: see the LICENSE.txt file + * + */ + +#ifndef __D_T_DESCMANIP__ +#define __D_T_DESCMANIP__ + +#include +#include +#include +#include "exports.h" + +namespace DBoW3 { + +/// Class to manipulate descriptors (calculating means, differences and IO routines) +class DBOW_API DescManip +{ +public: + /** + * Calculates the mean value of a set of descriptors + * @param descriptors + * @param mean mean 
descriptor + */ + static void meanValue(const std::vector &descriptors, + cv::Mat &mean) ; + + /** + * Calculates the distance between two descriptors + * @param a + * @param b + * @return distance + */ + static double distance(const cv::Mat &a, const cv::Mat &b); + static inline uint32_t distance_8uc1(const cv::Mat &a, const cv::Mat &b); + + /** + * Returns a string version of the descriptor + * @param a descriptor + * @return string version + */ + static std::string toString(const cv::Mat &a); + + /** + * Returns a descriptor from a string + * @param a descriptor + * @param s string version + */ + static void fromString(cv::Mat &a, const std::string &s); + + /** + * Returns a mat with the descriptors in float format + * @param descriptors + * @param mat (out) NxL 32F matrix + */ + static void toMat32F(const std::vector &descriptors, + cv::Mat &mat); + + /**io routines*/ + static void toStream(const cv::Mat &m,std::ostream &str); + static void fromStream(cv::Mat &m,std::istream &str); +public: + /**Returns the number of bytes of the descriptor + * used for binary descriptors only*/ + static size_t getDescSizeBytes(const cv::Mat & d){return d.cols* d.elemSize();} + + static std::string LegacytoString(const cv::Mat &a); +}; + +uint32_t DescManip::distance_8uc1(const cv::Mat &a, const cv::Mat &b){ + //binary descriptor + + // Bit count function got from: + // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetKernighan + // This implementation assumes that a.cols (CV_8U) % sizeof(uint64_t) == 0 + + const uint64_t *pa, *pb; + pa = a.ptr(); // a & b are actually CV_8U + pb = b.ptr(); + + uint64_t v, ret = 0; + int n=a.cols / sizeof(uint64_t); + for(size_t i = 0; i < n; ++i, ++pa, ++pb) + { + v = *pa ^ *pb; + v = v - ((v >> 1) & (uint64_t)~(uint64_t)0/3); + v = (v & (uint64_t)~(uint64_t)0/15*3) + ((v >> 2) & + (uint64_t)~(uint64_t)0/15*3); + v = (v + (v >> 4)) & (uint64_t)~(uint64_t)0/255*15; + ret += (uint64_t)(v * ((uint64_t)~(uint64_t)0/255)) >> + 
(sizeof(uint64_t) - 1) * CHAR_BIT; + } + return ret; +} +} // namespace DBoW3 + +#endif diff --git a/Thirdparty/DBoW2/DBoW2/FeatureVector.cpp b/Thirdparty/DBoW2/DBoW3/src/FeatureVector.cpp similarity index 97% rename from Thirdparty/DBoW2/DBoW2/FeatureVector.cpp rename to Thirdparty/DBoW2/DBoW3/src/FeatureVector.cpp index c055a15767..880eab1971 100644 --- a/Thirdparty/DBoW2/DBoW2/FeatureVector.cpp +++ b/Thirdparty/DBoW2/DBoW3/src/FeatureVector.cpp @@ -12,7 +12,7 @@ #include #include -namespace DBoW2 { +namespace DBoW3 { // --------------------------------------------------------------------------- @@ -82,4 +82,4 @@ std::ostream& operator<<(std::ostream &out, // --------------------------------------------------------------------------- -} // namespace DBoW2 +} // namespace DBoW3 diff --git a/Thirdparty/DBoW2/DBoW2/FeatureVector.h b/Thirdparty/DBoW2/DBoW3/src/FeatureVector.h similarity index 76% rename from Thirdparty/DBoW2/DBoW2/FeatureVector.h rename to Thirdparty/DBoW2/DBoW3/src/FeatureVector.h index 426f36dd39..7321a08b04 100644 --- a/Thirdparty/DBoW2/DBoW2/FeatureVector.h +++ b/Thirdparty/DBoW2/DBoW3/src/FeatureVector.h @@ -12,24 +12,26 @@ #include "BowVector.h" #include +#include #include -#include +#include "exports.h" #include #include -namespace DBoW2 { +namespace DBoW3 { /// Vector of nodes with indexes of local features -class FeatureVector: +class DBOW_API FeatureVector: public std::map > { - friend class boost::serialization::access; - template - void serialize(Archive& ar, const int version) - { - ar & boost::serialization::base_object > >(*this); - } + + friend class boost::serialization::access; + template + void serialize(Archive& ar, const int version) + { + ar & boost::serialization::base_object > >(*this); + } public: @@ -60,7 +62,7 @@ class FeatureVector: }; -} // namespace DBoW2 +} // namespace DBoW3 #endif diff --git a/Thirdparty/DBoW2/DBoW3/src/QueryResults.cpp b/Thirdparty/DBoW2/DBoW3/src/QueryResults.cpp new file mode 100644 index 
0000000000..7062400633 --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/src/QueryResults.cpp @@ -0,0 +1,63 @@ +/** + * File: QueryResults.cpp + * Date: March, November 2011 + * Author: Dorian Galvez-Lopez + * Description: structure to store results of database queries + * License: see the LICENSE.txt file + * + */ + +#include +#include +#include "QueryResults.h" + +using namespace std; + +namespace DBoW3 +{ + +// --------------------------------------------------------------------------- + +ostream & operator<<(ostream& os, const Result& ret ) +{ + os << ""; + return os; +} + +// --------------------------------------------------------------------------- + +ostream & operator<<(ostream& os, const QueryResults& ret ) +{ + if(ret.size() == 1) + os << "1 result:" << endl; + else + os << ret.size() << " results:" << endl; + + QueryResults::const_iterator rit; + for(rit = ret.begin(); rit != ret.end(); ++rit) + { + os << *rit; + if(rit + 1 != ret.end()) os << endl; + } + return os; +} + +// --------------------------------------------------------------------------- + +void QueryResults::saveM(const std::string &filename) const +{ + fstream f(filename.c_str(), ios::out); + + QueryResults::const_iterator qit; + for(qit = begin(); qit != end(); ++qit) + { + f << qit->Id << " " << qit->Score << endl; + } + + f.close(); +} + +// --------------------------------------------------------------------------- + +} // namespace DBoW3 + diff --git a/Thirdparty/DBoW2/DBoW3/src/QueryResults.h b/Thirdparty/DBoW2/DBoW3/src/QueryResults.h new file mode 100644 index 0000000000..bebbd1e7de --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/src/QueryResults.h @@ -0,0 +1,205 @@ +/** + * File: QueryResults.h + * Date: March, November 2011 + * Author: Dorian Galvez-Lopez + * Description: structure to store results of database queries + * License: see the LICENSE.txt file + * + */ + +#ifndef __D_T_QUERY_RESULTS__ +#define __D_T_QUERY_RESULTS__ + +#include +#include "exports.h" +namespace DBoW3 { + +/// Id of 
entries of the database +typedef unsigned int EntryId; + +/// Single result of a query +class DBOW_API Result +{ +public: + + /// Entry id + EntryId Id; + + /// Score obtained + double Score; + + /// debug + int nWords; // words in common + // !!! this is filled only by Bhatt score! + // (and for BCMatching, BCThresholding then) + + double bhatScore, chiScore; + /// debug + + // only done by ChiSq and BCThresholding + double sumCommonVi; + double sumCommonWi; + double expectedChiScore; + /// debug + + /** + * Empty constructors + */ + inline Result(){} + + /** + * Creates a result with the given data + * @param _id entry id + * @param _score score + */ + inline Result(EntryId _id, double _score): Id(_id), Score(_score){} + + /** + * Compares the scores of two results + * @return true iff this.score < r.score + */ + inline bool operator<(const Result &r) const + { + return this->Score < r.Score; + } + + /** + * Compares the scores of two results + * @return true iff this.score > r.score + */ + inline bool operator>(const Result &r) const + { + return this->Score > r.Score; + } + + /** + * Compares the entry id of the result + * @return true iff this.id == id + */ + inline bool operator==(EntryId id) const + { + return this->Id == id; + } + + /** + * Compares the score of this entry with a given one + * @param s score to compare with + * @return true iff this score < s + */ + inline bool operator<(double s) const + { + return this->Score < s; + } + + /** + * Compares the score of this entry with a given one + * @param s score to compare with + * @return true iff this score > s + */ + inline bool operator>(double s) const + { + return this->Score > s; + } + + /** + * Compares the score of two results + * @param a + * @param b + * @return true iff a.Score > b.Score + */ + static inline bool gt(const Result &a, const Result &b) + { + return a.Score > b.Score; + } + + /** + * Compares the scores of two results + * @return true iff a.Score > b.Score + */ + inline static 
bool ge(const Result &a, const Result &b) + { + return a.Score > b.Score; + } + + /** + * Returns true iff a.Score >= b.Score + * @param a + * @param b + * @return true iff a.Score >= b.Score + */ + static inline bool geq(const Result &a, const Result &b) + { + return a.Score >= b.Score; + } + + /** + * Returns true iff a.Score >= s + * @param a + * @param s + * @return true iff a.Score >= s + */ + static inline bool geqv(const Result &a, double s) + { + return a.Score >= s; + } + + + /** + * Returns true iff a.Id < b.Id + * @param a + * @param b + * @return true iff a.Id < b.Id + */ + static inline bool ltId(const Result &a, const Result &b) + { + return a.Id < b.Id; + } + + /** + * Prints a string version of the result + * @param os ostream + * @param ret Result to print + */ + friend std::ostream & operator<<(std::ostream& os, const Result& ret ); +}; + +/// Multiple results from a query +class QueryResults: public std::vector +{ +public: + + /** + * Multiplies all the scores in the vector by factor + * @param factor + */ + inline void scaleScores(double factor); + + /** + * Prints a string version of the results + * @param os ostream + * @param ret QueryResults to print + */ + DBOW_API friend std::ostream & operator<<(std::ostream& os, const QueryResults& ret ); + + /** + * Saves a matlab file with the results + * @param filename + */ + void saveM(const std::string &filename) const; + +}; + +// -------------------------------------------------------------------------- + +inline void QueryResults::scaleScores(double factor) +{ + for(QueryResults::iterator qit = begin(); qit != end(); ++qit) + qit->Score *= factor; +} + +// -------------------------------------------------------------------------- + +} // namespace TemplatedBoW + +#endif + diff --git a/Thirdparty/DBoW2/DBoW2/ScoringObject.cpp b/Thirdparty/DBoW2/DBoW3/src/ScoringObject.cpp similarity index 99% rename from Thirdparty/DBoW2/DBoW2/ScoringObject.cpp rename to 
Thirdparty/DBoW2/DBoW3/src/ScoringObject.cpp index 063a96e87d..7cf08123e7 100644 --- a/Thirdparty/DBoW2/DBoW2/ScoringObject.cpp +++ b/Thirdparty/DBoW2/DBoW3/src/ScoringObject.cpp @@ -8,10 +8,10 @@ */ #include -#include "TemplatedVocabulary.h" +#include "Vocabulary.h" #include "BowVector.h" -using namespace DBoW2; +using namespace DBoW3; // If you change the type of WordValue, make sure you change also the // epsilon value (this is needed by the KL method) diff --git a/Thirdparty/DBoW2/DBoW2/ScoringObject.h b/Thirdparty/DBoW2/DBoW3/src/ScoringObject.h similarity index 96% rename from Thirdparty/DBoW2/DBoW2/ScoringObject.h rename to Thirdparty/DBoW2/DBoW3/src/ScoringObject.h index 8d5b82192a..8d6c64e63d 100644 --- a/Thirdparty/DBoW2/DBoW2/ScoringObject.h +++ b/Thirdparty/DBoW2/DBoW3/src/ScoringObject.h @@ -11,11 +11,11 @@ #define __D_T_SCORING_OBJECT__ #include "BowVector.h" - -namespace DBoW2 { +#include "exports.h" +namespace DBoW3 { /// Base class of scoring functions -class GeneralScoring +class DBOW_API GeneralScoring { public: /** @@ -39,9 +39,8 @@ class GeneralScoring static const double LOG_EPS; // If you change the type of WordValue, make sure you change also the // epsilon value (this is needed by the KL method) - - virtual ~GeneralScoring() {} //!< Required for virtual base classes + virtual ~GeneralScoring() {} //!< Required for virtual base classes }; /** @@ -90,7 +89,7 @@ class __SCORING_CLASS(DotProductScoring, false, L1); #undef __SCORING_CLASS -} // namespace DBoW2 +} // namespace DBoW3 #endif diff --git a/Thirdparty/DBoW2/DBoW3/src/Vocabulary.cpp b/Thirdparty/DBoW2/DBoW3/src/Vocabulary.cpp new file mode 100644 index 0000000000..ae57e87eba --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/src/Vocabulary.cpp @@ -0,0 +1,1546 @@ +#include "Vocabulary.h" +#include "DescManip.h" +#include "quicklz.h" +#include "../../DUtils/Random.h" +#include +//#include "timers.h" +namespace DBoW3{ +// -------------------------------------------------------------------------- 
+ + +Vocabulary::Vocabulary + (int k, int L, WeightingType weighting, ScoringType scoring) + : m_k(k), m_L(L), m_weighting(weighting), m_scoring(scoring), + m_scoring_object(NULL) +{ + createScoringObject(); +} + +// -------------------------------------------------------------------------- + + +Vocabulary::Vocabulary + (const std::string &filename): m_scoring_object(NULL) +{ + load(filename); +} + +// -------------------------------------------------------------------------- + + +Vocabulary::Vocabulary + (const char *filename): m_scoring_object(NULL) +{ + load(filename); +} + +// -------------------------------------------------------------------------- + + +Vocabulary::Vocabulary + (std::istream& stream): m_scoring_object(NULL) +{ + load(stream); +} + +// -------------------------------------------------------------------------- + + +void Vocabulary::createScoringObject() +{ + delete m_scoring_object; + m_scoring_object = NULL; + + switch(m_scoring) + { + case L1_NORM: + m_scoring_object = new L1Scoring; + break; + + case L2_NORM: + m_scoring_object = new L2Scoring; + break; + + case CHI_SQUARE: + m_scoring_object = new ChiSquareScoring; + break; + + case KL: + m_scoring_object = new KLScoring; + break; + + case BHATTACHARYYA: + m_scoring_object = new BhattacharyyaScoring; + break; + + case DOT_PRODUCT: + m_scoring_object = new DotProductScoring; + break; + + } +} + +// -------------------------------------------------------------------------- + + +void Vocabulary::setScoringType(ScoringType type) +{ + m_scoring = type; + createScoringObject(); +} + +// -------------------------------------------------------------------------- + + +void Vocabulary::setWeightingType(WeightingType type) +{ + this->m_weighting = type; +} + +// -------------------------------------------------------------------------- + + +Vocabulary::Vocabulary( + const Vocabulary &voc) + : m_scoring_object(NULL) +{ + *this = voc; +} + +// 
-------------------------------------------------------------------------- + + +Vocabulary::~Vocabulary() +{ + delete m_scoring_object; +} + +// -------------------------------------------------------------------------- + + +Vocabulary& +Vocabulary::operator= + (const Vocabulary &voc) +{ + this->m_k = voc.m_k; + this->m_L = voc.m_L; + this->m_scoring = voc.m_scoring; + this->m_weighting = voc.m_weighting; + + this->createScoringObject(); + + this->m_nodes.clear(); + this->m_words.clear(); + + this->m_nodes = voc.m_nodes; + this->createWords(); + + return *this; +} + + + +void Vocabulary::create( + const std::vector< cv::Mat > &training_features) +{ + std::vector > vtf(training_features.size()); + for(size_t i=0;i > &training_features) +{ + m_nodes.clear(); + m_words.clear(); + + // expected_nodes = Sum_{i=0..L} ( k^i ) + int expected_nodes = + (int)((pow((double)m_k, (double)m_L + 1) - 1)/(m_k - 1)); + + m_nodes.reserve(expected_nodes); // avoid allocations when creating the tree + + + std::vector features; + getFeatures(training_features, features); + + + // create root + m_nodes.push_back(Node(0)); // root + + // create the tree + HKmeansStep(0, features, 1); + + // create the words + createWords(); + + // and set the weight of each node of the tree + setNodeWeights(training_features); + +} + +// -------------------------------------------------------------------------- + + +void Vocabulary::create( + const std::vector > &training_features, + int k, int L) +{ + m_k = k; + m_L = L; + + create(training_features); +} + +// -------------------------------------------------------------------------- + + +void Vocabulary::create( + const std::vector > &training_features, + int k, int L, WeightingType weighting, ScoringType scoring) +{ + m_k = k; + m_L = L; + m_weighting = weighting; + m_scoring = scoring; + createScoringObject(); + + create(training_features); +} + +// -------------------------------------------------------------------------- + + +void 
Vocabulary::getFeatures( + const std::vector > &training_features, + std::vector &features) const +{ + features.resize(0); + for(size_t i=0;i &descriptors, int current_level) +{ + + if(descriptors.empty()) return; + + // features associated to each cluster + std::vector clusters; + std::vector > groups; // groups[i] = [j1, j2, ...] + // j1, j2, ... indices of descriptors associated to cluster i + + clusters.reserve(m_k); + groups.reserve(m_k); + + + if((int)descriptors.size() <= m_k) + { + // trivial case: one cluster per feature + groups.resize(descriptors.size()); + + for(unsigned int i = 0; i < descriptors.size(); i++) + { + groups[i].push_back(i); + clusters.push_back(descriptors[i]); + } + } + else + { + // select clusters and groups with kmeans + + bool first_time = true; + bool goon = true; + + // to check if clusters move after iterations + std::vector last_association, current_association; + + while(goon) + { + // 1. Calculate clusters + + if(first_time) + { + // random sample + initiateClusters(descriptors, clusters); + } + else + { + // calculate cluster centres + + for(unsigned int c = 0; c < clusters.size(); ++c) + { + std::vector cluster_descriptors; + cluster_descriptors.reserve(groups[c].size()); + std::vector::const_iterator vit; + for(vit = groups[c].begin(); vit != groups[c].end(); ++vit) + { + cluster_descriptors.push_back(descriptors[*vit]); + } + + DescManip::meanValue(cluster_descriptors, clusters[c]); + } + + } // if(!first_time) + + // 2. 
Associate features with clusters + + // calculate distances to cluster centers + groups.clear(); + groups.resize(clusters.size(), std::vector()); + current_association.resize(descriptors.size()); + + //assoc.clear(); + + //unsigned int d = 0; + for(auto fit = descriptors.begin(); fit != descriptors.end(); ++fit)//, ++d) + { + double best_dist = DescManip::distance((*fit), clusters[0]); + unsigned int icluster = 0; + + for(unsigned int c = 1; c < clusters.size(); ++c) + { + double dist = DescManip::distance((*fit), clusters[c]); + if(dist < best_dist) + { + best_dist = dist; + icluster = c; + } + } + + //assoc.ref(icluster, d) = 1; + + groups[icluster].push_back(fit - descriptors.begin()); + current_association[ fit - descriptors.begin() ] = icluster; + } + + // kmeans++ ensures all the clusters has any feature associated with them + + // 3. check convergence + if(first_time) + { + first_time = false; + } + else + { + //goon = !eqUChar(last_assoc, assoc); + + goon = false; + for(unsigned int i = 0; i < current_association.size(); i++) + { + if(current_association[i] != last_association[i]){ + goon = true; + break; + } + } + } + + if(goon) + { + // copy last feature-cluster association + last_association = current_association; + //last_assoc = assoc.clone(); + } + + } // while(goon) + + } // if must run kmeans + + // create nodes + for(unsigned int i = 0; i < clusters.size(); ++i) + { + NodeId id = m_nodes.size(); + m_nodes.push_back(Node(id)); + m_nodes.back().descriptor = clusters[i]; + m_nodes.back().parent = parent_id; + m_nodes[parent_id].children.push_back(id); + } + + // go on with the next level + if(current_level < m_L) + { + // iterate again with the resulting clusters + const std::vector &children_ids = m_nodes[parent_id].children; + for(unsigned int i = 0; i < clusters.size(); ++i) + { + NodeId id = children_ids[i]; + + std::vector child_features; + child_features.reserve(groups[i].size()); + + std::vector::const_iterator vit; + for(vit = 
groups[i].begin(); vit != groups[i].end(); ++vit) + { + child_features.push_back(descriptors[*vit]); + } + + if(child_features.size() > 1) + { + HKmeansStep(id, child_features, current_level + 1); + } + } + } +} + +// -------------------------------------------------------------------------- + + +void Vocabulary::initiateClusters + (const std::vector &descriptors, + std::vector &clusters) const +{ + initiateClustersKMpp(descriptors, clusters); +} + +// -------------------------------------------------------------------------- + + +void Vocabulary::initiateClustersKMpp( + const std::vector &pfeatures, + std::vector &clusters) const +{ + // Implements kmeans++ seeding algorithm + // Algorithm: + // 1. Choose one center uniformly at random from among the data points. + // 2. For each data point x, compute D(x), the distance between x and the nearest + // center that has already been chosen. + // 3. Add one new data point as a center. Each point x is chosen with probability + // proportional to D(x)^2. + // 4. Repeat Steps 2 and 3 until k centers have been chosen. + // 5. Now that the initial centers have been chosen, proceed using standard k-means + // clustering. + + +// DUtils::Random::SeedRandOnce(); + + clusters.resize(0); + clusters.reserve(m_k); + std::vector min_dists(pfeatures.size(), std::numeric_limits::max()); + + // 1. + + int ifeature = rand()% pfeatures.size();//DUtils::Random::RandomInt(0, pfeatures.size()-1); + + // create first cluster + clusters.push_back(pfeatures[ifeature]); + + // compute the initial distances + std::vector::iterator dit; + dit = min_dists.begin(); + for(auto fit = pfeatures.begin(); fit != pfeatures.end(); ++fit, ++dit) + { + *dit = DescManip::distance((*fit), clusters.back()); + } + + while((int)clusters.size() < m_k) + { + // 2. 
+ dit = min_dists.begin(); + for(auto fit = pfeatures.begin(); fit != pfeatures.end(); ++fit, ++dit) + { + if(*dit > 0) + { + double dist = DescManip::distance((*fit), clusters.back()); + if(dist < *dit) *dit = dist; + } + } + + // 3. + double dist_sum = std::accumulate(min_dists.begin(), min_dists.end(), 0.0); + + if(dist_sum > 0) + { + double cut_d; + do + { + + cut_d = (double(rand())/ double(RAND_MAX))* dist_sum; + } while(cut_d == 0.0); + + double d_up_now = 0; + for(dit = min_dists.begin(); dit != min_dists.end(); ++dit) + { + d_up_now += *dit; + if(d_up_now >= cut_d) break; + } + + if(dit == min_dists.end()) + ifeature = pfeatures.size()-1; + else + ifeature = dit - min_dists.begin(); + + + clusters.push_back(pfeatures[ifeature]); + } // if dist_sum > 0 + else + break; + + } // while(used_clusters < m_k) + +} + +// -------------------------------------------------------------------------- + + +void Vocabulary::createWords() +{ + m_words.resize(0); + + if(!m_nodes.empty()) + { + m_words.reserve( (int)pow((double)m_k, (double)m_L) ); + + + auto nit = m_nodes.begin(); // ignore root + for(++nit; nit != m_nodes.end(); ++nit) + { + if(nit->isLeaf()) + { + nit->word_id = m_words.size(); + m_words.push_back( &(*nit) ); + } + } + } +} + +// -------------------------------------------------------------------------- + + +void Vocabulary::setNodeWeights + (const std::vector > &training_features) +{ + const unsigned int NWords = m_words.size(); + const unsigned int NDocs = training_features.size(); + + if(m_weighting == TF || m_weighting == BINARY) + { + // idf part must be 1 always + for(unsigned int i = 0; i < NWords; i++) + m_words[i]->weight = 1; + } + else if(m_weighting == IDF || m_weighting == TF_IDF) + { + // IDF and TF-IDF: we calculte the idf path now + + // Note: this actually calculates the idf part of the tf-idf score. 
+ // The complete tf-idf score is calculated in ::transform + + std::vector Ni(NWords, 0); + std::vector counted(NWords, false); + + + for(auto mit = training_features.begin(); mit != training_features.end(); ++mit) + { + fill(counted.begin(), counted.end(), false); + + for(auto fit = mit->begin(); fit < mit->end(); ++fit) + { + WordId word_id; + transform(*fit, word_id); + + if(!counted[word_id]) + { + Ni[word_id]++; + counted[word_id] = true; + } + } + } + + // set ln(N/Ni) + for(unsigned int i = 0; i < NWords; i++) + { + if(Ni[i] > 0) + { + m_words[i]->weight = log((double)NDocs / (double)Ni[i]); + }// else // This cannot occur if using kmeans++ + } + + } + +} + +// -------------------------------------------------------------------------- + + + + + + +// -------------------------------------------------------------------------- + + +float Vocabulary::getEffectiveLevels() const +{ + long sum = 0; + for(auto wit = m_words.begin(); wit != m_words.end(); ++wit) + { + const Node *p = *wit; + + for(; p->id != 0; sum++) p = &m_nodes[p->parent]; + } + + return (float)((double)sum / (double)m_words.size()); +} + +// -------------------------------------------------------------------------- + + +cv::Mat Vocabulary::getWord(WordId wid) const +{ + return m_words[wid]->descriptor; +} + +// -------------------------------------------------------------------------- + + +WordValue Vocabulary::getWordWeight(WordId wid) const +{ + return m_words[wid]->weight; +} + +// -------------------------------------------------------------------------- + + +WordId Vocabulary::transform + (const cv::Mat& feature) const +{ + if(empty()) + { + return 0; + } + + WordId wid; + transform(feature, wid); + return wid; +} + +// -------------------------------------------------------------------------- + +void Vocabulary::transform( + const cv::Mat& features, BowVector &v) const +{ + // std::vector vf(features.rows); + // for(int r=0;rmustNormalize(norm); + + + if(m_weighting == TF || m_weighting == 
TF_IDF) + { + for(int r=0;r 0) v.addWeight(id, w); + } + + if(!v.empty() && !must) + { + // unnecessary when normalizing + const double nd = v.size(); + for(BowVector::iterator vit = v.begin(); vit != v.end(); vit++) + vit->second /= nd; + } + + } + else // IDF || BINARY + { + for(int r=0;r 0) v.addIfNotExist(id, w); + + } // if add_features + } // if m_weighting == ... + + if(must) v.normalize(norm); + +} + + + +void Vocabulary::transform( + const std::vector& features, BowVector &v) const +{ + v.clear(); + + if(empty()) + { + return; + } + + // normalize + LNorm norm; + bool must = m_scoring_object->mustNormalize(norm); + + + if(m_weighting == TF || m_weighting == TF_IDF) + { + for(auto fit = features.begin(); fit < features.end(); ++fit) + { + WordId id; + WordValue w; + // w is the idf value if TF_IDF, 1 if TF + + transform(*fit, id, w); + + // not stopped + if(w > 0) v.addWeight(id, w); + } + + if(!v.empty() && !must) + { + // unnecessary when normalizing + const double nd = v.size(); + for(BowVector::iterator vit = v.begin(); vit != v.end(); vit++) + vit->second /= nd; + } + + } + else // IDF || BINARY + { + for(auto fit = features.begin(); fit < features.end(); ++fit) + { + WordId id; + WordValue w; + // w is idf if IDF, or 1 if BINARY + + transform(*fit, id, w); + + // not stopped + if(w > 0) v.addIfNotExist(id, w); + + } // if add_features + } // if m_weighting == ... 
+ + if(must) v.normalize(norm); +} + +// -------------------------------------------------------------------------- + + +void Vocabulary::transform( + const std::vector& features, + BowVector &v, FeatureVector &fv, int levelsup) const +{ + v.clear(); + fv.clear(); + + if(empty()) // safe for subclasses + { + return; + } + + // normalize + LNorm norm; + bool must = m_scoring_object->mustNormalize(norm); + + + if(m_weighting == TF || m_weighting == TF_IDF) + { + unsigned int i_feature = 0; + for(auto fit = features.begin(); fit < features.end(); ++fit, ++i_feature) + { + WordId id; + NodeId nid; + WordValue w; + // w is the idf value if TF_IDF, 1 if TF + + transform(*fit, id, w, &nid, levelsup); + + if(w > 0) // not stopped + { + v.addWeight(id, w); + fv.addFeature(nid, i_feature); + } + } + + if(!v.empty() && !must) + { + // unnecessary when normalizing + const double nd = v.size(); + for(BowVector::iterator vit = v.begin(); vit != v.end(); vit++) + vit->second /= nd; + } + + } + else // IDF || BINARY + { + unsigned int i_feature = 0; + for(auto fit = features.begin(); fit < features.end(); ++fit, ++i_feature) + { + WordId id; + NodeId nid; + WordValue w; + // w is idf if IDF, or 1 if BINARY + + transform(*fit, id, w, &nid, levelsup); + + if(w > 0) // not stopped + { + v.addIfNotExist(id, w); + fv.addFeature(nid, i_feature); + } + } + } // if m_weighting == ... 
+ + if(must) v.normalize(norm); +} + +// -------------------------------------------------------------------------- + + +// -------------------------------------------------------------------------- + + +void Vocabulary::transform + (const cv::Mat &feature, WordId &id) const +{ + WordValue weight; + transform(feature, id, weight); +} + +// -------------------------------------------------------------------------- + + +void Vocabulary::transform(const cv::Mat &feature, + WordId &word_id, WordValue &weight, NodeId *nid, int levelsup) const +{ + // propagate the feature down the tree + + + // level at which the node must be stored in nid, if given + const int nid_level = m_L - levelsup; + if(nid_level <= 0 && nid != NULL) *nid = 0; // root + + NodeId final_id = 0; // root + int current_level = 0; + + do + { + ++current_level; + auto const &nodes = m_nodes[final_id].children; + double best_d = std::numeric_limits::max(); +// DescManip::distance(feature, m_nodes[final_id].descriptor); + + for(const auto &id:nodes) + { + double d = DescManip::distance(feature, m_nodes[id].descriptor); + if(d < best_d) + { + best_d = d; + final_id = id; + } + } + + if(nid != NULL && current_level == nid_level) + *nid = final_id; + + } while( !m_nodes[final_id].isLeaf() ); + + // turn node id into word id + word_id = m_nodes[final_id].word_id; + weight = m_nodes[final_id].weight; +} + + + +void Vocabulary::transform(const cv::Mat &feature, + WordId &word_id, WordValue &weight ) const +{ + // propagate the feature down the tree + + + // level at which the node must be stored in nid, if given + + NodeId final_id = 0; // root +//maximum speed by computing here distance and avoid calling to DescManip::distance + + //binary descriptor + // int ntimes=0; + if (feature.type()==CV_8U){ + do + { + auto const &nodes = m_nodes[final_id].children; + uint64_t best_d = std::numeric_limits::max(); + int idx=0,bestidx=0; + for(const auto &id:nodes) + { + //compute distance + // std::cout<::max(); + int 
idx = 0, bestidx = 0; + for (const auto &id : nodes) + { + //compute distance + // std::cout<(); +// for(int i=0;i(0); +// const float *b_ptr=b.ptr(0); +// for(int i = 0; i < a.cols; i ++) +// sqd += (a_ptr[i ] - b_ptr[i ])*(a_ptr[i ] - b_ptr[i ]); +// return sqd; +// } + + +// do +// { +// auto const &nodes = m_nodes[final_id].children; +// double best_d = std::numeric_limits::max(); + +// for(const auto &id:nodes) +// { +// double d = DescManip::distance(feature, m_nodes[id].descriptor); +// if(d < best_d) +// { +// best_d = d; +// final_id = id; +// } +// } +// } while( !m_nodes[final_id].isLeaf() ); + + // turn node id into word id + word_id = m_nodes[final_id].word_id; + weight = m_nodes[final_id].weight; +} +// -------------------------------------------------------------------------- + +NodeId Vocabulary::getParentNode + (WordId wid, int levelsup) const +{ + NodeId ret = m_words[wid]->id; // node id + while(levelsup > 0 && ret != 0) // ret == 0 --> root + { + --levelsup; + ret = m_nodes[ret].parent; + } + return ret; +} + +// -------------------------------------------------------------------------- + + +void Vocabulary::getWordsFromNode + (NodeId nid, std::vector &words) const +{ + words.clear(); + + if(m_nodes[nid].isLeaf()) + { + words.push_back(m_nodes[nid].word_id); + } + else + { + words.reserve(m_k); // ^1, ^2, ... 
+ + std::vector parents; + parents.push_back(nid); + + while(!parents.empty()) + { + NodeId parentid = parents.back(); + parents.pop_back(); + + const std::vector &child_ids = m_nodes[parentid].children; + std::vector::const_iterator cit; + + for(cit = child_ids.begin(); cit != child_ids.end(); ++cit) + { + const Node &child_node = m_nodes[*cit]; + + if(child_node.isLeaf()) + words.push_back(child_node.word_id); + else + parents.push_back(*cit); + + } // for each child + } // while !parents.empty + } +} + +// -------------------------------------------------------------------------- + + +int Vocabulary::stopWords(double minWeight) +{ + int c = 0; + for(auto wit = m_words.begin(); wit != m_words.end(); ++wit) + { + if((*wit)->weight < minWeight) + { + ++c; + (*wit)->weight = 0; + } + } + return c; +} + +// -------------------------------------------------------------------------- + + +void Vocabulary::save(const std::string &filename, bool binary_compressed) const +{ + + if ( filename.find(".yml")==std::string::npos){ + std::ofstream file_out(filename,std::ios::binary); + if (!file_out) throw std::runtime_error("Vocabulary::saveBinary Could not open file :"+filename+" for writing"); + toStream(file_out,binary_compressed); + } + else{ + cv::FileStorage fs(filename.c_str(), cv::FileStorage::WRITE); + if(!fs.isOpened()) throw std::string("Could not open file ") + filename; + save(fs); + } +} + +// -------------------------------------------------------------------------- + + +void Vocabulary::load(const std::string &filename) +{ + //check first if it is a binary file + std::ifstream ifile(filename,std::ios::binary); + if (!ifile) throw std::runtime_error("Vocabulary::load Could not open file :"+filename+" for reading"); + if(!load(ifile)) { + if ( filename.find(".txt")!=std::string::npos) { + load_fromtxt(filename); + } else { + cv::FileStorage fs(filename.c_str(), cv::FileStorage::READ); + if(!fs.isOpened()) throw std::string("Could not open file ") + filename; + 
load(fs); + } + } +} + + +bool Vocabulary::load(std::istream &ifile) +{ + uint64_t sig;//magic number describing the file + ifile.read((char*)&sig,sizeof(sig)); + if (sig != 88877711233) // Check if it is a binary file. + return false; + + ifile.seekg(0,std::ios::beg); + fromStream(ifile); + return true; +} + + +void Vocabulary::save(cv::FileStorage &f, + const std::string &name) const +{ + + f << name << "{"; + + f << "k" << m_k; + f << "L" << m_L; + f << "scoringType" << m_scoring; + f << "weightingType" << m_weighting; + + // tree + f << "nodes" << "["; + std::vector parents, children; + std::vector::const_iterator pit; + + parents.push_back(0); // root + + while(!parents.empty()) + { + NodeId pid = parents.back(); + parents.pop_back(); + + const Node& parent = m_nodes[pid]; + children = parent.children; + + for(pit = children.begin(); pit != children.end(); pit++) + { + const Node& child = m_nodes[*pit]; + std::cout<id; + f << "}"; + } + + f << "]"; // words + + f << "}"; + +} + +void Vocabulary::toStream( std::ostream &out_str, bool compressed) const throw(std::exception){ + + uint64_t sig=88877711233;//magic number describing the file + out_str.write((char*)&sig,sizeof(sig)); + out_str.write((char*)&compressed,sizeof(compressed)); + uint32_t nnodes=m_nodes.size(); + out_str.write((char*)&nnodes,sizeof(nnodes)); + if (nnodes==0)return; + //save everything to a stream + std::stringstream aux_stream; + aux_stream.write((char*)&m_k,sizeof(m_k)); + aux_stream.write((char*)&m_L,sizeof(m_L)); + aux_stream.write((char*)&m_scoring,sizeof(m_scoring)); + aux_stream.write((char*)&m_weighting,sizeof(m_weighting)); + //nodes + std::vector parents={0};// root + + + while(!parents.empty()) + { + NodeId pid = parents.back(); + parents.pop_back(); + + const Node& parent = m_nodes[pid]; + + for(auto pit :parent.children) + { + + const Node& child = m_nodes[pit]; + aux_stream.write((char*)&child.id,sizeof(child.id)); + aux_stream.write((char*)&pid,sizeof(pid)); + 
aux_stream.write((char*)&child.weight,sizeof(child.weight)); + DescManip::toStream(child.descriptor,aux_stream); + // add to parent list + if(!child.isLeaf()) parents.push_back(pit); + } + } + //words + //save size + uint32_t m_words_size=m_words.size(); + aux_stream.write((char*)&m_words_size,sizeof(m_words_size)); + for(auto wit = m_words.begin(); wit != m_words.end(); wit++) + { + WordId id = wit - m_words.begin(); + aux_stream.write((char*)&id,sizeof(id)); + aux_stream.write((char*)&(*wit)->id,sizeof((*wit)->id)); + } + + + //now, decide if compress or not + if (compressed){ + qlz_state_compress state_compress; + memset(&state_compress, 0, sizeof(qlz_state_compress)); + //Create output buffer + int chunkSize=10000; + std::vector compressed( chunkSize+size_t(400), 0); + std::vector input( chunkSize, 0); + int64_t total_size= static_cast(aux_stream.tellp()); + uint64_t total_compress_size=0; + //calculate how many chunks will be written + uint32_t nChunks= total_size / chunkSize; + if ( total_size%chunkSize!=0) nChunks++; + out_str.write((char*)&nChunks, sizeof(nChunks)); + //start compressing the chunks + while (total_size != 0){ + int readSize=chunkSize; + if (total_size>m_k>>m_L>>n1>>n2; + } + if(m_k<0 || m_k>20 || m_L<1 || m_L>10 || n1<0 || n1>5 || n2<0 || n2>3) + throw std::runtime_error( "Vocabulary loading failure: This is not a correct text file!" 
); + + m_scoring = (ScoringType)n1; + m_weighting = (WeightingType)n2; + createScoringObject(); + // nodes + int expected_nodes = + (int)((pow((double)m_k, (double)m_L + 1) - 1)/(m_k - 1)); + m_nodes.reserve(expected_nodes); + + m_words.reserve(pow((double)m_k, (double)m_L + 1)); + + m_nodes.resize(1); + m_nodes[0].id = 0; + + int counter=0; + while(!ifile.eof()){ + std::string snode; + getline(ifile,snode); + if (counter++%100==0)std::cerr<<"."; + // std::cout<> pid; + m_nodes[nid].parent = pid; + m_nodes[pid].children.push_back(nid); + + int nIsLeaf; + ssnode >> nIsLeaf; + + //read until the end and add to data + std::vector data;data.reserve(100); + float d; + while( ssnode>>d) data.push_back(d); + //the weight is the last + m_nodes[nid].weight=data.back(); + data.pop_back();//remove + //the rest, to the descriptor + m_nodes[nid].descriptor.create(1,data.size(),CV_8UC1); + auto ptr=m_nodes[nid].descriptor.ptr(0); + for(auto d:data) *ptr++=d; + + + if(nIsLeaf>0) + { + int wid = m_words.size(); + m_words.resize(wid+1); + + m_nodes[nid].word_id = wid; + m_words[wid] = &m_nodes[nid]; + } + else + { + m_nodes[nid].children.reserve(m_k); + } + } +} +void Vocabulary::fromStream( std::istream &str ) throw(std::exception){ + + + m_words.clear(); + m_nodes.clear(); + uint64_t sig=0;//magic number describing the file + str.read((char*)&sig,sizeof(sig)); + if (sig!=88877711233) throw std::runtime_error("Vocabulary::fromStream is not of appropriate type"); + bool compressed; + str.read((char*)&compressed,sizeof(compressed)); + uint32_t nnodes; + str.read((char*)&nnodes,sizeof(nnodes)); + if(nnodes==0)return; + std::stringstream decompressed_stream; + std::istream *_used_str=0; + if (compressed){ + qlz_state_decompress state_decompress; + memset(&state_decompress, 0, sizeof(qlz_state_decompress)); + int chunkSize=10000; + std::vector decompressed(chunkSize); + std::vector input(chunkSize+400); + //read how many chunks are there + uint32_t nChunks; + 
str.read((char*)&nChunks,sizeof(nChunks)); + for(int i=0;iread((char*)&m_k,sizeof(m_k)); + _used_str->read((char*)&m_L,sizeof(m_L)); + _used_str->read((char*)&m_scoring,sizeof(m_scoring)); + _used_str->read((char*)&m_weighting,sizeof(m_weighting)); + + createScoringObject(); + m_nodes.resize(nnodes ); + m_nodes[0].id = 0; + + + + for(size_t i = 1; i < m_nodes.size(); ++i) + { + NodeId nid; + _used_str->read((char*)&nid,sizeof(NodeId)); + Node& child = m_nodes[nid]; + child.id=nid; + _used_str->read((char*)&child.parent,sizeof(child.parent)); + _used_str->read((char*)&child.weight,sizeof(child.weight)); + DescManip::fromStream(child.descriptor,*_used_str); + m_nodes[child.parent].children.push_back(child.id); + } + // // words + uint32_t m_words_size; + _used_str->read((char*)&m_words_size,sizeof(m_words_size)); + m_words.resize(m_words_size); + for(unsigned int i = 0; i < m_words.size(); ++i) + { + WordId wid;NodeId nid; + _used_str->read((char*)&wid,sizeof(wid)); + _used_str->read((char*)&nid,sizeof(nid)); + m_nodes[nid].word_id = wid; + m_words[wid] = &m_nodes[nid]; + } +} +// -------------------------------------------------------------------------- + + + +void Vocabulary::load(const cv::FileStorage &fs, + const std::string &name) +{ + m_words.clear(); + m_nodes.clear(); + + cv::FileNode fvoc = fs[name]; + + m_k = (int)fvoc["k"]; + m_L = (int)fvoc["L"]; + m_scoring = (ScoringType)((int)fvoc["scoringType"]); + m_weighting = (WeightingType)((int)fvoc["weightingType"]); + + createScoringObject(); + + // nodes + cv::FileNode fn = fvoc["nodes"]; + + m_nodes.resize(fn.size() + 1); // +1 to include root + m_nodes[0].id = 0; + + for(unsigned int i = 0; i < fn.size(); ++i) + { + NodeId nid = (int)fn[i]["nodeId"]; + NodeId pid = (int)fn[i]["parentId"]; + WordValue weight = (WordValue)fn[i]["weight"]; + std::string d = (std::string)fn[i]["descriptor"]; + + m_nodes[nid].id = nid; + m_nodes[nid].parent = pid; + m_nodes[nid].weight = weight; + 
m_nodes[pid].children.push_back(nid); + + DescManip::fromString(m_nodes[nid].descriptor, d); + } + + // words + fn = fvoc["words"]; + + m_words.resize(fn.size()); + + for(unsigned int i = 0; i < fn.size(); ++i) + { + NodeId wid = (int)fn[i]["wordId"]; + NodeId nid = (int)fn[i]["nodeId"]; + + m_nodes[nid].word_id = wid; + m_words[wid] = &m_nodes[nid]; + } +} + +// -------------------------------------------------------------------------- + +/** + * Writes printable information of the vocabulary + * @param os stream to write to + * @param voc + */ + +std::ostream& operator<<(std::ostream &os, + const Vocabulary &voc) +{ + os << "Vocabulary: k = " << voc.getBranchingFactor() + << ", L = " << voc.getDepthLevels() + << ", Weighting = "; + + switch(voc.getWeightingType()) + { + case TF_IDF: os << "tf-idf"; break; + case TF: os << "tf"; break; + case IDF: os << "idf"; break; + case BINARY: os << "binary"; break; + } + + os << ", Scoring = "; + switch(voc.getScoringType()) + { + case L1_NORM: os << "L1-norm"; break; + case L2_NORM: os << "L2-norm"; break; + case CHI_SQUARE: os << "Chi square distance"; break; + case KL: os << "KL-divergence"; break; + case BHATTACHARYYA: os << "Bhattacharyya coefficient"; break; + case DOT_PRODUCT: os << "Dot product"; break; + } + + os << ", Number of words = " << voc.size(); + + return os; +} +/** + * @brief Vocabulary::clear + */ +void Vocabulary::clear(){ + delete m_scoring_object; + m_scoring_object=0; + m_nodes.clear(); + m_words.clear(); + +} +int Vocabulary::getDescritorSize()const +{ + if (m_words.size()==0)return -1; + else return m_words[0]->descriptor.cols; +} +int Vocabulary::getDescritorType()const{ + + if (m_words.size()==0)return -1; + else return m_words[0]->descriptor.type(); +} + + +void Vocabulary::saveToTextFile(const std::string &filename) const +{ + fstream f; + f.open(filename.c_str(),ios_base::out); + f << m_k << " " << m_L << " " << " " << m_scoring << " " << m_weighting << endl; + + for(size_t i=1; i + +#include 
+#include +#include +#include +#include +#include +#include +#include "exports.h" +#include "FeatureVector.h" +#include "BowVector.h" +#include "ScoringObject.h" +#include +using namespace std; +using namespace std; +using namespace std; +using namespace std; +namespace DBoW3 { +/// Vocabulary +class DBOW_API Vocabulary +{ +friend class FastSearch; +public: + + /** + * Initiates an empty vocabulary + * @param k branching factor + * @param L depth levels + * @param weighting weighting type + * @param scoring scoring type + */ + Vocabulary(int k = 10, int L = 5, + WeightingType weighting = TF_IDF, ScoringType scoring = L1_NORM); + + /** + * Creates the vocabulary by loading a file + * @param filename + */ + Vocabulary(const std::string &filename); + + /** + * Creates the vocabulary by loading a file + * @param filename + */ + Vocabulary(const char *filename); + + /** + * Creates the vocabulary by loading an input stream + * @param filename + */ + Vocabulary(std::istream &filename); + + /** + * Copy constructor + * @param voc + */ + Vocabulary(const Vocabulary &voc); + + /** + * Destructor + */ + virtual ~Vocabulary(); + + /** + * Assigns the given vocabulary to this by copying its data and removing + * all the data contained by this vocabulary before + * @param voc + * @return reference to this vocabulary + */ + Vocabulary& operator=( + const Vocabulary &voc); + + /** + * Creates a vocabulary from the training features with the already + * defined parameters + * @param training_features + */ + virtual void create + (const std::vector > &training_features); + /** + * Creates a vocabulary from the training features with the already + * defined parameters + * @param training_features. 
Each row of a matrix is a feature + */ + virtual void create + (const std::vector &training_features); + + /** + * Creates a vocabulary from the training features, setting the branching + * factor and the depth levels of the tree + * @param training_features + * @param k branching factor + * @param L depth levels + */ + virtual void create + (const std::vector > &training_features, + int k, int L); + + /** + * Creates a vocabulary from the training features, setting the branching + * factor nad the depth levels of the tree, and the weighting and scoring + * schemes + */ + virtual void create + (const std::vector > &training_features, + int k, int L, WeightingType weighting, ScoringType scoring); + + /** + * Returns the number of words in the vocabulary + * @return number of words + */ + virtual inline unsigned int size() const{ return (unsigned int)m_words.size();} + + + /** + * Returns whether the vocabulary is empty (i.e. it has not been trained) + * @return true iff the vocabulary is empty + */ + virtual inline bool empty() const{ return m_words.empty();} + + /** Clears the vocabulary object + */ + void clear(); + /** + * Transforms a set of descriptores into a bow vector + * @param features + * @param v (out) bow vector of weighted words + */ + virtual void transform(const std::vector& features, BowVector &v) + const; + /** + * Transforms a set of descriptores into a bow vector + * @param features, one per row + * @param v (out) bow vector of weighted words + */ + virtual void transform(const cv::Mat & features, BowVector &v) + const; + /** + * Transform a set of descriptors into a bow vector and a feature vector + * @param features + * @param v (out) bow vector + * @param fv (out) feature vector of nodes and feature indexes + * @param levelsup levels to go up the vocabulary tree to get the node index + */ + virtual void transform(const std::vector& features, + BowVector &v, FeatureVector &fv, int levelsup) const; + + /** + * Transforms a single feature into a 
word (without weight) + * @param feature + * @return word id + */ + virtual WordId transform(const cv::Mat& feature) const; + + /** + * Returns the score of two vectors + * @param a vector + * @param b vector + * @return score between vectors + * @note the vectors must be already sorted and normalized if necessary + */ + double score(const BowVector &a, const BowVector &b) const{ return m_scoring_object->score(a, b);} + + /** + * Returns the id of the node that is "levelsup" levels from the word given + * @param wid word id + * @param levelsup 0..L + * @return node id. if levelsup is 0, returns the node id associated to the + * word id + */ + virtual NodeId getParentNode(WordId wid, int levelsup) const; + + /** + * Returns the ids of all the words that are under the given node id, + * by traversing any of the branches that goes down from the node + * @param nid starting node id + * @param words ids of words + */ + void getWordsFromNode(NodeId nid, std::vector &words) const; + + /** + * Returns the branching factor of the tree (k) + * @return k + */ + inline int getBranchingFactor() const { return m_k; } + + /** + * Returns the depth levels of the tree (L) + * @return L + */ + inline int getDepthLevels() const { return m_L; } + + /** + * Returns the real depth levels of the tree on average + * @return average of depth levels of leaves + */ + float getEffectiveLevels() const; + + /** + * Returns the descriptor of a word + * @param wid word id + * @return descriptor + */ + virtual inline cv::Mat getWord(WordId wid) const; + + /** + * Returns the weight of a word + * @param wid word id + * @return weight + */ + virtual inline WordValue getWordWeight(WordId wid) const; + + /** + * Returns the weighting method + * @return weighting method + */ + inline WeightingType getWeightingType() const { return m_weighting; } + + /** + * Returns the scoring method + * @return scoring method + */ + inline ScoringType getScoringType() const { return m_scoring; } + + /** + * Changes 
the weighting method + * @param type new weighting type + */ + inline void setWeightingType(WeightingType type); + + /** + * Changes the scoring method + * @param type new scoring type + */ + void setScoringType(ScoringType type); + + /** + * Saves the vocabulary into a file. If filename extension contains .yml, opencv YALM format is used. Otherwise, binary format is employed + * @param filename + */ + void save(const std::string &filename, bool binary_compressed=true) const; + + /** + * Loads the vocabulary from a file created with save + * @param filename. + */ + void load(const std::string &filename); + + /** + * Loads the vocabulary from an input stream created with save + * @param stream. + */ + bool load(std::istream &stream); + + /** + * Saves the vocabulary to a file storage structure + * @param fn node in file storage + */ + virtual void save(cv::FileStorage &fs, + const std::string &name = "vocabulary") const; + + /** + * Loads the vocabulary from a file storage node + * @param fn first node + * @param subname name of the child node of fn where the tree is stored. + * If not given, the fn node is used instead + */ + virtual void load(const cv::FileStorage &fs, + const std::string &name = "vocabulary"); + + /** + * Stops those words whose weight is below minWeight. + * Words are stopped by setting their weight to 0. There are not returned + * later when transforming image features into vectors. + * Note that when using IDF or TF_IDF, the weight is the idf part, which + * is equivalent to -log(f), where f is the frequency of the word + * (f = Ni/N, Ni: number of training images where the word is present, + * N: number of training images). + * Note that the old weight is forgotten, and subsequent calls to this + * function with a lower minWeight have no effect. + * @return number of words stopped now + */ + virtual int stopWords(double minWeight); + + + /** Returns the size of the descriptor employed. 
If the Vocabulary is empty, returns -1 + */ + int getDescritorSize()const; + /** Returns the type of the descriptor employed normally(8U_C1, 32F_C1) + */ + int getDescritorType()const; + //io to-from a stream + void toStream( std::ostream &str, bool compressed=true) const throw(std::exception); + void fromStream( std::istream &str ) throw(std::exception); + + void saveToTextFile(const std::string &filename) const; + + protected: + + /// reference to descriptor + typedef const cv::Mat pDescriptor; + + /// Tree node + struct Node + { + /// Node id + NodeId id; + /// Weight if the node is a word + WordValue weight; + /// Children + std::vector children; + /// Parent node (undefined in case of root) + NodeId parent; + /// Node descriptor + cv::Mat descriptor; + + /// Word id if the node is a word + WordId word_id; + + /** + * Empty constructor + */ + Node(): id(0), weight(0), parent(0), word_id(0){} + + /** + * Constructor + * @param _id node id + */ + Node(NodeId _id): id(_id), weight(0), parent(0), word_id(0){} + + /** + * Returns whether the node is a leaf node + * @return true iff the node is a leaf + */ + inline bool isLeaf() const { return children.empty(); } + }; + +protected: + + /** + * Creates an instance of the scoring object accoring to m_scoring + */ + void createScoringObject(); + + /** + * Returns a set of pointers to descriptores + * @param training_features all the features + * @param features (out) pointers to the training features + */ + void getFeatures(const std::vector > &training_features, + std::vector &features) const; + + /** + * Returns the word id associated to a feature + * @param feature + * @param id (out) word id + * @param weight (out) word weight + * @param nid (out) if given, id of the node "levelsup" levels up + * @param levelsup + */ + virtual void transform(const cv::Mat &feature, + WordId &id, WordValue &weight, NodeId* nid , int levelsup = 0) const; + /** + * Returns the word id associated to a feature + * @param feature + * 
@param id (out) word id + * @param weight (out) word weight + * @param nid (out) if given, id of the node "levelsup" levels up + * @param levelsup + */ + virtual void transform(const cv::Mat &feature, + WordId &id, WordValue &weight ) const; + + /** + * Returns the word id associated to a feature + * @param feature + * @param id (out) word id + */ + virtual void transform(const cv::Mat &feature, WordId &id) const; + + /** + * Creates a level in the tree, under the parent, by running kmeans with + * a descriptor set, and recursively creates the subsequent levels too + * @param parent_id id of parent node + * @param descriptors descriptors to run the kmeans on + * @param current_level current level in the tree + */ + void HKmeansStep(NodeId parent_id, const std::vector &descriptors, + int current_level); + + /** + * Creates k clusters from the given descriptors with some seeding algorithm. + * @note In this class, kmeans++ is used, but this function should be + * overriden by inherited classes. + */ + virtual void initiateClusters(const std::vector &descriptors, + std::vector &clusters) const; + + /** + * Creates k clusters from the given descriptor sets by running the + * initial step of kmeans++ + * @param descriptors + * @param clusters resulting clusters + */ + void initiateClustersKMpp(const std::vector &descriptors, + std::vector &clusters) const; + + /** + * Create the words of the vocabulary once the tree has been built + */ + void createWords(); + + /** + * Sets the weights of the nodes of tree according to the given features. 
+ * Before calling this function, the nodes and the words must be already + * created (by calling HKmeansStep and createWords) + * @param features + */ + void setNodeWeights(const std::vector > &features); + + + /** + * Writes printable information of the vocabulary + * @param os stream to write to + * @param voc + */ + DBOW_API friend std::ostream& operator<<(std::ostream &os, const Vocabulary &voc); + + /**Loads from ORBSLAM txt files + */ + void load_fromtxt(const std::string &filename)throw(std::runtime_error); + +protected: + + /// Branching factor + int m_k; + + /// Depth levels + int m_L; + + /// Weighting method + WeightingType m_weighting; + + /// Scoring method + ScoringType m_scoring; + + /// Object for computing scores + GeneralScoring* m_scoring_object; + + /// Tree nodes + std::vector m_nodes; + + /// Words of the vocabulary (tree leaves) + /// this condition holds: m_words[wid]->word_id == wid + std::vector m_words; +public: + //for debug (REMOVE) + inline Node* getNodeWord(uint32_t idx){return m_words[idx];} + +}; + + +} // namespace DBoW3 + +#endif diff --git a/Thirdparty/DBoW2/DBoW3/src/exports.h b/Thirdparty/DBoW2/DBoW3/src/exports.h new file mode 100644 index 0000000000..c32495384b --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/src/exports.h @@ -0,0 +1,51 @@ +/***************************** +Copyright 2014 Rafael Muñoz Salinas. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are +permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this list of + conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, this list + of conditions and the following disclaimer in the documentation and/or other materials + provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation are those of the +authors and should not be interpreted as representing official policies, either expressed +or implied, of Rafael Muñoz Salinas. +********************************/ + + + +#ifndef __DBOW_CORE_TYPES_H__ +#define __DBOW_CORE_TYPES_H__ + +#if !defined _CRT_SECURE_NO_DEPRECATE && _MSC_VER > 1300 +#define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio 2005 warnings */ +#endif + +#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined DBOW_DSO_EXPORTS + #define DBOW_API __declspec(dllexport) + #pragma warning ( disable : 4251 ) //disable warning to templates with dll linkage. + #pragma warning ( disable : 4290 ) //disable warning due to exception specifications. + #pragma warning ( disable : 4996 ) //disable warning regarding unsafe vsprintf. + #pragma warning ( disable : 4244 ) //disable warning convesions with lost of data. 
+ +#else + #define DBOW_API +#endif + + +#define DBOW_VERSION "3.0.0" +#endif diff --git a/Thirdparty/DBoW2/DBoW3/src/quicklz.c b/Thirdparty/DBoW2/DBoW3/src/quicklz.c new file mode 100644 index 0000000000..3742129023 --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/src/quicklz.c @@ -0,0 +1,848 @@ +// Fast data compression library +// Copyright (C) 2006-2011 Lasse Mikkel Reinhold +// lar@quicklz.com +// +// QuickLZ can be used for free under the GPL 1, 2 or 3 license (where anything +// released into public must be open source) or under a commercial license if such +// has been acquired (see http://www.quicklz.com/order.html). The commercial license +// does not cover derived or ported versions created by third parties under GPL. + +// 1.5.0 final + +#include "quicklz.h" + +#if QLZ_VERSION_MAJOR != 1 || QLZ_VERSION_MINOR != 5 || QLZ_VERSION_REVISION != 0 + #error quicklz.c and quicklz.h have different versions +#endif + +#if (defined(__X86__) || defined(__i386__) || defined(i386) || defined(_M_IX86) || defined(__386__) || defined(__x86_64__) || defined(_M_X64)) + #define X86X64 +#endif + +#define MINOFFSET 2 +#define UNCONDITIONAL_MATCHLEN 6 +#define UNCOMPRESSED_END 4 +#define CWORD_LEN 4 + +#if QLZ_COMPRESSION_LEVEL == 1 && defined QLZ_PTR_64 && QLZ_STREAMING_BUFFER == 0 + #define OFFSET_BASE source + #define CAST (ui32)(size_t) +#else + #define OFFSET_BASE 0 + #define CAST +#endif + +int qlz_get_setting(int setting) +{ + switch (setting) + { + case 0: return QLZ_COMPRESSION_LEVEL; + case 1: return sizeof(qlz_state_compress); + case 2: return sizeof(qlz_state_decompress); + case 3: return QLZ_STREAMING_BUFFER; +#ifdef QLZ_MEMORY_SAFE + case 6: return 1; +#else + case 6: return 0; +#endif + case 7: return QLZ_VERSION_MAJOR; + case 8: return QLZ_VERSION_MINOR; + case 9: return QLZ_VERSION_REVISION; + } + return -1; +} + +#if QLZ_COMPRESSION_LEVEL == 1 +static int same(const unsigned char *src, size_t n) +{ + while(n > 0 && *(src + n) == *src) + n--; + return n == 0 ? 
1 : 0; +} +#endif + +static void reset_table_compress(qlz_state_compress *state) +{ + int i; + for(i = 0; i < QLZ_HASH_VALUES; i++) + { +#if QLZ_COMPRESSION_LEVEL == 1 + state->hash[i].offset = 0; +#else + state->hash_counter[i] = 0; +#endif + } +} + +static void reset_table_decompress(qlz_state_decompress *state) +{ + int i; + (void)state; + (void)i; +#if QLZ_COMPRESSION_LEVEL == 2 + for(i = 0; i < QLZ_HASH_VALUES; i++) + { + state->hash_counter[i] = 0; + } +#endif +} + +static __inline ui32 hash_func(ui32 i) +{ +#if QLZ_COMPRESSION_LEVEL == 2 + return ((i >> 9) ^ (i >> 13) ^ i) & (QLZ_HASH_VALUES - 1); +#else + return ((i >> 12) ^ i) & (QLZ_HASH_VALUES - 1); +#endif +} + +static __inline ui32 fast_read(void const *src, ui32 bytes) +{ +#ifndef X86X64 + unsigned char *p = (unsigned char*)src; + switch (bytes) + { + case 4: + return(*p | *(p + 1) << 8 | *(p + 2) << 16 | *(p + 3) << 24); + case 3: + return(*p | *(p + 1) << 8 | *(p + 2) << 16); + case 2: + return(*p | *(p + 1) << 8); + case 1: + return(*p); + } + return 0; +#else + if (bytes >= 1 && bytes <= 4) + return *((ui32*)src); + else + return 0; +#endif +} + +static __inline ui32 hashat(const unsigned char *src) +{ + ui32 fetch, hash; + fetch = fast_read(src, 3); + hash = hash_func(fetch); + return hash; +} + +static __inline void fast_write(ui32 f, void *dst, size_t bytes) +{ +#ifndef X86X64 + unsigned char *p = (unsigned char*)dst; + + switch (bytes) + { + case 4: + *p = (unsigned char)f; + *(p + 1) = (unsigned char)(f >> 8); + *(p + 2) = (unsigned char)(f >> 16); + *(p + 3) = (unsigned char)(f >> 24); + return; + case 3: + *p = (unsigned char)f; + *(p + 1) = (unsigned char)(f >> 8); + *(p + 2) = (unsigned char)(f >> 16); + return; + case 2: + *p = (unsigned char)f; + *(p + 1) = (unsigned char)(f >> 8); + return; + case 1: + *p = (unsigned char)f; + return; + } +#else + switch (bytes) + { + case 4: + *((ui32*)dst) = f; + return; + case 3: + *((ui32*)dst) = f; + return; + case 2: + *((ui16 *)dst) = (ui16)f; + 
return; + case 1: + *((unsigned char*)dst) = (unsigned char)f; + return; + } +#endif +} + + +size_t qlz_size_decompressed(const char *source) +{ + ui32 n, r; + n = (((*source) & 2) == 2) ? 4 : 1; + r = fast_read(source + 1 + n, n); + r = r & (0xffffffff >> ((4 - n)*8)); + return r; +} + +size_t qlz_size_compressed(const char *source) +{ + ui32 n, r; + n = (((*source) & 2) == 2) ? 4 : 1; + r = fast_read(source + 1, n); + r = r & (0xffffffff >> ((4 - n)*8)); + return r; +} + +size_t qlz_size_header(const char *source) +{ + size_t n = 2*((((*source) & 2) == 2) ? 4 : 1) + 1; + return n; +} + + +static __inline void memcpy_up(unsigned char *dst, const unsigned char *src, ui32 n) +{ + // Caution if modifying memcpy_up! Overlap of dst and src must be special handled. +#ifndef X86X64 + unsigned char *end = dst + n; + while(dst < end) + { + *dst = *src; + dst++; + src++; + } +#else + ui32 f = 0; + do + { + *(ui32 *)(dst + f) = *(ui32 *)(src + f); + f += MINOFFSET + 1; + } + while (f < n); +#endif +} + +static __inline void update_hash(qlz_state_decompress *state, const unsigned char *s) +{ +#if QLZ_COMPRESSION_LEVEL == 1 + ui32 hash; + hash = hashat(s); + state->hash[hash].offset = s; + state->hash_counter[hash] = 1; +#elif QLZ_COMPRESSION_LEVEL == 2 + ui32 hash; + unsigned char c; + hash = hashat(s); + c = state->hash_counter[hash]; + state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = s; + c++; + state->hash_counter[hash] = c; +#endif + (void)state; + (void)s; +} + +#if QLZ_COMPRESSION_LEVEL <= 2 +static void update_hash_upto(qlz_state_decompress *state, unsigned char **lh, const unsigned char *max) +{ + while(*lh < max) + { + (*lh)++; + update_hash(state, *lh); + } +} +#endif + +static size_t qlz_compress_core(const unsigned char *source, unsigned char *destination, size_t size, qlz_state_compress *state) +{ + const unsigned char *last_byte = source + size - 1; + const unsigned char *src = source; + unsigned char *cword_ptr = destination; + unsigned char *dst = 
destination + CWORD_LEN; + ui32 cword_val = 1U << 31; + const unsigned char *last_matchstart = last_byte - UNCONDITIONAL_MATCHLEN - UNCOMPRESSED_END; + ui32 fetch = 0; + unsigned int lits = 0; + + (void) lits; + + if(src <= last_matchstart) + fetch = fast_read(src, 3); + + while(src <= last_matchstart) + { + if ((cword_val & 1) == 1) + { + // store uncompressed if compression ratio is too low + if (src > source + (size >> 1) && dst - destination > src - source - ((src - source) >> 5)) + return 0; + + fast_write((cword_val >> 1) | (1U << 31), cword_ptr, CWORD_LEN); + + cword_ptr = dst; + dst += CWORD_LEN; + cword_val = 1U << 31; + fetch = fast_read(src, 3); + } +#if QLZ_COMPRESSION_LEVEL == 1 + { + const unsigned char *o; + ui32 hash, cached; + + hash = hash_func(fetch); + cached = fetch ^ state->hash[hash].cache; + state->hash[hash].cache = fetch; + + o = state->hash[hash].offset + OFFSET_BASE; + state->hash[hash].offset = CAST(src - OFFSET_BASE); + +#ifdef X86X64 + if ((cached & 0xffffff) == 0 && o != OFFSET_BASE && (src - o > MINOFFSET || (src == o + 1 && lits >= 3 && src > source + 3 && same(src - 3, 6)))) + { + if(cached != 0) + { +#else + if (cached == 0 && o != OFFSET_BASE && (src - o > MINOFFSET || (src == o + 1 && lits >= 3 && src > source + 3 && same(src - 3, 6)))) + { + if (*(o + 3) != *(src + 3)) + { +#endif + hash <<= 4; + cword_val = (cword_val >> 1) | (1U << 31); + fast_write((3 - 2) | hash, dst, 2); + src += 3; + dst += 2; + } + else + { + const unsigned char *old_src = src; + size_t matchlen; + hash <<= 4; + + cword_val = (cword_val >> 1) | (1U << 31); + src += 4; + + if(*(o + (src - old_src)) == *src) + { + src++; + if(*(o + (src - old_src)) == *src) + { + size_t q = last_byte - UNCOMPRESSED_END - (src - 5) + 1; + size_t remaining = q > 255 ? 
255 : q; + src++; + while(*(o + (src - old_src)) == *src && (size_t)(src - old_src) < remaining) + src++; + } + } + + matchlen = src - old_src; + if (matchlen < 18) + { + fast_write((ui32)(matchlen - 2) | hash, dst, 2); + dst += 2; + } + else + { + fast_write((ui32)(matchlen << 16) | hash, dst, 3); + dst += 3; + } + } + fetch = fast_read(src, 3); + lits = 0; + } + else + { + lits++; + *dst = *src; + src++; + dst++; + cword_val = (cword_val >> 1); +#ifdef X86X64 + fetch = fast_read(src, 3); +#else + fetch = (fetch >> 8 & 0xffff) | (*(src + 2) << 16); +#endif + } + } +#elif QLZ_COMPRESSION_LEVEL >= 2 + { + const unsigned char *o, *offset2; + ui32 hash, matchlen, k, m, best_k = 0; + unsigned char c; + size_t remaining = (last_byte - UNCOMPRESSED_END - src + 1) > 255 ? 255 : (last_byte - UNCOMPRESSED_END - src + 1); + (void)best_k; + + + //hash = hashat(src); + fetch = fast_read(src, 3); + hash = hash_func(fetch); + + c = state->hash_counter[hash]; + + offset2 = state->hash[hash].offset[0]; + if(offset2 < src - MINOFFSET && c > 0 && ((fast_read(offset2, 3) ^ fetch) & 0xffffff) == 0) + { + matchlen = 3; + if(*(offset2 + matchlen) == *(src + matchlen)) + { + matchlen = 4; + while(*(offset2 + matchlen) == *(src + matchlen) && matchlen < remaining) + matchlen++; + } + } + else + matchlen = 0; + for(k = 1; k < QLZ_POINTERS && c > k; k++) + { + o = state->hash[hash].offset[k]; +#if QLZ_COMPRESSION_LEVEL == 3 + if(((fast_read(o, 3) ^ fetch) & 0xffffff) == 0 && o < src - MINOFFSET) +#elif QLZ_COMPRESSION_LEVEL == 2 + if(*(src + matchlen) == *(o + matchlen) && ((fast_read(o, 3) ^ fetch) & 0xffffff) == 0 && o < src - MINOFFSET) +#endif + { + m = 3; + while(*(o + m) == *(src + m) && m < remaining) + m++; +#if QLZ_COMPRESSION_LEVEL == 3 + if ((m > matchlen) || (m == matchlen && o > offset2)) +#elif QLZ_COMPRESSION_LEVEL == 2 + if (m > matchlen) +#endif + { + offset2 = o; + matchlen = m; + best_k = k; + } + } + } + o = offset2; + state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = 
src; + c++; + state->hash_counter[hash] = c; + +#if QLZ_COMPRESSION_LEVEL == 3 + if(matchlen > 2 && src - o < 131071) + { + ui32 u; + size_t offset = src - o; + + for(u = 1; u < matchlen; u++) + { + hash = hashat(src + u); + c = state->hash_counter[hash]++; + state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = src + u; + } + + cword_val = (cword_val >> 1) | (1U << 31); + src += matchlen; + + if(matchlen == 3 && offset <= 63) + { + *dst = (unsigned char)(offset << 2); + dst++; + } + else if (matchlen == 3 && offset <= 16383) + { + ui32 f = (ui32)((offset << 2) | 1); + fast_write(f, dst, 2); + dst += 2; + } + else if (matchlen <= 18 && offset <= 1023) + { + ui32 f = ((matchlen - 3) << 2) | ((ui32)offset << 6) | 2; + fast_write(f, dst, 2); + dst += 2; + } + + else if(matchlen <= 33) + { + ui32 f = ((matchlen - 2) << 2) | ((ui32)offset << 7) | 3; + fast_write(f, dst, 3); + dst += 3; + } + else + { + ui32 f = ((matchlen - 3) << 7) | ((ui32)offset << 15) | 3; + fast_write(f, dst, 4); + dst += 4; + } + } + else + { + *dst = *src; + src++; + dst++; + cword_val = (cword_val >> 1); + } +#elif QLZ_COMPRESSION_LEVEL == 2 + + if(matchlen > 2) + { + cword_val = (cword_val >> 1) | (1U << 31); + src += matchlen; + + if (matchlen < 10) + { + ui32 f = best_k | ((matchlen - 2) << 2) | (hash << 5); + fast_write(f, dst, 2); + dst += 2; + } + else + { + ui32 f = best_k | (matchlen << 16) | (hash << 5); + fast_write(f, dst, 3); + dst += 3; + } + } + else + { + *dst = *src; + src++; + dst++; + cword_val = (cword_val >> 1); + } +#endif + } +#endif + } + while (src <= last_byte) + { + if ((cword_val & 1) == 1) + { + fast_write((cword_val >> 1) | (1U << 31), cword_ptr, CWORD_LEN); + cword_ptr = dst; + dst += CWORD_LEN; + cword_val = 1U << 31; + } +#if QLZ_COMPRESSION_LEVEL < 3 + if (src <= last_byte - 3) + { +#if QLZ_COMPRESSION_LEVEL == 1 + ui32 hash, fetch; + fetch = fast_read(src, 3); + hash = hash_func(fetch); + state->hash[hash].offset = CAST(src - OFFSET_BASE); + state->hash[hash].cache 
= fetch; +#elif QLZ_COMPRESSION_LEVEL == 2 + ui32 hash; + unsigned char c; + hash = hashat(src); + c = state->hash_counter[hash]; + state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = src; + c++; + state->hash_counter[hash] = c; +#endif + } +#endif + *dst = *src; + src++; + dst++; + cword_val = (cword_val >> 1); + } + + while((cword_val & 1) != 1) + cword_val = (cword_val >> 1); + + fast_write((cword_val >> 1) | (1U << 31), cword_ptr, CWORD_LEN); + + // min. size must be 9 bytes so that the qlz_size functions can take 9 bytes as argument + return dst - destination < 9 ? 9 : dst - destination; +} + +static size_t qlz_decompress_core(const unsigned char *source, unsigned char *destination, size_t size, qlz_state_decompress *state, const unsigned char *history) +{ + const unsigned char *src = source + qlz_size_header((const char *)source); + unsigned char *dst = destination; + const unsigned char *last_destination_byte = destination + size - 1; + ui32 cword_val = 1; + const unsigned char *last_matchstart = last_destination_byte - UNCONDITIONAL_MATCHLEN - UNCOMPRESSED_END; + unsigned char *last_hashed = destination - 1; + const unsigned char *last_source_byte = source + qlz_size_compressed((const char *)source) - 1; + static const ui32 bitlut[16] = {4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0}; + + (void) last_source_byte; + (void) last_hashed; + (void) state; + (void) history; + + for(;;) + { + ui32 fetch; + + if (cword_val == 1) + { +#ifdef QLZ_MEMORY_SAFE + if(src + CWORD_LEN - 1 > last_source_byte) + return 0; +#endif + cword_val = fast_read(src, CWORD_LEN); + src += CWORD_LEN; + } + +#ifdef QLZ_MEMORY_SAFE + if(src + 4 - 1 > last_source_byte) + return 0; +#endif + + fetch = fast_read(src, 4); + + if ((cword_val & 1) == 1) + { + ui32 matchlen; + const unsigned char *offset2; + +#if QLZ_COMPRESSION_LEVEL == 1 + ui32 hash; + cword_val = cword_val >> 1; + hash = (fetch >> 4) & 0xfff; + offset2 = (const unsigned char *)(size_t)state->hash[hash].offset; + + if((fetch 
& 0xf) != 0) + { + matchlen = (fetch & 0xf) + 2; + src += 2; + } + else + { + matchlen = *(src + 2); + src += 3; + } + +#elif QLZ_COMPRESSION_LEVEL == 2 + ui32 hash; + unsigned char c; + cword_val = cword_val >> 1; + hash = (fetch >> 5) & 0x7ff; + c = (unsigned char)(fetch & 0x3); + offset2 = state->hash[hash].offset[c]; + + if((fetch & (28)) != 0) + { + matchlen = ((fetch >> 2) & 0x7) + 2; + src += 2; + } + else + { + matchlen = *(src + 2); + src += 3; + } + +#elif QLZ_COMPRESSION_LEVEL == 3 + ui32 offset; + cword_val = cword_val >> 1; + if ((fetch & 3) == 0) + { + offset = (fetch & 0xff) >> 2; + matchlen = 3; + src++; + } + else if ((fetch & 2) == 0) + { + offset = (fetch & 0xffff) >> 2; + matchlen = 3; + src += 2; + } + else if ((fetch & 1) == 0) + { + offset = (fetch & 0xffff) >> 6; + matchlen = ((fetch >> 2) & 15) + 3; + src += 2; + } + else if ((fetch & 127) != 3) + { + offset = (fetch >> 7) & 0x1ffff; + matchlen = ((fetch >> 2) & 0x1f) + 2; + src += 3; + } + else + { + offset = (fetch >> 15); + matchlen = ((fetch >> 7) & 255) + 3; + src += 4; + } + + offset2 = dst - offset; +#endif + +#ifdef QLZ_MEMORY_SAFE + if(offset2 < history || offset2 > dst - MINOFFSET - 1) + return 0; + + if(matchlen > (ui32)(last_destination_byte - dst - UNCOMPRESSED_END + 1)) + return 0; +#endif + + memcpy_up(dst, offset2, matchlen); + dst += matchlen; + +#if QLZ_COMPRESSION_LEVEL <= 2 + update_hash_upto(state, &last_hashed, dst - matchlen); + last_hashed = dst - 1; +#endif + } + else + { + if (dst < last_matchstart) + { + unsigned int n = bitlut[cword_val & 0xf]; +#ifdef X86X64 + *(ui32 *)dst = *(ui32 *)src; +#else + memcpy_up(dst, src, 4); +#endif + cword_val = cword_val >> n; + dst += n; + src += n; +#if QLZ_COMPRESSION_LEVEL <= 2 + update_hash_upto(state, &last_hashed, dst - 3); +#endif + } + else + { + while(dst <= last_destination_byte) + { + if (cword_val == 1) + { + src += CWORD_LEN; + cword_val = 1U << 31; + } +#ifdef QLZ_MEMORY_SAFE + if(src >= last_source_byte + 1) + 
return 0; +#endif + *dst = *src; + dst++; + src++; + cword_val = cword_val >> 1; + } + +#if QLZ_COMPRESSION_LEVEL <= 2 + update_hash_upto(state, &last_hashed, last_destination_byte - 3); // todo, use constant +#endif + return size; + } + + } + } +} + +size_t qlz_compress(const void *source, char *destination, size_t size, qlz_state_compress *state) +{ + size_t r; + ui32 compressed; + size_t base; + + if(size == 0 || size > 0xffffffff - 400) + return 0; + + if(size < 216) + base = 3; + else + base = 9; + +#if QLZ_STREAMING_BUFFER > 0 + if (state->stream_counter + size - 1 >= QLZ_STREAMING_BUFFER) +#endif + { + reset_table_compress(state); + r = base + qlz_compress_core((const unsigned char *)source, (unsigned char*)destination + base, size, state); +#if QLZ_STREAMING_BUFFER > 0 + reset_table_compress(state); +#endif + if(r == base) + { + memcpy(destination + base, source, size); + r = size + base; + compressed = 0; + } + else + { + compressed = 1; + } + state->stream_counter = 0; + } +#if QLZ_STREAMING_BUFFER > 0 + else + { + unsigned char *src = state->stream_buffer + state->stream_counter; + + memcpy(src, source, size); + r = base + qlz_compress_core(src, (unsigned char*)destination + base, size, state); + + if(r == base) + { + memcpy(destination + base, src, size); + r = size + base; + compressed = 0; + reset_table_compress(state); + } + else + { + compressed = 1; + } + state->stream_counter += size; + } +#endif + if(base == 3) + { + *destination = (unsigned char)(0 | compressed); + *(destination + 1) = (unsigned char)r; + *(destination + 2) = (unsigned char)size; + } + else + { + *destination = (unsigned char)(2 | compressed); + fast_write((ui32)r, destination + 1, 4); + fast_write((ui32)size, destination + 5, 4); + } + + *destination |= (QLZ_COMPRESSION_LEVEL << 2); + *destination |= (1 << 6); + *destination |= ((QLZ_STREAMING_BUFFER == 0 ? 0 : (QLZ_STREAMING_BUFFER == 100000 ? 1 : (QLZ_STREAMING_BUFFER == 1000000 ? 
2 : 3))) << 4); + +// 76543210 +// 01SSLLHC + + return r; +} + +size_t qlz_decompress(const char *source, void *destination, qlz_state_decompress *state) +{ + size_t dsiz = qlz_size_decompressed(source); + +#if QLZ_STREAMING_BUFFER > 0 + if (state->stream_counter + qlz_size_decompressed(source) - 1 >= QLZ_STREAMING_BUFFER) +#endif + { + if((*source & 1) == 1) + { + reset_table_decompress(state); + dsiz = qlz_decompress_core((const unsigned char *)source, (unsigned char *)destination, dsiz, state, (const unsigned char *)destination); + } + else + { + memcpy(destination, source + qlz_size_header(source), dsiz); + } + state->stream_counter = 0; + reset_table_decompress(state); + } +#if QLZ_STREAMING_BUFFER > 0 + else + { + unsigned char *dst = state->stream_buffer + state->stream_counter; + if((*source & 1) == 1) + { + dsiz = qlz_decompress_core((const unsigned char *)source, dst, dsiz, state, (const unsigned char *)state->stream_buffer); + } + else + { + memcpy(dst, source + qlz_size_header(source), dsiz); + reset_table_decompress(state); + } + memcpy(destination, dst, dsiz); + state->stream_counter += dsiz; + } +#endif + return dsiz; +} + diff --git a/Thirdparty/DBoW2/DBoW3/src/quicklz.h b/Thirdparty/DBoW2/DBoW3/src/quicklz.h new file mode 100644 index 0000000000..6a710f11bb --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/src/quicklz.h @@ -0,0 +1,150 @@ +#ifndef QLZ_HEADER +#define QLZ_HEADER + +// Fast data compression library +// Copyright (C) 2006-2011 Lasse Mikkel Reinhold +// lar@quicklz.com +// +// QuickLZ can be used for free under the GPL 1, 2 or 3 license (where anything +// released into public must be open source) or under a commercial license if such +// has been acquired (see http://www.quicklz.com/order.html). The commercial license +// does not cover derived or ported versions created by third parties under GPL. + +// You can edit following user settings. 
Data must be decompressed with the same +// setting of QLZ_COMPRESSION_LEVEL and QLZ_STREAMING_BUFFER as it was compressed +// (see manual). If QLZ_STREAMING_BUFFER > 0, scratch buffers must be initially +// zeroed out (see manual). First #ifndef makes it possible to define settings from +// the outside like the compiler command line. + +// 1.5.0 final + +#ifndef QLZ_COMPRESSION_LEVEL + + // 1 gives fastest compression speed. 3 gives fastest decompression speed and best + // compression ratio. + #define QLZ_COMPRESSION_LEVEL 1 + //#define QLZ_COMPRESSION_LEVEL 2 + //#define QLZ_COMPRESSION_LEVEL 3 + + // If > 0, zero out both states prior to first call to qlz_compress() or qlz_decompress() + // and decompress packets in the same order as they were compressed + #define QLZ_STREAMING_BUFFER 0 + //#define QLZ_STREAMING_BUFFER 100000 + //#define QLZ_STREAMING_BUFFER 1000000 + + // Guarantees that decompression of corrupted data cannot crash. Decreases decompression + // speed 10-20%. Compression speed not affected. + //#define QLZ_MEMORY_SAFE +#endif + +#define QLZ_VERSION_MAJOR 1 +#define QLZ_VERSION_MINOR 5 +#define QLZ_VERSION_REVISION 0 + +// Using size_t, memset() and memcpy() +#include + +// Verify compression level +#if QLZ_COMPRESSION_LEVEL != 1 && QLZ_COMPRESSION_LEVEL != 2 && QLZ_COMPRESSION_LEVEL != 3 +#error QLZ_COMPRESSION_LEVEL must be 1, 2 or 3 +#endif + +typedef unsigned int ui32; +typedef unsigned short int ui16; + +// Decrease QLZ_POINTERS for level 3 to increase compression speed. Do not touch any other values! +#if QLZ_COMPRESSION_LEVEL == 1 +#define QLZ_POINTERS 1 +#define QLZ_HASH_VALUES 4096 +#elif QLZ_COMPRESSION_LEVEL == 2 +#define QLZ_POINTERS 4 +#define QLZ_HASH_VALUES 2048 +#elif QLZ_COMPRESSION_LEVEL == 3 +#define QLZ_POINTERS 16 +#define QLZ_HASH_VALUES 4096 +#endif + +// Detect if pointer size is 64-bit. It's not fatal if some 64-bit target is not detected because this is only for adding an optional 64-bit optimization. 
+#if defined _LP64 || defined __LP64__ || defined __64BIT__ || _ADDR64 || defined _WIN64 || defined __arch64__ || __WORDSIZE == 64 || (defined __sparc && defined __sparcv9) || defined __x86_64 || defined __amd64 || defined __x86_64__ || defined _M_X64 || defined _M_IA64 || defined __ia64 || defined __IA64__ + #define QLZ_PTR_64 +#endif + +// hash entry +typedef struct +{ +#if QLZ_COMPRESSION_LEVEL == 1 + ui32 cache; +#if defined QLZ_PTR_64 && QLZ_STREAMING_BUFFER == 0 + unsigned int offset; +#else + const unsigned char *offset; +#endif +#else + const unsigned char *offset[QLZ_POINTERS]; +#endif + +} qlz_hash_compress; + +typedef struct +{ +#if QLZ_COMPRESSION_LEVEL == 1 + const unsigned char *offset; +#else + const unsigned char *offset[QLZ_POINTERS]; +#endif +} qlz_hash_decompress; + + +// states +typedef struct +{ + #if QLZ_STREAMING_BUFFER > 0 + unsigned char stream_buffer[QLZ_STREAMING_BUFFER]; + #endif + size_t stream_counter; + qlz_hash_compress hash[QLZ_HASH_VALUES]; + unsigned char hash_counter[QLZ_HASH_VALUES]; +} qlz_state_compress; + + +#if QLZ_COMPRESSION_LEVEL == 1 || QLZ_COMPRESSION_LEVEL == 2 + typedef struct + { +#if QLZ_STREAMING_BUFFER > 0 + unsigned char stream_buffer[QLZ_STREAMING_BUFFER]; +#endif + qlz_hash_decompress hash[QLZ_HASH_VALUES]; + unsigned char hash_counter[QLZ_HASH_VALUES]; + size_t stream_counter; + } qlz_state_decompress; +#elif QLZ_COMPRESSION_LEVEL == 3 + typedef struct + { +#if QLZ_STREAMING_BUFFER > 0 + unsigned char stream_buffer[QLZ_STREAMING_BUFFER]; +#endif +#if QLZ_COMPRESSION_LEVEL <= 2 + qlz_hash_decompress hash[QLZ_HASH_VALUES]; +#endif + size_t stream_counter; + } qlz_state_decompress; +#endif + + +#if defined (__cplusplus) +extern "C" { +#endif + +// Public functions of QuickLZ +size_t qlz_size_decompressed(const char *source); +size_t qlz_size_compressed(const char *source); +size_t qlz_compress(const void *source, char *destination, size_t size, qlz_state_compress *state); +size_t qlz_decompress(const char 
*source, void *destination, qlz_state_decompress *state); +int qlz_get_setting(int setting); + +#if defined (__cplusplus) +} +#endif + +#endif + diff --git a/Thirdparty/DBoW2/DBoW3/tests/CMakeLists.txt b/Thirdparty/DBoW2/DBoW3/tests/CMakeLists.txt new file mode 100644 index 0000000000..0c699c13ad --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/tests/CMakeLists.txt @@ -0,0 +1,8 @@ + INCLUDE_DIRECTORIES(${PROJECT_SOURCE_DIR}/src) +LINK_LIBRARIES(${PROJECT_NAME}) + + +ADD_EXECUTABLE(test_iobinary test_iobinary.cpp ) +ADD_EXECUTABLE(test_bigvoc test_bigvoc.cpp ) +ADD_EXECUTABLE(test_flann test_flann.cpp ) +ADD_EXECUTABLE(test_fbow test_fbow.cpp ) diff --git a/Thirdparty/DBoW2/DBoW3/tests/nanoflann.hpp b/Thirdparty/DBoW2/DBoW3/tests/nanoflann.hpp new file mode 100644 index 0000000000..00c3c5811d --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/tests/nanoflann.hpp @@ -0,0 +1,1395 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * Copyright 2011-2016 Jose Luis Blanco (joseluisblancoc@gmail.com). + * All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +/** \mainpage nanoflann C++ API documentation + * nanoflann is a C++ header-only library for building KD-Trees, mostly + * optimized for 2D or 3D point clouds. + * + * nanoflann does not require compiling or installing, just an + * #include in your code. 
+ * + * See: + * - C++ API organized by modules + * - Online README + * - Doxygen documentation + */ + +#ifndef NANOFLANN_HPP_ +#define NANOFLANN_HPP_ + +#include +#include +#include +#include +#include // for fwrite() +#include // for abs() +#include // for abs() +#include + +// Avoid conflicting declaration of min/max macros in windows headers +#if !defined(NOMINMAX) && (defined(_WIN32) || defined(_WIN32_) || defined(WIN32) || defined(_WIN64)) +# define NOMINMAX +# ifdef max +# undef max +# undef min +# endif +#endif + +namespace nanoflann +{ +/** @addtogroup nanoflann_grp nanoflann C++ library for ANN + * @{ */ + + /** Library version: 0xMmP (M=Major,m=minor,P=patch) */ + #define NANOFLANN_VERSION 0x121 + + /** @addtogroup result_sets_grp Result set classes + * @{ */ + template + class KNNResultSet + { + IndexType * indices; + DistanceType* dists; + CountType capacity; + CountType count; + + public: + inline KNNResultSet(CountType capacity_) : indices(0), dists(0), capacity(capacity_), count(0) + { + } + + inline void init(IndexType* indices_, DistanceType* dists_) + { + indices = indices_; + dists = dists_; + count = 0; + if (capacity) + dists[capacity-1] = (std::numeric_limits::max)(); + } + + inline CountType size() const + { + return count; + } + + inline bool full() const + { + return count == capacity; + } + + + inline void addPoint(DistanceType dist, IndexType index) + { + CountType i; + for (i=count; i>0; --i) { +#ifdef NANOFLANN_FIRST_MATCH // If defined and two points have the same distance, the one with the lowest-index will be returned first. 
+ if ( (dists[i-1]>dist) || ((dist==dists[i-1])&&(indices[i-1]>index)) ) { +#else + if (dists[i-1]>dist) { +#endif + if (i + class RadiusResultSet + { + public: + const DistanceType radius; + + std::vector >& m_indices_dists; + + inline RadiusResultSet(DistanceType radius_, std::vector >& indices_dists) : radius(radius_), m_indices_dists(indices_dists) + { + init(); + } + + inline ~RadiusResultSet() { } + + inline void init() { clear(); } + inline void clear() { m_indices_dists.clear(); } + + inline size_t size() const { return m_indices_dists.size(); } + + inline bool full() const { return true; } + + inline void addPoint(DistanceType dist, IndexType index) + { + if (dist 0 + */ + std::pair worst_item() const + { + if (m_indices_dists.empty()) throw std::runtime_error("Cannot invoke RadiusResultSet::worst_item() on an empty list of results."); + typedef typename std::vector >::const_iterator DistIt; + DistIt it = std::max_element(m_indices_dists.begin(), m_indices_dists.end()); + return *it; + } + }; + + /** operator "<" for std::sort() */ + struct IndexDist_Sorter + { + /** PairType will be typically: std::pair */ + template + inline bool operator()(const PairType &p1, const PairType &p2) const { + return p1.second < p2.second; + } + }; + + /** @} */ + + + /** @addtogroup loadsave_grp Load/save auxiliary functions + * @{ */ + template + void save_value(FILE* stream, const T& value, size_t count = 1) + { + fwrite(&value, sizeof(value),count, stream); + } + + template + void save_value(FILE* stream, const std::vector& value) + { + size_t size = value.size(); + fwrite(&size, sizeof(size_t), 1, stream); + fwrite(&value[0], sizeof(T), size, stream); + } + + template + void load_value(FILE* stream, T& value, size_t count = 1) + { + size_t read_cnt = fread(&value, sizeof(value), count, stream); + if (read_cnt != count) { + throw std::runtime_error("Cannot read from file"); + } + } + + + template + void load_value(FILE* stream, std::vector& value) + { + size_t size; + 
size_t read_cnt = fread(&size, sizeof(size_t), 1, stream); + if (read_cnt!=1) { + throw std::runtime_error("Cannot read from file"); + } + value.resize(size); + read_cnt = fread(&value[0], sizeof(T), size, stream); + if (read_cnt!=size) { + throw std::runtime_error("Cannot read from file"); + } + } + /** @} */ + + + /** @addtogroup metric_grp Metric (distance) classes + * @{ */ + + /** Manhattan distance functor (generic version, optimized for high-dimensionality data sets). + * Corresponding distance traits: nanoflann::metric_L1 + * \tparam T Type of the elements (e.g. double, float, uint8_t) + * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t) + */ + template + struct L1_Adaptor + { + typedef T ElementType; + typedef _DistanceType DistanceType; + + const DataSource &data_source; + + L1_Adaptor(const DataSource &_data_source) : data_source(_data_source) { } + + inline DistanceType operator()(const T* a, const size_t b_idx, size_t size, DistanceType worst_dist = -1) const + { + DistanceType result = DistanceType(); + const T* last = a + size; + const T* lastgroup = last - 3; + size_t d = 0; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + const DistanceType diff0 = std::abs(a[0] - data_source.kdtree_get_pt(b_idx,d++)); + const DistanceType diff1 = std::abs(a[1] - data_source.kdtree_get_pt(b_idx,d++)); + const DistanceType diff2 = std::abs(a[2] - data_source.kdtree_get_pt(b_idx,d++)); + const DistanceType diff3 = std::abs(a[3] - data_source.kdtree_get_pt(b_idx,d++)); + result += diff0 + diff1 + diff2 + diff3; + a += 4; + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 components. Not needed for standard vector lengths. 
*/ + while (a < last) { + result += std::abs( *a++ - data_source.kdtree_get_pt(b_idx,d++) ); + } + return result; + } + + template + inline DistanceType accum_dist(const U a, const V b, int ) const + { + return std::abs(a-b); + } + }; + + /** Squared Euclidean distance functor (generic version, optimized for high-dimensionality data sets). + * Corresponding distance traits: nanoflann::metric_L2 + * \tparam T Type of the elements (e.g. double, float, uint8_t) + * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t) + */ + template + struct L2_Adaptor + { + typedef T ElementType; + typedef _DistanceType DistanceType; + + const DataSource &data_source; + + L2_Adaptor(const DataSource &_data_source) : data_source(_data_source) { } + + inline DistanceType operator()(const T* a, const size_t b_idx, size_t size, DistanceType worst_dist = -1) const + { + DistanceType result = DistanceType(); + const T* last = a + size; + const T* lastgroup = last - 3; + size_t d = 0; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + const DistanceType diff0 = a[0] - data_source.kdtree_get_pt(b_idx,d++); + const DistanceType diff1 = a[1] - data_source.kdtree_get_pt(b_idx,d++); + const DistanceType diff2 = a[2] - data_source.kdtree_get_pt(b_idx,d++); + const DistanceType diff3 = a[3] - data_source.kdtree_get_pt(b_idx,d++); + result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; + a += 4; + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 components. Not needed for standard vector lengths. 
*/ + while (a < last) { + const DistanceType diff0 = *a++ - data_source.kdtree_get_pt(b_idx,d++); + result += diff0 * diff0; + } + return result; + } + + template + inline DistanceType accum_dist(const U a, const V b, int ) const + { + return (a-b)*(a-b); + } + }; + + /** Squared Euclidean (L2) distance functor (suitable for low-dimensionality datasets, like 2D or 3D point clouds) + * Corresponding distance traits: nanoflann::metric_L2_Simple + * \tparam T Type of the elements (e.g. double, float, uint8_t) + * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t) + */ + template + struct L2_Simple_Adaptor + { + typedef T ElementType; + typedef _DistanceType DistanceType; + + const DataSource &data_source; + + L2_Simple_Adaptor(const DataSource &_data_source) : data_source(_data_source) { } + + inline DistanceType operator()(const T* a, const size_t b_idx, size_t size) const { + return data_source.kdtree_distance(a,b_idx,size); + } + + template + inline DistanceType accum_dist(const U a, const V b, int ) const + { + return (a-b)*(a-b); + } + }; + + /** Metaprogramming helper traits class for the L1 (Manhattan) metric */ + struct metric_L1 { + template + struct traits { + typedef L1_Adaptor distance_t; + }; + }; + /** Metaprogramming helper traits class for the L2 (Euclidean) metric */ + struct metric_L2 { + template + struct traits { + typedef L2_Adaptor distance_t; + }; + }; + /** Metaprogramming helper traits class for the L2_simple (Euclidean) metric */ + struct metric_L2_Simple { + template + struct traits { + typedef L2_Simple_Adaptor distance_t; + }; + }; + + /** @} */ + + /** @addtogroup param_grp Parameter structs + * @{ */ + + /** Parameters (see README.md) */ + struct KDTreeSingleIndexAdaptorParams + { + KDTreeSingleIndexAdaptorParams(size_t _leaf_max_size = 10) : + leaf_max_size(_leaf_max_size) + {} + + size_t leaf_max_size; + }; + + /** Search options for KDTreeSingleIndexAdaptor::findNeighbors() */ + struct 
SearchParams + { + /** Note: The first argument (checks_IGNORED_) is ignored, but kept for compatibility with the FLANN interface */ + SearchParams(int checks_IGNORED_ = 32, float eps_ = 0, bool sorted_ = true ) : + checks(checks_IGNORED_), eps(eps_), sorted(sorted_) {} + + int checks; //!< Ignored parameter (Kept for compatibility with the FLANN interface). + float eps; //!< search for eps-approximate neighbours (default: 0) + bool sorted; //!< only for radius search, require neighbours sorted by distance (default: true) + }; + /** @} */ + + + /** @addtogroup memalloc_grp Memory allocation + * @{ */ + + /** + * Allocates (using C's malloc) a generic type T. + * + * Params: + * count = number of instances to allocate. + * Returns: pointer (of type T*) to memory buffer + */ + template + inline T* allocate(size_t count = 1) + { + T* mem = static_cast( ::malloc(sizeof(T)*count)); + return mem; + } + + + /** + * Pooled storage allocator + * + * The following routines allow for the efficient allocation of storage in + * small chunks from a specified pool. Rather than allowing each structure + * to be freed individually, an entire pool of storage is freed at once. + * This method has two advantages over just using malloc() and free(). First, + * it is far more efficient for allocating small objects, as there is + * no overhead for remembering all the information needed to free each + * object or consolidating fragmented memory. Second, the decision about + * how long to keep an object is made at the time of allocation, and there + * is no need to track down all the objects to free them. + * + */ + + const size_t WORDSIZE=16; + const size_t BLOCKSIZE=8192; + + class PooledAllocator + { + /* We maintain memory alignment to word boundaries by requiring that all + allocations be in multiples of the machine wordsize. */ + /* Size of machine word in bytes. Must be power of 2. */ + /* Minimum number of bytes requested at a time from the system. Must be multiple of WORDSIZE. 
*/ + + + size_t remaining; /* Number of bytes left in current block of storage. */ + void* base; /* Pointer to base of current block of storage. */ + void* loc; /* Current location in block to next allocate memory. */ + + void internal_init() + { + remaining = 0; + base = NULL; + usedMemory = 0; + wastedMemory = 0; + } + + public: + size_t usedMemory; + size_t wastedMemory; + + /** + Default constructor. Initializes a new pool. + */ + PooledAllocator() { + internal_init(); + } + + /** + * Destructor. Frees all the memory allocated in this pool. + */ + ~PooledAllocator() { + free_all(); + } + + /** Frees all allocated memory chunks */ + void free_all() + { + while (base != NULL) { + void *prev = *(static_cast( base)); /* Get pointer to prev block. */ + ::free(base); + base = prev; + } + internal_init(); + } + + /** + * Returns a pointer to a piece of new memory of the given size in bytes + * allocated from the pool. + */ + void* malloc(const size_t req_size) + { + /* Round size up to a multiple of wordsize. The following expression + only works for WORDSIZE that is a power of 2, by masking last bits of + incremented size to zero. + */ + const size_t size = (req_size + (WORDSIZE - 1)) & ~(WORDSIZE - 1); + + /* Check whether a new block must be allocated. Note that the first word + of a block is reserved for a pointer to the previous block. + */ + if (size > remaining) { + + wastedMemory += remaining; + + /* Allocate new storage. */ + const size_t blocksize = (size + sizeof(void*) + (WORDSIZE-1) > BLOCKSIZE) ? + size + sizeof(void*) + (WORDSIZE-1) : BLOCKSIZE; + + // use the standard C malloc to allocate memory + void* m = ::malloc(blocksize); + if (!m) { + fprintf(stderr,"Failed to allocate memory.\n"); + return NULL; + } + + /* Fill first word of new block with pointer to previous block. 
*/ + static_cast(m)[0] = base; + base = m; + + size_t shift = 0; + //int size_t = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) & (WORDSIZE-1))) & (WORDSIZE-1); + + remaining = blocksize - sizeof(void*) - shift; + loc = (static_cast(m) + sizeof(void*) + shift); + } + void* rloc = loc; + loc = static_cast(loc) + size; + remaining -= size; + + usedMemory += size; + + return rloc; + } + + /** + * Allocates (using this pool) a generic type T. + * + * Params: + * count = number of instances to allocate. + * Returns: pointer (of type T*) to memory buffer + */ + template + T* allocate(const size_t count = 1) + { + T* mem = static_cast(this->malloc(sizeof(T)*count)); + return mem; + } + + }; + /** @} */ + + /** @addtogroup nanoflann_metaprog_grp Auxiliary metaprogramming stuff + * @{ */ + + // ---------------- CArray ------------------------- + /** A STL container (as wrapper) for arrays of constant size defined at compile time (class imported from the MRPT project) + * This code is an adapted version from Boost, modifed for its integration + * within MRPT (JLBC, Dec/2009) (Renamed array -> CArray to avoid possible potential conflicts). + * See + * http://www.josuttis.com/cppcode + * for details and the latest version. + * See + * http://www.boost.org/libs/array for Documentation. + * for documentation. + * + * (C) Copyright Nicolai M. Josuttis 2001. + * Permission to copy, use, modify, sell and distribute this software + * is granted provided this copyright notice appears in all copies. + * This software is provided "as is" without express or implied + * warranty, and with no claim as to its suitability for any purpose. + * + * 29 Jan 2004 - minor fixes (Nico Josuttis) + * 04 Dec 2003 - update to synch with library TR1 (Alisdair Meredith) + * 23 Aug 2002 - fix for Non-MSVC compilers combined with MSVC libraries. 
+ * 05 Aug 2001 - minor update (Nico Josuttis) + * 20 Jan 2001 - STLport fix (Beman Dawes) + * 29 Sep 2000 - Initial Revision (Nico Josuttis) + * + * Jan 30, 2004 + */ + template + class CArray { + public: + T elems[N]; // fixed-size array of elements of type T + + public: + // type definitions + typedef T value_type; + typedef T* iterator; + typedef const T* const_iterator; + typedef T& reference; + typedef const T& const_reference; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + + // iterator support + inline iterator begin() { return elems; } + inline const_iterator begin() const { return elems; } + inline iterator end() { return elems+N; } + inline const_iterator end() const { return elems+N; } + + // reverse iterator support +#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) && !defined(BOOST_MSVC_STD_ITERATOR) && !defined(BOOST_NO_STD_ITERATOR_TRAITS) + typedef std::reverse_iterator reverse_iterator; + typedef std::reverse_iterator const_reverse_iterator; +#elif defined(_MSC_VER) && (_MSC_VER == 1300) && defined(BOOST_DINKUMWARE_STDLIB) && (BOOST_DINKUMWARE_STDLIB == 310) + // workaround for broken reverse_iterator in VC7 + typedef std::reverse_iterator > reverse_iterator; + typedef std::reverse_iterator > const_reverse_iterator; +#else + // workaround for broken reverse_iterator implementations + typedef std::reverse_iterator reverse_iterator; + typedef std::reverse_iterator const_reverse_iterator; +#endif + + reverse_iterator rbegin() { return reverse_iterator(end()); } + const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); } + reverse_iterator rend() { return reverse_iterator(begin()); } + const_reverse_iterator rend() const { return const_reverse_iterator(begin()); } + // operator[] + inline reference operator[](size_type i) { return elems[i]; } + inline const_reference operator[](size_type i) const { return elems[i]; } + // at() with range check + reference at(size_type i) { rangecheck(i); return 
elems[i]; } + const_reference at(size_type i) const { rangecheck(i); return elems[i]; } + // front() and back() + reference front() { return elems[0]; } + const_reference front() const { return elems[0]; } + reference back() { return elems[N-1]; } + const_reference back() const { return elems[N-1]; } + // size is constant + static inline size_type size() { return N; } + static bool empty() { return false; } + static size_type max_size() { return N; } + enum { static_size = N }; + /** This method has no effects in this class, but raises an exception if the expected size does not match */ + inline void resize(const size_t nElements) { if (nElements!=N) throw std::logic_error("Try to change the size of a CArray."); } + // swap (note: linear complexity in N, constant for given instantiation) + void swap (CArray& y) { std::swap_ranges(begin(),end(),y.begin()); } + // direct access to data (read-only) + const T* data() const { return elems; } + // use array as C array (direct read/write access to data) + T* data() { return elems; } + // assignment with type conversion + template CArray& operator= (const CArray& rhs) { + std::copy(rhs.begin(),rhs.end(), begin()); + return *this; + } + // assign one value to all elements + inline void assign (const T& value) { for (size_t i=0;i= size()) { throw std::out_of_range("CArray<>: index out of range"); } } + }; // end of CArray + + /** Used to declare fixed-size arrays when DIM>0, dynamically-allocated vectors when DIM=-1. + * Fixed size version for a generic DIM: + */ + template + struct array_or_vector_selector + { + typedef CArray container_t; + }; + /** Dynamic size version */ + template + struct array_or_vector_selector<-1,T> { + typedef std::vector container_t; + }; + /** @} */ + + /** @addtogroup kdtrees_grp KD-tree classes and adaptors + * @{ */ + + /** kd-tree index + * + * Contains the k-d trees and other information for indexing a set of points + * for nearest-neighbor matching. 
+ * + * The class "DatasetAdaptor" must provide the following interface (can be non-virtual, inlined methods): + * + * \code + * // Must return the number of data poins + * inline size_t kdtree_get_point_count() const { ... } + * + * // [Only if using the metric_L2_Simple type] Must return the Euclidean (L2) distance between the vector "p1[0:size-1]" and the data point with index "idx_p2" stored in the class: + * inline DistanceType kdtree_distance(const T *p1, const size_t idx_p2,size_t size) const { ... } + * + * // Must return the dim'th component of the idx'th point in the class: + * inline T kdtree_get_pt(const size_t idx, int dim) const { ... } + * + * // Optional bounding-box computation: return false to default to a standard bbox computation loop. + * // Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it again. + * // Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds) + * template + * bool kdtree_get_bbox(BBOX &bb) const + * { + * bb[0].low = ...; bb[0].high = ...; // 0th dimension limits + * bb[1].low = ...; bb[1].high = ...; // 1st dimension limits + * ... + * return true; + * } + * + * \endcode + * + * \tparam DatasetAdaptor The user-provided adaptor (see comments above). + * \tparam Distance The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. + * \tparam DIM Dimensionality of data points (e.g. 3 for 3D points) + * \tparam IndexType Will be typically size_t or int + */ + template + class KDTreeSingleIndexAdaptor + { + private: + /** Hidden copy constructor, to disallow copying indices (Not implemented) */ + KDTreeSingleIndexAdaptor(const KDTreeSingleIndexAdaptor&); + public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::DistanceType DistanceType; + protected: + + /** + * Array of indices to vectors in the dataset. 
+ */ + std::vector vind; + + size_t m_leaf_max_size; + + + /** + * The dataset used by this index + */ + const DatasetAdaptor &dataset; //!< The source of our data + + const KDTreeSingleIndexAdaptorParams index_params; + + size_t m_size; //!< Number of current poins in the dataset + size_t m_size_at_index_build; //!< Number of points in the dataset when the index was built + int dim; //!< Dimensionality of each data point + + + /*--------------------- Internal Data Structures --------------------------*/ + struct Node + { + /** Union used because a node can be either a LEAF node or a non-leaf node, so both data fields are never used simultaneously */ + union { + struct leaf + { + IndexType left, right; //!< Indices of points in leaf node + } lr; + struct nonleaf + { + int divfeat; //!< Dimension used for subdivision. + DistanceType divlow, divhigh; //!< The values used for subdivision. + } sub; + } node_type; + Node* child1, * child2; //!< Child nodes (both=NULL mean its a leaf node) + }; + typedef Node* NodePtr; + + + struct Interval + { + ElementType low, high; + }; + + /** Define "BoundingBox" as a fixed-size or variable-size container depending on "DIM" */ + typedef typename array_or_vector_selector::container_t BoundingBox; + + /** Define "distance_vector_t" as a fixed-size or variable-size container depending on "DIM" */ + typedef typename array_or_vector_selector::container_t distance_vector_t; + + /** The KD-tree used to find neighbours */ + NodePtr root_node; + BoundingBox root_bbox; + + /** + * Pooled memory allocator. + * + * Using a pooled memory allocator is more efficient + * than allocating memory directly when there is a large + * number small of memory allocations. + */ + PooledAllocator pool; + + public: + + Distance distance; + + /** + * KDTree constructor + * + * Refer to docs in README.md or online in https://github.com/jlblancoc/nanoflann + * + * The KD-Tree point dimension (the length of each point in the datase, e.g. 
3 for 3D points) + * is determined by means of: + * - The \a DIM template parameter if >0 (highest priority) + * - Otherwise, the \a dimensionality parameter of this constructor. + * + * @param inputData Dataset with the input features + * @param params Basically, the maximum leaf node size + */ + KDTreeSingleIndexAdaptor(const int dimensionality, const DatasetAdaptor& inputData, const KDTreeSingleIndexAdaptorParams& params = KDTreeSingleIndexAdaptorParams() ) : + dataset(inputData), index_params(params), root_node(NULL), distance(inputData) + { + m_size = dataset.kdtree_get_point_count(); + m_size_at_index_build = m_size; + dim = dimensionality; + if (DIM>0) dim=DIM; + m_leaf_max_size = params.leaf_max_size; + + // Create a permutable array of indices to the input vectors. + init_vind(); + } + + /** Standard destructor */ + ~KDTreeSingleIndexAdaptor() { } + + /** Frees the previously-built index. Automatically called within buildIndex(). */ + void freeIndex() + { + pool.free_all(); + root_node=NULL; + m_size_at_index_build = 0; + } + + /** + * Builds the index + */ + void buildIndex() + { + init_vind(); + freeIndex(); + m_size_at_index_build = m_size; + if(m_size == 0) return; + computeBoundingBox(root_bbox); + root_node = divideTree(0, m_size, root_bbox ); // construct the tree + } + + /** Returns number of points in dataset */ + size_t size() const { return m_size; } + + /** Returns the length of each point in the dataset */ + size_t veclen() const { + return static_cast(DIM>0 ? DIM : dim); + } + + /** + * Computes the inde memory usage + * Returns: memory used by the index + */ + size_t usedMemory() const + { + return pool.usedMemory+pool.wastedMemory+dataset.kdtree_get_point_count()*sizeof(IndexType); // pool memory and vind array memory + } + + /** \name Query methods + * @{ */ + + /** + * Find set of nearest neighbors to vec[0:dim-1]. Their indices are stored inside + * the result object. 
+ * + * Params: + * result = the result object in which the indices of the nearest-neighbors are stored + * vec = the vector for which to search the nearest neighbors + * + * \tparam RESULTSET Should be any ResultSet + * \return True if the requested neighbors could be found. + * \sa knnSearch, radiusSearch + */ + template + bool findNeighbors(RESULTSET& result, const ElementType* vec, const SearchParams& searchParams) const + { + assert(vec); + if (size() == 0) + return false; + if (!root_node) + throw std::runtime_error("[nanoflann] findNeighbors() called before building the index."); + float epsError = 1+searchParams.eps; + + distance_vector_t dists; // fixed or variable-sized container (depending on DIM) + dists.assign((DIM>0 ? DIM : dim) ,0); // Fill it with zeros. + DistanceType distsq = computeInitialDistances(vec, dists); + searchLevel(result, vec, root_node, distsq, dists, epsError); // "count_leaf" parameter removed since was neither used nor returned to the user. + return result.full(); + } + + /** + * Find the "num_closest" nearest neighbors to the \a query_point[0:dim-1]. Their indices are stored inside + * the result object. + * \sa radiusSearch, findNeighbors + * \note nChecks_IGNORED is ignored but kept for compatibility with the original FLANN interface. + */ + inline void knnSearch(const ElementType *query_point, const size_t num_closest, IndexType *out_indices, DistanceType *out_distances_sq, const int /* nChecks_IGNORED */ = 10) const + { + nanoflann::KNNResultSet resultSet(num_closest); + resultSet.init(out_indices, out_distances_sq); + this->findNeighbors(resultSet, query_point, nanoflann::SearchParams()); + } + + /** + * Find all the neighbors to \a query_point[0:dim-1] within a maximum radius. + * The output is given as a vector of pairs, of which the first element is a point index and the second the corresponding distance. + * Previous contents of \a IndicesDists are cleared. 
+ * + * If searchParams.sorted==true, the output list is sorted by ascending distances. + * + * For a better performance, it is advisable to do a .reserve() on the vector if you have any wild guess about the number of expected matches. + * + * \sa knnSearch, findNeighbors, radiusSearchCustomCallback + * \return The number of points within the given radius (i.e. indices.size() or dists.size() ) + */ + size_t radiusSearch(const ElementType *query_point,const DistanceType &radius, std::vector >& IndicesDists, const SearchParams& searchParams) const + { + RadiusResultSet resultSet(radius,IndicesDists); + const size_t nFound = radiusSearchCustomCallback(query_point,resultSet,searchParams); + if (searchParams.sorted) + std::sort(IndicesDists.begin(),IndicesDists.end(), IndexDist_Sorter() ); + return nFound; + } + + /** + * Just like radiusSearch() but with a custom callback class for each point found in the radius of the query. + * See the source of RadiusResultSet<> as a start point for your own classes. + * \sa radiusSearch + */ + template + size_t radiusSearchCustomCallback(const ElementType *query_point,SEARCH_CALLBACK &resultSet, const SearchParams& searchParams = SearchParams() ) const + { + this->findNeighbors(resultSet, query_point, searchParams); + return resultSet.size(); + } + + /** @} */ + + private: + /** Make sure the auxiliary list \a vind has the same size than the current dataset, and re-generate if size has changed. */ + void init_vind() + { + // Create a permutable array of indices to the input vectors. 
+ m_size = dataset.kdtree_get_point_count(); + if (vind.size()!=m_size) vind.resize(m_size); + for (size_t i = 0; i < m_size; i++) vind[i] = i; + } + + /// Helper accessor to the dataset points: + inline ElementType dataset_get(size_t idx, int component) const { + return dataset.kdtree_get_pt(idx,component); + } + + + void save_tree(FILE* stream, NodePtr tree) + { + save_value(stream, *tree); + if (tree->child1!=NULL) { + save_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + save_tree(stream, tree->child2); + } + } + + + void load_tree(FILE* stream, NodePtr& tree) + { + tree = pool.allocate(); + load_value(stream, *tree); + if (tree->child1!=NULL) { + load_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + load_tree(stream, tree->child2); + } + } + + + void computeBoundingBox(BoundingBox& bbox) + { + bbox.resize((DIM>0 ? DIM : dim)); + if (dataset.kdtree_get_bbox(bbox)) + { + // Done! It was implemented in derived class + } + else + { + const size_t N = dataset.kdtree_get_point_count(); + if (!N) throw std::runtime_error("[nanoflann] computeBoundingBox() called but no data points found."); + for (int i=0; i<(DIM>0 ? DIM : dim); ++i) { + bbox[i].low = + bbox[i].high = dataset_get(0,i); + } + for (size_t k=1; k0 ? DIM : dim); ++i) { + if (dataset_get(k,i)bbox[i].high) bbox[i].high = dataset_get(k,i); + } + } + } + } + + + /** + * Create a tree node that subdivides the list of vecs from vind[first] + * to vind[last]. The routine is called recursively on each sublist. + * + * @param left index of the first vector + * @param right index of the last vector + */ + NodePtr divideTree(const IndexType left, const IndexType right, BoundingBox& bbox) + { + NodePtr node = pool.allocate(); // allocate memory + + /* If too few exemplars remain, then make this a leaf node. */ + if ( (right-left) <= static_cast(m_leaf_max_size) ) { + node->child1 = node->child2 = NULL; /* Mark as leaf node. 
*/ + node->node_type.lr.left = left; + node->node_type.lr.right = right; + + // compute bounding-box of leaf points + for (int i=0; i<(DIM>0 ? DIM : dim); ++i) { + bbox[i].low = dataset_get(vind[left],i); + bbox[i].high = dataset_get(vind[left],i); + } + for (IndexType k=left+1; k0 ? DIM : dim); ++i) { + if (bbox[i].low>dataset_get(vind[k],i)) bbox[i].low=dataset_get(vind[k],i); + if (bbox[i].highnode_type.sub.divfeat = cutfeat; + + BoundingBox left_bbox(bbox); + left_bbox[cutfeat].high = cutval; + node->child1 = divideTree(left, left+idx, left_bbox); + + BoundingBox right_bbox(bbox); + right_bbox[cutfeat].low = cutval; + node->child2 = divideTree(left+idx, right, right_bbox); + + node->node_type.sub.divlow = left_bbox[cutfeat].high; + node->node_type.sub.divhigh = right_bbox[cutfeat].low; + + for (int i=0; i<(DIM>0 ? DIM : dim); ++i) { + bbox[i].low = std::min(left_bbox[i].low, right_bbox[i].low); + bbox[i].high = std::max(left_bbox[i].high, right_bbox[i].high); + } + } + + return node; + } + + + void computeMinMax(IndexType* ind, IndexType count, int element, ElementType& min_elem, ElementType& max_elem) + { + min_elem = dataset_get(ind[0],element); + max_elem = dataset_get(ind[0],element); + for (IndexType i=1; imax_elem) max_elem = val; + } + } + + void middleSplit_(IndexType* ind, IndexType count, IndexType& index, int& cutfeat, DistanceType& cutval, const BoundingBox& bbox) + { + const DistanceType EPS=static_cast(0.00001); + ElementType max_span = bbox[0].high-bbox[0].low; + for (int i=1; i<(DIM>0 ? DIM : dim); ++i) { + ElementType span = bbox[i].high-bbox[i].low; + if (span>max_span) { + max_span = span; + } + } + ElementType max_spread = -1; + cutfeat = 0; + for (int i=0; i<(DIM>0 ? 
DIM : dim); ++i) { + ElementType span = bbox[i].high-bbox[i].low; + if (span>(1-EPS)*max_span) { + ElementType min_elem, max_elem; + computeMinMax(ind, count, cutfeat, min_elem, max_elem); + ElementType spread = max_elem-min_elem;; + if (spread>max_spread) { + cutfeat = i; + max_spread = spread; + } + } + } + // split in the middle + DistanceType split_val = (bbox[cutfeat].low+bbox[cutfeat].high)/2; + ElementType min_elem, max_elem; + computeMinMax(ind, count, cutfeat, min_elem, max_elem); + + if (split_valmax_elem) cutval = max_elem; + else cutval = split_val; + + IndexType lim1, lim2; + planeSplit(ind, count, cutfeat, cutval, lim1, lim2); + + if (lim1>count/2) index = lim1; + else if (lim2cutval + */ + void planeSplit(IndexType* ind, const IndexType count, int cutfeat, DistanceType &cutval, IndexType& lim1, IndexType& lim2) + { + /* Move vector indices for left subtree to front of list. */ + IndexType left = 0; + IndexType right = count-1; + for (;; ) { + while (left<=right && dataset_get(ind[left],cutfeat)=cutval) --right; + if (left>right || !right) break; // "!right" was added to support unsigned Index types + std::swap(ind[left], ind[right]); + ++left; + --right; + } + /* If either list is empty, it means that all remaining features + * are identical. Split in the middle to maintain a balanced tree. + */ + lim1 = left; + right = count-1; + for (;; ) { + while (left<=right && dataset_get(ind[left],cutfeat)<=cutval) ++left; + while (right && left<=right && dataset_get(ind[right],cutfeat)>cutval) --right; + if (left>right || !right) break; // "!right" was added to support unsigned Index types + std::swap(ind[left], ind[right]); + ++left; + --right; + } + lim2 = left; + } + + DistanceType computeInitialDistances(const ElementType* vec, distance_vector_t& dists) const + { + assert(vec); + DistanceType distsq = DistanceType(); + + for (int i = 0; i < (DIM>0 ? 
DIM : dim); ++i) { + if (vec[i] < root_bbox[i].low) { + dists[i] = distance.accum_dist(vec[i], root_bbox[i].low, i); + distsq += dists[i]; + } + if (vec[i] > root_bbox[i].high) { + dists[i] = distance.accum_dist(vec[i], root_bbox[i].high, i); + distsq += dists[i]; + } + } + + return distsq; + } + + /** + * Performs an exact search in the tree starting from a node. + * \tparam RESULTSET Should be any ResultSet + */ + template + void searchLevel(RESULTSET& result_set, const ElementType* vec, const NodePtr node, DistanceType &mindistsq, + distance_vector_t& dists, const float epsError) const + { + /* If this is a leaf node, then do check and return. */ + if ((node->child1 == NULL)&&(node->child2 == NULL)) { + //count_leaf += (node->lr.right-node->lr.left); // Removed since was neither used nor returned to the user. + DistanceType worst_dist = result_set.worstDist(); + for (IndexType i=node->node_type.lr.left; inode_type.lr.right; ++i) { + const IndexType index = vind[i];// reorder... : i; + DistanceType dist = distance(vec, index, (DIM>0 ? DIM : dim)); + if (distnode_type.sub.divfeat; + ElementType val = vec[idx]; + DistanceType diff1 = val - node->node_type.sub.divlow; + DistanceType diff2 = val - node->node_type.sub.divhigh; + + NodePtr bestChild; + NodePtr otherChild; + DistanceType cut_dist; + if ((diff1+diff2)<0) { + bestChild = node->child1; + otherChild = node->child2; + cut_dist = distance.accum_dist(val, node->node_type.sub.divhigh, idx); + } + else { + bestChild = node->child2; + otherChild = node->child1; + cut_dist = distance.accum_dist( val, node->node_type.sub.divlow, idx); + } + + /* Call recursively to search next level down. 
*/ + searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError); + + DistanceType dst = dists[idx]; + mindistsq = mindistsq + cut_dist - dst; + dists[idx] = cut_dist; + if (mindistsq*epsError<=result_set.worstDist()) { + searchLevel(result_set, vec, otherChild, mindistsq, dists, epsError); + } + dists[idx] = dst; + } + + public: + /** Stores the index in a binary file. + * IMPORTANT NOTE: The set of data points is NOT stored in the file, so when loading the index object it must be constructed associated to the same source of data points used while building it. + * See the example: examples/saveload_example.cpp + * \sa loadIndex */ + void saveIndex(FILE* stream) + { + save_value(stream, m_size); + save_value(stream, dim); + save_value(stream, root_bbox); + save_value(stream, m_leaf_max_size); + save_value(stream, vind); + save_tree(stream, root_node); + } + + /** Loads a previous index from a binary file. + * IMPORTANT NOTE: The set of data points is NOT stored in the file, so the index object must be constructed associated to the same source of data points used while building the index. + * See the example: examples/saveload_example.cpp + * \sa loadIndex */ + void loadIndex(FILE* stream) + { + load_value(stream, m_size); + load_value(stream, dim); + load_value(stream, root_bbox); + load_value(stream, m_leaf_max_size); + load_value(stream, vind); + load_tree(stream, root_node); + } + + }; // class KDTree + + + /** An L2-metric KD-tree adaptor for working with data directly stored in an Eigen Matrix, without duplicating the data storage. + * Each row in the matrix represents a point in the state space. + * + * Example of usage: + * \code + * Eigen::Matrix mat; + * // Fill out "mat"... + * + * typedef KDTreeEigenMatrixAdaptor< Eigen::Matrix > my_kd_tree_t; + * const int max_leaf = 10; + * my_kd_tree_t mat_index(dimdim, mat, max_leaf ); + * mat_index.index->buildIndex(); + * mat_index.index->... 
+ * \endcode + * + * \tparam DIM If set to >0, it specifies a compile-time fixed dimensionality for the points in the data set, allowing more compiler optimizations. + * \tparam Distance The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. + */ + template + struct KDTreeEigenMatrixAdaptor + { + typedef KDTreeEigenMatrixAdaptor self_t; + typedef typename MatrixType::Scalar num_t; + typedef typename MatrixType::Index IndexType; + typedef typename Distance::template traits::distance_t metric_t; + typedef KDTreeSingleIndexAdaptor< metric_t,self_t,DIM,IndexType> index_t; + + index_t* index; //! The kd-tree index for the user to call its methods as usual with any other FLANN index. + + /// Constructor: takes a const ref to the matrix object with the data points + KDTreeEigenMatrixAdaptor(const int dimensionality, const MatrixType &mat, const int leaf_max_size = 10) : m_data_matrix(mat) + { + const IndexType dims = mat.cols(); + if (dims!=dimensionality) throw std::runtime_error("Error: 'dimensionality' must match column count in data matrix"); + if (DIM>0 && static_cast(dims)!=DIM) + throw std::runtime_error("Data set dimensionality does not match the 'DIM' template argument"); + index = new index_t( dims, *this /* adaptor */, nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size ) ); + index->buildIndex(); + } + private: + /** Hidden copy constructor, to disallow copying this class (Not implemented) */ + KDTreeEigenMatrixAdaptor(const self_t&); + public: + + ~KDTreeEigenMatrixAdaptor() { + delete index; + } + + const MatrixType &m_data_matrix; + + /** Query for the \a num_closest closest points to a given point (entered as query_point[0:dim-1]). + * Note that this is a short-cut method for index->findNeighbors(). + * The user can also call index->... methods as desired. + * \note nChecks_IGNORED is ignored but kept for compatibility with the original FLANN interface. 
+ */ + inline void query(const num_t *query_point, const size_t num_closest, IndexType *out_indices, num_t *out_distances_sq, const int /* nChecks_IGNORED */ = 10) const + { + nanoflann::KNNResultSet resultSet(num_closest); + resultSet.init(out_indices, out_distances_sq); + index->findNeighbors(resultSet, query_point, nanoflann::SearchParams()); + } + + /** @name Interface expected by KDTreeSingleIndexAdaptor + * @{ */ + + const self_t & derived() const { + return *this; + } + self_t & derived() { + return *this; + } + + // Must return the number of data points + inline size_t kdtree_get_point_count() const { + return m_data_matrix.rows(); + } + + // Returns the L2 distance between the vector "p1[0:size-1]" and the data point with index "idx_p2" stored in the class: + inline num_t kdtree_distance(const num_t *p1, const IndexType idx_p2,IndexType size) const + { + num_t s=0; + for (IndexType i=0; i + bool kdtree_get_bbox(BBOX& /*bb*/) const { + return false; + } + + /** @} */ + + }; // end of KDTreeEigenMatrixAdaptor + /** @} */ + +/** @} */ // end of grouping +} // end of NS + + +#endif /* NANOFLANN_HPP_ */ diff --git a/Thirdparty/DBoW2/DBoW3/tests/test_bigvoc.cpp b/Thirdparty/DBoW2/DBoW3/tests/test_bigvoc.cpp new file mode 100644 index 0000000000..7b75ecf62a --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/tests/test_bigvoc.cpp @@ -0,0 +1,89 @@ +#include +#include + +// DBoW3 +#include "DBoW3.h" +#include "timers.h" +// OpenCV +#include +#include +#include +#ifdef USE_CONTRIB +#include +#include +#endif +using namespace DBoW3; +using namespace std; + +//command line parser +class CmdLineParser{int argc; char **argv; public: CmdLineParser(int _argc,char **_argv):argc(_argc),argv(_argv){} bool operator[] ( string param ) {int idx=-1; for ( int i=0; i loadFeatures( std::vector path_to_images,string descriptor="") throw (std::exception){ + //select detector + cv::Ptr fdetector; + if (descriptor=="orb") fdetector=cv::ORB::create(2000); + + else if (descriptor=="brisk") 
fdetector=cv::BRISK::create(); +#ifdef OPENCV_VERSION_3 + else if (descriptor=="akaze") fdetector=cv::AKAZE::create(); +#endif +#ifdef USE_CONTRIB + else if(descriptor=="surf" ) fdetector=cv::xfeatures2d::SURF::create(400, 4, 2, false); +#endif + + else throw std::runtime_error("Invalid descriptor"); + assert(!descriptor.empty()); + vector features; + + + cout << "Extracting features..." << endl; + for(size_t i = 0; i < path_to_images.size(); ++i) + { + vector keypoints; + cv::Mat descriptors; + cout<<"reading image: "<detectAndCompute(image, cv::Mat(), keypoints, descriptors); + features.push_back(descriptors); + cout<<"done detecting features"<first<<" "<second<first<<" "<second< +#include "DBoW3.h" +#include "timers.h" +#include + +#include +#include +#include +#ifdef USE_CONTRIB +#include +#include +#endif +#include +using namespace DBoW3; +using namespace std; +using namespace std; + + +std::vector< cv::Mat > loadFeatures( std::vector path_to_images,string descriptor="") throw (std::exception){ + //select detector + cv::Ptr fdetector; + if (descriptor=="orb") fdetector=cv::ORB::create(2000); + + else if (descriptor=="brisk") fdetector=cv::BRISK::create(); +#ifdef OPENCV_VERSION_3 + else if (descriptor=="akaze") fdetector=cv::AKAZE::create(); +#endif +#ifdef USE_CONTRIB + else if(descriptor=="surf" ) fdetector=cv::xfeatures2d::SURF::create(400, 4, 2, false); +#endif + + else throw std::runtime_error("Invalid descriptor"); + assert(!descriptor.empty()); + vector features; + + + cout << "Extracting features..." 
<< endl; + for(size_t i = 0; i < path_to_images.size(); ++i) + { + vector keypoints; + cv::Mat descriptors; + cout<<"reading image: "<detectAndCompute(image, cv::Mat(), keypoints, descriptors); + features.push_back(descriptors); + cout<<"done detecting features"< > parent_children; + void create(Vocabulary &voc){ + if(voc.getDescritorType()==CV_8UC1) _aligment=8; + else _aligment=16; + + + + //consider possible aligment of each descriptor adding offsets at the end + _desc_size_bytes=voc.getDescritorSize(); + _desc_size_bytes_al=_desc_size_bytes/_aligment; + if(_desc_size_bytes%_aligment!=0) _desc_size_bytes_al++; + _desc_size_bytes=_desc_size_bytes_al*_aligment; + + + int foffnbytes_alg=sizeof(uint32_t)/_aligment; + if(sizeof(uint32_t)%_aligment!=0) foffnbytes_alg++; + _feature_off_start=foffnbytes_alg*_aligment; + _child_off_start=_feature_off_start+voc.m_k*_desc_size_bytes ;//where do children information start from the start of the block + + + //block: nvalid|f0 f1 .. fn|ni0 ni1 ..nin + _block_size_bytes=_feature_off_start+ voc.m_k * (_desc_size_bytes + sizeof(node_info)); + _block_size_bytes_al=_block_size_bytes/_aligment; + if (_block_size_bytes%_aligment!=0) _block_size_bytes_al++; + _block_size_bytes=_block_size_bytes_al*_aligment; + + + _desc_type=CV_8UC1; + _desc_size=32; + + + _m_k=voc.m_k; + //start to work filling blocks + cout<<"_aligment="<<_aligment< nid_vpos; + for(size_t i=0;ifirst< block_offset; + uint32_t currblock=0;//expressed in blocks + uint32_t descsize=voc.getDescritorSize(); + for(const auto &Block:parent_children) + { + block_offset[Block.first]=currblock; + assert( !(currblock & 0x80000000));//32 bits 100000000...0.check msb is not set + uint64_t block_offset_bytes=currblock*_block_size_bytes; + int idx=0; + *reinterpret_cast(_data+block_offset_bytes)=Block.second.size(); + for(const auto &c:Block.second){ + const auto &node=voc.m_nodes[nid_vpos[c]]; + 
memcpy(_data+block_offset_bytes+_feature_off_start+idx*_desc_size_bytes,node.descriptor.ptr(0),descsize); + assert( block_offset_bytes+idx*_desc_size_bytes +descsize < _total_size ); + //now, the offset to the children block//unkonwn yet + idx++; + } + currblock++; + } + currblock=0; + //print sons of node 6 + + //now, we can write the offsets + for(const auto &Block:parent_children) + { + + int idx=0; + uint64_t block_offset_bytes=currblock*_block_size_bytes; + for(const auto &c:Block.second){ + const auto &node=voc.m_nodes[nid_vpos[c]]; + node_info *ptr_child=(node_info*)(_data+block_offset_bytes+_child_off_start+sizeof(node_info)*idx); + + if (!node.isLeaf()) { + assert(block_offset.count(node.id)); + ptr_child->id_or_childblock=block_offset[node.id];//childblock + } + else{ + //set the node id (ensure msb is set) + assert(!(node.id & 0x80000000));//check + ptr_child->id_or_childblock=node.word_id; + ptr_child->id_or_childblock|=0x80000000;//set the msb to one to distinguish from offset + //now,set the weight too + ptr_child->weight=node.weight; + } + //now, the offset to the children block//unkonwn yet + idx++; + } + currblock++; + } + cout<<"nblocks used="<(_data)<first<<" "<second<first<<" "<second<first<<" "<second<first<<" "<second< +#include "DBoW3.h" +#include "timers.h" +#include +#include +#include +#include +#include +#ifdef USE_CONTRIB +#include +#include +#endif +#include +#include "DescManip.h" +using namespace DBoW3; +using namespace std; +using namespace std; + + +std::vector< cv::Mat > loadFeatures( std::vector path_to_images,string descriptor="") throw (std::exception){ + //select detector + cv::Ptr fdetector; + if (descriptor=="orb") fdetector=cv::ORB::create(2000); + + else if (descriptor=="brisk") fdetector=cv::BRISK::create(); +#ifdef OPENCV_VERSION_3 + else if (descriptor=="akaze") fdetector=cv::AKAZE::create(); +#endif +#ifdef USE_CONTRIB + else if(descriptor=="surf" ) fdetector=cv::xfeatures2d::SURF::create(400, 4, 2, false); +#endif + + 
else throw std::runtime_error("Invalid descriptor"); + assert(!descriptor.empty()); + vector features; + + + cout << "Extracting features..." << endl; + for(size_t i = 0; i < path_to_images.size(); ++i) + { + vector keypoints; + cv::Mat descriptors; + cout<<"reading image: "<detectAndCompute(image, cv::Mat(), keypoints, descriptors); + features.push_back(descriptors); + cout<<"done detecting features"<descriptor); + cv::Mat feat(voc.m_words.size(),desc_size,CV_8UC1); + + for(int i=0;i< voc.m_words.size();i++){ + memcpy(feat.ptr(i),voc.m_words[i]->descriptor.ptr(0),desc_size); + assert(i==voc.m_words[i]->word_id); + } + return feat; + } +}; +} + + +int main(int argc,char **argv){ + DBoW3::Vocabulary voc; + voc.load(argv[1]); + cout<<"loaded"<first<<" "<second<first<<" "<second<(indices.rows-1,0)<(row,col) << "," << dists.at(row,col) << ")" << "\t"; + // } + // cout << endl; + // } + + + + + +} diff --git a/Thirdparty/DBoW2/DBoW3/tests/test_iobinary.cpp b/Thirdparty/DBoW2/DBoW3/tests/test_iobinary.cpp new file mode 100644 index 0000000000..3feab2f1dd --- /dev/null +++ b/Thirdparty/DBoW2/DBoW3/tests/test_iobinary.cpp @@ -0,0 +1,42 @@ +#include +#include + +// DBoW3 +#include "DBoW3.h" + +// OpenCV +#include +using namespace DBoW3; +using namespace std; + +//command line parser +class CmdLineParser{int argc; char **argv; public: CmdLineParser(int _argc,char **_argv):argc(_argc),argv(_argv){} bool operator[] ( string param ) {int idx=-1; for ( int i=0; i +#include + +// DBoW3 +#include "DBoW3.h" + +// OpenCV +#include +#include +#include +#ifdef USE_CONTRIB +#include +#include +#endif +#include "DescManip.h" + +using namespace DBoW3; +using namespace std; + + +//command line parser +class CmdLineParser{int argc; char **argv; public: CmdLineParser(int _argc,char **_argv):argc(_argc),argv(_argv){} bool operator[] ( string param ) {int idx=-1; for ( int i=0; i readImagePaths(int argc,char **argv,int start){ + vector paths; + for(int i=start;i loadFeatures( std::vector 
path_to_images,string descriptor="") throw (std::exception){ + //select detector + cv::Ptr fdetector; + if (descriptor=="orb") fdetector=cv::ORB::create(); + else if (descriptor=="brisk") fdetector=cv::BRISK::create(); +#ifdef OPENCV_VERSION_3 + else if (descriptor=="akaze") fdetector=cv::AKAZE::create(); +#endif +#ifdef USE_CONTRIB + else if(descriptor=="surf" ) fdetector=cv::xfeatures2d::SURF::create(400, 4, 2, EXTENDED_SURF); +#endif + + else throw std::runtime_error("Invalid descriptor"); + assert(!descriptor.empty()); + vector features; + + + cout << "Extracting features..." << endl; + for(size_t i = 0; i < path_to_images.size(); ++i) + { + vector keypoints; + cv::Mat descriptors; + cout<<"reading image: "<detectAndCompute(image, cv::Mat(), keypoints, descriptors); + features.push_back(descriptors); + cout<<"done detecting features"< &features){ + + //test it is not created + std::ifstream ifile(filename); + if (ifile.is_open()){cerr<<"ERROR::: Output File "<(0),f.total()*f.elemSize()); + } +} + +// ---------------------------------------------------------------------------- + +int main(int argc,char **argv) +{ + + try{ + CmdLineParser cml(argc,argv); + if (cml["-h"] || argc==1){ + cerr<<"Usage: descriptor_name output image0 image1 ... 
\n\t descriptors:brisk,surf,orb(default),akaze(only if using opencv 3)"< features= loadFeatures(images,descriptor); + + //save features to file + saveToFile(argv[2],features); + + }catch(std::exception &ex){ + cerr< +#include + +// DBoW3 +#include "DBoW3.h" + +// OpenCV +#include +using namespace DBoW3; +using namespace std; + +//command line parser +class CmdLineParser{int argc; char **argv; public: CmdLineParser(int _argc,char **_argv):argc(_argc),argv(_argv){} bool operator[] ( string param ) {int idx=-1; for ( int i=0; i readFeaturesFromFile(string filename){ +vector features; + //test it is not created + std::ifstream ifile(filename); + if (!ifile.is_open()){cerr<<"could not open input file"<(0),features[i].total()*features[i].elemSize()); + } + return features; +} + +// ---------------------------------------------------------------------------- + +int main(int argc,char **argv) +{ + + try{ + CmdLineParser cml(argc,argv); + if (cml["-h"] || argc!=3){ + cerr<<"Usage: features output_voc.yml[.gz]"< +#include + +// DBoW3 +#include "DBoW3.h" + +// OpenCV +#include +#include +#include +#ifdef USE_CONTRIB +#include +#include +#endif +#include "DescManip.h" + +using namespace DBoW3; +using namespace std; + + +//command line parser +class CmdLineParser{int argc; char **argv; public: CmdLineParser(int _argc,char **_argv):argc(_argc),argv(_argv){} bool operator[] ( string param ) {int idx=-1; for ( int i=0; i readImagePaths(int argc,char **argv,int start){ + vector paths; + for(int i=start;i loadFeatures( std::vector path_to_images,string descriptor="") throw (std::exception){ + //select detector + cv::Ptr fdetector; + if (descriptor=="orb") fdetector=cv::ORB::create(); + else if (descriptor=="brisk") fdetector=cv::BRISK::create(); +#ifdef OPENCV_VERSION_3 + else if (descriptor=="akaze") fdetector=cv::AKAZE::create(); +#endif +#ifdef USE_CONTRIB + else if(descriptor=="surf" ) fdetector=cv::xfeatures2d::SURF::create(400, 4, 2, EXTENDED_SURF); +#endif + + else throw 
std::runtime_error("Invalid descriptor"); + assert(!descriptor.empty()); + vector features; + + + cout << "Extracting features..." << endl; + for(size_t i = 0; i < path_to_images.size(); ++i) + { + vector keypoints; + cv::Mat descriptors; + cout<<"reading image: "<detectAndCompute(image, cv::Mat(), keypoints, descriptors); + features.push_back(descriptors); + cout<<"done detecting features"< &features) +{ + // branching factor and depth levels + const int k = 9; + const int L = 3; + const WeightingType weight = TF_IDF; + const ScoringType score = L1_NORM; + + DBoW3::Vocabulary voc(k, L, weight, score); + + cout << "Creating a small " << k << "^" << L << " vocabulary..." << endl; + voc.create(features); + cout << "... done!" << endl; + + cout << "Vocabulary information: " << endl + << voc << endl << endl; + + // lets do something with this vocabulary + cout << "Matching images against themselves (0 low, 1 high): " << endl; + BowVector v1, v2; + for(size_t i = 0; i < features.size(); i++) + { + voc.transform(features[i], v1); + for(size_t j = 0; j < features.size(); j++) + { + voc.transform(features[j], v2); + + double score = voc.score(v1, v2); + cout << "Image " << i << " vs Image " << j << ": " << score << endl; + } + } + + // save the vocabulary to disk + cout << endl << "Saving vocabulary..." << endl; + voc.save("small_voc.yml.gz"); + cout << "Done" << endl; +} + +////// ---------------------------------------------------------------------------- + +void testDatabase(const vector &features) +{ + cout << "Creating a small database..." << endl; + + // load the vocabulary from disk + Vocabulary voc("small_voc.yml.gz"); + + Database db(voc, false, 0); // false = do not use direct index + // (so ignore the last param) + // The direct index is useful if we want to retrieve the features that + // belong to some vocabulary node. 
+ // db creates a copy of the vocabulary, we may get rid of "voc" now + + // add images to the database + for(size_t i = 0; i < features.size(); i++) + db.add(features[i]); + + cout << "... done!" << endl; + + cout << "Database information: " << endl << db << endl; + + // and query the database + cout << "Querying the database: " << endl; + + QueryResults ret; + for(size_t i = 0; i < features.size(); i++) + { + db.query(features[i], ret, 4); + + // ret[0] is always the same image in this case, because we added it to the + // database. ret[1] is the second best match. + + cout << "Searching for Image " << i << ". " << ret << endl; + } + + cout << endl; + + // we can save the database. The created file includes the vocabulary + // and the entries added + cout << "Saving database..." << endl; + db.save("small_db.yml.gz"); + cout << "... done!" << endl; + + // once saved, we can load it again + cout << "Retrieving database once again..." << endl; + Database db2("small_db.yml.gz"); + cout << "... done! This is: " << endl << db2 << endl; +} + + +// ---------------------------------------------------------------------------- + +int main(int argc,char **argv) +{ + + try{ + CmdLineParser cml(argc,argv); + if (cml["-h"] || argc<=2){ + cerr<<"Usage: descriptor_name image0 image1 ... \n\t descriptors:brisk,surf,orb ,akaze(only if using opencv 3)"< features= loadFeatures(images,descriptor); + testVocCreation(features); + + + testDatabase(features); + + }catch(std::exception &ex){ + cerr< -#include "Thirdparty/DBoW2/DBoW2/BowVector.h" -#include "Thirdparty/DBoW2/DBoW2/FeatureVector.h" +#include "Thirdparty/DBoW2/DBoW3/src/BowVector.h" +#include "Thirdparty/DBoW2/DBoW3/src/FeatureVector.h" #include "Thirdparty/Sophus/sophus/geometry.hpp" @@ -235,8 +235,8 @@ class Frame std::vector mvDepth; // Bag of Words Vector structures. 
- DBoW2::BowVector mBowVec; - DBoW2::FeatureVector mFeatVec; + DBoW3::BowVector mBowVec; + DBoW3::FeatureVector mFeatVec; // ORB descriptor, each row associated to a keypoint. cv::Mat mDescriptors, mDescriptorsRight; diff --git a/include/KeyFrame.h b/include/KeyFrame.h index 1b8d1c4392..21fa7fe83f 100644 --- a/include/KeyFrame.h +++ b/include/KeyFrame.h @@ -21,8 +21,8 @@ #define KEYFRAME_H #include "MapPoint.h" -#include "Thirdparty/DBoW2/DBoW2/BowVector.h" -#include "Thirdparty/DBoW2/DBoW2/FeatureVector.h" +#include "Thirdparty/DBoW2/DBoW3/src/BowVector.h" +#include "Thirdparty/DBoW2/DBoW3/src/FeatureVector.h" #include "ORBVocabulary.h" #include "ORBextractor.h" #include "Frame.h" @@ -385,8 +385,8 @@ class KeyFrame const cv::Mat mDescriptors; //BoW - DBoW2::BowVector mBowVec; - DBoW2::FeatureVector mFeatVec; + DBoW3::BowVector mBowVec; + DBoW3::FeatureVector mFeatVec; // Pose relative to parent (this is computed when bad flag is activated) Sophus::SE3f mTcp; diff --git a/include/ORBVocabulary.h b/include/ORBVocabulary.h index 71628b8c0f..6ccf691a43 100644 --- a/include/ORBVocabulary.h +++ b/include/ORBVocabulary.h @@ -20,14 +20,13 @@ #ifndef ORBVOCABULARY_H #define ORBVOCABULARY_H -#include"Thirdparty/DBoW2/DBoW2/FORB.h" -#include"Thirdparty/DBoW2/DBoW2/TemplatedVocabulary.h" +#include"Thirdparty/DBoW2/DBoW3/src/DescManip.h" +#include"Thirdparty/DBoW2/DBoW3/src/Vocabulary.h" namespace ORB_SLAM3 { -typedef DBoW2::TemplatedVocabulary - ORBVocabulary; +typedef DBoW3::Vocabulary ORBVocabulary; } //namespace ORB_SLAM diff --git a/src/KeyFrameDatabase.cc b/src/KeyFrameDatabase.cc index 13b4da6115..3d17692656 100644 --- a/src/KeyFrameDatabase.cc +++ b/src/KeyFrameDatabase.cc @@ -20,7 +20,7 @@ #include "KeyFrameDatabase.h" #include "KeyFrame.h" -#include "Thirdparty/DBoW2/DBoW2/BowVector.h" +#include "Thirdparty/DBoW2/DBoW3/src/BowVector.h" #include @@ -40,7 +40,7 @@ void KeyFrameDatabase::add(KeyFrame *pKF) { unique_lock lock(mMutex); - 
for(DBoW2::BowVector::const_iterator vit= pKF->mBowVec.begin(), vend=pKF->mBowVec.end(); vit!=vend; vit++) + for(DBoW3::BowVector::const_iterator vit= pKF->mBowVec.begin(), vend=pKF->mBowVec.end(); vit!=vend; vit++) mvInvertedFile[vit->first].push_back(pKF); } @@ -49,7 +49,7 @@ void KeyFrameDatabase::erase(KeyFrame* pKF) unique_lock lock(mMutex); // Erase elements in the Inverse File for the entry - for(DBoW2::BowVector::const_iterator vit=pKF->mBowVec.begin(), vend=pKF->mBowVec.end(); vit!=vend; vit++) + for(DBoW3::BowVector::const_iterator vit=pKF->mBowVec.begin(), vend=pKF->mBowVec.end(); vit!=vend; vit++) { // List of keyframes that share the word list &lKFs = mvInvertedFile[vit->first]; @@ -107,7 +107,7 @@ vector KeyFrameDatabase::DetectLoopCandidates(KeyFrame* pKF, float mi { unique_lock lock(mMutex); - for(DBoW2::BowVector::const_iterator vit=pKF->mBowVec.begin(), vend=pKF->mBowVec.end(); vit != vend; vit++) + for(DBoW3::BowVector::const_iterator vit=pKF->mBowVec.begin(), vend=pKF->mBowVec.end(); vit != vend; vit++) { list &lKFs = mvInvertedFile[vit->first]; @@ -235,7 +235,7 @@ void KeyFrameDatabase::DetectCandidates(KeyFrame* pKF, float minScore,vector lock(mMutex); - for(DBoW2::BowVector::const_iterator vit=pKF->mBowVec.begin(), vend=pKF->mBowVec.end(); vit != vend; vit++) + for(DBoW3::BowVector::const_iterator vit=pKF->mBowVec.begin(), vend=pKF->mBowVec.end(); vit != vend; vit++) { list &lKFs = mvInvertedFile[vit->first]; @@ -451,7 +451,7 @@ void KeyFrameDatabase::DetectCandidates(KeyFrame* pKF, float minScore,vectormBowVec.begin(), vend=pKF->mBowVec.end(); vit != vend; vit++) + for(DBoW3::BowVector::const_iterator vit=pKF->mBowVec.begin(), vend=pKF->mBowVec.end(); vit != vend; vit++) { list &lKFs = mvInvertedFile[vit->first]; @@ -476,7 +476,7 @@ void KeyFrameDatabase::DetectBestCandidates(KeyFrame *pKF, vector &vp spConnectedKF = pKF->GetConnectedKeyFrames(); - for(DBoW2::BowVector::const_iterator vit=pKF->mBowVec.begin(), vend=pKF->mBowVec.end(); vit != 
vend; vit++) + for(DBoW3::BowVector::const_iterator vit=pKF->mBowVec.begin(), vend=pKF->mBowVec.end(); vit != vend; vit++) { list &lKFs = mvInvertedFile[vit->first]; @@ -612,7 +612,7 @@ void KeyFrameDatabase::DetectNBestCandidates(KeyFrame *pKF, vector &v spConnectedKF = pKF->GetConnectedKeyFrames(); - for(DBoW2::BowVector::const_iterator vit=pKF->mBowVec.begin(), vend=pKF->mBowVec.end(); vit != vend; vit++) + for(DBoW3::BowVector::const_iterator vit=pKF->mBowVec.begin(), vend=pKF->mBowVec.end(); vit != vend; vit++) { list &lKFs = mvInvertedFile[vit->first]; @@ -738,7 +738,7 @@ vector KeyFrameDatabase::DetectRelocalizationCandidates(Frame *F, Map { unique_lock lock(mMutex); - for(DBoW2::BowVector::const_iterator vit=F->mBowVec.begin(), vend=F->mBowVec.end(); vit != vend; vit++) + for(DBoW3::BowVector::const_iterator vit=F->mBowVec.begin(), vend=F->mBowVec.end(); vit != vend; vit++) { list &lKFs = mvInvertedFile[vit->first]; diff --git a/src/MLPnPsolver.cpp b/src/MLPnPsolver.cpp index 2f8702b41a..8bad84b6d5 100644 --- a/src/MLPnPsolver.cpp +++ b/src/MLPnPsolver.cpp @@ -47,6 +47,7 @@ ******************************************************************************/ #include "MLPnPsolver.h" +#include "Thirdparty/DBoW2/DUtils/Random.h" #include diff --git a/src/ORBmatcher.cc b/src/ORBmatcher.cc index 9129683e4e..dae1b8634c 100644 --- a/src/ORBmatcher.cc +++ b/src/ORBmatcher.cc @@ -23,7 +23,7 @@ #include -#include "Thirdparty/DBoW2/DBoW2/FeatureVector.h" +#include "Thirdparty/DBoW2/DBoW3/src/FeatureVector.h" #include @@ -226,7 +226,7 @@ namespace ORB_SLAM3 vpMapPointMatches = vector(F.N,static_cast(NULL)); - const DBoW2::FeatureVector &vFeatVecKF = pKF->mFeatVec; + const DBoW3::FeatureVector &vFeatVecKF = pKF->mFeatVec; int nmatches=0; @@ -236,10 +236,10 @@ namespace ORB_SLAM3 const float factor = 1.0f/HISTO_LENGTH; // We perform the matching over ORB that belong to the same vocabulary node (at a certain level) - DBoW2::FeatureVector::const_iterator KFit = 
vFeatVecKF.begin(); - DBoW2::FeatureVector::const_iterator Fit = F.mFeatVec.begin(); - DBoW2::FeatureVector::const_iterator KFend = vFeatVecKF.end(); - DBoW2::FeatureVector::const_iterator Fend = F.mFeatVec.end(); + DBoW3::FeatureVector::const_iterator KFit = vFeatVecKF.begin(); + DBoW3::FeatureVector::const_iterator Fit = F.mFeatVec.begin(); + DBoW3::FeatureVector::const_iterator KFend = vFeatVecKF.end(); + DBoW3::FeatureVector::const_iterator Fend = F.mFeatVec.end(); while(KFit != KFend && Fit != Fend) { @@ -765,12 +765,12 @@ namespace ORB_SLAM3 int ORBmatcher::SearchByBoW(KeyFrame *pKF1, KeyFrame *pKF2, vector &vpMatches12) { const vector &vKeysUn1 = pKF1->mvKeysUn; - const DBoW2::FeatureVector &vFeatVec1 = pKF1->mFeatVec; + const DBoW3::FeatureVector &vFeatVec1 = pKF1->mFeatVec; const vector vpMapPoints1 = pKF1->GetMapPointMatches(); const cv::Mat &Descriptors1 = pKF1->mDescriptors; const vector &vKeysUn2 = pKF2->mvKeysUn; - const DBoW2::FeatureVector &vFeatVec2 = pKF2->mFeatVec; + const DBoW3::FeatureVector &vFeatVec2 = pKF2->mFeatVec; const vector vpMapPoints2 = pKF2->GetMapPointMatches(); const cv::Mat &Descriptors2 = pKF2->mDescriptors; @@ -785,10 +785,10 @@ namespace ORB_SLAM3 int nmatches = 0; - DBoW2::FeatureVector::const_iterator f1it = vFeatVec1.begin(); - DBoW2::FeatureVector::const_iterator f2it = vFeatVec2.begin(); - DBoW2::FeatureVector::const_iterator f1end = vFeatVec1.end(); - DBoW2::FeatureVector::const_iterator f2end = vFeatVec2.end(); + DBoW3::FeatureVector::const_iterator f1it = vFeatVec1.begin(); + DBoW3::FeatureVector::const_iterator f2it = vFeatVec2.begin(); + DBoW3::FeatureVector::const_iterator f1end = vFeatVec1.end(); + DBoW3::FeatureVector::const_iterator f2end = vFeatVec2.end(); while(f1it != f1end && f2it != f2end) { @@ -907,8 +907,8 @@ namespace ORB_SLAM3 int ORBmatcher::SearchForTriangulation(KeyFrame *pKF1, KeyFrame *pKF2, vector > &vMatchedPairs, const bool bOnlyStereo, const bool bCoarse) { - const DBoW2::FeatureVector 
&vFeatVec1 = pKF1->mFeatVec; - const DBoW2::FeatureVector &vFeatVec2 = pKF2->mFeatVec; + const DBoW3::FeatureVector &vFeatVec1 = pKF1->mFeatVec; + const DBoW3::FeatureVector &vFeatVec2 = pKF2->mFeatVec; //Compute epipole in second image Sophus::SE3f T1w = pKF1->GetPose(); @@ -955,10 +955,10 @@ namespace ORB_SLAM3 const float factor = 1.0f/HISTO_LENGTH; - DBoW2::FeatureVector::const_iterator f1it = vFeatVec1.begin(); - DBoW2::FeatureVector::const_iterator f2it = vFeatVec2.begin(); - DBoW2::FeatureVector::const_iterator f1end = vFeatVec1.end(); - DBoW2::FeatureVector::const_iterator f2end = vFeatVec2.end(); + DBoW3::FeatureVector::const_iterator f1it = vFeatVec1.begin(); + DBoW3::FeatureVector::const_iterator f2it = vFeatVec2.begin(); + DBoW3::FeatureVector::const_iterator f1end = vFeatVec1.end(); + DBoW3::FeatureVector::const_iterator f2end = vFeatVec2.end(); while(f1it!=f1end && f2it!=f2end) { diff --git a/src/System.cc b/src/System.cc index 60d9c5185a..8853068f9d 100644 --- a/src/System.cc +++ b/src/System.cc @@ -115,7 +115,8 @@ System::System(const string &strVocFile, const string &strSettingsFile, const eS cout << endl << "Loading ORB Vocabulary. This could take a while..." << endl; mpVocabulary = new ORBVocabulary(); - bool bVocLoad = mpVocabulary->loadFromTextFile(strVocFile); + mpVocabulary->load(strVocFile); + bool bVocLoad = true; if(!bVocLoad) { cerr << "Wrong path to vocabulary. " << endl; @@ -137,7 +138,8 @@ System::System(const string &strVocFile, const string &strSettingsFile, const eS cout << endl << "Loading ORB Vocabulary. This could take a while..." << endl; mpVocabulary = new ORBVocabulary(); - bool bVocLoad = mpVocabulary->loadFromTextFile(strVocFile); + mpVocabulary->load(strVocFile); + bool bVocLoad = true; if(!bVocLoad) { cerr << "Wrong path to vocabulary. " << endl;