diff --git a/src-tauri/src/config.rs b/src-tauri/src/config.rs
index abc01d2..b786444 100644
--- a/src-tauri/src/config.rs
+++ b/src-tauri/src/config.rs
@@ -1,4 +1,4 @@
-use crate::{types::SettingsOptions, dal::get_active_server};
+use crate::{dal::DataAccessLayer, types::SettingsOptions};
 use fs_extra::dir::{move_dir, CopyOptions};
 use sqlx::{Pool, Row, Sqlite};
 use tauri::State;
@@ -137,6 +137,7 @@ pub async fn set_local_dir(
     log::info!("setting local directory to {}", dir);
     log::info!("parent dir: {}", parent_dir);
     let pool = state_mutex.lock().await;
+    let dal = DataAccessLayer::new(&pool);
     let old_server_dir = get_server_dir(&pool).await.unwrap();
     let _ = sqlx::query("UPDATE server SET local_dir = ? WHERE active = 1")
         .bind(dir.clone())
@@ -155,7 +156,7 @@ pub async fn set_local_dir(
             }
         }
     }
-    let url = get_active_server(&pool).await.unwrap();
+    let url = dal.get_active_server().await.unwrap();
     if url == "" {
         log::warn!("could not obtain active server url");
         return Ok(false);
@@ -182,7 +183,8 @@ pub async fn cmd_get_cache_setting(state_mutex: State<'_, Mutex<Pool<Sqlite>>>)
 #[tauri::command]
 pub async fn cmd_set_cache_setting(new_cache: bool, state_mutex: State<'_, Mutex<Pool<Sqlite>>>) -> Result {
     let pool = state_mutex.lock().await;
-    let url = get_active_server(&pool).await.unwrap();
+    let dal = DataAccessLayer::new(&pool);
+    let url = dal.get_active_server().await.unwrap();
 
     match sqlx::query("UPDATE server SET cache_setting = $1 WHERE url = $2")
         .bind(if new_cache { 1 } else { 0 })
@@ -200,7 +202,8 @@ pub async fn cmd_set_cache_setting(new_cache: bool, state_mutex: State<'_, Mutex
 }
 
 pub async fn get_cache_setting(pool: &Pool<Sqlite>) -> Result {
-    let url = get_active_server(pool).await.unwrap();
+    let dal = DataAccessLayer::new(pool);
+    let url = dal.get_active_server().await.unwrap();
     match sqlx::query("SELECT cache_setting FROM server WHERE url = $1")
         .bind(url)
         .fetch_one(pool)
diff --git a/src-tauri/src/dal.rs b/src-tauri/src/dal.rs
index e0088d7..60e0b94 100644
--- a/src-tauri/src/dal.rs
+++ b/src-tauri/src/dal.rs
@@ -3,138 +3,154 @@ use std::path::Path;
 use std::result::Result::Ok;
 use crate::types::{ChangeType, UpdatedFile};
 
-pub async fn get_current_server(pool: &Pool<Sqlite>) -> Result<String, ()> {
-    let output = sqlx::query("SELECT CASE WHEN debug_active = 1 THEN debug_url ELSE url END as url FROM server WHERE active = 1").fetch_one(&*pool).await;
+pub struct DataAccessLayer<'a> {
+    pub pool: &'a Pool<Sqlite>
+}
 
-    match output {
-        Ok(row) => Ok(row.get::<String, &str>("url")),
-        Err(err) => {
-            log::error!("couldn't get the current server url: {}", err);
-            Ok("".to_string())
-        }
+impl<'a> DataAccessLayer<'a> {
+    pub fn new(user_pool: &'a Pool<Sqlite>) -> Self {
+        DataAccessLayer { pool: &user_pool }
     }
-}
 
-pub async fn get_active_server(pool: &Pool<Sqlite>) -> Result<String, ()> {
-    let output = sqlx::query("SELECT url FROM server WHERE active = 1")
-        .fetch_one(&*pool)
-        .await;
+    /// gets the current server URL to use for network calls
+    pub async fn get_current_server(&self) -> Result<String, ()> {
+        let output = sqlx::query("SELECT CASE WHEN debug_active = 1 THEN debug_url ELSE url END as url FROM server WHERE active = 1")
+            .fetch_one(self.pool).await;
+
+        match output {
+            Ok(row) => Ok(row.get::<String, &str>("url")),
+            Err(err) => {
+                log::error!("couldn't get the current server url: {}", err);
+                Ok("".to_string())
+            }
+        }
+    }
 
-    match output {
-        Ok(row) => Ok(row.get::<String, &str>("url")),
-        Err(err) => {
-            log::error!("couldn't get the active server url: {}", err);
-            Ok("".to_string())
+    /// gets the current server URL for db foreign key purposes
+    pub async fn get_active_server(&self) -> Result<String, ()> {
+        let output = sqlx::query("SELECT url FROM server WHERE active = 1")
+            .fetch_one(self.pool)
+            .await;
+
+        match output {
+            Ok(row) => Ok(row.get::<String, &str>("url")),
+            Err(err) => {
+                log::error!("couldn't get the active server url: {}", err);
+                Ok("".to_string())
+            }
         }
     }
-}
 
-pub async fn get_project_dir(pid: i32, pool: &Pool<Sqlite>) -> Result<String, ()> {
-    //println!("current allocating {}B", get_allocated());
-    let server = get_active_server(pool).await.unwrap();
-    let db_call = sqlx::query("SELECT server.local_dir, project.title, project.team_name FROM server, project WHERE server.active = 1 AND project.url = ? AND project.pid = ?")
-        .bind(server)
+    /// deletes an entry from the file table
+    pub async fn delete_file_entry(&self, pid: i32, path: String) -> Result<bool, ()> {
+        let _ = sqlx::query(
+            "DELETE FROM file
+            WHERE pid = $1 AND filepath = $2",
+        )
         .bind(pid)
-        .fetch_one(pool)
+        .bind(path)
+        .execute(self.pool)
         .await;
-    match db_call {
-        Ok(row) => {
-            let output = Path::new(&row.get::<String, &str>("local_dir"))
-                .join(row.get::<String, &str>("team_name"))
-                .join(row.get::<String, &str>("title"));
-            Ok(output.display().to_string())
-        }
-        Err(err) => {
-            log::error!("couldn't get the project directory for pid {}: {}", pid, err);
-            Ok("".to_string())
-        }
+        Ok(true)
     }
-}
 
-pub async fn get_file_info(pid: i32, path: String, pool: &Pool<Sqlite>) -> Result<UpdatedFile, ()> {
-    let output = sqlx::query(
-        "SELECT curr_hash, size, change_type, in_fs FROM file WHERE filepath = $1 AND pid = $2",
-    )
-    .bind(path.clone())
-    .bind(pid)
-    .fetch_one(&*pool)
-    .await;
-
-    match output {
-        Ok(row) => {
-            let change = match row.get::<i64, &str>("change_type") {
-                1 => ChangeType::Create,
-                2 => ChangeType::Update,
-                3 => ChangeType::Delete,
-                _ => ChangeType::NoChange,
-            };
-            let in_fs = if row.get::<i64, &str>("in_fs") > 0 { true } else { false };
-            let owo: UpdatedFile = UpdatedFile {
-                path: path,
-                hash: row.get::<String, &str>("curr_hash").to_string(),
-                size: row.get::<i64, &str>("size"),
-                change: change,
-                in_fs: in_fs
-            };
-
-            Ok(owo)
-        }
-        Err(err) => {
-            log::error!("couldn't get the file information for {} in project {}: {}", path, pid, err);
-            Err(())
+    /// get the project directory for the specified project
+    pub async fn get_project_dir(&self, pid: i32) -> Result<String, ()> {
+        //println!("current allocating {}B", get_allocated());
+        let server = self.get_active_server().await.unwrap();
+        let db_call = sqlx::query("SELECT server.local_dir, project.title, project.team_name FROM server, project WHERE server.active = 1 AND project.url = ? AND project.pid = ?")
+            .bind(server)
+            .bind(pid)
+            .fetch_one(self.pool)
+            .await;
+        match db_call {
+            Ok(row) => {
+                let output = Path::new(&row.get::<String, &str>("local_dir"))
+                    .join(row.get::<String, &str>("team_name"))
+                    .join(row.get::<String, &str>("title"));
+                Ok(output.display().to_string())
+            }
+            Err(err) => {
+                log::error!("couldn't get the project directory for pid {}: {}", pid, err);
+                Ok("".to_string())
+            }
         }
     }
-}
-
-pub async fn get_basehash(pid: i32, path: String, pool: &Pool<Sqlite>) -> Result<String, ()> {
-    let result = sqlx::query(
-        "SELECT base_hash FROM file WHERE
-        pid = $1 AND filepath = $2 LIMIT 1
-        ",
-    )
-    .bind(pid)
-    .bind(path)
-    .fetch_one(pool)
-    .await;
-    match result {
-        Ok(row) => Ok(row.get::<String, &str>("base_hash")),
-        Err(err) => {
-            log::error!("could not get base_hash: {}", err);
-            Err(())
+
+    pub async fn get_file_info(&self, pid: i32, path: String) -> Result<UpdatedFile, ()> {
+        let output = sqlx::query(
+            "SELECT curr_hash, size, change_type, in_fs FROM file WHERE filepath = $1 AND pid = $2",
+        )
+        .bind(path.clone())
+        .bind(pid)
+        .fetch_one(self.pool)
+        .await;
+
+        match output {
+            Ok(row) => {
+                let change = match row.get::<i64, &str>("change_type") {
+                    1 => ChangeType::Create,
+                    2 => ChangeType::Update,
+                    3 => ChangeType::Delete,
+                    _ => ChangeType::NoChange,
+                };
+                let in_fs = if row.get::<i64, &str>("in_fs") > 0 { true } else { false };
+                let owo: UpdatedFile = UpdatedFile {
+                    path: path,
+                    hash: row.get::<String, &str>("curr_hash").to_string(),
+                    size: row.get::<i64, &str>("size"),
+                    change: change,
+                    in_fs: in_fs
+                };
+
+                Ok(owo)
+            }
+            Err(err) => {
+                log::error!("couldn't get the file information for {} in project {}: {}", path, pid, err);
+                Err(())
+            }
         }
     }
-}
-
-pub async fn delete_file_entry(pid: i32, path: String, pool: &Pool<Sqlite>) -> Result<bool, ()> {
-    let _ = sqlx::query(
-        "DELETE FROM file
-        WHERE pid = $1 AND filepath = $2",
-    )
-    .bind(pid)
-    .bind(path)
-    .execute(pool)
-    .await;
-    Ok(true)
-}
+
+    pub async fn get_basehash(&self, pid: i32, path: String) -> Result<String, ()> {
+        let result = sqlx::query(
+            "SELECT base_hash FROM file WHERE
+                pid = $1 AND filepath = $2 LIMIT 1
+            ",
+        )
+        .bind(pid)
+        .bind(path)
+        .fetch_one(self.pool)
+        .await;
+
+        match result {
+            Ok(row) => Ok(row.get::<String, &str>("base_hash")),
+            Err(err) => {
+                log::error!("could not get base_hash: {}", err);
+                Err(())
+            }
+        }
+    }
+
+    // TODO necessary to have in dal?
+    pub async fn update_file_entry(&self, pid: i32, path: String) -> Result<bool, ()> {
+        let _ = sqlx::query(
+            "
+            UPDATE file SET
+                base_hash = tracked_hash,
+                curr_hash = tracked_hash,
+                base_commitid = tracked_commitid,
+                size = tracked_size,
+                in_fs = 1,
+                change_type = 0
+            WHERE pid = $1 AND filepath = $2
+            ",
+        )
+        .bind(pid)
+        .bind(path)
+        .execute(self.pool)
+        .await;
+        Ok(true)
+    }
+} // end impl DataAccessLayer<'_>
-// TODO necessary?
-pub async fn update_file_entry(pid: i32, path: String, pool: &Pool<Sqlite>) -> Result<bool, ()> {
-    let _ = sqlx::query(
-        "
-        UPDATE file SET
-            base_hash = tracked_hash,
-            curr_hash = tracked_hash,
-            base_commitid = tracked_commitid,
-            size = tracked_size,
-            in_fs = 1,
-            change_type = 0
-        WHERE pid = $1 AND filepath = $2
-        ",
-    )
-    .bind(pid)
-    .bind(path)
-    .execute(pool)
-    .await;
-    Ok(true)
-}
\ No newline at end of file
diff --git a/src-tauri/src/download.rs b/src-tauri/src/download.rs
index 6120dc3..3794d3f 100644
--- a/src-tauri/src/download.rs
+++ b/src-tauri/src/download.rs
@@ -5,7 +5,7 @@ use crate::types::{
     ReqwestError,
 };
 use crate::util::{delete_cache, delete_trash, get_cache_dir, get_trash_dir};
-use crate::dal::{delete_file_entry, get_current_server, get_project_dir};
+use crate::dal::DataAccessLayer;
 use futures::{stream, StreamExt};
 use log::{info, trace, warn};
 use reqwest::Client;
@@ -32,8 +32,10 @@ pub async fn download_files(
     app_handle: AppHandle,
 ) -> Result {
     let pool = state_mutex.lock().await;
-    let server_url = get_current_server(&pool).await.unwrap();
-    let project_dir = get_project_dir(pid, &pool).await.unwrap();
+    let dal = DataAccessLayer::new(&pool);
+
+    let server_url = dal.get_current_server().await.unwrap();
+    let project_dir = dal.get_project_dir(pid).await.unwrap();
     let cache_dir = get_cache_dir(&pool).await.unwrap();
     let trash_dir = get_trash_dir(&pool).await.unwrap();
 
@@ -276,7 +278,7 @@ pub async fn download_files(
             .await;
         } else {
             // file.download == delete
-            let _ = delete_file_entry(pid, file.rel_path, &pool).await;
+            let _ = dal.delete_file_entry(pid, file.rel_path).await;
         }
     }
 
@@ -471,7 +473,16 @@ fn read_mapping(hash_dir: &String) -> Result, ()> {
 
 // assumes trash dir exists
 pub fn trash_file(proj_dir: &String, trash_dir: &String, hash: String) -> Result {
-    let trash_path = trash_dir.to_owned() + &(sep().to_string()) + hash.as_str();
+    let trash_path;
+    #[cfg(target_os = "windows")]
+    {
+        trash_path = trash_dir.to_owned() + &(sep().to_string()) + hash.as_str();
+    }
+    #[cfg(target_os = "linux")]
+    {
+        trash_path = translate_filepath(&(trash_dir.to_owned() + &(sep().to_string()) + hash.as_str()), true);
+    }
+
     match fs::rename(proj_dir, trash_path) {
         Ok(_) => Ok(true),
         Err(err) => {
@@ -495,7 +506,8 @@ pub fn recover_file(trash_dir: &String, proj_dir: &String) -> Result {
 #[tauri::command]
 pub async fn download_single_file(pid: i64, path: String, commit_id: i64, user_id: String, download_path: String, state_mutex: State<'_, Mutex<Pool<Sqlite>>>) -> Result {
     let pool = state_mutex.lock().await;
-    let server_url = get_current_server(&pool).await.unwrap();
+    let dal = DataAccessLayer::new(&pool);
+    let server_url = dal.get_current_server().await.unwrap();
     let cache_dir = get_cache_dir(&pool).await.unwrap();
 
     // request download links from glassy server
diff --git a/src-tauri/src/reset.rs b/src-tauri/src/reset.rs
index 2a235cd..f4ebbc7 100644
--- a/src-tauri/src/reset.rs
+++ b/src-tauri/src/reset.rs
@@ -8,7 +8,7 @@ use crate::types::{DownloadRequest, DownloadRequestMessage, DownloadServerOutput
 use crate::util::{
     delete_cache, delete_trash, get_cache_dir, get_trash_dir
 };
-use crate::dal::{get_current_server, get_project_dir};
+use crate::dal::DataAccessLayer;
 use futures::{stream, StreamExt};
 use log::{info, warn};
 use reqwest::Client;
@@ -24,17 +24,18 @@ const CONCURRENT_AWS_REQUESTS: usize = 4;
 
 #[tauri::command]
 pub async fn reset_files(
-    pid: i64,
+    pid: i32,
     filepaths: Vec<String>,
     user: String,
     app_handle: AppHandle,
 ) -> Result {
     let state_mutex = app_handle.state::<Mutex<Pool<Sqlite>>>();
     let pool = state_mutex.lock().await;
-    let project_dir = get_project_dir(pid.try_into().unwrap(), &pool)
+    let dal = DataAccessLayer::new(&pool);
+    let project_dir = dal.get_project_dir(pid)
         .await
         .unwrap();
-    let server_url = get_current_server(&pool).await.unwrap();
+    let server_url = dal.get_current_server().await.unwrap();
     let cache_dir = get_cache_dir(&pool).await.unwrap();
     let trash_dir = get_trash_dir(&pool).await.unwrap();
 
@@ -71,7 +72,7 @@ pub async fn reset_files(
         // if file isnt in cache, we need to download it
         if !verify_cache(&cache_path).unwrap() {
             to_download.push(DownloadRequest {
-                project_id: pid,
+                project_id: pid.into(),
                 path: file,
                 commit_id: commit,
                 user_id: user.clone(),
diff --git a/src-tauri/src/sync.rs b/src-tauri/src/sync.rs
index d455d34..d23e293 100644
--- a/src-tauri/src/sync.rs
+++ b/src-tauri/src/sync.rs
@@ -1,5 +1,5 @@
 use crate::{
-    file::translate_filepath, types::RemoteFile, util::open_directory, dal::{get_active_server, get_project_dir}
+    file::translate_filepath, types::RemoteFile, util::open_directory, dal::DataAccessLayer
 };
 use merkle_hash::{bytes_to_hex, Algorithm, MerkleTree};
 use serde::{Deserialize, Serialize};
@@ -35,7 +35,7 @@ pub async fn hash_dir(pid: i32, dir_path: PathBuf, pool: &Pool<Sqlite>) {
         }
         #[cfg(target_os = "linux")]
         {
-            rel_path = translate_filepath(file.path.relative.into_string(), false);
+            rel_path = translate_filepath(&(file.path.relative.into_string()), false);
         }
         let metadata = std::fs::metadata(file.path.absolute.clone()).unwrap();
         let hash = bytes_to_hex(file.hash);
@@ -125,7 +125,8 @@ pub async fn open_project_dir(
     state_mutex: State<'_, Mutex<Pool<Sqlite>>>,
 ) -> Result<(), ()> {
     let pool = state_mutex.lock().await;
-    let project_dir = get_project_dir(pid, &pool).await.unwrap();
+    let dal = DataAccessLayer::new(&pool);
+    let project_dir = dal.get_project_dir(pid).await.unwrap();
     let _ = fs::create_dir_all(&project_dir);
     let mut pb = PathBuf::new();
     pb.push(project_dir);
@@ -144,7 +145,8 @@ pub async fn sync_changes(
 
     log::info!("syncing changes for project {}", pid);
     let pool = state_mutex.lock().await;
-    let project_dir = get_project_dir(pid, &pool).await.unwrap();
+    let dal = DataAccessLayer::new(&pool);
+    let project_dir = dal.get_project_dir(pid).await.unwrap();
 
     // create folder if it does not exist
     let _ = fs::create_dir_all(&project_dir);
@@ -193,7 +195,8 @@ pub async fn update_project_info(
     state_mutex: State<'_, Mutex<Pool<Sqlite>>>,
 ) -> Result<(), ()> {
     let pool = state_mutex.lock().await;
-    let server = get_active_server(&pool).await.unwrap();
+    let dal = DataAccessLayer::new(&pool);
+    let server = dal.get_active_server().await.unwrap();
 
     let _output = sqlx::query("INSERT INTO project(pid, url, title, team_name, base_commitid, remote_title) VALUES (?, ?, ?, ?, ?, ?)
         ON CONFLICT(pid, url) DO UPDATE SET remote_title = excluded.title")
@@ -312,7 +315,8 @@ pub async fn get_project_name(
     state_mutex: State<'_, Mutex<Pool<Sqlite>>>,
 ) -> Result {
     let pool = state_mutex.lock().await;
-    let server = get_active_server(&pool).await.unwrap();
+    let dal = DataAccessLayer::new(&pool);
+    let server = dal.get_active_server().await.unwrap();
     let output = sqlx::query("SELECT title FROM project WHERE pid = $1 AND url = $2")
         .bind(pid)
         .bind(server)
@@ -340,7 +344,8 @@ pub async fn get_local_projects(
     state_mutex: State<'_, Mutex<Pool<Sqlite>>>,
 ) -> Result, ()> {
     let pool = state_mutex.lock().await;
-    let server = get_active_server(&pool).await.unwrap();
+    let dal = DataAccessLayer::new(&pool);
+    let server = dal.get_active_server().await.unwrap();
 
     let pid_query = sqlx::query("SELECT DISTINCT pid FROM file")
         .fetch_all(&*pool)
diff --git a/src-tauri/src/upload.rs b/src-tauri/src/upload.rs
index 1f856be..e282e75 100644
--- a/src-tauri/src/upload.rs
+++ b/src-tauri/src/upload.rs
@@ -3,7 +3,7 @@ use std::sync::Arc;
 use crate::file::{sep, translate_filepath};
 use crate::types::{ChangeType, ReqwestError, UpdatedFile};
 use crate::util::verify_file;
-use crate::dal::{get_current_server, get_file_info, get_project_dir};
+use crate::dal::DataAccessLayer;
 use fs_chunker::Chunk;
 use futures::{stream, StreamExt};
 use log::error;
@@ -52,8 +52,9 @@ pub async fn upload_files(
 ) -> Result {
     let state_mutex = app_handle.state::<Mutex<Pool<Sqlite>>>();
     let pool = state_mutex.lock().await;
-    let project_dir = get_project_dir(pid, &pool).await.unwrap();
-    let server_url = get_current_server(&pool).await.unwrap();
+    let dal = DataAccessLayer::new(&pool);
+    let project_dir = dal.get_project_dir(pid).await.unwrap();
+    let server_url = dal.get_current_server().await.unwrap();
     let endpoint = server_url + "/store/request";
     let client: Client = reqwest::Client::new();
     log::debug!("uploading files for project {}", pid);
@@ -61,7 +62,7 @@ pub async fn upload_files(
     let mut to_upload: Vec = vec![];
     let mut uploaded: u32 = 0;
     for filepath in filepaths {
-        let file: UpdatedFile = get_file_info(pid, filepath.clone(), &pool).await.unwrap();
+        let file: UpdatedFile = dal.get_file_info(pid, filepath.clone()).await.unwrap();
 
         // verify file information
         if !verify_file(&filepath, pid, &pool).await.unwrap() {
diff --git a/src-tauri/src/util.rs b/src-tauri/src/util.rs
index 4b983b4..20282e3 100644
--- a/src-tauri/src/util.rs
+++ b/src-tauri/src/util.rs
@@ -13,7 +13,7 @@ use crate::file::sep;
 //use std::alloc;
 //use cap::Cap;
 use crate::get_server_dir;
-use crate::dal::{get_file_info, get_project_dir};
+use crate::dal::DataAccessLayer;
 
 pub async fn get_cache_dir(pool: &Pool<Sqlite>) -> Result {
     let server_dir = get_server_dir(pool).await;
@@ -155,11 +155,12 @@ pub fn get_max_allocated() -> usize {
 */
 pub async fn verify_file(rel_path: &String, pid: i32, pool: &Pool<Sqlite>) -> Result {
-    let project_dir = get_project_dir(pid, pool).await.unwrap();
+    let dal = DataAccessLayer::new(pool);
+    let project_dir = dal.get_project_dir(pid).await.unwrap();
     let absolute_path = project_dir + &(sep().to_string()) + rel_path;
     let abs_path = Path::new(&absolute_path);
 
     // get current file info
-    let file_info = get_file_info(pid, rel_path.to_string(), pool).await.unwrap();
+    let file_info = dal.get_file_info(pid, rel_path.to_string()).await.unwrap();
 
     // check file existence before any sort of hashing
     if !abs_path.exists() && !file_info.in_fs {