Commit 2ed1faa

Merge pull request #78 from glassypdm/dev

v0.7.0

joshtenorio authored Oct 29, 2024
2 parents ebdf842 + 84876a9
Showing 33 changed files with 1,968 additions and 1,234 deletions.
4 changes: 2 additions & 2 deletions package.json
@@ -1,7 +1,7 @@
 {
   "name": "glassy_pdm_client",
   "private": true,
-  "version": "0.6.3",
+  "version": "0.7.0",
   "type": "module",
   "scripts": {
     "dev": "vite",
@@ -33,7 +33,7 @@
     "@radix-ui/react-tooltip": "^1.1.3",
     "@tanstack/react-query": "^5.59.9",
     "@tanstack/react-query-devtools": "^5.59.9",
-    "@tanstack/react-router": "^1.64.0",
+    "@tanstack/react-router": "^1.77.6",
     "@tanstack/react-table": "^8.20.5",
     "@tauri-apps/api": "2.0.2",
     "@tauri-apps/plugin-dialog": "2.0.0",
1,600 changes: 775 additions & 825 deletions pnpm-lock.yaml

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion src-tauri/Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion src-tauri/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "glassy_pdm_client"
-version = "0.6.3"
+version = "0.7.0"
 description = "glassyPDM"
 authors = ["Joshua Tenorio"]
 license = "GPL"
1 change: 1 addition & 0 deletions src-tauri/capabilities/main.json
@@ -15,6 +15,7 @@
     "core:tray:default",
     "shell:allow-open",
     "dialog:allow-open",
+    "dialog:allow-save",
     "fs:allow-exists",
     "fs:allow-mkdir",
     "log:default"
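
The new dialog:allow-save permission appears to support the single-file download flow added later in this commit, which needs a native "save as" dialog. A minimal frontend sketch, assuming the @tauri-apps/plugin-dialog package already pinned in package.json (the title string is illustrative):

import { save } from "@tauri-apps/plugin-dialog";

// Prompt the user for a destination path; resolves to null if the
// dialog is cancelled. Requires the dialog:allow-save capability.
const downloadPath: string | null = await save({
  title: "Save file as...",
});
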
2 changes: 1 addition & 1 deletion src-tauri/gen/schemas/capabilities.json
@@ -1 +1 @@
-{"default":{"identifier":"default","description":"Capability for the main window","local":true,"windows":["main"],"permissions":["core:path:default","core:event:default","core:window:default","core:app:default","core:resources:default","core:menu:default","core:tray:default","shell:allow-open","dialog:allow-open","fs:allow-exists","fs:allow-mkdir","log:default"]}}
+{"default":{"identifier":"default","description":"Capability for the main window","local":true,"windows":["main"],"permissions":["core:path:default","core:event:default","core:window:default","core:app:default","core:resources:default","core:menu:default","core:tray:default","shell:allow-open","dialog:allow-open","dialog:allow-save","fs:allow-exists","fs:allow-mkdir","log:default"]}}
1 change: 1 addition & 0 deletions src-tauri/migrations/2_cache_setting.sql
@@ -0,0 +1 @@
+ALTER TABLE server ADD cache_setting INTEGER DEFAULT 0;
74 changes: 68 additions & 6 deletions src-tauri/src/config.rs
@@ -1,4 +1,5 @@
 use crate::{types::SettingsOptions, util::get_active_server};
+use fs_extra::dir::{move_dir, CopyOptions};
 use sqlx::{Pool, Row, Sqlite};
 use tauri::State;
 use tokio::sync::Mutex;
@@ -128,18 +129,37 @@ pub async fn init_settings_options(
 
 #[tauri::command]
 pub async fn set_local_dir(
+    parent_dir: String,
     dir: String,
+    move_files: bool,
     state_mutex: State<'_, Mutex<Pool<Sqlite>>>,
-) -> Result<(), ()> {
+) -> Result<bool, ()> {
     log::info!("setting local directory to {}", dir);
+    log::info!("parent dir: {}", parent_dir);
     let pool = state_mutex.lock().await;
 
+    let old_server_dir = get_server_dir(&pool).await.unwrap();
     let _ = sqlx::query("UPDATE server SET local_dir = ? WHERE active = 1")
-        .bind(dir)
+        .bind(dir.clone())
         .execute(&*pool)
         .await;
 
+    // TODO error handling
+    if move_files {
+        log::info!("moving files...");
+        // TODO linux testing
+        let options = CopyOptions::new();
+        match move_dir(old_server_dir.clone(), parent_dir.clone(), &options) {
+            Ok(_) => return Ok(true),
+            Err(e) => {
+                log::error!("encountered error moving files from {} to {}: {}", old_server_dir, parent_dir, e);
+                return Ok(false);
+            }
+        }
+    }
+    let url = get_active_server(&pool).await.unwrap();
+    if url == "" {
+        log::warn!("could not obtain active server url");
+        return Ok(false);
+    }
 
     // clear file and project table
     let _ = sqlx::query("delete from project WHERE url = $1")
@@ -149,7 +169,49 @@ pub async fn set_local_dir
 
     let _ = sqlx::query("delete from file")
         .execute(&*pool)
-        .await;
+        .await;
+    Ok(true)
+}
 
-    Ok(())
+#[tauri::command]
+pub async fn cmd_get_cache_setting(state_mutex: State<'_, Mutex<Pool<Sqlite>>>) -> Result<bool, ()> {
+    let pool = state_mutex.lock().await;
+    return get_cache_setting(&pool).await;
+}
+
+#[tauri::command]
+pub async fn cmd_set_cache_setting(new_cache: bool, state_mutex: State<'_, Mutex<Pool<Sqlite>>>) -> Result<bool, ()> {
+    let pool = state_mutex.lock().await;
+    let url = get_active_server(&pool).await.unwrap();
+
+    match sqlx::query("UPDATE server SET cache_setting = $1 WHERE url = $2")
+        .bind(if new_cache { 1 } else { 0 })
+        .bind(url)
+        .execute(&*pool)
+        .await {
+        Ok(_o) => {
+            Ok(true)
+        },
+        Err(err) => {
+            log::error!("could not set cache setting due to db error: {}", err);
+            Ok(false)
+        }
+    }
+}
+
+pub async fn get_cache_setting(pool: &Pool<Sqlite>) -> Result<bool, ()> {
+    let url = get_active_server(pool).await.unwrap();
+    match sqlx::query("SELECT cache_setting FROM server WHERE url = $1")
+        .bind(url)
+        .fetch_one(pool)
+        .await {
+        Ok(row) => {
+            let setting = row.get::<u32, &str>("cache_setting");
+            Ok(if setting == 1 { true } else { false })
+        },
+        Err(err) => {
+            log::error!("could not retrieve cache setting due to db error: {}", err);
+            Ok(false)
+        }
+    }
 }
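
For reference, a hedged sketch of how the frontend might drive the changed set_local_dir command and the new cache-setting commands, assuming Tauri v2's default camelCase argument mapping (the paths and variable names are illustrative, not taken from the repo):

import { invoke } from "@tauri-apps/api/core";

// set_local_dir now takes the parent directory and a move_files flag,
// and reports success/failure as a boolean instead of returning unit.
const moved = await invoke<boolean>("set_local_dir", {
  parentDir: "C:\\Users\\me\\Documents",
  dir: "C:\\Users\\me\\Documents\\glassyPDM",
  moveFiles: true,
});

// Read and flip the new "delete cache after download" setting.
const cacheEnabled = await invoke<boolean>("cmd_get_cache_setting");
await invoke<boolean>("cmd_set_cache_setting", { newCache: !cacheEnabled });
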
144 changes: 135 additions & 9 deletions src-tauri/src/download.rs
@@ -1,8 +1,9 @@
+use crate::config::get_cache_setting;
 use crate::types::{
     DownloadInformation, DownloadRequest, DownloadRequestMessage, DownloadServerOutput, FileChunk,
     ReqwestError,
 };
-use crate::util::{delete_trash, get_cache_dir, get_trash_dir};
+use crate::util::{delete_cache, delete_trash, get_cache_dir, get_trash_dir};
 use crate::util::{get_current_server, get_project_dir};
 use futures::{stream, StreamExt};
 use reqwest::Client;
@@ -84,7 +85,7 @@ pub async fn download_files(
                 body: None,
             }),
             Err(err) => {
-                println!("error: {}", err);
+                log::error!("error: {}", err);
                 DownloadServerOutput {
                     response: "reqwest error".to_string(),
                     body: None,
@@ -114,7 +115,7 @@ pub async fn download_files(
             }
         } else {
             *error = false;
-            println!(
+            log::error!(
                 "error TODO something L159 download.rs: response= {}",
                 output.response
             );
@@ -124,12 +125,12 @@
     .await;
 
     if *error_flag.lock().await {
-        println!("issue getting download link");
+        log::error!("issue getting download link");
         return Ok(false);
     }
 
     let num_chunks = chunk_downloads.lock().await.len();
-    println!("s3 urls obtained, downloading {} chunks...", num_chunks);
+    log::info!("s3 urls obtained, downloading {} chunks...", num_chunks);
 
     // download chunks
     let copy = (*chunk_downloads).lock().await.clone();
@@ -150,15 +151,15 @@
                 }
                 Err(err) => {
                     *error = true;
-                    println!("error downloading file {}", err);
+                    log::error!("error downloading file {}", err);
                 }
             };
         }
     })
     .await;
 
     if *error_flag.lock().await {
-        println!("issue downloading file from s3");
+        log::error!("issue downloading file from s3");
         return Ok(false);
     }

@@ -167,7 +168,7 @@
         let cache_str = cache_dir.clone() + "\\" + file.hash.as_str();
         let res = verify_cache(&cache_str).unwrap();
         if !res {
-            println!("verifying cache failed: {}", file.hash);
+            log::error!("verifying cache failed: {}", file.hash);
             return Ok(false);
         }
     }
@@ -264,6 +265,13 @@ pub async fn download_files(
             .await;
         }
     }
+
+    // if configured, delete cache
+    let should_delete_cache = get_cache_setting(&pool).await.unwrap();
+    if should_delete_cache {
+        let _ = delete_cache(&pool).await;
+    }
+
     Ok(true)
 }

@@ -341,7 +349,7 @@ pub fn assemble_file(cache_dir: &String, proj_path: &String) -> Result<bool, ()>
     } else if mapping.len() == 0 {
         println!("assemble file: empty mapping for {}", cache_dir);
         return Ok(false);
-    } else {
+    } else { // TODO this can be un-nested
         // otherwise we need to assemble the file
         let proj_file = match File::create(proj_path) {
             Ok(file) => file,
@@ -461,3 +469,121 @@ pub fn recover_file(trash_dir: &String, proj_dir: &String) -> Result<bool, ()> {
         }
     }
 }
+
+#[tauri::command]
+pub async fn download_single_file(pid: i64, path: String, commit_id: i64, user_id: String, download_path: String, state_mutex: State<'_, Mutex<Pool<Sqlite>>>) -> Result<bool, ()> {
+    let pool = state_mutex.lock().await;
+    let server_url = get_current_server(&pool).await.unwrap();
+    let cache_dir = get_cache_dir(&pool).await.unwrap();
+
+    // request download links from glassy server
+    let endpoint = server_url + "/store/download";
+    let glassy_client: Client = reqwest::Client::new();
+    let body: DownloadRequest = DownloadRequest {
+        project_id: pid,
+        path: path.clone(),
+        commit_id: commit_id,
+        user_id: user_id,
+    };
+    let response = glassy_client.post(endpoint).json(&body).send().await;
+
+    let server_output: DownloadServerOutput = match response {
+        Ok(res) => res
+            .json::<DownloadServerOutput>()
+            .await
+            .unwrap_or_else(|_| DownloadServerOutput {
+                response: "server error".to_string(),
+                body: None,
+            }),
+        Err(err) => {
+            log::warn!("couldn't fetch download information for {} at commit {} in project {}: {}", path, commit_id, pid, err);
+            DownloadServerOutput {
+                response: "reqwest error".to_string(),
+                body: None,
+            }
+        }
+    };
+
+    if server_output.response != "success" {
+        log::error!("couldn't download file {}", path);
+        return Ok(false);
+    }
+    let download_info = match server_output.body {
+        Some(a) => a,
+        None => {
+            log::error!("download information missing for {}", path);
+            return Ok(false);
+        }
+    };
+
+    // if file is cached, assemble file to download path
+    let hash_dir = cache_dir.clone() + "\\" + download_info.file_hash.as_str();
+    if verify_cache(&hash_dir).unwrap() {
+        log::info!("hash exists in cache");
+        let out = assemble_file(&hash_dir, &download_path).unwrap();
+        // just need to assemble path and return true
+        return Ok(out)
+    }
+
+    let _ = match save_filechunkmapping(&cache_dir, &download_info) {
+        Ok(res) => {
+            if !res {
+                log::warn!("couldn't save filechunk mapping for file hash {}", download_info.file_hash);
+            }
+        },
+        Err(err) => {
+            log::error!("encountered error when writing file chunk mapping for file hash {}: {}", download_info.file_hash, err);
+        }
+    };
+
+    // otherwise we need to download the chunks and assemble them
+    let aws_client: Client = reqwest::Client::new();
+    let error_flag = Arc::new(Mutex::new(false));
+    let moved_error_flag = Arc::clone(&error_flag);
+    let cloned_cache = cache_dir.clone();
+    let _ = stream::iter(download_info.file_chunks.into_iter())
+        .for_each_concurrent(CONCURRENT_AWS_REQUESTS, |chunk_info| {
+            let cloned_error_flag = Arc::clone(&moved_error_flag);
+            let client = &aws_client;
+            // create cache_dir/file_hash directory
+            let filehash_dir = cloned_cache.clone() + "\\" + chunk_info.file_hash.as_str();
+            async move {
+                let res = download_with_client(&filehash_dir, chunk_info, client).await;
+                let mut error = cloned_error_flag.lock().await;
+                let _ = match res {
+                    Ok(_) => {
+                        log::info!("chunk downloaded successfully");
+                    }
+                    Err(err) => {
+                        *error = true;
+                        log::error!("error downloading chunk {}", err);
+                    }
+                };
+            }
+        })
+        .await;
+
+    if *error_flag.lock().await {
+        log::error!("issue downloading file from s3");
+        return Ok(false);
+    }
+
+    // verify the new downloaded chunks exist
+    let cache = cache_dir + "\\" + download_info.file_hash.as_str();
+    let res = verify_cache(&cache).unwrap();
+    if !res {
+        log::error!("verifying cache failed: {}", download_info.file_hash);
+        return Ok(false);
+    }
+
+    // assemble file
+    let out = assemble_file(&hash_dir, &download_path).unwrap();
+
+    // if configured, delete cache
+    let should_delete_cache = get_cache_setting(&pool).await.unwrap();
+    if should_delete_cache {
+        let _ = delete_cache(&pool).await;
+    }
+
+    Ok(out)
+}
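
Putting the pieces together, one plausible frontend flow pairs the save dialog enabled by dialog:allow-save with the new download_single_file command. A sketch only: the function name and IDs are illustrative, and the camelCase argument names assume Tauri's default mapping.

import { invoke } from "@tauri-apps/api/core";
import { save } from "@tauri-apps/plugin-dialog";

async function saveFileAtCommit(pid: number, path: string, commitId: number, userId: string): Promise<void> {
  // Ask the user where to write the file (needs dialog:allow-save).
  const downloadPath = await save({ title: "Save file as..." });
  if (downloadPath === null) return; // user cancelled the dialog

  // download_single_file reuses cached chunks when it can, assembles
  // the file at downloadPath, and returns false on any server/S3 error.
  const ok = await invoke<boolean>("download_single_file", {
    pid,
    path,
    commitId,
    userId,
    downloadPath,
  });
  if (!ok) {
    console.error(`could not download ${path} at commit ${commitId}`);
  }
}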