diff --git a/Cargo.toml b/Cargo.toml
index b114361..d0e5e19 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,12 +1,12 @@
-[workspace]
-resolver = "2"
-members = [
-    "std",
-    "examples/*",
-]
-
-[workspace.dependencies]
-dioxus-std = { path = "./std" }
-dioxus = { version = "0.5" }
-dioxus-web = { version = "0.5" }
-dioxus-desktop = { version = "0.5" }
\ No newline at end of file
+[workspace]
+resolver = "2"
+members = [
+    "std",
+    "examples/*",
+]
+
+[workspace.dependencies]
+dioxus-std = { path = "./std" }
+dioxus = { version = "0.5" }
+dioxus-web = { version = "0.5" }
+dioxus-desktop = { version = "0.5" }
diff --git a/examples/README.md b/examples/README.md
index 68efc62..8172313 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -12,5 +12,8 @@ Learn how to use the `i18n` abstraction.
 ### [`channel`](./channel/)
 Learn how to use the `channel` abstraction.
 
+### [`storage`](./storage/)
+Learn how to use the `storage` abstraction.
+
 ### [`clipboard`](./clipboard/)
-Learn how to use the `clipboard` abstraction.
\ No newline at end of file
+Learn how to use the `clipboard` abstraction.
diff --git a/examples/storage/Cargo.toml b/examples/storage/Cargo.toml
new file mode 100644
index 0000000..f4d790f
--- /dev/null
+++ b/examples/storage/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "storage-desktop"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+dioxus-std = { workspace = true, features = ["storage"] }
+dioxus = { workspace = true, features = ["router"] }
+
+[features]
+web = ["dioxus/web"]
+desktop = ["dioxus/desktop"]
diff --git a/examples/storage/README.md b/examples/storage/README.md
new file mode 100644
index 0000000..15ffa8a
--- /dev/null
+++ b/examples/storage/README.md
@@ -0,0 +1,8 @@
+# use_persistent (Desktop)
+
+
+The `use_persistent` hook allows you to create data that will persist across sessions. This example shows how to use it.
+
+Run:
+
+```dioxus serve --platform desktop```
diff --git a/examples/storage/src/main.rs b/examples/storage/src/main.rs
new file mode 100644
index 0000000..a488508
--- /dev/null
+++ b/examples/storage/src/main.rs
@@ -0,0 +1,102 @@
+use dioxus::prelude::*;
+use dioxus_router::prelude::*;
+use dioxus_std::storage::*;
+
+fn main() {
+    dioxus_std::storage::set_dir!();
+    launch(app);
+}
+
+fn app() -> Element {
+    rsx! {
+        Router::<Route> {}
+    }
+}
+
+#[derive(Routable, Clone)]
+#[rustfmt::skip]
+enum Route {
+    #[layout(Footer)]
+    #[route("/")]
+    Page1 {},
+    #[route("/page2")]
+    Page2 {},
+}
+
+#[component]
+fn Footer() -> Element {
+    let new_window = {
+        #[cfg(feature = "desktop")]
+        {
+            let window = dioxus::desktop::use_window();
+            rsx! {
+                div {
+                    button {
+                        onclick: move |_| {
+                            let dom = VirtualDom::new(app);
+                            window.new_window(dom, Default::default());
+                        },
+                        "New Window"
+                    }
+                }
+            }
+        }
+        #[cfg(not(feature = "desktop"))]
+        {
+            rsx! {
+                div {}
+            }
+        }
+    };
+
+    rsx! {
+        div {
+            Outlet::<Route> { }
+
+            p {
+                "----"
+            }
+
+            {new_window}
+
+            nav {
+                ul {
+                    li { Link { to: Route::Page1 {}, "Page1" } }
+                    li { Link { to: Route::Page2 {}, "Page2" } }
+                }
+            }
+        }
+    }
+}
+
+#[component]
+fn Page1() -> Element {
+    rsx!("Home")
+}
+
+#[component]
+fn Page2() -> Element {
+    let mut count_session = use_singleton_persistent(|| 0);
+    let mut count_local = use_synced_storage::<LocalStorage, i32>("synced".to_string(), || 0);
+
+    rsx!(
+        div {
+            button {
+                onclick: move |_| {
+                    *count_session.write() += 1;
+                },
+                "Click me!"
+            },
+            "I persist for the current session.
Clicked {count_session} times" + } + div { + button { + onclick: move |_| { + *count_local.write() += 1; + }, + "Click me!" + }, + "I persist across all sessions. Clicked {count_local} times" + } + ) +} diff --git a/std/Cargo.toml b/std/Cargo.toml index ef792f7..e6e143c 100644 --- a/std/Cargo.toml +++ b/std/Cargo.toml @@ -50,6 +50,24 @@ i18n = [ # Non Shared "dep:unic-langid", ] +storage = [ + # Shared + "dep:rustc-hash", + "dep:postcard", + "dep:once_cell", + "dep:dioxus-signals", + "dep:tokio", + "dep:yazi", + "web-sys/StorageEvent", + "dep:serde", + "dep:futures-util", + + # WASM + "dep:wasm-bindgen", + + # Not WASM + "dep:directories", +] # CI testing wasm-testing = ["geolocation", "color_scheme", "utils", "i18n"] @@ -75,7 +93,7 @@ notify-rust = { version = "4.8.0", optional = true } uuid = { version = "1.3.2", optional = true } async-broadcast = { version = "0.5.1", optional = true } -# Used by: geolocation +# Used by: geolocation, storage futures = { version = "0.3.28", features = ["std"], optional = true } futures-util = { version = "0.3.28", optional = true } @@ -84,6 +102,16 @@ serde = { version = "1.0.163", optional = true } serde_json = { version = "1.0.96", optional = true } unic-langid = { version = "0.9.1", features = ["serde"], optional = true } +# Used by: storage +rustc-hash = { version = "1.1.0", optional = true } +postcard = { version = "1.0.2", features = ["use-std"], optional = true } +once_cell = { version = "1.17.0", optional = true } +dioxus-signals = { version = "0.5.0-alpha.2", features = [ + "serialize", +], optional = true } +tokio = { version = "1.33.0", features = ["sync"], optional = true } +yazi = { version = "0.1.4", optional = true } +tracing = "0.1.40" # # # # # # # # # # Windows Deps. # @@ -113,6 +141,10 @@ js-sys = "0.3.62" uuid = { version = "1.3.2", features = ["js"] } +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +# Used by: storage +directories = { version = "4.0.1", optional = true } + # # # # # # Docs. # # # # # # diff --git a/std/src/geolocation/mod.rs b/std/src/geolocation/mod.rs index ce0cc1d..79a3ec8 100644 --- a/std/src/geolocation/mod.rs +++ b/std/src/geolocation/mod.rs @@ -1,6 +1,12 @@ -mod core; -mod platform; -mod use_geolocation; - -pub use self::core::*; -pub use self::use_geolocation::*; +cfg_if::cfg_if! { + if #[cfg(any(windows, target_family = "wasm"))] { + pub mod core; + pub mod platform; + pub mod use_geolocation; + pub use self::core::*; + pub use self::use_geolocation::*; + } + else { + compile_error!("The geolocation module is not supported on this platform."); + } +} diff --git a/std/src/lib.rs b/std/src/lib.rs index 0332a7d..e474d6e 100644 --- a/std/src/lib.rs +++ b/std/src/lib.rs @@ -29,3 +29,9 @@ cfg_if::cfg_if! { pub mod clipboard; } } + +cfg_if::cfg_if! { + if #[cfg(feature = "storage")] { + pub mod storage; + } +} diff --git a/std/src/storage/client_storage/fs.rs b/std/src/storage/client_storage/fs.rs new file mode 100644 index 0000000..9d2dde5 --- /dev/null +++ b/std/src/storage/client_storage/fs.rs @@ -0,0 +1,128 @@ +use crate::storage::{StorageChannelPayload, StorageSubscription}; +use serde::de::DeserializeOwned; +use serde::Serialize; +use std::collections::HashMap; +use std::io::Write; +use std::sync::{OnceLock, RwLock}; +use tokio::sync::watch::{channel, Receiver}; + +use crate::storage::{serde_to_string, try_serde_from_string, StorageBacking, StorageSubscriber}; + +#[doc(hidden)] +/// Sets the directory where the storage files are located. 
+pub fn set_directory(path: std::path::PathBuf) {
+    LOCATION.set(path).unwrap();
+}
+
+#[doc(hidden)]
+pub fn set_dir_name(name: &str) {
+    set_directory(
+        directories::BaseDirs::new()
+            .unwrap()
+            .data_local_dir()
+            .join(name),
+    )
+}
+
+/// The location where the storage files are located.
+static LOCATION: OnceLock<std::path::PathBuf> = OnceLock::new();
+
+/// Set a value in the configured storage location using the key as the file name.
+fn set<T: Serialize>(key: String, value: &T) {
+    let as_str = serde_to_string(value);
+    let path = LOCATION
+        .get()
+        .expect("Call the set_dir macro before accessing persistent data");
+    std::fs::create_dir_all(path).unwrap();
+    let file_path = path.join(key);
+    let mut file = std::fs::File::create(file_path).unwrap();
+    file.write_all(as_str.as_bytes()).unwrap();
+}
+
+/// Get a value from the configured storage location using the key as the file name.
+fn get<T: DeserializeOwned>(key: &str) -> Option<T> {
+    let path = LOCATION
+        .get()
+        .expect("Call the set_dir macro before accessing persistent data")
+        .join(key);
+    let s = std::fs::read_to_string(path).ok()?;
+    try_serde_from_string(&s)
+}
+
+#[derive(Clone)]
+pub struct LocalStorage;
+
+impl StorageBacking for LocalStorage {
+    type Key = String;
+
+    fn set<T: Serialize + Send + Sync + Clone + 'static>(key: String, value: &T) {
+        let key_clone = key.clone();
+        let value_clone = (*value).clone();
+        set(key, value);
+
+        // If the subscriptions map is not initialized, we don't need to notify any subscribers.
+        if let Some(subscriptions) = SUBSCRIPTIONS.get() {
+            let read_binding = subscriptions.read().unwrap();
+            if let Some(subscription) = read_binding.get(&key_clone) {
+                subscription
+                    .tx
+                    .send(StorageChannelPayload::new(value_clone))
+                    .unwrap();
+            }
+        }
+    }
+
+    fn get<T: DeserializeOwned>(key: &String) -> Option<T> {
+        get(key)
+    }
+}
+
+// Note that this module contains an optimization that differs from the web version. Dioxus Desktop runs all windows in
+// the same thread, meaning that we can just directly notify the subscribers via the same channels, rather than using the
+// storage event listener.
+impl StorageSubscriber<LocalStorage> for LocalStorage {
+    fn subscribe<T: DeserializeOwned + Send + Sync + Clone + 'static>(
+        key: &<LocalStorage as StorageBacking>::Key,
+    ) -> Receiver<StorageChannelPayload> {
+        // Initialize the subscriptions map if it hasn't been initialized yet.
+        let subscriptions = SUBSCRIPTIONS.get_or_init(|| RwLock::new(HashMap::new()));
+
+        // Check if the subscription already exists. If it does, return the existing subscription's channel.
+        // If it doesn't, create a new subscription and return its channel.
+        let read_binding = subscriptions.read().unwrap();
+        match read_binding.get(key) {
+            Some(subscription) => subscription.tx.subscribe(),
+            None => {
+                drop(read_binding);
+                let (tx, rx) = channel::<StorageChannelPayload>(StorageChannelPayload::default());
+                let subscription = StorageSubscription::new::<LocalStorage, T>(tx, key.clone());
+
+                subscriptions
+                    .write()
+                    .unwrap()
+                    .insert(key.clone(), subscription);
+                rx
+            }
+        }
+    }
+
+    fn unsubscribe(key: &<LocalStorage as StorageBacking>::Key) {
+        tracing::trace!("Unsubscribing from \"{}\"", key);
+
+        // Fail silently if unsubscribe is called but the subscriptions map isn't initialized yet.
+        if let Some(subscriptions) = SUBSCRIPTIONS.get() {
+            let read_binding = subscriptions.read().unwrap();
+
+            // If the subscription exists, remove it from the subscriptions map.
+            if read_binding.contains_key(key) {
+                tracing::trace!("Found entry for \"{}\"", key);
+                drop(read_binding);
+                subscriptions.write().unwrap().remove(key);
+            }
+        }
+    }
+}
+
+/// A map of all the channels that are currently subscribed to and the getters for the corresponding storage entry.
+/// This gets initialized lazily.
+static SUBSCRIPTIONS: OnceLock>> = OnceLock::new(); diff --git a/std/src/storage/client_storage/memory.rs b/std/src/storage/client_storage/memory.rs new file mode 100644 index 0000000..195007f --- /dev/null +++ b/std/src/storage/client_storage/memory.rs @@ -0,0 +1,69 @@ +use std::any::Any; +use std::cell::RefCell; +use std::collections::HashMap; +use std::ops::{Deref, DerefMut}; +use std::rc::Rc; +use std::sync::Arc; + +use crate::storage::StorageBacking; + +#[derive(Clone)] +pub struct SessionStorage; + +impl StorageBacking for SessionStorage { + type Key = String; + + fn set(key: String, value: &T) { + let session = SessionStore::get_current_session(); + session.borrow_mut().insert(key, Arc::new(value.clone())); + } + + fn get(key: &String) -> Option { + let session = SessionStore::get_current_session(); + let read_binding = session.borrow(); + let value_any = read_binding.get(key)?; + value_any.downcast_ref::().cloned() + } +} + +/// An in-memory session store that is tied to the current Dioxus root context. +#[derive(Clone)] +struct SessionStore { + /// The underlying map of session data. + map: Rc>>>, +} + +impl SessionStore { + fn new() -> Self { + Self { + map: Rc::new(RefCell::new(HashMap::>::new())), + } + } + + /// Get the current session store from the root context, or create a new one if it doesn't exist. + fn get_current_session() -> Self { + dioxus::prelude::consume_context_from_scope::(dioxus::prelude::ScopeId::ROOT) + .map_or_else( + || { + let session = Self::new(); + dioxus::prelude::provide_root_context(session.clone()); + session + }, + |s| s, + ) + } +} + +impl Deref for SessionStore { + type Target = Rc>>>; + + fn deref(&self) -> &Self::Target { + &self.map + } +} + +impl DerefMut for SessionStore { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.map + } +} diff --git a/std/src/storage/client_storage/mod.rs b/std/src/storage/client_storage/mod.rs new file mode 100644 index 0000000..6acd450 --- /dev/null +++ b/std/src/storage/client_storage/mod.rs @@ -0,0 +1,43 @@ +#[allow(clippy::needless_doctest_main)] +/// Set the directory where the storage files are located on non-wasm targets. +/// +/// ```rust +/// use dioxus_std::set_dir; +/// +/// fn main(){ +/// // set the directory to the default location +/// set_dir!(); +/// } +/// ``` +/// ```rust +/// use dioxus_std::set_dir; +/// +/// fn main(){ +/// // set the directory to a custom location +/// set_dir!("path/to/dir"); +/// } +/// ``` +#[macro_export] +macro_rules! set_dir { + () => { + #[cfg(not(target_family = "wasm"))] + $crate::storage::set_dir_name(env!("CARGO_PKG_NAME")) + }; + ($path: literal) => { + #[cfg(not(target_family = "wasm"))] + $crate::storage::set_directory(std::path::PathBuf::from($path)) + }; +} +pub use set_dir; + +cfg_if::cfg_if! 
{ + if #[cfg(target_family = "wasm")] { + pub mod web; + pub use web::*; + } else { + pub mod fs; + pub use fs::*; + pub mod memory; + pub use memory::SessionStorage; + } +} diff --git a/std/src/storage/client_storage/web.rs b/std/src/storage/client_storage/web.rs new file mode 100644 index 0000000..b555a40 --- /dev/null +++ b/std/src/storage/client_storage/web.rs @@ -0,0 +1,137 @@ +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +use dioxus::prelude::*; +use once_cell::sync::Lazy; +use serde::{de::DeserializeOwned, Serialize}; +use tokio::sync::watch::{channel, Receiver}; +use wasm_bindgen::prelude::Closure; +use wasm_bindgen::JsCast; +use web_sys::{window, Storage}; + +use crate::storage::{ + serde_to_string, try_serde_from_string, StorageBacking, StorageChannelPayload, + StorageSubscriber, StorageSubscription, +}; + +#[derive(Clone)] +pub struct LocalStorage; + +impl StorageBacking for LocalStorage { + type Key = String; + + fn set(key: String, value: &T) { + set(key, value, WebStorageType::Local); + } + + fn get(key: &String) -> Option { + get(key, WebStorageType::Local) + } +} + +impl StorageSubscriber for LocalStorage { + fn subscribe( + key: &String, + ) -> Receiver { + let read_binding = SUBSCRIPTIONS.read().unwrap(); + match read_binding.get(key) { + Some(subscription) => subscription.tx.subscribe(), + None => { + drop(read_binding); + let (tx, rx) = channel::(StorageChannelPayload::default()); + let subscription = StorageSubscription::new::(tx, key.clone()); + SUBSCRIPTIONS + .write() + .unwrap() + .insert(key.clone(), subscription); + rx + } + } + } + + fn unsubscribe(key: &String) { + let read_binding = SUBSCRIPTIONS.read().unwrap(); + if let Some(entry) = read_binding.get(key) { + if entry.tx.is_closed() { + drop(read_binding); + SUBSCRIPTIONS.write().unwrap().remove(key); + } + } + } +} + +/// A map of all the channels that are currently subscribed to and the getters for the corresponding storage entry. This gets initialized lazily and will set up a listener for storage events. +static SUBSCRIPTIONS: Lazy>>> = Lazy::new(|| { + // Create a closure that will be called when a storage event occurs. + let closure = Closure::wrap(Box::new(move |e: web_sys::StorageEvent| { + tracing::trace!("Storage event: {:?}", e); + let key: String = e.key().unwrap(); + let read_binding = SUBSCRIPTIONS.read().unwrap(); + if let Some(subscription) = read_binding.get(&key) { + if subscription.tx.is_closed() { + tracing::trace!("Channel is closed, removing subscription for \"{}\"", key); + drop(read_binding); + SUBSCRIPTIONS.write().unwrap().remove(&key); + return; + } + // Call the getter for the given entry and send the value to said entry's channel. + match subscription.get_and_send() { + Ok(_) => tracing::trace!("Sent storage event"), + Err(err) => tracing::error!("Error sending storage event: {:?}", err.to_string()), + } + } + }) as Box); + // Register the closure to be called when a storage event occurs. + window() + .unwrap() + .add_event_listener_with_callback("storage", closure.as_ref().unchecked_ref()) + .unwrap(); + // Relinquish ownership of the closure to the JS runtime so that it can be called later. 
+ closure.forget(); + Arc::new(RwLock::new(HashMap::new())) +}); + +#[derive(Clone)] +pub struct SessionStorage; + +impl StorageBacking for SessionStorage { + type Key = String; + + fn set(key: String, value: &T) { + set(key, value, WebStorageType::Session); + } + + fn get(key: &String) -> Option { + get(key, WebStorageType::Session) + } +} + +fn set(key: String, value: &T, storage_type: WebStorageType) { + let as_str = serde_to_string(value); + get_storage_by_type(storage_type) + .unwrap() + .set_item(&key, &as_str) + .unwrap(); +} + +fn get(key: &str, storage_type: WebStorageType) -> Option { + let s: String = get_storage_by_type(storage_type)?.get_item(key).ok()??; + try_serde_from_string(&s) +} + +fn get_storage_by_type(storage_type: WebStorageType) -> Option { + window().map_or_else( + || None, + |window| match storage_type { + WebStorageType::Local => window.local_storage().ok()?, + WebStorageType::Session => window.session_storage().ok()?, + }, + ) +} + +enum WebStorageType { + Local, + Session, +} diff --git a/std/src/storage/mod.rs b/std/src/storage/mod.rs new file mode 100644 index 0000000..884995b --- /dev/null +++ b/std/src/storage/mod.rs @@ -0,0 +1,581 @@ +//! # dioxus-storage +//! A library for handling local storage ergonomically in Dioxus +//! ## Usage +//! ```rust +//! use dioxus_std::storage::use_persistent; +//! use dioxus::prelude::*; +//! +//! fn app() -> Element { +//! let num = use_persistent("count", || 0); +//! rsx! { +//! div { +//! button { +//! onclick: move |_| { +//! *num.write() += 1; +//! }, +//! "Increment" +//! } +//! div { +//! "{*num.read()}" +//! } +//! } +//! } +//! } +//! ``` + +mod client_storage; +mod persistence; + +pub use client_storage::{LocalStorage, SessionStorage}; +use futures_util::stream::StreamExt; +pub use persistence::{ + new_persistent, new_singleton_persistent, use_persistent, use_singleton_persistent, +}; + +use dioxus::prelude::*; +use postcard::to_allocvec; +use serde::{de::DeserializeOwned, Serialize}; +use std::any::Any; +use std::fmt::{Debug, Display}; +use std::ops::{Deref, DerefMut}; +use std::sync::Arc; +use tokio::sync::watch::error::SendError; +use tokio::sync::watch::{Receiver, Sender}; + +pub use client_storage::set_dir; +#[cfg(not(target_family = "wasm"))] +pub use client_storage::{set_dir_name, set_directory}; + +/// A storage hook that can be used to store data that will persist across application reloads. This hook is generic over the storage location which can be useful for other hooks. +/// +/// This hook returns a Signal that can be used to read and modify the state. +/// +/// ## Usage +/// +/// ```rust +/// use dioxus_std::storage::{use_storage, StorageBacking}; +/// use dioxus::prelude::*; +/// use dioxus_signals::Signal; +/// +/// // This hook can be used with any storage backing without multiple versions of the hook +/// fn use_user_id() -> Signal where S: StorageBacking { +/// use_storage::("user-id", || 123) +/// } +/// ``` +pub fn use_storage(key: S::Key, init: impl FnOnce() -> T) -> Signal +where + S: StorageBacking, + T: Serialize + DeserializeOwned + Clone + Send + Sync + PartialEq + 'static, + S::Key: Clone, +{ + use_hook(|| new_storage::(key, init)) +} + +/// Creates a Signal that can be used to store data that will persist across application reloads. +/// +/// This hook returns a Signal that can be used to read and modify the state. 
+/// +/// ## Usage +/// +/// ```rust +/// use dioxus_std::storage::{new_storage, StorageBacking}; +/// use dioxus::prelude::*; +/// use dioxus_signals::Signal; +/// +/// // This hook can be used with any storage backing without multiple versions of the hook +/// fn user_id() -> Signal where S: StorageBacking { +/// new_storage::("user-id", || 123) +/// } +/// ``` +pub fn new_storage(key: S::Key, init: impl FnOnce() -> T) -> Signal +where + S: StorageBacking, + T: Serialize + DeserializeOwned + Clone + Send + Sync + PartialEq + 'static, + S::Key: Clone, +{ + let mut init = Some(init); + + if cfg!(feature = "ssr") { + // SSR does not support storage on the backend. We will just use a normal Signal to represent the initial state. + // The client will hydrate this with a correct StorageEntry and maintain state. + Signal::new(init.take().unwrap()()) + } else if cfg!(feature = "hydrate") { + let key_clone = key.clone(); + let mut storage_entry = new_storage_entry::(key, init.take().unwrap()); + if generation() == 0 { + // The first generation is rendered on the server side and so must be hydrated. + needs_update(); + } + if generation() == 1 { + // The first time the vdom is hydrated, we set the correct value from storage and set up the subscription to storage events. + storage_entry.set(get_from_storage::(key_clone, init.take().unwrap())); + storage_entry.save_to_storage_on_change(); + } + storage_entry.data + } else { + // The client is rendered normally, so we can just use the storage entry. + let storage_entry = new_storage_entry::(key, init.take().unwrap()); + storage_entry.save_to_storage_on_change(); + storage_entry.data + } +} + +/// A storage hook that can be used to store data that will persist across application reloads and be synced across all app sessions for a given installation or browser. +/// +/// This hook returns a Signal that can be used to read and modify the state. +/// The changes to the state will be persisted to storage and all other app sessions will be notified of the change to update their local state. +pub fn use_synced_storage(key: S::Key, init: impl FnOnce() -> T) -> Signal +where + S: StorageBacking + StorageSubscriber, + T: Serialize + DeserializeOwned + Clone + Send + Sync + PartialEq + 'static, + S::Key: Clone, +{ + use_hook(|| new_synced_storage::(key, init)) +} + +/// Create a signal that can be used to store data that will persist across application reloads and be synced across all app sessions for a given installation or browser. +/// +/// This hook returns a Signal that can be used to read and modify the state. +/// The changes to the state will be persisted to storage and all other app sessions will be notified of the change to update their local state. +pub fn new_synced_storage(key: S::Key, init: impl FnOnce() -> T) -> Signal +where + S: StorageBacking + StorageSubscriber, + T: Serialize + DeserializeOwned + Clone + Send + Sync + PartialEq + 'static, + S::Key: Clone, +{ + let mut init = Some(init); + let signal = { + if cfg!(feature = "ssr") { + // SSR does not support synced storage on the backend. We will just use a normal Signal to represent the initial state. + // The client will hydrate this with a correct SyncedStorageEntry and maintain state. + Signal::new(init.take().unwrap()()) + } else if cfg!(feature = "hydrate") { + let key_clone = key.clone(); + let mut storage_entry = new_synced_storage_entry::(key, init.take().unwrap()); + if generation() == 0 { + // The first generation is rendered on the server side and so must be hydrated. 
+ needs_update(); + } + if generation() == 1 { + // The first time the vdom is hydrated, we set the correct value from storage and set up the subscription to storage events. + storage_entry + .entry + .set(get_from_storage::(key_clone, init.take().unwrap())); + storage_entry.save_to_storage_on_change(); + storage_entry.subscribe_to_storage(); + } + *storage_entry.data() + } else { + // The client is rendered normally, so we can just use the synced storage entry. + let storage_entry = new_synced_storage_entry::(key, init.take().unwrap()); + storage_entry.save_to_storage_on_change(); + storage_entry.subscribe_to_storage(); + *storage_entry.data() + } + }; + signal +} + +/// A hook that creates a StorageEntry with the latest value from storage or the init value if it doesn't exist. +pub fn use_storage_entry(key: S::Key, init: impl FnOnce() -> T) -> StorageEntry +where + S: StorageBacking, + T: Serialize + DeserializeOwned + Clone + Send + Sync + PartialEq + 'static, + S::Key: Clone, +{ + use_hook(|| new_storage_entry::(key, init)) +} + +/// A hook that creates a StorageEntry with the latest value from storage or the init value if it doesn't exist, and provides a channel to subscribe to updates to the underlying storage. +pub fn use_synced_storage_entry( + key: S::Key, + init: impl FnOnce() -> T, +) -> SyncedStorageEntry +where + S: StorageBacking + StorageSubscriber, + T: Serialize + DeserializeOwned + Clone + Send + Sync + PartialEq + 'static, + S::Key: Clone, +{ + use_hook(|| new_synced_storage_entry::(key, init)) +} + +/// Returns a StorageEntry with the latest value from storage or the init value if it doesn't exist. +pub fn new_storage_entry(key: S::Key, init: impl FnOnce() -> T) -> StorageEntry +where + S: StorageBacking, + T: Serialize + DeserializeOwned + Clone + Send + Sync + 'static, + S::Key: Clone, +{ + let data = get_from_storage::(key.clone(), init); + StorageEntry::new(key, data) +} + +/// Returns a synced StorageEntry with the latest value from storage or the init value if it doesn't exist. +/// +/// This differs from `storage_entry` in that this one will return a channel to subscribe to updates to the underlying storage. +pub fn new_synced_storage_entry( + key: S::Key, + init: impl FnOnce() -> T, +) -> SyncedStorageEntry +where + S: StorageBacking + StorageSubscriber, + T: Serialize + DeserializeOwned + Clone + PartialEq + Send + Sync + 'static, + S::Key: Clone, +{ + let data = get_from_storage::(key.clone(), init); + SyncedStorageEntry::new(key, data) +} + +/// Returns a value from storage or the init value if it doesn't exist. 
+pub fn get_from_storage< + S: StorageBacking, + T: Serialize + DeserializeOwned + Send + Sync + Clone + 'static, +>( + key: S::Key, + init: impl FnOnce() -> T, +) -> T { + S::get(&key).unwrap_or_else(|| { + let data = init(); + S::set(key, &data); + data + }) +} + +/// A trait for common functionality between StorageEntry and SyncedStorageEntry +pub trait StorageEntryTrait: + Clone + 'static +{ + /// Saves the current state to storage + fn save(&self); + + /// Updates the state from storage + fn update(&mut self); + + /// Gets the key used to store the data in storage + fn key(&self) -> &S::Key; + + /// Gets the signal that can be used to read and modify the state + fn data(&self) -> &Signal; + + /// Creates a hook that will save the state to storage when the state changes + fn save_to_storage_on_change(&self) + where + S: StorageBacking, + T: Serialize + DeserializeOwned + Clone + PartialEq + 'static, + { + let entry_clone = self.clone(); + let old = Signal::new(self.data().cloned()); + let data = *self.data(); + spawn(async move { + loop { + let (rc, mut reactive_context) = ReactiveContext::new(); + rc.run_in(|| { + if *old.read() != *data.read() { + tracing::trace!("Saving to storage"); + entry_clone.save(); + } + }); + if reactive_context.next().await.is_none() { + break; + } + } + }); + } +} + +/// A wrapper around StorageEntry that provides a channel to subscribe to updates to the underlying storage. +#[derive(Clone)] +pub struct SyncedStorageEntry< + S: StorageBacking + StorageSubscriber, + T: Serialize + DeserializeOwned + Clone + Send + Sync + PartialEq + 'static, +> { + /// The underlying StorageEntry that is used to store the data and track the state + pub(crate) entry: StorageEntry, + /// The channel to subscribe to updates to the underlying storage + pub(crate) channel: Receiver, +} + +impl SyncedStorageEntry +where + S: StorageBacking + StorageSubscriber, + T: Serialize + DeserializeOwned + Clone + Send + Sync + PartialEq + 'static, +{ + pub fn new(key: S::Key, data: T) -> Self { + let channel = S::subscribe::(&key); + Self { + entry: StorageEntry::new(key, data), + channel, + } + } + + /// Gets the channel to subscribe to updates to the underlying storage + pub fn channel(&self) -> &Receiver { + &self.channel + } + + /// Creates a hook that will update the state when the underlying storage changes + pub fn subscribe_to_storage(&self) { + let storage_entry_signal = *self.data(); + let channel = self.channel.clone(); + spawn(async move { + to_owned![channel, storage_entry_signal]; + loop { + // Wait for an update to the channel + if channel.changed().await.is_ok() { + // Retrieve the latest value from the channel, mark it as read, and update the state + let payload = channel.borrow_and_update(); + *storage_entry_signal.write() = payload + .data + .downcast_ref::() + .expect("Type mismatch with storage entry") + .clone(); + } + } + }); + } +} + +impl StorageEntryTrait for SyncedStorageEntry +where + S: StorageBacking + StorageSubscriber, + T: Serialize + DeserializeOwned + Clone + Send + Sync + PartialEq + 'static, +{ + fn save(&self) { + // We want to save in the following conditions + // - The value from the channel is different from the current value + // - The value from the channel could not be determined, likely because it hasn't been set yet + if let Some(payload) = self.channel.borrow().data.downcast_ref::() { + if *self.entry.data.read() == *payload { + return; + } + } + self.entry.save(); + } + + fn update(&mut self) { + self.entry.update(); + } + + fn key(&self) -> 
&S::Key { + self.entry.key() + } + + fn data(&self) -> &Signal { + &self.entry.data + } +} + +/// A storage entry that can be used to store data across application reloads. It optionally provides a channel to subscribe to updates to the underlying storage. +#[derive(Clone)] +pub struct StorageEntry< + S: StorageBacking, + T: Serialize + DeserializeOwned + Clone + Send + Sync + 'static, +> { + /// The key used to store the data in storage + pub(crate) key: S::Key, + /// A signal that can be used to read and modify the state + pub(crate) data: Signal, +} + +impl StorageEntry +where + S: StorageBacking, + T: Serialize + DeserializeOwned + Clone + Send + Sync + 'static, + S::Key: Clone, +{ + /// Creates a new StorageEntry + pub fn new(key: S::Key, data: T) -> Self { + Self { + key, + data: Signal::new_in_scope( + data, + current_scope_id().expect("must be called from inside of the dioxus context"), + ), + } + } +} + +impl StorageEntryTrait for StorageEntry +where + S: StorageBacking, + T: Serialize + DeserializeOwned + Clone + PartialEq + Send + Sync + 'static, +{ + fn save(&self) { + S::set(self.key.clone(), &*self.data.read()); + } + + fn update(&mut self) { + self.data = S::get(&self.key).unwrap_or(self.data); + } + + fn key(&self) -> &S::Key { + &self.key + } + + fn data(&self) -> &Signal { + &self.data + } +} + +impl Deref + for StorageEntry +{ + type Target = Signal; + + fn deref(&self) -> &Signal { + &self.data + } +} + +impl DerefMut + for StorageEntry +{ + fn deref_mut(&mut self) -> &mut Signal { + &mut self.data + } +} + +impl Display + for StorageEntry +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.data.fmt(f) + } +} + +impl Debug + for StorageEntry +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.data.fmt(f) + } +} + +/// A trait for a storage backing +pub trait StorageBacking: Clone + 'static { + /// The key type used to store data in storage + type Key: PartialEq + Clone + Debug + Send + Sync + 'static; + /// Gets a value from storage for the given key + fn get(key: &Self::Key) -> Option; + /// Sets a value in storage for the given key + fn set(key: Self::Key, value: &T); +} + +/// A trait for a subscriber to events from a storage backing +pub trait StorageSubscriber { + /// Subscribes to events from a storage backing for the given key + fn subscribe( + key: &S::Key, + ) -> Receiver; + /// Unsubscribes from events from a storage backing for the given key + fn unsubscribe(key: &S::Key); +} + +/// A struct to hold information about processing a storage event. +pub struct StorageSubscription { + /// A getter function that will get the data from storage and return it as a StorageChannelPayload. + pub(crate) getter: Box StorageChannelPayload + 'static + Send + Sync>, + + /// The channel to send the data to. + pub(crate) tx: Arc>, +} + +impl StorageSubscription { + pub fn new< + S: StorageBacking + StorageSubscriber, + T: DeserializeOwned + Send + Sync + Clone + 'static, + >( + tx: Sender, + key: S::Key, + ) -> Self { + let getter = move || { + let data = S::get::(&key).unwrap(); + StorageChannelPayload::new(data) + }; + Self { + getter: Box::new(getter), + tx: Arc::new(tx), + } + } + + /// Gets the latest data from storage and sends it to the channel. + pub fn get_and_send(&self) -> Result<(), SendError> { + let payload = (self.getter)(); + self.tx.send(payload) + } +} + +/// A payload for a storage channel that contains the latest value from storage. 
+#[derive(Clone, Debug)] +pub struct StorageChannelPayload { + data: Arc, +} + +impl StorageChannelPayload { + /// Creates a new StorageChannelPayload + pub fn new(data: T) -> Self { + Self { + data: Arc::new(data), + } + } + + /// Gets the data from the payload + pub fn data(&self) -> Option<&T> { + self.data.downcast_ref::() + } +} + +impl Default for StorageChannelPayload { + fn default() -> Self { + Self { data: Arc::new(()) } + } +} + +// Helper functions + +/// Serializes a value to a string and compresses it. +pub(crate) fn serde_to_string(value: &T) -> String { + let serialized = to_allocvec(value).unwrap(); + let compressed = yazi::compress( + &serialized, + yazi::Format::Zlib, + yazi::CompressionLevel::BestSize, + ) + .unwrap(); + let as_str: String = compressed + .iter() + .flat_map(|u| { + [ + char::from_digit(((*u & 0xF0) >> 4).into(), 16).unwrap(), + char::from_digit((*u & 0x0F).into(), 16).unwrap(), + ] + .into_iter() + }) + .collect(); + as_str +} + +#[allow(unused)] +/// Deserializes a value from a string and unwraps errors. +pub(crate) fn serde_from_string(value: &str) -> T { + try_serde_from_string(value).unwrap() +} + +/// Deserializes and decompresses a value from a string and returns None if there is an error. +pub(crate) fn try_serde_from_string(value: &str) -> Option { + let mut bytes: Vec = Vec::new(); + let mut chars = value.chars(); + while let Some(c) = chars.next() { + let n1 = c.to_digit(16)?; + let c2 = chars.next()?; + let n2 = c2.to_digit(16)?; + bytes.push((n1 * 16 + n2) as u8); + } + match yazi::decompress(&bytes, yazi::Format::Zlib) { + Ok((decompressed, _)) => match postcard::from_bytes(&decompressed) { + Ok(v) => Some(v), + Err(err) => None, + }, + Err(err) => None, + } +} diff --git a/std/src/storage/persistence.rs b/std/src/storage/persistence.rs new file mode 100644 index 0000000..e7eb76e --- /dev/null +++ b/std/src/storage/persistence.rs @@ -0,0 +1,66 @@ +use crate::storage::new_storage_entry; +use crate::storage::SessionStorage; +use dioxus::prelude::*; +use dioxus_signals::Signal; +use serde::de::DeserializeOwned; +use serde::Serialize; + +use super::StorageEntryTrait; + +/// A persistent storage hook that can be used to store data across application reloads. +/// +/// Depending on the platform this uses either local storage or a file storage +#[allow(clippy::needless_return)] +pub fn use_persistent< + T: Serialize + DeserializeOwned + Default + Clone + Send + Sync + PartialEq + 'static, +>( + key: impl ToString, + init: impl FnOnce() -> T, +) -> Signal { + use_hook(|| new_persistent(key, init)) +} + +/// Creates a persistent storage signal that can be used to store data across application reloads. +/// +/// Depending on the platform this uses either local storage or a file storage +#[allow(clippy::needless_return)] +pub fn new_persistent< + T: Serialize + DeserializeOwned + Default + Clone + Send + Sync + PartialEq + 'static, +>( + key: impl ToString, + init: impl FnOnce() -> T, +) -> Signal { + let storage_entry = new_storage_entry::(key.to_string(), init); + storage_entry.save_to_storage_on_change(); + storage_entry.data +} + +/// A persistent storage hook that can be used to store data across application reloads. +/// The state will be the same for every call to this hook from the same line of code. 
+///
+/// Depending on the platform this uses either local storage or a file storage
+#[allow(clippy::needless_return)]
+#[track_caller]
+pub fn use_singleton_persistent<
+    T: Serialize + DeserializeOwned + Default + Clone + Send + Sync + PartialEq + 'static,
+>(
+    init: impl FnOnce() -> T,
+) -> Signal<T> {
+    use_hook(|| new_singleton_persistent(init))
+}
+
+/// Create a persistent storage signal that can be used to store data across application reloads.
+/// The state will be the same for every call to this hook from the same line of code.
+///
+/// Depending on the platform this uses either local storage or a file storage
+#[allow(clippy::needless_return)]
+#[track_caller]
+pub fn new_singleton_persistent<
+    T: Serialize + DeserializeOwned + Default + Clone + Send + Sync + PartialEq + 'static,
+>(
+    init: impl FnOnce() -> T,
+) -> Signal<T> {
+    let caller = std::panic::Location::caller();
+    let key = format!("{}:{}", caller.file(), caller.line());
+    new_persistent(key, init)
+}
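
For quick reference, below is a minimal sketch of how an application might consume the storage API added in this diff. It is assembled from the `examples/storage` app and the `use_persistent` doc example above; the key names, the `i32` counter type, and the two-button layout are illustrative assumptions, not part of the patch.

```rust
use dioxus::prelude::*;
use dioxus_std::storage::{use_persistent, use_synced_storage, LocalStorage};

fn main() {
    // On non-wasm targets this sets the directory used by the file-backed storage;
    // on wasm targets the macro expands to nothing.
    dioxus_std::storage::set_dir!();
    launch(app);
}

fn app() -> Element {
    // Persists across application reloads under the key "count".
    let mut count = use_persistent("count", || 0);
    // Persists and is kept in sync across windows/sessions through LocalStorage.
    let mut shared = use_synced_storage::<LocalStorage, i32>("shared".to_string(), || 0);

    rsx! {
        button { onclick: move |_| *count.write() += 1, "local count: {count}" }
        button { onclick: move |_| *shared.write() += 1, "synced count: {shared}" }
    }
}
```

In the example app, `Page2` also uses `use_singleton_persistent`, which behaves like `use_persistent` but derives the storage key from the call site (`file:line`), so no explicit key is passed.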