diff --git a/src/batch_shuffle.rs b/src/batch_shuffle.rs
index df4b703..cfc72ee 100644
--- a/src/batch_shuffle.rs
+++ b/src/batch_shuffle.rs
@@ -13,7 +13,7 @@ use std::{
 
 use crate::{dataset::FSRSDataset, FSRSItem};
 
-pub struct BatchShuffledDataset<D, I> {
+pub(crate) struct BatchShuffledDataset<D, I> {
     dataset: Arc<D>,
     indices: Vec<usize>,
     input: PhantomData<I>,
diff --git a/src/cosine_annealing.rs b/src/cosine_annealing.rs
index 95751c0..325acf1 100644
--- a/src/cosine_annealing.rs
+++ b/src/cosine_annealing.rs
@@ -1,6 +1,6 @@
 use burn::{lr_scheduler::LrScheduler, tensor::backend::Backend, LearningRate};
 #[derive(Clone, Debug)]
-pub struct CosineAnnealingLR {
+pub(crate) struct CosineAnnealingLR {
     t_max: f64,
     eta_min: f64,
     init_lr: LearningRate,
diff --git a/src/dataset.rs b/src/dataset.rs
index 47cb3b2..82ec6e8 100644
--- a/src/dataset.rs
+++ b/src/dataset.rs
@@ -50,7 +50,7 @@ impl FSRSItem {
     }
 }
 
-pub struct FSRSBatcher<B: Backend> {
+pub(crate) struct FSRSBatcher<B: Backend> {
     device: B::Device,
 }
 
@@ -61,7 +61,7 @@ impl<B: Backend> FSRSBatcher<B> {
 }
 
 #[derive(Debug, Clone)]
-pub struct FSRSBatch<B: Backend> {
+pub(crate) struct FSRSBatch<B: Backend> {
     pub t_historys: Tensor<B, 2, Float>,
     pub r_historys: Tensor<B, 2, Float>,
     pub delta_ts: Tensor<B, 1, Float>,
@@ -133,7 +133,7 @@ impl<B: Backend> Batcher<FSRSItem, FSRSBatch<B>> for FSRSBatcher<B> {
     }
 }
 
-pub struct FSRSDataset {
+pub(crate) struct FSRSDataset {
     items: Vec<FSRSItem>,
 }
 
diff --git a/src/inference.rs b/src/inference.rs
index ad2d57e..4d5b64f 100644
--- a/src/inference.rs
+++ b/src/inference.rs
@@ -13,10 +13,10 @@ use crate::model::Model;
 use crate::training::BCELoss;
 use crate::{FSRSError, FSRSItem};
 use burn::tensor::ElementConversion;
-pub const DECAY: f64 = -0.5;
+pub(crate) const DECAY: f64 = -0.5;
 /// (9/10) ^ (1 / DECAY) - 1
-pub const FACTOR: f64 = 19f64 / 81f64;
-pub const S_MIN: f32 = 0.01;
+pub(crate) const FACTOR: f64 = 19f64 / 81f64;
+pub(crate) const S_MIN: f32 = 0.01;
 /// This is a slice for efficiency, but should always be 17 in length.
 pub type Parameters = [f32];
 use itertools::izip;
diff --git a/src/model.rs b/src/model.rs
index 19b7f16..08ef3cd 100644
--- a/src/model.rs
+++ b/src/model.rs
@@ -16,7 +16,7 @@ pub struct Model<B: Backend> {
     pub config: ModelConfig,
 }
 
-pub trait Get<B: Backend, const N: usize> {
+pub(crate) trait Get<B: Backend, const N: usize> {
     fn get(&self, n: usize) -> Tensor<B, N>;
 }
 
@@ -177,7 +177,7 @@ impl<B: Backend> Model<B> {
 }
 
 #[derive(Debug, Clone)]
-pub struct MemoryStateTensors<B: Backend> {
+pub(crate) struct MemoryStateTensors<B: Backend> {
     pub stability: Tensor<B, 1>,
     pub difficulty: Tensor<B, 1>,
 }
@@ -240,7 +240,7 @@ impl<B: Backend> FSRS<B> {
     }
 }
 
-pub fn parameters_to_model<B: Backend>(parameters: &Parameters) -> Model<B> {
+pub(crate) fn parameters_to_model<B: Backend>(parameters: &Parameters) -> Model<B> {
     let config = ModelConfig::default();
     let mut model = Model::new(config);
     model.w = Param::from(Tensor::from_floats(
diff --git a/src/pre_training.rs b/src/pre_training.rs
index a3ae59a..872c1dc 100644
--- a/src/pre_training.rs
+++ b/src/pre_training.rs
@@ -106,7 +106,7 @@ fn loss(
     logloss + l1
 }
 
-pub const INIT_S_MAX: f32 = 100.0;
+pub(crate) const INIT_S_MAX: f32 = 100.0;
 
 fn search_parameters(
     mut pretrainset: HashMap<u32, Vec<AverageRecall>>,
diff --git a/src/training.rs b/src/training.rs
index 65a57d5..90bf52d 100644
--- a/src/training.rs
+++ b/src/training.rs
@@ -166,7 +166,7 @@ impl MetricsRenderer for ProgressCollector {
 }
 
 #[derive(Config)]
-pub struct TrainingConfig {
+pub(crate) struct TrainingConfig {
     pub model: ModelConfig,
     pub optimizer: AdamConfig,
     #[config(default = 5)]
diff --git a/src/weight_clipper.rs b/src/weight_clipper.rs
index 0c475ec..13e2df0 100644
--- a/src/weight_clipper.rs
+++ b/src/weight_clipper.rs
@@ -4,7 +4,7 @@ use crate::{
 };
 use burn::tensor::{backend::Backend, Data, Tensor};
 
-pub fn weight_clipper<B: Backend>(parameters: Tensor<B, 1>) -> Tensor<B, 1> {
+pub(crate) fn weight_clipper<B: Backend>(parameters: Tensor<B, 1>) -> Tensor<B, 1> {
     let val = clip_parameters(&parameters.to_data().convert().value);
     Tensor::from_data(
         Data::new(val, parameters.shape()).convert(),
@@ -12,7 +12,7 @@ pub fn weight_clipper<B: Backend>(parameters: Tensor<B, 1>) -> Tensor<B, 1> {
     )
 }
 
-pub fn clip_parameters(parameters: &Parameters) -> Vec<f32> {
+pub(crate) fn clip_parameters(parameters: &Parameters) -> Vec<f32> {
     // https://regex101.com/r/21mXNI/1
     const CLAMPS: [(f32, f32); 17] = [
         (S_MIN, INIT_S_MAX),
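
Note on the change: every hunk in this patch makes the same edit, narrowing visibility from `pub` to `pub(crate)`. The affected items stay reachable from any module inside the crate, but they drop out of the crate's external API and generated docs, so internals such as `FSRSBatcher`, `CosineAnnealingLR`, `weight_clipper`, and the tuning constants can no longer be named (or relied upon) by downstream users. A minimal, self-contained sketch of the semantics follows; the module layout and the `next_interval` helper are illustrative stand-ins, not code from this patch.

// Sketch: `pub` vs `pub(crate)` visibility in one file.
mod inference {
    // Crate-visible: every module in this crate can read these,
    // but a dependent crate cannot name them.
    pub(crate) const DECAY: f64 = -0.5;
    /// (9/10) ^ (1 / DECAY) - 1
    pub(crate) const FACTOR: f64 = 19.0 / 81.0;

    // Still `pub`: remains part of the external API surface.
    // Hypothetical helper solving R = (1 + FACTOR * t / S)^DECAY for t.
    pub fn next_interval(stability: f64, desired_retention: f64) -> f64 {
        stability / FACTOR * (desired_retention.powf(1.0 / DECAY) - 1.0)
    }
}

fn main() {
    // Same crate: both the `pub` fn and the `pub(crate)` const resolve;
    // from another crate, only `next_interval` would.
    println!("{}", inference::next_interval(5.0, 0.9)); // ~5.0: R = 0.9 is the reference retention
    println!("{}", inference::DECAY);
}

One practical upside of `pub(crate)` over plain `pub`: rustc's dead-code lint can now flag any of these items that lose their last in-crate use, whereas `pub` items are exempt because they are assumed reachable from outside.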