Skip to content

Commit

Permalink
feat: reduce MIN_CHUNK_SIZE to 1 byte and MIN_ENCRYPTABLE_BYTES to 3 bytes.
Browse files Browse the repository at this point in the history
BREAKING CHANGE: this will affect current chunked data
  • Loading branch information
joshuef committed Jan 9, 2024
1 parent c8918b7 commit ec748df
Showing 1 changed file with 11 additions and 7 deletions.
18 changes: 11 additions & 7 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,7 @@ use std::{
ops::Range,
path::{Path, PathBuf},
};
use tempfile::{tempdir, TempDir};
use tempfile::tempdir;
use xor_name::XorName;

// export these because they are used in our public API.
Expand All @@ -128,8 +128,8 @@ pub use xor_name;
pub const MIN_ENCRYPTABLE_BYTES: usize = 3 * MIN_CHUNK_SIZE;
/// The maximum size (before compression) of an individual chunk of a file, defined as 500kiB.
pub const MAX_CHUNK_SIZE: usize = 512 * 1024;
/// The minimum size (before compression) of an individual chunk of a file, defined as 1kiB.
pub const MIN_CHUNK_SIZE: usize = 1024;
/// The minimum size (before compression) of an individual chunk of a file, defined as 1B.
pub const MIN_CHUNK_SIZE: usize = 1;
/// Controls the compression-speed vs compression-density tradeoffs. The higher the quality, the
/// slower the compression. Range is 0 to 11.
pub const COMPRESSION_QUALITY: i32 = 6;
Expand Down Expand Up @@ -269,14 +269,17 @@ pub struct StreamSelfDecryptor {
// Progressing collection of received encrypted chunks
encrypted_chunks: BTreeMap<usize, XorName>,
// Temp directory to hold the un-processed encrypted_chunks
temp_dir: TempDir,
temp_dir: PathBuf,
}

impl StreamSelfDecryptor {
    /// For decryption, return with an initialized streaming decryptor
pub fn decrypt_to_file(file_path: Box<PathBuf>, data_map: &DataMap) -> Result<Self> {
let temp_dir = tempdir()?;
let temp_dir = tempdir()?.path().join(file_path.file_name().unwrap());

println!("temp_dir: {:?}", temp_dir);
let src_hashes = extract_hashes(data_map);
println!("hashses len {:?}", src_hashes.len());
Ok(StreamSelfDecryptor {
file_path,
chunk_index: 0,
Expand All @@ -288,6 +291,7 @@ impl StreamSelfDecryptor {

/// Return true if all encrypted chunk got received and file decrypted.
pub fn next_encrypted(&mut self, encrypted_chunk: EncryptedChunk) -> Result<bool> {
println!("Src hashes len: {:?}", self.src_hashes.len() );
if encrypted_chunk.index == self.chunk_index {
let decrypted_content =
decrypt_chunk(self.chunk_index, encrypted_chunk.content, &self.src_hashes)?;
Expand All @@ -303,7 +307,7 @@ impl StreamSelfDecryptor {
} else {
let chunk_name = XorName::from_content(&encrypted_chunk.content);

let file_path = self.temp_dir.path().join(hex::encode(chunk_name));
let file_path = self.temp_dir.join(hex::encode(chunk_name));
let mut output_file = File::create(file_path)?;
output_file.write_all(&encrypted_chunk.content)?;

Expand Down Expand Up @@ -333,7 +337,7 @@ impl StreamSelfDecryptor {
// Drain any in-order chunks due to the recent filled in piece.
fn drain_unprocessed(&mut self) -> Result<()> {
while let Some(chunk_name) = self.encrypted_chunks.get(&self.chunk_index) {
let file_path = self.temp_dir.path().join(&hex::encode(chunk_name));
let file_path = self.temp_dir.join(&hex::encode(chunk_name));
let mut chunk_file = File::open(file_path)?;
let mut chunk_data = Vec::new();
let _ = chunk_file.read_to_end(&mut chunk_data)?;
Expand Down

0 comments on commit ec748df

Please sign in to comment.