diff --git a/pumpkin-world/Cargo.toml b/pumpkin-world/Cargo.toml index 1c7eacd8..36b2c7f6 100644 --- a/pumpkin-world/Cargo.toml +++ b/pumpkin-world/Cargo.toml @@ -9,6 +9,9 @@ pumpkin-core = { path = "../pumpkin-core" } pumpkin-config = { path = "../pumpkin-config" } pumpkin-macros = { path = "../pumpkin-macros" } +bytes.workspace = true + + tokio.workspace = true rayon.workspace = true derive_more.workspace = true diff --git a/pumpkin-world/src/block/block_registry.rs b/pumpkin-world/src/block/block_registry.rs index d043c21f..20a424d9 100644 --- a/pumpkin-world/src/block/block_registry.rs +++ b/pumpkin-world/src/block/block_registry.rs @@ -24,6 +24,14 @@ pub static BLOCK_ID_BY_REGISTRY_ID: LazyLock> = LazyLock::n map }); +pub static BLOCK_ID_TO_REGISTRY_ID: LazyLock> = LazyLock::new(|| { + let mut map = HashMap::new(); + for block in &*BLOCKS.blocks { + map.insert(block.default_state_id, block.name.clone()); + } + map +}); + pub static BLOCK_ID_BY_STATE_ID: LazyLock> = LazyLock::new(|| { let mut map = HashMap::new(); for block in &BLOCKS.blocks { diff --git a/pumpkin-world/src/chunk/anvil.rs b/pumpkin-world/src/chunk/anvil.rs index 2391537f..5b151783 100644 --- a/pumpkin-world/src/chunk/anvil.rs +++ b/pumpkin-world/src/chunk/anvil.rs @@ -1,60 +1,62 @@ use std::{ + collections::HashMap, fs::OpenOptions, - io::{Read, Seek}, + io::{Read, Seek, SeekFrom, Write}, }; -use flate2::bufread::{GzDecoder, ZlibDecoder}; +use bytes::*; +use fastnbt::LongArray; +use flate2::bufread::{GzDecoder, GzEncoder, ZlibDecoder, ZlibEncoder}; +use pumpkin_core::math::ceil_log2; -use crate::level::LevelFolder; +use crate::{ + block::block_registry::BLOCK_ID_TO_REGISTRY_ID, chunk::ChunkWritingError, level::LevelFolder, +}; -use super::{ChunkData, ChunkReader, ChunkReadingError, CompressionError}; +use super::{ + ChunkData, ChunkNbt, ChunkReader, ChunkReadingError, ChunkSection, ChunkSectionBlockStates, + ChunkSerializingError, ChunkWriter, CompressionError, PaletteEntry, +}; -#[derive(Clone)] -pub struct AnvilChunkReader {} +// 1.21.4 +const WORLD_DATA_VERSION: i32 = 4189; -impl Default for AnvilChunkReader { - fn default() -> Self { - Self::new() - } -} - -impl AnvilChunkReader { - pub fn new() -> Self { - Self {} - } -} +#[derive(Clone, Default)] +pub struct AnvilChunkFormat; #[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(u8)] pub enum Compression { /// GZip Compression - GZip, + GZip = 1, /// ZLib Compression - ZLib, - /// Uncompressed (since a version before 1.15.1) - None, + ZLib = 2, /// LZ4 Compression (since 24w04a) - LZ4, + LZ4 = 4, /// Custom compression algorithm (since 24w05a) - Custom, + Custom = 127, } impl Compression { - pub fn from_byte(byte: u8) -> Option { + /// Returns Ok when a compression is found otherwise an Err + #[allow(clippy::result_unit_err)] + pub fn from_byte(byte: u8) -> Result, ()> { match byte { - 1 => Some(Self::GZip), - 2 => Some(Self::ZLib), - 3 => Some(Self::None), - 4 => Some(Self::LZ4), - // Creative i guess? 
- 127 => Some(Self::Custom), - _ => None, + 1 => Ok(Some(Self::GZip)), + 2 => Ok(Some(Self::ZLib)), + // Uncompressed (since a version before 1.15.1) + 3 => Ok(None), + 4 => Ok(Some(Self::LZ4)), + 127 => Ok(Some(Self::Custom)), + // Unknown format + _ => Err(()), } } - fn decompress_data(&self, compressed_data: Vec) -> Result, CompressionError> { + fn decompress_data(&self, compressed_data: &[u8]) -> Result, CompressionError> { match self { Compression::GZip => { - let mut decoder = GzDecoder::new(&compressed_data[..]); + let mut decoder = GzDecoder::new(compressed_data); let mut chunk_data = Vec::new(); decoder .read_to_end(&mut chunk_data) @@ -62,17 +64,16 @@ impl Compression { Ok(chunk_data) } Compression::ZLib => { - let mut decoder = ZlibDecoder::new(&compressed_data[..]); + let mut decoder = ZlibDecoder::new(compressed_data); let mut chunk_data = Vec::new(); decoder .read_to_end(&mut chunk_data) .map_err(CompressionError::ZlibError)?; Ok(chunk_data) } - Compression::None => Ok(compressed_data), Compression::LZ4 => { - let mut decoder = lz4::Decoder::new(compressed_data.as_slice()) - .map_err(CompressionError::LZ4Error)?; + let mut decoder = + lz4::Decoder::new(compressed_data).map_err(CompressionError::LZ4Error)?; let mut decompressed_data = Vec::new(); decoder .read_to_end(&mut decompressed_data) @@ -82,9 +83,54 @@ impl Compression { Compression::Custom => todo!(), } } + fn compress_data( + &self, + uncompressed_data: &[u8], + compression_level: u32, + ) -> Result, CompressionError> { + match self { + Compression::GZip => { + let mut decoder = GzEncoder::new( + uncompressed_data, + flate2::Compression::new(compression_level), + ); + let mut chunk_data = Vec::new(); + decoder + .read_to_end(&mut chunk_data) + .map_err(CompressionError::GZipError)?; + Ok(chunk_data) + } + Compression::ZLib => { + let mut decoder = ZlibEncoder::new( + uncompressed_data, + flate2::Compression::new(compression_level), + ); + let mut chunk_data = Vec::new(); + decoder + .read_to_end(&mut chunk_data) + .map_err(CompressionError::ZlibError)?; + Ok(chunk_data) + } + Compression::LZ4 => { + let mut compressed_data = Vec::new(); + let mut encoder = lz4::EncoderBuilder::new() + .level(compression_level) + .build(&mut compressed_data) + .map_err(CompressionError::LZ4Error)?; + if let Err(err) = encoder.write_all(uncompressed_data) { + return Err(CompressionError::LZ4Error(err)); + } + if let (_output, Err(err)) = encoder.finish() { + return Err(CompressionError::LZ4Error(err)); + } + Ok(compressed_data) + } + Compression::Custom => todo!(), + } + } } -impl ChunkReader for AnvilChunkReader { +impl ChunkReader for AnvilChunkFormat { fn read_chunk( &self, save_file: &LevelFolder, @@ -115,14 +161,14 @@ impl ChunkReader for AnvilChunkReader { .read_exact(&mut timestamp_table) .map_err(|err| ChunkReadingError::IoError(err.kind()))?; - let modulus = |a: i32, b: i32| ((a % b) + b) % b; - let chunk_x = modulus(at.x, 32) as u32; - let chunk_z = modulus(at.z, 32) as u32; + let chunk_x = at.x & 0x1F; + let chunk_z = at.z & 0x1F; let table_entry = (chunk_x + chunk_z * 32) * 4; - let mut offset = vec![0u8]; + let mut offset = BytesMut::new(); + offset.put_u8(0); offset.extend_from_slice(&location_table[table_entry as usize..table_entry as usize + 3]); - let offset = u32::from_be_bytes(offset.try_into().unwrap()) as u64 * 4096; + let offset = offset.get_u32() as u64 * 4096; let size = location_table[table_entry as usize + 3] as usize * 4096; if offset == 0 && size == 0 { @@ -141,25 +187,299 @@ impl ChunkReader for 
AnvilChunkReader { out }; - // TODO: check checksum to make sure chunk is not corrupted - let header: Vec = file_buf.drain(0..5).collect(); - - let compression = Compression::from_byte(header[4]).ok_or( - ChunkReadingError::Compression(CompressionError::UnknownCompression), - )?; + let mut header: Bytes = file_buf.drain(0..5).collect(); + if header.remaining() != 5 { + return Err(ChunkReadingError::InvalidHeader); + } + let size = header.get_u32(); + let compression = header.get_u8(); - let size = u32::from_be_bytes(header[..4].try_into().unwrap()); + let compression = Compression::from_byte(compression) + .map_err(|_| ChunkReadingError::Compression(CompressionError::UnknownCompression))?; // size includes the compression scheme byte, so we need to subtract 1 - let chunk_data = file_buf.drain(0..size as usize - 1).collect(); - let decompressed_chunk = compression - .decompress_data(chunk_data) - .map_err(ChunkReadingError::Compression)?; + let chunk_data: Vec = file_buf.drain(0..size as usize - 1).collect(); + + let decompressed_chunk = if let Some(compression) = compression { + compression + .decompress_data(&chunk_data) + .map_err(ChunkReadingError::Compression)? + } else { + chunk_data + }; ChunkData::from_bytes(&decompressed_chunk, *at).map_err(ChunkReadingError::ParsingError) } } +impl ChunkWriter for AnvilChunkFormat { + fn write_chunk( + &self, + chunk_data: &ChunkData, + level_folder: &LevelFolder, + at: &pumpkin_core::math::vector2::Vector2, + ) -> Result<(), super::ChunkWritingError> { + // TODO: update timestamp + let region = (at.x >> 5, at.z >> 5); + let mut region_file = OpenOptions::new() + .read(true) + .write(true) + .create(true) + .truncate(false) + .open( + level_folder + .region_folder + .join(format!("./r.{}.{}.mca", region.0, region.1)), + ) + .map_err(|err| ChunkWritingError::IoError(err.kind()))?; + + let raw_bytes = self + .to_bytes(chunk_data) + .map_err(|err| ChunkWritingError::ChunkSerializingError(err.to_string()))?; + let mut buf = Vec::with_capacity(4096); + // TODO: config + let compression = Compression::ZLib; + buf.put_u8(compression as u8); + buf.put_slice( + &compression + // TODO: config + .compress_data(&raw_bytes, 6) + .map_err(ChunkWritingError::Compression)?, + ); + + let mut location_table: [u8; 4096] = [0; 4096]; + let mut timestamp_table: [u8; 4096] = [0; 4096]; + + let file_meta = region_file + .metadata() + .map_err(|err| ChunkWritingError::IoError(err.kind()))?; + + // The header consists of 8 KiB of data + // fill the location and timestamp tables if they exist + if file_meta.len() >= 8192 { + region_file + .read_exact(&mut location_table) + .map_err(|err| ChunkWritingError::IoError(err.kind()))?; + region_file + .read_exact(&mut timestamp_table) + .map_err(|err| ChunkWritingError::IoError(err.kind()))?; + } + + let chunk_x = at.x & 0x1F; + let chunk_z = at.z & 0x1F; + + let table_entry = (chunk_x + chunk_z * 32) * 4; + + let mut offset = BytesMut::new(); + offset.put_u8(0); + offset.extend_from_slice(&location_table[table_entry as usize..table_entry as usize + 3]); + let at_offset = offset.get_u32() as u64 * 4096; + let at_size = location_table[table_entry as usize + 3] as usize * 4096; + + let mut end_index = 4096 * 2; + if at_offset != 0 || at_size != 0 { + // move other chunks earlier, if there is a hole + for (other_offset, other_size, other_table_entry) in location_table + .chunks(4) + .enumerate() + .filter_map(|(index, v)| { + if table_entry / 4 == index as i32 { + return None; + } + let mut offset = BytesMut::new(); + 
offset.put_u8(0); + offset.extend_from_slice(&v[0..3]); + let offset = offset.get_u32() as u64 * 4096; + let size = v[3] as usize * 4096; + if offset == 0 && size == 0 { + return None; + } + Some((offset, size, index * 4)) + }) + .collect::<Vec<_>>() + { + if at_offset > other_offset { + continue; + } + + fn read_at_most(file: &mut std::fs::File, size: usize) -> std::io::Result<Vec<u8>> { + let mut buf = vec![0u8; size]; + + let mut cursor = 0; + loop { + match file.read(&mut buf[cursor..])? { + 0 => break, + bytes_read => { + cursor += bytes_read; + } + } + } + + Ok(buf) + } + + region_file.seek(SeekFrom::Start(other_offset)).unwrap(); // TODO + let buf = match read_at_most(&mut region_file, other_size) { + Ok(v) => v, + Err(_) => panic!( + "Region file r.{}.{}.mca got corrupted, sorry", + region.0, region.1 + ), + }; + + region_file + .seek(SeekFrom::Start(other_offset - at_size as u64)) + .unwrap(); // TODO + region_file.write_all(&buf).unwrap_or_else(|_| { + panic!( + "Region file r.{}.{}.mca got corrupted, sorry", + region.0, region.1 + ) + }); + let location_bytes = + &(((other_offset - at_size as u64) / 4096) as u32).to_be_bytes()[1..4]; + let size_bytes = [(other_size.div_ceil(4096)) as u8]; + let location_entry = [location_bytes, &size_bytes].concat(); + location_table[other_table_entry..other_table_entry + 4] + .as_mut() + .copy_from_slice(&location_entry); + + end_index = (other_offset as isize + other_size as isize - at_size as isize) as u64; + } + } else { + for (offset, size) in location_table.chunks(4).filter_map(|v| { + let mut offset = BytesMut::new(); + offset.put_u8(0); + offset.extend_from_slice(&v[0..3]); + let offset = offset.get_u32() as u64 * 4096; + let size = v[3] as usize * 4096; + if offset == 0 && size == 0 { + return None; + } + Some((offset, size)) + }) { + end_index = u64::max(offset + size as u64, end_index); + } + } + + let location_bytes = &(end_index as u32 / 4096).to_be_bytes()[1..4]; + let size_bytes = [(buf.len().div_ceil(4096)) as u8]; + location_table[table_entry as usize..table_entry as usize + 4] + .as_mut() + .copy_from_slice(&[location_bytes, &size_bytes].concat()); + + // write new location and timestamp table + + region_file.seek(SeekFrom::Start(0)).unwrap(); // TODO + if let Err(err) = region_file.write_all(&[location_table, timestamp_table].concat()) { + return Err(ChunkWritingError::IoError(err.kind())); + } + // dbg!(&location_table.iter().map(|v| v.to_string()).join(",")); + + region_file.seek(SeekFrom::Start(end_index)).unwrap(); // TODO + + let mut header: BytesMut = BytesMut::new(); + // chunk payload length: compression type byte + compressed data + header.put_u32(buf.len() as u32); + region_file + .write_all(&header) + .expect("Failed to write header"); + region_file.write_all(&buf).unwrap_or_else(|_| { + panic!( + "Region file r.{}.{}.mca got corrupted, sorry", + region.0, region.1 + ) + }); + + let padded_size = (buf.len() + 4).div_ceil(4096) * 4096; + + // pad with zeros up to the next 4096-byte sector boundary + region_file + .write_all(&vec![0u8; padded_size - (buf.len() + 4)]) + .expect("Failed to add padding"); + + Ok(()) + } +} + +impl AnvilChunkFormat { + pub fn to_bytes(&self, chunk_data: &ChunkData) -> Result<Vec<u8>, ChunkSerializingError> { + let mut sections = Vec::new(); + + for (i, blocks) in chunk_data.blocks.blocks.chunks(16 * 16 * 16).enumerate() { + // get unique blocks + let palette = HashMap::<u16, &String>::from_iter(blocks.iter().map(|v| { + ( + *v, + BLOCK_ID_TO_REGISTRY_ID + .get(v) + .expect("Tried saving a block which does not exist."), + ) + })); + let palette = HashMap::<u16, (&String, usize)>::from_iter( + palette + .into_iter() + .enumerate() + .map(|(index, (block_id, 
registry_str))| (block_id, (registry_str, index))), + ); + + let block_bit_size = if palette.len() < 16 { + 4 + } else { + ceil_log2(palette.len() as u32).max(4) + }; + let _blocks_in_pack = 64 / block_bit_size; + + let mut section_longs = Vec::new(); + let mut current_pack_long: i64 = 0; + let mut bits_used_in_pack: u32 = 0; + + for block in blocks { + let index = palette.get(block).expect("Just added all unique").1; + current_pack_long |= (index as i64) << bits_used_in_pack; + bits_used_in_pack += block_bit_size as u32; + + if bits_used_in_pack >= 64 { + section_longs.push(current_pack_long); + current_pack_long = 0; + bits_used_in_pack = 0; + } + } + + if bits_used_in_pack > 0 { + section_longs.push(current_pack_long); + } + + sections.push(ChunkSection { + y: i as i8, + block_states: Some(ChunkSectionBlockStates { + data: Some(LongArray::new(section_longs)), + palette: palette + .into_iter() + .map(|entry| PaletteEntry { + name: entry.1 .0.clone(), + properties: None, + }) + .collect(), + }), + }); + } + + let nbt = ChunkNbt { + data_version: WORLD_DATA_VERSION, + x_pos: chunk_data.position.x, + z_pos: chunk_data.position.z, + status: super::ChunkStatus::Full, + heightmaps: chunk_data.blocks.heightmap.clone(), + sections, + }; + + let bytes = fastnbt::to_bytes(&nbt); + + bytes.map_err(ChunkSerializingError::ErrorSerializingChunk) + } +} + #[cfg(test)] mod tests { use std::path::PathBuf; @@ -167,14 +487,14 @@ mod tests { use pumpkin_core::math::vector2::Vector2; use crate::{ - chunk::{anvil::AnvilChunkReader, ChunkReader, ChunkReadingError}, + chunk::{anvil::AnvilChunkFormat, ChunkReader, ChunkReadingError}, level::LevelFolder, }; #[test] fn not_existing() { let region_path = PathBuf::from("not_existing"); - let result = AnvilChunkReader::new().read_chunk( + let result = AnvilChunkFormat.read_chunk( &LevelFolder { root_folder: PathBuf::from(""), region_folder: region_path, diff --git a/pumpkin-world/src/chunk/mod.rs b/pumpkin-world/src/chunk/mod.rs index 8473e030..9c555b39 100644 --- a/pumpkin-world/src/chunk/mod.rs +++ b/pumpkin-world/src/chunk/mod.rs @@ -1,7 +1,6 @@ use fastnbt::LongArray; -use pumpkin_core::math::vector2::Vector2; +use pumpkin_core::math::{ceil_log2, vector2::Vector2}; use serde::{Deserialize, Serialize}; -use std::cmp::max; use std::collections::HashMap; use std::ops::Index; use thiserror::Error; @@ -27,10 +26,21 @@ pub trait ChunkReader: Sync + Send { ) -> Result; } +pub trait ChunkWriter: Send + Sync { + fn write_chunk( + &self, + chunk: &ChunkData, + level_folder: &LevelFolder, + at: &Vector2, + ) -> Result<(), ChunkWritingError>; +} + #[derive(Error, Debug)] pub enum ChunkReadingError { #[error("Io error: {0}")] IoError(std::io::ErrorKind), + #[error("Invalid header")] + InvalidHeader, #[error("Region is invalid")] RegionIsInvalid, #[error("Compression error {0}")] @@ -41,6 +51,16 @@ pub enum ChunkReadingError { ParsingError(ChunkParsingError), } +#[derive(Error, Debug)] +pub enum ChunkWritingError { + #[error("Io error: {0}")] + IoError(std::io::ErrorKind), + #[error("Compression error {0}")] + Compression(CompressionError), + #[error("Chunk serializing error: {0}")] + ChunkSerializingError(String), +} + #[derive(Error, Debug)] pub enum CompressionError { #[error("Compression scheme not recognised")] @@ -68,18 +88,12 @@ pub struct ChunkBlocks { pub heightmap: ChunkHeightmaps, } -#[derive(Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "PascalCase")] struct PaletteEntry { + // block name name: String, - 
_properties: Option<HashMap<String, String>>, -} - -#[derive(Deserialize, Debug, Clone)] -struct ChunkSectionBlockStates { - // #[serde(with = "LongArray")] - data: Option<LongArray>, - palette: Vec<PaletteEntry>, + properties: Option<HashMap<String, String>>, } #[derive(Deserialize, Serialize, Debug, Clone)] @@ -91,28 +105,37 @@ pub struct ChunkHeightmaps { world_surface: LongArray, } -#[derive(Deserialize, Debug)] -#[expect(dead_code)] +#[derive(Serialize, Deserialize, Debug)] struct ChunkSection { #[serde(rename = "Y")] - y: i32, + y: i8, block_states: Option<ChunkSectionBlockStates>, } -#[derive(Deserialize, Debug)] +#[derive(Serialize, Deserialize, Debug, Clone)] +struct ChunkSectionBlockStates { + // #[serde(with = "LongArray")] + data: Option<LongArray>, + palette: Vec<PaletteEntry>, +} + +#[derive(Serialize, Deserialize, Debug)] #[serde(rename_all = "PascalCase")] struct ChunkNbt { - #[expect(dead_code)] - data_version: usize, - + data_version: i32, + #[serde(rename = "xPos")] + x_pos: i32, + // #[serde(rename = "yPos")] + //y_pos: i32, + #[serde(rename = "zPos")] + z_pos: i32, + status: ChunkStatus, #[serde(rename = "sections")] sections: Vec<ChunkSection>, - heightmaps: ChunkHeightmaps, } -#[derive(Deserialize, Debug, PartialEq, Eq)] -#[serde(tag = "Status")] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] enum ChunkStatus { #[serde(rename = "minecraft:empty")] Empty, @@ -231,10 +254,21 @@ impl Index for ChunkBlocks { } } +// A serde tag attribute can't be used here because it would break ChunkNbt, but the field still has to serialize with a capital S, hence the "Status" rename via PascalCase +#[derive(Serialize, Deserialize, Debug)] +#[serde(rename_all = "PascalCase")] +pub struct ChunkStatusWrapper { + status: ChunkStatus, +} + impl ChunkData { - pub fn from_bytes(chunk_data: &[u8], at: Vector2<i32>) -> Result<Self, ChunkParsingError> { - if fastnbt::from_bytes::<ChunkStatus>(chunk_data) + pub fn from_bytes( + chunk_data: &[u8], + position: Vector2<i32>, + ) -> Result<Self, ChunkParsingError> { + if fastnbt::from_bytes::<ChunkStatusWrapper>(chunk_data) .map_err(|_| ChunkParsingError::FailedReadStatus)? 
+ .status != ChunkStatus::Full { return Err(ChunkParsingError::ChunkNotGenerated); @@ -243,6 +277,17 @@ impl ChunkData { let chunk_data = fastnbt::from_bytes::(chunk_data) .map_err(|e| ChunkParsingError::ErrorDeserializingChunk(e.to_string()))?; + if chunk_data.x_pos != position.x || chunk_data.z_pos != position.z { + log::error!( + "Expected chunk at {}:{}, but got {}:{}", + position.x, + position.z, + chunk_data.x_pos, + chunk_data.z_pos + ); + // lets still continue + } + // this needs to be boxed, otherwise it will cause a stack-overflow let mut blocks = ChunkBlocks::empty_with_heightmap(chunk_data.heightmaps); let mut block_index = 0; // which block we're currently at @@ -274,9 +319,10 @@ impl ChunkData { }; // How many bits each block has in one of the palette u64s - let block_bit_size = { - let size = 64 - (palette.len() as i64 - 1).leading_zeros(); - max(4, size) + let block_bit_size = if palette.len() < 16 { + 4 + } else { + ceil_log2(palette.len() as u32).max(4) }; // How many blocks there are in one of the palettes u64s let blocks_in_palette = 64 / block_bit_size; @@ -310,10 +356,7 @@ impl ChunkData { } } - Ok(ChunkData { - blocks, - position: at, - }) + Ok(ChunkData { blocks, position }) } } @@ -326,3 +369,9 @@ pub enum ChunkParsingError { #[error("Error deserializing chunk: {0}")] ErrorDeserializingChunk(String), } + +#[derive(Error, Debug)] +pub enum ChunkSerializingError { + #[error("Error serializing chunk: {0}")] + ErrorSerializingChunk(fastnbt::error::Error), +} diff --git a/pumpkin-world/src/level.rs b/pumpkin-world/src/level.rs index ede2d616..80f8a7f7 100644 --- a/pumpkin-world/src/level.rs +++ b/pumpkin-world/src/level.rs @@ -11,7 +11,8 @@ use tokio::{ use crate::{ chunk::{ - anvil::AnvilChunkReader, ChunkData, ChunkParsingError, ChunkReader, ChunkReadingError, + anvil::AnvilChunkFormat, ChunkData, ChunkParsingError, ChunkReader, ChunkReadingError, + ChunkWriter, }, generation::{get_world_gen, Seed, WorldGenerator}, lock::{anvil::AnvilLevelLocker, LevelLocker}, @@ -35,6 +36,7 @@ pub struct Level { loaded_chunks: Arc, Arc>>>, chunk_watchers: Arc, usize>>, chunk_reader: Arc, + chunk_writer: Arc, world_gen: Arc, // Gets unlocked when dropped // TODO: Make this a trait @@ -75,7 +77,8 @@ impl Level { world_gen, world_info_writer: Arc::new(AnvilLevelInfo), level_folder, - chunk_reader: Arc::new(AnvilChunkReader::new()), + chunk_reader: Arc::new(AnvilChunkFormat), + chunk_writer: Arc::new(AnvilChunkFormat), loaded_chunks: Arc::new(DashMap::new()), chunk_watchers: Arc::new(DashMap::new()), level_info, @@ -88,7 +91,7 @@ impl Level { // lets first save all chunks for chunk in self.loaded_chunks.iter() { let chunk = chunk.read().await; - self.clean_chunk(&chunk.position); + self.clean_chunk(&chunk.position).await; } // then lets save the world info self.world_info_writer @@ -168,16 +171,16 @@ impl Level { } } - pub fn clean_chunks(&self, chunks: &[Vector2]) { - chunks.iter().for_each(|chunk_pos| { + pub async fn clean_chunks(&self, chunks: &[Vector2]) { + for chunk_pos in chunks { //log::debug!("Unloading {:?}", chunk_pos); - self.clean_chunk(chunk_pos); - }); + self.clean_chunk(chunk_pos).await; + } } - pub fn clean_chunk(&self, chunk: &Vector2) { + pub async fn clean_chunk(&self, chunk: &Vector2) { if let Some(data) = self.loaded_chunks.remove(chunk) { - self.write_chunk(data); + self.write_chunk(data).await; } } @@ -201,8 +204,14 @@ impl Level { self.chunk_watchers.shrink_to_fit(); } - pub fn write_chunk(&self, _chunk_to_write: (Vector2, Arc>)) { - //TODO + pub async fn 
write_chunk(&self, chunk_to_write: (Vector2<i32>, Arc<RwLock<ChunkData>>)) { + let data = chunk_to_write.1.read().await; + if let Err(error) = + self.chunk_writer + .write_chunk(&data, &self.level_folder, &chunk_to_write.0) + { + log::error!("Failed writing chunk to disk: {}", error.to_string()); + } } fn load_chunk_from_save( @@ -235,7 +244,8 @@ impl Level { let channel = channel.clone(); let loaded_chunks = self.loaded_chunks.clone(); let chunk_reader = self.chunk_reader.clone(); - let level_info = self.level_folder.clone(); + let chunk_writer = self.chunk_writer.clone(); + let level_folder = self.level_folder.clone(); let world_gen = self.world_gen.clone(); let chunk_pos = *at; @@ -244,8 +254,23 @@ .map(|entry| entry.value().clone()) .unwrap_or_else(|| { let loaded_chunk = - match Self::load_chunk_from_save(chunk_reader, &level_info, chunk_pos) { - Ok(chunk) => chunk, + match Self::load_chunk_from_save(chunk_reader, &level_folder, chunk_pos) { + Ok(chunk) => { + // Save new Chunk + if let Some(chunk) = &chunk { + if let Err(error) = chunk_writer.write_chunk( + &chunk.blocking_read(), + &level_folder, + &chunk_pos, + ) { + log::error!( + "Failed writing chunk to disk: {}", + error.to_string() + ); + }; + } + chunk + } Err(err) => { log::error!( "Failed to read chunk (regenerating) {:?}: {:?}", diff --git a/pumpkin/src/entity/player.rs b/pumpkin/src/entity/player.rs index 5e9c88e2..d293df0b 100644 --- a/pumpkin/src/entity/player.rs +++ b/pumpkin/src/entity/player.rs @@ -235,19 +235,21 @@ impl Player { radial_chunks.len() ); + let level = &world.level; + // Decrement value of watched chunks - let chunks_to_clean = world.level.mark_chunks_as_not_watched(&radial_chunks); + let chunks_to_clean = level.mark_chunks_as_not_watched(&radial_chunks); // Remove chunks with no watchers from the cache - world.level.clean_chunks(&chunks_to_clean); + level.clean_chunks(&chunks_to_clean).await; // Remove left over entries from all possibly loaded chunks - world.level.clean_memory(&radial_chunks); + level.clean_memory(&radial_chunks); log::debug!( "Removed player id {} ({}) ({} chunks remain cached)", self.gameprofile.name, self.client.id, - self.world().level.loaded_chunk_count() + level.loaded_chunk_count() ); //self.world().level.list_cached(); diff --git a/pumpkin/src/world/mod.rs b/pumpkin/src/world/mod.rs index 8aafd465..93720c87 100644 --- a/pumpkin/src/world/mod.rs +++ b/pumpkin/src/world/mod.rs @@ -579,7 +579,7 @@ impl World { "Received chunk {:?}, but it is no longer watched... cleaning", &chunk_data.position ); - level.clean_chunk(&chunk_data.position); + level.clean_chunk(&chunk_data.position).await; continue; } @@ -851,7 +851,7 @@ impl World { "Received chunk {:?}, but it is not watched... cleaning", chunk_pos ); - self.level.clean_chunk(&chunk_pos); + self.level.clean_chunk(&chunk_pos).await; } chunk diff --git a/pumpkin/src/world/player_chunker.rs b/pumpkin/src/world/player_chunker.rs index 98e1157d..43ff67d1 100644 --- a/pumpkin/src/world/player_chunker.rs +++ b/pumpkin/src/world/player_chunker.rs @@ -79,18 +79,13 @@ pub async fn update_position(player: &Arc<Player>) { // Make sure the watched section and the chunk watcher updates are async atomic. 
We want to // ensure what we unload when the player disconnects is correct - entity - .world - .level - .mark_chunks_as_newly_watched(&loading_chunks); - let chunks_to_clean = entity - .world - .level - .mark_chunks_as_not_watched(&unloading_chunks); + let level = &entity.world.level; + level.mark_chunks_as_newly_watched(&loading_chunks); + let chunks_to_clean = level.mark_chunks_as_not_watched(&unloading_chunks); player.watched_section.store(new_cylindrical); if !chunks_to_clean.is_empty() { - entity.world.level.clean_chunks(&chunks_to_clean); + level.clean_chunks(&chunks_to_clean).await; // This can take a little if we are sending a bunch of packets, queue it up :p let client = player.client.clone();
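
For context on the sector math in write_chunk above: an Anvil region file starts with an 8 KiB header, a 4 KiB location table followed by a 4 KiB timestamp table, and each of the 1024 location entries is four bytes, a 3-byte big-endian offset plus a 1-byte length, both counted in 4096-byte sectors. The helpers below only illustrate that entry layout; the function names are made up for illustration and are not part of this patch.

fn encode_location_entry(byte_offset: u64, byte_len: usize) -> [u8; 4] {
    // Offsets and lengths are stored in whole 4096-byte sectors.
    let sector_offset = (byte_offset / 4096) as u32;
    let sector_count = byte_len.div_ceil(4096) as u8;
    let off = sector_offset.to_be_bytes();
    [off[1], off[2], off[3], sector_count]
}

fn decode_location_entry(entry: [u8; 4]) -> (u64, usize) {
    // Mirrors the BytesMut trick in read_chunk: prepend a zero byte to widen
    // the 3-byte offset into a big-endian u32.
    let sector_offset = u32::from_be_bytes([0, entry[0], entry[1], entry[2]]);
    (u64::from(sector_offset) * 4096, entry[3] as usize * 4096)
}

The chunk at region-local coordinates (x & 0x1F, z & 0x1F) owns the entry starting at byte 4 * (x + z * 32), which is the table_entry computed in both read_chunk and write_chunk.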
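
The patch splits chunk persistence into the ChunkReader and ChunkWriter traits, both implemented by AnvilChunkFormat. A minimal round-trip sketch of how a caller might use them, assuming the chunk and level modules are exported from pumpkin-world and the chunk at the given position is already generated (paths and coordinates are placeholders):

use std::path::PathBuf;

use pumpkin_core::math::vector2::Vector2;
use pumpkin_world::chunk::{anvil::AnvilChunkFormat, ChunkReader, ChunkWriter};
use pumpkin_world::level::LevelFolder;

fn rewrite_chunk() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical on-disk layout of a world save.
    let folder = LevelFolder {
        root_folder: PathBuf::from("world"),
        region_folder: PathBuf::from("world/region"),
    };
    let at = Vector2::new(0, 0);

    let format = AnvilChunkFormat;
    // Read an existing chunk and write it straight back to its region file.
    let chunk = format.read_chunk(&folder, &at)?;
    format.write_chunk(&chunk, &folder, &at)?;
    Ok(())
}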