diff --git a/Cargo.toml b/Cargo.toml index 032a28c6a..9b9ab09eb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,7 +46,7 @@ uuid.workspace = true bytes.workspace = true rand.workspace = true valence_advancement = { workspace = true, optional = true } -valence_anvil = { workspace = true, optional = true } +valence_anvil = { workspace = true, optional = true, features = ["bevy_plugin"] } valence_boss_bar = { workspace = true, optional = true } valence_server.workspace = true valence_inventory = { workspace = true, optional = true } diff --git a/crates/valence_anvil/Cargo.toml b/crates/valence_anvil/Cargo.toml index 3e320f139..08ce559c4 100644 --- a/crates/valence_anvil/Cargo.toml +++ b/crates/valence_anvil/Cargo.toml @@ -9,14 +9,18 @@ repository.workspace = true documentation.workspace = true license.workspace = true +[features] +bevy_plugin = ["dep:bevy_app", "dep:bevy_ecs", "dep:flume", "parsing"] +parsing = ["dep:num-integer", "dep:valence_server"] + [dependencies] -bevy_app.workspace = true -bevy_ecs.workspace = true +bevy_app = { workspace = true, optional = true } +bevy_ecs = { workspace = true, optional = true } byteorder.workspace = true flate2.workspace = true -flume.workspace = true +flume = { workspace = true, optional = true } lru.workspace = true -num-integer.workspace = true +num-integer = { workspace = true, optional = true } thiserror.workspace = true -tracing.workspace = true -valence_server.workspace = true +valence_nbt = { workspace = true, features = ["binary"] } +valence_server = { workspace = true, optional = true } diff --git a/crates/valence_anvil/src/bevy.rs b/crates/valence_anvil/src/bevy.rs new file mode 100644 index 000000000..97b4fea64 --- /dev/null +++ b/crates/valence_anvil/src/bevy.rs @@ -0,0 +1,270 @@ +use std::collections::hash_map::Entry; +use std::collections::{HashMap, HashSet}; +use std::path::PathBuf; +use std::thread; + +use bevy_app::prelude::*; +use bevy_ecs::prelude::*; +use flume::{Receiver, Sender}; +use 
valence_server::client::{Client, OldView, View};
+use valence_server::entity::{EntityLayerId, OldEntityLayerId};
+use valence_server::layer::UpdateLayersPreClientSet;
+use valence_server::protocol::anyhow;
+use valence_server::registry::BiomeRegistry;
+use valence_server::{ChunkLayer, ChunkPos};
+
+use crate::parsing::{DimensionFolder, ParsedChunk};
+
+type WorkerResult = anyhow::Result<Option<ParsedChunk>>;
+
+/// The order in which chunks should be processed by the anvil worker. Smaller
+/// values are sent first.
+type Priority = u64;
+
+#[derive(Component, Debug)]
+pub struct AnvilLevel {
+    /// Chunk worker state to be moved to another thread.
+    worker_state: Option<ChunkWorkerState>,
+    /// The set of chunk positions that should not be loaded or unloaded by
+    /// the anvil system.
+    ///
+    /// This set is empty by default, but you can modify it at any time.
+    pub ignored_chunks: HashSet<ChunkPos>,
+    /// Chunks that need to be loaded. Chunks with `None` priority have already
+    /// been sent to the anvil thread.
+    pending: HashMap<ChunkPos, Option<Priority>>,
+    /// Sender for the chunk worker thread.
+    sender: Sender<ChunkPos>,
+    /// Receiver for the chunk worker thread.
+    receiver: Receiver<(ChunkPos, WorkerResult)>,
+}
+
+impl AnvilLevel {
+    pub fn new(world_root: impl Into<PathBuf>, biomes: &BiomeRegistry) -> Self {
+        let (pending_sender, pending_receiver) = flume::unbounded();
+        let (finished_sender, finished_receiver) = flume::bounded(4096);
+
+        Self {
+            worker_state: Some(ChunkWorkerState {
+                dimension_folder: DimensionFolder::new(world_root, biomes),
+                sender: finished_sender,
+                receiver: pending_receiver,
+            }),
+            ignored_chunks: HashSet::new(),
+            pending: HashMap::new(),
+            sender: pending_sender,
+            receiver: finished_receiver,
+        }
+    }
+
+    /// Forces a chunk to be loaded at a specific position in this world. This
+    /// will bypass [`AnvilLevel::ignored_chunks`].
+    /// Note that the chunk will be unloaded next tick unless it has been added
+    /// to [`AnvilLevel::ignored_chunks`] or it is in view of a client.
+    ///
+    /// This has no effect if a chunk at the position is already present.
+    pub fn force_chunk_load(&mut self, pos: ChunkPos) {
+        match self.pending.entry(pos) {
+            Entry::Occupied(oe) => {
+                // If the chunk is already scheduled to load but hasn't been sent to the chunk
+                // worker yet, then give it the highest priority.
+                if let Some(priority) = oe.into_mut() {
+                    *priority = 0;
+                }
+            }
+            Entry::Vacant(ve) => {
+                ve.insert(Some(0));
+            }
+        }
+    }
+}
+
+#[derive(Debug)]
+struct ChunkWorkerState {
+    /// The world folder containing the region folder where chunks are loaded
+    /// from.
+    dimension_folder: DimensionFolder,
+    /// Sender of finished chunks.
+    sender: Sender<(ChunkPos, WorkerResult)>,
+    /// Receiver of pending chunks.
+    receiver: Receiver<ChunkPos>,
+}
+
+pub struct AnvilPlugin;
+
+impl Plugin for AnvilPlugin {
+    fn build(&self, app: &mut App) {
+        app.add_event::<ChunkLoadEvent>()
+            .add_event::<ChunkUnloadEvent>()
+            .add_systems(PreUpdate, remove_unviewed_chunks)
+            .add_systems(
+                PostUpdate,
+                (init_anvil, update_client_views, send_recv_chunks)
+                    .chain()
+                    .before(UpdateLayersPreClientSet),
+            );
+    }
+}
+
+fn init_anvil(mut query: Query<&mut AnvilLevel, (Added<AnvilLevel>, With<ChunkLayer>)>) {
+    for mut level in &mut query {
+        if let Some(state) = level.worker_state.take() {
+            thread::spawn(move || anvil_worker(state));
+        }
+    }
+}
+
+/// Removes all chunks no longer viewed by clients.
+///
+/// This needs to run in `PreUpdate` where the chunk viewer counts have been
+/// updated from the previous tick.
+fn remove_unviewed_chunks(
+    mut chunk_layers: Query<(Entity, &mut ChunkLayer, &AnvilLevel)>,
+    mut unload_events: EventWriter<ChunkUnloadEvent>,
+) {
+    for (entity, mut layer, anvil) in &mut chunk_layers {
+        layer.retain_chunks(|pos, chunk| {
+            if chunk.viewer_count_mut() > 0 || anvil.ignored_chunks.contains(&pos) {
+                true
+            } else {
+                unload_events.send(ChunkUnloadEvent {
+                    chunk_layer: entity,
+                    pos,
+                });
+                false
+            }
+        });
+    }
+}
+
+fn update_client_views(
+    clients: Query<(&EntityLayerId, Ref<OldEntityLayerId>, View, OldView), With<Client>>,
+    mut chunk_layers: Query<(&ChunkLayer, &mut AnvilLevel)>,
+) {
+    for (loc, old_loc, view, old_view) in &clients {
+        let view = view.get();
+        let old_view = old_view.get();
+
+        if loc != &*old_loc || view != old_view || old_loc.is_added() {
+            let Ok((layer, mut anvil)) = chunk_layers.get_mut(loc.0) else {
+                continue;
+            };
+
+            let queue_pos = |pos| {
+                if !anvil.ignored_chunks.contains(&pos) && layer.chunk(pos).is_none() {
+                    // Chunks closer to clients are prioritized.
+                    match anvil.pending.entry(pos) {
+                        Entry::Occupied(mut oe) => {
+                            if let Some(priority) = oe.get_mut() {
+                                let dist = view.pos.distance_squared(pos);
+                                *priority = (*priority).min(dist);
+                            }
+                        }
+                        Entry::Vacant(ve) => {
+                            let dist = view.pos.distance_squared(pos);
+                            ve.insert(Some(dist));
+                        }
+                    }
+                }
+            };
+
+            // Queue all the new chunks in the view to be sent to the anvil worker.
+            if old_loc.is_added() {
+                view.iter().for_each(queue_pos);
+            } else {
+                view.diff(old_view).for_each(queue_pos);
+            }
+        }
+    }
+}
+
+fn send_recv_chunks(
+    mut layers: Query<(Entity, &mut ChunkLayer, &mut AnvilLevel)>,
+    mut to_send: Local<Vec<(Priority, ChunkPos)>>,
+    mut load_events: EventWriter<ChunkLoadEvent>,
+) {
+    for (entity, mut layer, anvil) in &mut layers {
+        let anvil = anvil.into_inner();
+
+        // Insert the chunks that are finished loading into the chunk layer and send
+        // load events.
+ for (pos, res) in anvil.receiver.drain() { + anvil.pending.remove(&pos); + + let status = match res { + Ok(Some(ParsedChunk { chunk, timestamp })) => { + layer.insert_chunk(pos, chunk); + ChunkLoadStatus::Success { timestamp } + } + Ok(None) => ChunkLoadStatus::Empty, + Err(e) => ChunkLoadStatus::Failed(e), + }; + + load_events.send(ChunkLoadEvent { + chunk_layer: entity, + pos, + status, + }); + } + + // Collect all the new chunks that need to be loaded this tick. + for (pos, priority) in &mut anvil.pending { + if let Some(pri) = priority.take() { + to_send.push((pri, *pos)); + } + } + + // Sort chunks by ascending priority. + to_send.sort_unstable_by_key(|(pri, _)| *pri); + + // Send the sorted chunks to be loaded. + for (_, pos) in to_send.drain(..) { + let _ = anvil.sender.try_send(pos); + } + } +} + +fn anvil_worker(mut state: ChunkWorkerState) { + while let Ok(pos) = state.receiver.recv() { + let res = state + .dimension_folder + .get_chunk(pos) + .map_err(anyhow::Error::from); + + let _ = state.sender.send((pos, res)); + } +} + +/// An event sent by `valence_anvil` after an attempt to load a chunk is made. +#[derive(Event, Debug)] +pub struct ChunkLoadEvent { + /// The [`ChunkLayer`] where the chunk is located. + pub chunk_layer: Entity, + /// The position of the chunk in the layer. + pub pos: ChunkPos, + pub status: ChunkLoadStatus, +} + +#[derive(Debug)] +pub enum ChunkLoadStatus { + /// A new chunk was successfully loaded and inserted into the layer. + Success { + /// The time this chunk was last modified, measured in seconds since the + /// epoch. + timestamp: u32, + }, + /// The Anvil level does not have a chunk at the position. No chunk was + /// loaded. + Empty, + /// An attempt was made to load the chunk, but something went wrong. + Failed(anyhow::Error), +} + +/// An event sent by `valence_anvil` when a chunk is unloaded from an layer. +#[derive(Event, Debug)] +pub struct ChunkUnloadEvent { + /// The [`ChunkLayer`] where the chunk was unloaded. 
+ pub chunk_layer: Entity, + /// The position of the chunk that was unloaded. + pub pos: ChunkPos, +} diff --git a/crates/valence_anvil/src/lib.rs b/crates/valence_anvil/src/lib.rs index cd6c3d4a6..f77c4e77a 100644 --- a/crates/valence_anvil/src/lib.rs +++ b/crates/valence_anvil/src/lib.rs @@ -17,133 +17,74 @@ clippy::dbg_macro )] -use std::collections::hash_map::Entry; -use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::File; use std::io::{ErrorKind, Read, Seek, SeekFrom}; use std::num::NonZeroUsize; use std::path::PathBuf; -use std::thread; -use bevy_app::prelude::*; -use bevy_ecs::prelude::*; +#[cfg(feature = "bevy_plugin")] +pub use bevy::*; use byteorder::{BigEndian, ReadBytesExt}; use flate2::bufread::{GzDecoder, ZlibDecoder}; -use flume::{Receiver, Sender}; use lru::LruCache; -use tracing::warn; -use valence_server::client::{Client, OldView, View}; -use valence_server::entity::{EntityLayerId, OldEntityLayerId}; -use valence_server::layer::chunk::UnloadedChunk; -use valence_server::layer::UpdateLayersPreClientSet; -use valence_server::nbt::Compound; -use valence_server::protocol::anyhow::{bail, ensure}; -use valence_server::protocol::{anyhow, ChunkPos}; -use valence_server::registry::biome::BiomeId; -use valence_server::registry::BiomeRegistry; -use valence_server::{ChunkLayer, Ident}; - -mod parse_chunk; - -#[derive(Component, Debug)] -pub struct AnvilLevel { - /// Chunk worker state to be moved to another thread. - worker_state: Option, - /// The set of chunk positions that should not be loaded or unloaded by - /// the anvil system. - /// - /// This set is empty by default, but you can modify it at any time. - pub ignored_chunks: HashSet, - /// Chunks that need to be loaded. Chunks with `None` priority have already - /// been sent to the anvil thread. - pending: HashMap>, - /// Sender for the chunk worker thread. - sender: Sender, - /// Receiver for the chunk worker thread. 
- receiver: Receiver<(ChunkPos, WorkerResult)>, -} - -type WorkerResult = anyhow::Result>; - -impl AnvilLevel { - pub fn new(world_root: impl Into, biomes: &BiomeRegistry) -> Self { - let mut region_root = world_root.into(); - region_root.push("region"); - - let (pending_sender, pending_receiver) = flume::unbounded(); - let (finished_sender, finished_receiver) = flume::bounded(4096); - - Self { - worker_state: Some(ChunkWorkerState { - regions: LruCache::new(LRU_CACHE_SIZE), - region_root, - sender: finished_sender, - receiver: pending_receiver, - decompress_buf: vec![], - biome_to_id: biomes - .iter() - .map(|(id, name, _)| (name.to_string_ident(), id)) - .collect(), - }), - ignored_chunks: HashSet::new(), - pending: HashMap::new(), - sender: pending_sender, - receiver: finished_receiver, - } - } +use thiserror::Error; +use valence_nbt::Compound; - /// Forces a chunk to be loaded at a specific position in this world. This - /// will bypass [`AnvilLevel::ignored_chunks`]. - /// Note that the chunk will be unloaded next tick unless it has been added - /// to [`AnvilLevel::ignored_chunks`] or it is in view of a client. - /// - /// This has no effect if a chunk at the position is already present. - pub fn force_chunk_load(&mut self, pos: ChunkPos) { - match self.pending.entry(pos) { - Entry::Occupied(oe) => { - // If the chunk is already scheduled to load but hasn't been sent to the chunk - // worker yet, then give it the highest priority. - if let Some(priority) = oe.into_mut() { - *priority = 0; - } - } - Entry::Vacant(ve) => { - ve.insert(Some(0)); - } - } - } -} +#[cfg(feature = "bevy_plugin")] +mod bevy; +#[cfg(feature = "parsing")] +pub mod parsing; const LRU_CACHE_SIZE: NonZeroUsize = match NonZeroUsize::new(256) { Some(n) => n, None => unreachable!(), }; -/// The order in which chunks should be processed by the anvil worker. Smaller -/// values are sent first. 
-type Priority = u64; +#[derive(Debug, Error)] +#[non_exhaustive] +pub enum RegionError { + #[error("an I/O error occurred: {0}")] + Io(#[from] std::io::Error), + #[error("invalid chunk sector offset")] + InvalidChunkSectorOffset, + #[error("invalid chunk size")] + InvalidChunkSize, + #[error("invalid compression scheme number of {0}")] + InvalidCompressionScheme(u8), + #[error("failed to parse NBT: {0}")] + Nbt(#[from] valence_nbt::binary::Error), + #[error("not all chunk NBT data was read")] + TrailingNbtData, +} #[derive(Debug)] -struct ChunkWorkerState { +pub struct RegionFolder { /// Region files. An LRU cache is used to limit the number of open file /// handles. regions: LruCache, /// Path to the "region" subdirectory in the world root. region_root: PathBuf, - /// Sender of finished chunks. - sender: Sender<(ChunkPos, WorkerResult)>, - /// Receiver of pending chunks. - receiver: Receiver, /// Scratch buffer for decompression. decompress_buf: Vec, - /// Mapping of biome names to their biome ID. - biome_to_id: BTreeMap, BiomeId>, } -impl ChunkWorkerState { - fn get_chunk(&mut self, pos: ChunkPos) -> anyhow::Result> { - let region_x = pos.x.div_euclid(32); - let region_z = pos.z.div_euclid(32); +impl RegionFolder { + pub fn new(region_root: impl Into) -> Self { + Self { + regions: LruCache::new(LRU_CACHE_SIZE), + region_root: region_root.into(), + decompress_buf: Vec::new(), + } + } + /// Gets the raw chunk at the given chunk position. + /// + /// Returns `Ok(Some(chunk))` if the chunk exists and no errors occurred + /// loading it. Returns `Ok(None)` if the chunk does not exist and no + /// errors occurred attempting to load it. Returns `Err(_)` if an error + /// occurred attempting to load the chunk. 
+    pub fn get_chunk(&mut self, pos_x: i32, pos_z: i32) -> Result<Option<RawChunk>, RegionError> {
+        let region_x = pos_x.div_euclid(32);
+        let region_z = pos_z.div_euclid(32);
 
         let region = match self.regions.get_mut(&(region_x, region_z)) {
             Some(RegionEntry::Occupied(region)) => region,
@@ -178,7 +119,7 @@ impl ChunkWorkerState {
             }
         };
 
-        let chunk_idx = (pos.x.rem_euclid(32) + pos.z.rem_euclid(32) * 32) as usize;
+        let chunk_idx = (pos_x.rem_euclid(32) + pos_z.rem_euclid(32) * 32) as usize;
 
         let location_bytes = (&region.header[chunk_idx * 4..]).read_u32::<BigEndian>()?;
         let timestamp = (&region.header[chunk_idx * 4 + SECTOR_SIZE..]).read_u32::<BigEndian>()?;
@@ -193,7 +134,9 @@ impl ChunkWorkerState {
 
         // If the sector offset was <2, then the chunk data would be inside the region
         // header. That doesn't make any sense.
-        ensure!(sector_offset >= 2, "invalid chunk sector offset");
+        if sector_offset < 2 {
+            return Err(RegionError::InvalidChunkSectorOffset);
+        }
 
         // Seek to the beginning of the chunk's data.
         region
@@ -203,10 +146,9 @@ impl ChunkWorkerState {
         let exact_chunk_size = region.file.read_u32::<BigEndian>()? as usize;
 
         // size of this chunk in sectors must always be >= the exact size.
-        ensure!(
-            sector_count * SECTOR_SIZE >= exact_chunk_size,
-            "invalid chunk size"
-        );
+        if sector_count * SECTOR_SIZE < exact_chunk_size {
+            return Err(RegionError::InvalidChunkSize);
+        }
 
         let mut data_buf = vec![0; exact_chunk_size].into_boxed_slice();
 
         region.file.read_exact(&mut data_buf)?;
@@ -232,20 +174,23 @@ impl ChunkWorkerState {
             // Uncompressed
             3 => r,
             // Unknown
-            b => bail!("unknown compression scheme number of {b}"),
+            b => return Err(RegionError::InvalidCompressionScheme(b)),
         };
 
         let (data, _) = Compound::from_binary(&mut nbt_slice)?;
 
-        ensure!(nbt_slice.is_empty(), "not all chunk NBT data was read");
+        if !nbt_slice.is_empty() {
+            return Err(RegionError::TrailingNbtData);
+        }
 
-        Ok(Some(AnvilChunk { data, timestamp }))
+        Ok(Some(RawChunk { data, timestamp }))
     }
 }
 
-struct AnvilChunk {
-    data: Compound,
-    timestamp: u32,
+/// A chunk represented by the raw compound data.
+pub struct RawChunk {
+    pub data: Compound,
+    pub timestamp: u32,
 }
 
 /// X and Z positions of a region.
@@ -269,189 +214,3 @@ struct Region {
 }
 
 const SECTOR_SIZE: usize = 4096;
-
-pub struct AnvilPlugin;
-
-impl Plugin for AnvilPlugin {
-    fn build(&self, app: &mut App) {
-        app.add_event::<ChunkLoadEvent>()
-            .add_event::<ChunkUnloadEvent>()
-            .add_systems(PreUpdate, remove_unviewed_chunks)
-            .add_systems(
-                PostUpdate,
-                (init_anvil, update_client_views, send_recv_chunks)
-                    .chain()
-                    .before(UpdateLayersPreClientSet),
-            );
-    }
-}
-
-fn init_anvil(mut query: Query<&mut AnvilLevel, (Added<AnvilLevel>, With<ChunkLayer>)>) {
-    for mut level in &mut query {
-        if let Some(state) = level.worker_state.take() {
-            thread::spawn(move || anvil_worker(state));
-        }
-    }
-}
-
-/// Removes all chunks no longer viewed by clients.
-///
-/// This needs to run in `PreUpdate` where the chunk viewer counts have been
-/// updated from the previous tick.
-fn remove_unviewed_chunks( - mut chunk_layers: Query<(Entity, &mut ChunkLayer, &AnvilLevel)>, - mut unload_events: EventWriter, -) { - for (entity, mut layer, anvil) in &mut chunk_layers { - layer.retain_chunks(|pos, chunk| { - if chunk.viewer_count_mut() > 0 || anvil.ignored_chunks.contains(&pos) { - true - } else { - unload_events.send(ChunkUnloadEvent { - chunk_layer: entity, - pos, - }); - false - } - }); - } -} - -fn update_client_views( - clients: Query<(&EntityLayerId, Ref, View, OldView), With>, - mut chunk_layers: Query<(&ChunkLayer, &mut AnvilLevel)>, -) { - for (loc, old_loc, view, old_view) in &clients { - let view = view.get(); - let old_view = old_view.get(); - - if loc != &*old_loc || view != old_view || old_loc.is_added() { - let Ok((layer, mut anvil)) = chunk_layers.get_mut(loc.0) else { - continue; - }; - - let queue_pos = |pos| { - if !anvil.ignored_chunks.contains(&pos) && layer.chunk(pos).is_none() { - // Chunks closer to clients are prioritized. - match anvil.pending.entry(pos) { - Entry::Occupied(mut oe) => { - if let Some(priority) = oe.get_mut() { - let dist = view.pos.distance_squared(pos); - *priority = (*priority).min(dist); - } - } - Entry::Vacant(ve) => { - let dist = view.pos.distance_squared(pos); - ve.insert(Some(dist)); - } - } - } - }; - - // Queue all the new chunks in the view to be sent to the anvil worker. - if old_loc.is_added() { - view.iter().for_each(queue_pos); - } else { - view.diff(old_view).for_each(queue_pos); - } - } - } -} - -fn send_recv_chunks( - mut layers: Query<(Entity, &mut ChunkLayer, &mut AnvilLevel)>, - mut to_send: Local>, - mut load_events: EventWriter, -) { - for (entity, mut layer, anvil) in &mut layers { - let anvil = anvil.into_inner(); - - // Insert the chunks that are finished loading into the chunk layer and send - // load events. 
- for (pos, res) in anvil.receiver.drain() { - anvil.pending.remove(&pos); - - let status = match res { - Ok(Some((chunk, timestamp))) => { - layer.insert_chunk(pos, chunk); - ChunkLoadStatus::Success { timestamp } - } - Ok(None) => ChunkLoadStatus::Empty, - Err(e) => ChunkLoadStatus::Failed(e), - }; - - load_events.send(ChunkLoadEvent { - chunk_layer: entity, - pos, - status, - }); - } - - // Collect all the new chunks that need to be loaded this tick. - for (pos, priority) in &mut anvil.pending { - if let Some(pri) = priority.take() { - to_send.push((pri, *pos)); - } - } - - // Sort chunks by ascending priority. - to_send.sort_unstable_by_key(|(pri, _)| *pri); - - // Send the sorted chunks to be loaded. - for (_, pos) in to_send.drain(..) { - let _ = anvil.sender.try_send(pos); - } - } -} - -fn anvil_worker(mut state: ChunkWorkerState) { - while let Ok(pos) = state.receiver.recv() { - let res = get_chunk(pos, &mut state); - - let _ = state.sender.send((pos, res)); - } - - fn get_chunk(pos: ChunkPos, state: &mut ChunkWorkerState) -> WorkerResult { - let Some(anvil_chunk) = state.get_chunk(pos)? else { - return Ok(None); - }; - - let chunk = parse_chunk::parse_chunk(anvil_chunk.data, &state.biome_to_id)?; - - Ok(Some((chunk, anvil_chunk.timestamp))) - } -} - -/// An event sent by `valence_anvil` after an attempt to load a chunk is made. -#[derive(Event, Debug)] -pub struct ChunkLoadEvent { - /// The [`ChunkLayer`] where the chunk is located. - pub chunk_layer: Entity, - /// The position of the chunk in the layer. - pub pos: ChunkPos, - pub status: ChunkLoadStatus, -} - -#[derive(Debug)] -pub enum ChunkLoadStatus { - /// A new chunk was successfully loaded and inserted into the layer. - Success { - /// The time this chunk was last modified, measured in seconds since the - /// epoch. - timestamp: u32, - }, - /// The Anvil level does not have a chunk at the position. No chunk was - /// loaded. 
- Empty, - /// An attempt was made to load the chunk, but something went wrong. - Failed(anyhow::Error), -} - -/// An event sent by `valence_anvil` when a chunk is unloaded from an layer. -#[derive(Event, Debug)] -pub struct ChunkUnloadEvent { - /// The [`ChunkLayer`] where the chunk was unloaded. - pub chunk_layer: Entity, - /// The position of the chunk that was unloaded. - pub pos: ChunkPos, -} diff --git a/crates/valence_anvil/src/parse_chunk.rs b/crates/valence_anvil/src/parsing.rs similarity index 85% rename from crates/valence_anvil/src/parse_chunk.rs rename to crates/valence_anvil/src/parsing.rs index 65e7543f5..898aa4436 100644 --- a/crates/valence_anvil/src/parse_chunk.rs +++ b/crates/valence_anvil/src/parsing.rs @@ -1,5 +1,6 @@ use std::borrow::Cow; use std::collections::BTreeMap; +use std::path::PathBuf; use num_integer::div_ceil; use thiserror::Error; @@ -8,11 +9,61 @@ use valence_server::layer::chunk::{Chunk, UnloadedChunk}; use valence_server::nbt::{Compound, List, Value}; use valence_server::protocol::BlockKind; use valence_server::registry::biome::BiomeId; -use valence_server::Ident; +use valence_server::registry::BiomeRegistry; +use valence_server::{ChunkPos, Ident}; -#[derive(Clone, Debug, Error)] +use crate::{RegionError, RegionFolder}; + +#[derive(Debug)] +pub struct DimensionFolder { + region: RegionFolder, + /// Mapping of biome names to their biome ID. + biome_to_id: BTreeMap, BiomeId>, +} + +impl DimensionFolder { + pub fn new(dimension_root: impl Into, biomes: &BiomeRegistry) -> Self { + let mut region_root = dimension_root.into(); + region_root.push("region"); + + Self { + region: RegionFolder::new(region_root), + biome_to_id: biomes + .iter() + .map(|(id, name, _)| (name.to_string_ident(), id)) + .collect(), + } + } + + /// Gets the parsed chunk at the given chunk position. + /// + /// Returns `Ok(Some(chunk))` if the chunk exists and no errors occurred + /// loading it. 
Returns `Ok(None)` if the chunk does not exist and no + /// errors occurred attempting to load it. Returns `Err(_)` if an error + /// occurred attempting to load the chunk. + pub fn get_chunk(&mut self, pos: ChunkPos) -> Result, ParseChunkError> { + let Some(raw_chunk) = self.region.get_chunk(pos.x, pos.z)? else { + return Ok(None); + }; + let parsed = parse_chunk(raw_chunk.data, &self.biome_to_id)?; + Ok(Some(ParsedChunk { + chunk: parsed, + timestamp: raw_chunk.timestamp, + })) + } +} + +/// A chunk parsed to show block information, biome information etc. +pub struct ParsedChunk { + pub chunk: UnloadedChunk, + pub timestamp: u32, +} + +#[derive(Debug, Error)] #[non_exhaustive] -pub(crate) enum ParseChunkError { +pub enum ParseChunkError { + #[error("region error: {0}")] + Region(#[from] RegionError), #[error("missing chunk sections")] MissingSections, #[error("missing chunk section Y")] @@ -65,7 +116,7 @@ pub(crate) enum ParseChunkError { InvalidBlockEntityPosition, } -pub(crate) fn parse_chunk( +fn parse_chunk( mut nbt: Compound, biome_map: &BTreeMap, BiomeId>, // TODO: replace with biome registry arg. ) -> Result {