diff --git a/README.md b/README.md
index def352310..5e198f10d 100644
--- a/README.md
+++ b/README.md
@@ -49,9 +49,9 @@ and customizable experience. It prioritizes performance and player enjoyment whi
 - [x] Lighting
 - [x] Entity Spawning
 - [x] Bossbar
-- [x] Chunk Loading
+- [x] Chunk Loading (Vanilla, Linear)
 - [x] Chunk Generation
-- [x] Chunk Saving
+- [x] Chunk Saving (Vanilla, Linear)
 - [x] World Time
 - [x] Scoreboard
 - [x] World Borders
diff --git a/pumpkin-config/src/chunk.rs b/pumpkin-config/src/chunk.rs
index edc09560e..8c20f4343 100644
--- a/pumpkin-config/src/chunk.rs
+++ b/pumpkin-config/src/chunk.rs
@@ -6,19 +6,20 @@ use serde::{Deserialize, Serialize};
 #[serde(default)]
 pub struct ChunkConfig {
     pub compression: ChunkCompression,
+    pub format: ChunkFormat,
 }
 
 #[derive(Deserialize, Serialize)]
 pub struct ChunkCompression {
-    pub compression_algorithm: Compression,
-    pub compression_level: u32,
+    pub algorithm: Compression,
+    pub level: u32,
 }
 
 impl Default for ChunkCompression {
     fn default() -> Self {
         Self {
-            compression_algorithm: Compression::LZ4,
-            compression_level: 6,
+            algorithm: Compression::LZ4,
+            level: 6,
         }
     }
 }
@@ -35,3 +36,11 @@ pub enum Compression {
     /// Custom compression algorithm (since 24w05a)
     Custom,
 }
+
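+/// The on-disk region format used when loading and saving chunks.
+///
+/// Illustrative config sketch (assuming this enum is read from the `[chunk]`
+/// table of the advanced config via serde's default enum representation):
+///
+/// ```toml
+/// [chunk]
+/// format = "Linear"
+/// ```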
+#[derive(Deserialize, Serialize, Clone, Default)]
+#[repr(u8)]
+pub enum ChunkFormat {
+    #[default]
+    Anvil,
+    Linear,
+}
diff --git a/pumpkin-world/Cargo.toml b/pumpkin-world/Cargo.toml
index 5cc39fcb6..b7ac97395 100644
--- a/pumpkin-world/Cargo.toml
+++ b/pumpkin-world/Cargo.toml
@@ -30,6 +30,8 @@ num-traits = "0.2"
 # Compression
 flate2 = "1.0"
 lz4 = "1.28"
+zstd = "0.13.2"
+
 file-guard = "0.2"
 indexmap = "2.7"
diff --git a/pumpkin-world/src/chunk/anvil.rs b/pumpkin-world/src/chunk/anvil.rs
index dc761b6a7..766237ac1 100644
--- a/pumpkin-world/src/chunk/anvil.rs
+++ b/pumpkin-world/src/chunk/anvil.rs
@@ -254,17 +254,9 @@ impl ChunkWriter for AnvilChunkFormat {
             .map_err(|err| ChunkWritingError::ChunkSerializingError(err.to_string()))?;
 
         // Compress chunk data
-        let compression: Compression = ADVANCED_CONFIG
-            .chunk
-            .compression
-            .compression_algorithm
-            .clone()
-            .into();
+        let compression: Compression = ADVANCED_CONFIG.chunk.compression.algorithm.clone().into();
         let compressed_data = compression
-            .compress_data(
-                &raw_bytes,
-                ADVANCED_CONFIG.chunk.compression.compression_level,
-            )
+            .compress_data(&raw_bytes, ADVANCED_CONFIG.chunk.compression.level)
             .map_err(ChunkWritingError::Compression)?;
 
         // Length of compressed data + compression type
@@ -526,8 +518,8 @@ mod tests {
     fn test_writing() {
         let generator = get_world_gen(Seed(0));
         let level_folder = LevelFolder {
-            root_folder: PathBuf::from("./tmp"),
-            region_folder: PathBuf::from("./tmp/region"),
+            root_folder: PathBuf::from("./tmp_Anvil"),
+            region_folder: PathBuf::from("./tmp_Anvil/region"),
         };
         if fs::exists(&level_folder.root_folder).unwrap() {
             fs::remove_dir_all(&level_folder.root_folder).expect("Could not delete directory");
diff --git a/pumpkin-world/src/chunk/linear.rs b/pumpkin-world/src/chunk/linear.rs
new file mode 100644
index 000000000..30c386db2
--- /dev/null
+++ b/pumpkin-world/src/chunk/linear.rs
@@ -0,0 +1,537 @@
+use std::fs::{File, OpenOptions};
+use std::io::{Read, Seek, SeekFrom, Write};
+use std::path::Path;
+use std::time::{SystemTime, UNIX_EPOCH};
+
+use crate::{chunk::ChunkWritingError, level::LevelFolder};
+use bytes::{Buf, BufMut};
+use log::error;
+use pumpkin_config::ADVANCED_CONFIG;
+
+use super::anvil::AnvilChunkFormat;
+use super::{
+    ChunkData, ChunkReader, ChunkReadingError, ChunkSerializingError, ChunkWriter,
+    CompressionError, FILE_LOCK_MANAGER,
+};
+
+/// The side length of a region in chunks (one region is 32x32 chunks)
+const REGION_SIZE: usize = 32;
+
+/// The number of bits that identify two chunks in the same region
+const SUBREGION_BITS: u8 = pumpkin_util::math::ceil_log2(REGION_SIZE as u32);
+
+/// The number of chunks in a region
+const CHUNK_COUNT: usize = REGION_SIZE * REGION_SIZE;
+
+/// The signature of the linear file format,
+/// used as a header and footer as described in
+/// <https://gist.github.com/Aaron2550/5701519671253d4c6190bde6706f9f98>
+const SIGNATURE: [u8; 8] = u64::to_be_bytes(0xc3ff13183cca9d9a);
+
+#[derive(Default, Clone, Copy)]
+struct LinearChunkHeader {
+    size: u32,
+    timestamp: u32,
+}
+
+#[derive(Default, PartialEq, Eq, Clone, Copy)]
+pub enum LinearVersion {
+    #[default]
+    /// Represents an invalid or uninitialized version.
+    None = 0x00,
+    /// Version 1 of the Linear Region File Format. (Default)
+    ///
+    /// Described in: <https://github.com/xymb-endcrystalme/LinearRegionFileFormatTools/blob/linearv2/LINEAR.md>
+    V1 = 0x01,
+    /// Version 2 of the Linear Region File Format (currently unsupported).
+    ///
+    /// Described in: <https://github.com/xymb-endcrystalme/LinearRegionFileFormatTools/blob/linearv2/LINEARv2.md>
+    V2 = 0x02,
+}
+
+struct LinearFileHeader {
+    /// ( 0.. 1 Bytes) The version of the Linear Region File format.
+    version: LinearVersion,
+    /// ( 1.. 9 Bytes) The timestamp of the newest chunk in the region file.
+    newest_timestamp: u64,
+    /// ( 9..10 Bytes) The zstd compression level used for chunk data.
+    compression_level: u8,
+    /// (10..12 Bytes) The number of non-zero-size chunks in the region file.
+    chunks_count: u16,
+    /// (12..16 Bytes) The total size in bytes of the compressed chunk headers and chunk data.
+    chunks_bytes: u32,
+    /// (16..24 Bytes) A hash of the region file (unused).
+    region_hash: u64,
+}
+
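+/// In-memory form of a `.linear` region file. On disk (see `save` below) the
+/// layout is:
+///
+/// `SIGNATURE (8) | LinearFileHeader (24) | zstd(chunk headers (8 * 1024) ++ chunk data) | SIGNATURE (8)`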
+struct LinearFile {
+    chunks_headers: Box<[LinearChunkHeader; CHUNK_COUNT]>,
+    chunks_data: Vec<u8>,
+}
+
+#[derive(Clone, Default)]
+pub struct LinearChunkFormat;
+
+impl LinearChunkHeader {
+    const CHUNK_HEADER_SIZE: usize = 8;
+
+    fn from_bytes(bytes: &[u8]) -> Self {
+        let mut bytes = bytes;
+        LinearChunkHeader {
+            size: bytes.get_u32(),
+            timestamp: bytes.get_u32(),
+        }
+    }
+
+    fn to_bytes(self) -> [u8; 8] {
+        let mut bytes = Vec::with_capacity(LinearChunkHeader::CHUNK_HEADER_SIZE);
+
+        bytes.put_u32(self.size);
+        bytes.put_u32(self.timestamp);
+
+        // A size mismatch here would be a clear programming error, so we can
+        // safely unwrap the conversion and panic the entire program otherwise.
+        bytes
+            .try_into()
+            .unwrap_or_else(|_| panic!("ChunkHeader Struct/Size Mismatch"))
+    }
+}
+
+impl From<u8> for LinearVersion {
+    fn from(value: u8) -> Self {
+        match value {
+            0x01 => LinearVersion::V1,
+            0x02 => LinearVersion::V2,
+            _ => LinearVersion::None,
+        }
+    }
+}
+
+impl LinearFileHeader {
+    const FILE_HEADER_SIZE: usize = 24;
+
+    fn check_version(&self) -> Result<(), ChunkReadingError> {
+        match self.version {
+            LinearVersion::None => {
+                error!("Invalid version in the file header");
+                Err(ChunkReadingError::InvalidHeader)
+            }
+            LinearVersion::V2 => {
+                error!("LinearFormat Version 2 for Chunks is not supported yet");
+                Err(ChunkReadingError::InvalidHeader)
+            }
+            _ => Ok(()),
+        }
+    }
+
+    fn from_bytes(bytes: &[u8]) -> Self {
+        let mut buf = bytes;
+
+        LinearFileHeader {
+            version: buf.get_u8().into(),
+            newest_timestamp: buf.get_u64(),
+            compression_level: buf.get_u8(),
+            chunks_count: buf.get_u16(),
+            chunks_bytes: buf.get_u32(),
+            region_hash: buf.get_u64(),
+        }
+    }
+
+    fn to_bytes(&self) -> [u8; Self::FILE_HEADER_SIZE] {
+        let mut bytes: Vec<u8> = Vec::with_capacity(LinearFileHeader::FILE_HEADER_SIZE);
+
+        bytes.put_u8(self.version as u8);
+        bytes.put_u64(self.newest_timestamp);
+        bytes.put_u8(self.compression_level);
+        bytes.put_u16(self.chunks_count);
+        bytes.put_u32(self.chunks_bytes);
+        bytes.put_u64(self.region_hash);
+
+        // A size mismatch here would be a clear programming error, so we can
+        // safely unwrap the conversion and panic the entire program otherwise.
+        bytes
+            .try_into()
+            .unwrap_or_else(|_| panic!("Header Struct/Size Mismatch"))
+    }
+}
+
+impl LinearFile {
+    fn new() -> Self {
+        LinearFile {
+            chunks_headers: Box::new([LinearChunkHeader::default(); CHUNK_COUNT]),
+            chunks_data: vec![],
+        }
+    }
+
+    fn check_signature(file: &mut File) -> Result<(), ChunkReadingError> {
+        let mut signature = [0; 8];
+
+        // Seek to the start of the file
+        file.seek(SeekFrom::Start(0))
+            .map_err(|err| ChunkReadingError::IoError(err.kind()))?;
+        file.read_exact(&mut signature)
+            .map_err(|err| ChunkReadingError::IoError(err.kind()))?;
+        if signature != SIGNATURE {
+            error!("Signature at the start of the file is invalid");
+            return Err(ChunkReadingError::InvalidHeader);
+        }
+
+        // Seek to the end of the file
+        file.seek(SeekFrom::End(-8))
+            .map_err(|err| ChunkReadingError::IoError(err.kind()))?;
+        file.read_exact(&mut signature)
+            .map_err(|err| ChunkReadingError::IoError(err.kind()))?;
+        if signature != SIGNATURE {
+            error!("Signature at the end of the file is invalid");
+            return Err(ChunkReadingError::InvalidHeader);
+        }
+
+        // Rewind so subsequent reads start from the beginning of the file
+        file.rewind()
+            .map_err(|err| ChunkReadingError::IoError(err.kind()))?;
+
+        Ok(())
+    }
+
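+    /// Reads and validates a whole region file: checks the signature at both
+    /// ends, parses the 24-byte file header, then zstd-decodes the remainder
+    /// into 1024 chunk headers followed by the concatenated chunk payloads.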
+    fn load(path: &Path) -> Result<Self, ChunkReadingError> {
+        let mut file = OpenOptions::new()
+            .read(true)
+            .truncate(false)
+            .open(path)
+            .map_err(|err| match err.kind() {
+                std::io::ErrorKind::NotFound => ChunkReadingError::ChunkNotExist,
+                kind => ChunkReadingError::IoError(kind),
+            })?;
+
+        Self::check_signature(&mut file)?;
+
+        // Skip the signature and read the header
+        let mut header_bytes = [0; LinearFileHeader::FILE_HEADER_SIZE];
+        file.seek(SeekFrom::Start(8))
+            .map_err(|err| ChunkReadingError::IoError(err.kind()))?;
+        file.read_exact(&mut header_bytes)
+            .map_err(|err| ChunkReadingError::IoError(err.kind()))?;
+
+        // Parse the header
+        let file_header = LinearFileHeader::from_bytes(&header_bytes);
+        file_header.check_version()?;
+
+        // Read the compressed data
+        let mut compressed_data = vec![0; file_header.chunks_bytes as usize];
+        file.read_exact(compressed_data.as_mut_slice())
+            .map_err(|err| ChunkReadingError::IoError(err.kind()))?;
+
+        if compressed_data.len() != file_header.chunks_bytes as usize {
+            error!(
+                "Invalid compressed data size {} != {}",
+                compressed_data.len(),
+                file_header.chunks_bytes
+            );
+            return Err(ChunkReadingError::InvalidHeader);
+        }
+
+        // Uncompress the data (headers + chunks)
+        let buffer = zstd::decode_all(compressed_data.as_slice())
+            .map_err(|err| ChunkReadingError::IoError(err.kind()))?;
+
+        let (headers_buffer, chunks_buffer) =
+            buffer.split_at(LinearChunkHeader::CHUNK_HEADER_SIZE * CHUNK_COUNT);
+
+        // Parse the chunk headers
+        let chunk_headers: [LinearChunkHeader; CHUNK_COUNT] = headers_buffer
+            .chunks_exact(8)
+            .map(LinearChunkHeader::from_bytes)
+            .collect::<Vec<_>>()
+            .try_into()
+            .map_err(|_| ChunkReadingError::InvalidHeader)?;
+
+        // Check if the total size of the chunks matches the header
+        let total_bytes = chunk_headers.iter().map(|header| header.size).sum::<u32>() as usize;
+        if chunks_buffer.len() != total_bytes {
+            error!(
+                "Invalid total size of the chunks {} != {}",
+                total_bytes,
+                chunks_buffer.len(),
+            );
+            return Err(ChunkReadingError::InvalidHeader);
+        }
+
+        Ok(LinearFile {
+            chunks_headers: Box::new(chunk_headers),
+            chunks_data: chunks_buffer.to_vec(),
+        })
+    }
+
+    fn save(&self, path: &Path) -> Result<(), ChunkWritingError> {
+        // Serialize the chunk headers into a buffer
+        let headers_buffer: Vec<u8> = self
+            .chunks_headers
+            .as_ref()
+            .iter()
+            .flat_map(|header| header.to_bytes())
+            .collect();
+
+        // Compress the headers and the chunk data together
+        let compressed_buffer = zstd::encode_all(
+            [headers_buffer.as_slice(), self.chunks_data.as_slice()]
+                .concat()
+                .as_slice(),
+            ADVANCED_CONFIG.chunk.compression.level as i32,
+        )
+        .map_err(|err| ChunkWritingError::Compression(CompressionError::ZstdError(err)))?;
+
+        // Update the header
+        let file_header = LinearFileHeader {
+            chunks_bytes: compressed_buffer.len() as u32,
+            compression_level: ADVANCED_CONFIG.chunk.compression.level as u8,
+            chunks_count: self
+                .chunks_headers
+                .iter()
+                .filter(|&header| header.size != 0)
+                .count() as u16,
+            newest_timestamp: self
+                .chunks_headers
+                .iter()
+                .map(|header| header.timestamp)
+                .max()
+                .unwrap_or(0) as u64,
+            version: LinearVersion::V1,
+            region_hash: 0,
+        }
+        .to_bytes();
+
+        // Write/overwrite the data to the file
+        let mut file = OpenOptions::new()
+            .write(true)
+            .create(true)
+            .truncate(true)
+            .open(path)
+            .map_err(|err| ChunkWritingError::IoError(err.kind()))?;
+
+        file.write_all(
+            [
+                SIGNATURE.as_slice(),
+                file_header.as_slice(),
+                compressed_buffer.as_slice(),
+                SIGNATURE.as_slice(),
+            ]
+            .concat()
+            .as_slice(),
+        )
+        .map_err(|err| ChunkWritingError::IoError(err.kind()))?;
+
+        Ok(())
+    }
+
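+    /// Chunk payloads are stored back-to-back in chunk-index order, so a
+    /// chunk's offset is the sum of the sizes of all lower-index chunks.
+    /// E.g. with sizes [100, 0, 250, ...] the chunk at index 2 starts at
+    /// byte 100 and spans 250 bytes.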
+    fn get_chunk(
+        &self,
+        at: &pumpkin_util::math::vector2::Vector2<i32>,
+    ) -> Result<ChunkData, ChunkReadingError> {
+        // Check if the chunk exists
+        let chunk_index: usize = LinearChunkFormat::get_chunk_index(at);
+
+        let chunk_size = self.chunks_headers[chunk_index].size as usize;
+        if chunk_size == 0 {
+            return Err(ChunkReadingError::ChunkNotExist);
+        }
+
+        // Iterate over the headers, summing chunk sizes until the desired one
+        let mut offset: usize = 0;
+        for i in 0..chunk_index {
+            offset += self.chunks_headers[i].size as usize;
+        }
+
+        ChunkData::from_bytes(&self.chunks_data[offset..offset + chunk_size], *at)
+            .map_err(ChunkReadingError::ParsingError)
+    }
+
+    fn put_chunk(
+        &mut self,
+        chunk: &ChunkData,
+        at: &pumpkin_util::math::vector2::Vector2<i32>,
+    ) -> Result<(), ChunkSerializingError> {
+        let chunk_index: usize = LinearChunkFormat::get_chunk_index(at);
+        // We use the Anvil format to serialize the chunk
+        let chunk_raw = AnvilChunkFormat {}.to_bytes(chunk)?;
+
+        let new_chunk_size = chunk_raw.len();
+        let old_chunk_size = self.chunks_headers[chunk_index].size as usize;
+
+        self.chunks_headers[chunk_index] = LinearChunkHeader {
+            size: new_chunk_size as u32,
+            timestamp: SystemTime::now()
+                .duration_since(UNIX_EPOCH)
+                .unwrap()
+                .as_secs() as u32,
+        };
+
+        // Calculate the start point of the chunk in the data buffer
+        let mut offset: usize = 0;
+        for i in 0..chunk_index {
+            offset += self.chunks_headers[i].size as usize;
+        }
+
+        let old_total_size = self.chunks_data.len();
+        let new_total_size = (old_total_size + new_chunk_size) - old_chunk_size;
+
+        // Update the data buffer in place (avoiding extra reallocations)
+        if new_chunk_size > old_chunk_size {
+            self.chunks_data.resize(new_total_size, 0);
+        }
+
+        self.chunks_data.copy_within(
+            offset + old_chunk_size..old_total_size,
+            offset + new_chunk_size,
+        );
+
+        self.chunks_data[offset..offset + new_chunk_size].copy_from_slice(&chunk_raw);
+
+        if new_chunk_size < old_chunk_size {
+            self.chunks_data.truncate(new_total_size);
+        }
+
+        Ok(())
+    }
+}
+
+impl LinearChunkFormat {
+    const fn get_region_coords(at: &pumpkin_util::math::vector2::Vector2<i32>) -> (i32, i32) {
+        // Divide by 32 (shift by SUBREGION_BITS) to get the region coordinates
+        (at.x >> SUBREGION_BITS, at.z >> SUBREGION_BITS)
+    }
+
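+    /// E.g. the chunk at (33, -1) lies in region (1, -1); within that region
+    /// decode_x = 33 - (1 << 5) = 1 and decode_z = -1 - (-1 << 5) = 31, so its
+    /// index is (31 << 5) + 1 = 993.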
+    const fn get_chunk_index(at: &pumpkin_util::math::vector2::Vector2<i32>) -> usize {
+        // We only need the last 5 bits of the x and z coordinates
+        let decode_x = at.x - ((at.x >> SUBREGION_BITS) << SUBREGION_BITS);
+        let decode_z = at.z - ((at.z >> SUBREGION_BITS) << SUBREGION_BITS);
+
+        // Calculate the index of the chunk in the region file
+        ((decode_z << SUBREGION_BITS) + decode_x) as usize
+    }
+}
+
+impl ChunkReader for LinearChunkFormat {
+    fn read_chunk(
+        &self,
+        save_file: &LevelFolder,
+        at: &pumpkin_util::math::vector2::Vector2<i32>,
+    ) -> Result<ChunkData, ChunkReadingError> {
+        let (region_x, region_z) = LinearChunkFormat::get_region_coords(at);
+
+        let path = save_file
+            .region_folder
+            .join(format!("./r.{}.{}.linear", region_x, region_z));
+
+        tokio::task::block_in_place(|| {
+            let _reader_guard = FILE_LOCK_MANAGER.get_read_guard(&path);
+            //dbg!("Reading chunk at {:?}", at);
+            LinearFile::load(&path)?.get_chunk(at)
+        })
+    }
+}
+
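+/// Writing is read-modify-write: the whole region file is loaded (or a fresh
+/// one is created if it does not exist yet), the single chunk is spliced into
+/// the in-memory buffer, and the entire file is re-compressed and rewritten
+/// while the write guard is held.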
+impl ChunkWriter for LinearChunkFormat {
+    fn write_chunk(
+        &self,
+        chunk: &ChunkData,
+        level_folder: &LevelFolder,
+        at: &pumpkin_util::math::vector2::Vector2<i32>,
+    ) -> Result<(), ChunkWritingError> {
+        let (region_x, region_z) = LinearChunkFormat::get_region_coords(at);
+
+        let path = level_folder
+            .region_folder
+            .join(format!("./r.{}.{}.linear", region_x, region_z));
+
+        tokio::task::block_in_place(|| {
+            let _writer_guard = FILE_LOCK_MANAGER.get_write_guard(&path);
+            //dbg!("Writing chunk at {:?}", at);
+
+            let mut file_data = match LinearFile::load(&path) {
+                Ok(file_data) => file_data,
+                Err(ChunkReadingError::ChunkNotExist) => LinearFile::new(),
+                Err(ChunkReadingError::IoError(err)) => {
+                    error!("Error reading the data before write: {}", err);
+                    return Err(ChunkWritingError::IoError(err));
+                }
+                Err(_) => return Err(ChunkWritingError::IoError(std::io::ErrorKind::Other)),
+            };
+
+            file_data
+                .put_chunk(chunk, at)
+                .map_err(|err| ChunkWritingError::ChunkSerializingError(err.to_string()))?;
+
+            file_data.save(&path)
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use pumpkin_util::math::vector2::Vector2;
+    use std::fs;
+    use std::path::PathBuf;
+
+    use crate::chunk::ChunkWriter;
+    use crate::generation::{get_world_gen, Seed};
+    use crate::{
+        chunk::{linear::LinearChunkFormat, ChunkReader, ChunkReadingError},
+        level::LevelFolder,
+    };
+
+    #[test]
+    fn not_existing() {
+        let region_path = PathBuf::from("not_existing");
+        let result = LinearChunkFormat.read_chunk(
+            &LevelFolder {
+                root_folder: PathBuf::from(""),
+                region_folder: region_path,
+            },
+            &Vector2::new(0, 0),
+        );
+        assert!(matches!(result, Err(ChunkReadingError::ChunkNotExist)));
+    }
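+
+    // A minimal round-trip sanity check (illustrative): the fixed-size chunk
+    // header is encoded as two big-endian u32s via the `bytes` crate.
+    #[test]
+    fn chunk_header_roundtrip() {
+        let header = super::LinearChunkHeader {
+            size: 1234,
+            timestamp: 5678,
+        };
+        let parsed = super::LinearChunkHeader::from_bytes(&header.to_bytes());
+        assert_eq!((parsed.size, parsed.timestamp), (1234, 5678));
+    }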
+
+    #[test]
+    fn test_writing() {
+        let generator = get_world_gen(Seed(0));
+        let level_folder = LevelFolder {
+            root_folder: PathBuf::from("./tmp_Linear"),
+            region_folder: PathBuf::from("./tmp_Linear/region"),
+        };
+        if fs::exists(&level_folder.root_folder).unwrap() {
+            fs::remove_dir_all(&level_folder.root_folder).expect("Could not delete directory");
+        }
+
+        fs::create_dir_all(&level_folder.region_folder).expect("Could not create directory");
+
+        // Generate chunks
+        let mut chunks = vec![];
+        for x in -5..5 {
+            for y in -5..5 {
+                let position = Vector2::new(x, y);
+                chunks.push((position, generator.generate_chunk(position)));
+            }
+        }
+
+        for i in 0..5 {
+            println!("Iteration {}", i + 1);
+            for (at, chunk) in &chunks {
+                LinearChunkFormat
+                    .write_chunk(chunk, &level_folder, at)
+                    .expect("Failed to write chunk");
+            }
+
+            let mut read_chunks = vec![];
+            for (at, _chunk) in &chunks {
+                read_chunks.push(
+                    LinearChunkFormat
+                        .read_chunk(&level_folder, at)
+                        .expect("Could not read chunk"),
+                );
+            }
+
+            for (at, chunk) in &chunks {
+                let read_chunk = read_chunks
+                    .iter()
+                    .find(|chunk| chunk.position == *at)
+                    .expect("Missing chunk");
+                assert_eq!(chunk.subchunks, read_chunk.subchunks, "Chunks don't match");
+            }
+        }
+
+        fs::remove_dir_all(&level_folder.root_folder).expect("Could not delete directory");
+
+        println!("Checked chunks successfully");
+    }
+}
diff --git a/pumpkin-world/src/chunk/mod.rs b/pumpkin-world/src/chunk/mod.rs
index 1da137069..7c2b1abbe 100644
--- a/pumpkin-world/src/chunk/mod.rs
+++ b/pumpkin-world/src/chunk/mod.rs
@@ -1,8 +1,17 @@
+use dashmap::{
+    mapref::one::{Ref, RefMut},
+    DashMap,
+};
 use fastnbt::LongArray;
 use pumpkin_data::chunk::ChunkStatus;
 use pumpkin_util::math::{ceil_log2, vector2::Vector2};
 use serde::{Deserialize, Serialize};
-use std::{collections::HashMap, iter::repeat_with};
+use std::{
+    collections::HashMap,
+    iter::repeat_with,
+    path::{Path, PathBuf},
+    sync::{Arc, LazyLock},
+};
 use thiserror::Error;
 
 use crate::{
@@ -13,12 +22,16 @@ use crate::{
 };
 
 pub mod anvil;
+pub mod linear;
 
 pub const CHUNK_AREA: usize = 16 * 16;
 pub const SUBCHUNK_VOLUME: usize = CHUNK_AREA * 16;
 pub const SUBCHUNKS_COUNT: usize = WORLD_HEIGHT / 16;
 pub const CHUNK_VOLUME: usize = CHUNK_AREA * WORLD_HEIGHT;
 
+/// File locks manager to prevent multiple threads from writing to the same file at the same time,
+/// while still allowing multiple threads to read from the same file concurrently.
+static FILE_LOCK_MANAGER: LazyLock<Arc<FileLocksManager>> = LazyLock::new(Arc::default);
 pub trait ChunkReader: Sync + Send {
     fn read_chunk(
         &self,
@@ -72,6 +85,33 @@ pub enum CompressionError {
     GZipError(std::io::Error),
     #[error("Error while working with LZ4 compression: {0}")]
     LZ4Error(std::io::Error),
+    #[error("Error while working with zstd compression: {0}")]
+    ZstdError(std::io::Error),
+}
+
+/// A guard that allows reading from a file while preventing writes to it.
+/// This is used to prevent writes while a read is in progress
+/// (avoiding the "write starvation" problem).
+///
+/// When the guard is dropped, the file is unlocked.
+pub struct FileReadGuard<'a> {
+    _guard: Ref<'a, PathBuf, ()>,
+}
+
+/// A guard that allows writing to a file while preventing reads from it.
+/// This is used to prevent multiple threads from writing to the same file at the same time
+/// (avoiding the "write starvation" problem).
+///
+/// When the guard is dropped, the file is unlocked.
+pub struct FileWriteGuard<'a> {
+    _guard: RefMut<'a, PathBuf, ()>,
+}
+
+/// Central file lock manager for chunk files.
+/// This is used to prevent multiple threads from writing to the same file at the same time.
+#[derive(Clone, Default)]
+pub struct FileLocksManager {
+    locks: DashMap<PathBuf, ()>,
 }
 
 #[derive(Clone)]
@@ -163,6 +203,32 @@ struct ChunkNbt {
     heightmaps: ChunkHeightmaps,
 }
 
+impl FileLocksManager {
+    pub fn get_read_guard(&self, path: &Path) -> FileReadGuard {
+        if let Some(lock) = self.locks.get(path) {
+            FileReadGuard { _guard: lock }
+        } else {
+            FileReadGuard {
+                _guard: self
+                    .locks
+                    .entry(path.to_path_buf())
+                    .or_insert(())
+                    .downgrade(),
+            }
+        }
+    }
+
+    pub fn get_write_guard(&self, path: &Path) -> FileWriteGuard {
+        FileWriteGuard {
+            _guard: self.locks.entry(path.to_path_buf()).or_insert(()),
+        }
+    }
+
+    pub fn remove_file_lock(path: &Path) {
+        FILE_LOCK_MANAGER.locks.remove(path);
+    }
+}
+
 /// The Heightmap for a completely empty chunk
 impl Default for ChunkHeightmaps {
     fn default() -> Self {
diff --git a/pumpkin-world/src/level.rs b/pumpkin-world/src/level.rs
index 3c65aacd9..7d8f09e79 100644
--- a/pumpkin-world/src/level.rs
+++ b/pumpkin-world/src/level.rs
@@ -2,6 +2,7 @@ use std::{path::PathBuf, sync::Arc};
 
 use dashmap::{DashMap, Entry};
 use num_traits::Zero;
+use pumpkin_config::{chunk::ChunkFormat, ADVANCED_CONFIG};
 use pumpkin_util::math::vector2::Vector2;
 use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
 use tokio::{
@@ -11,8 +12,8 @@ use tokio::{
 
 use crate::{
     chunk::{
-        anvil::AnvilChunkFormat, ChunkData, ChunkParsingError, ChunkReader, ChunkReadingError,
-        ChunkWriter,
+        anvil::AnvilChunkFormat, linear::LinearChunkFormat, ChunkData, ChunkParsingError,
+        ChunkReader, ChunkReadingError, ChunkWriter,
     },
     generation::{get_world_gen, Seed, WorldGenerator},
     lock::{anvil::AnvilLevelLocker, LevelLocker},
@@ -72,13 +73,19 @@ impl Level {
         let seed = Seed(level_info.world_gen_settings.seed as u64);
         let world_gen = get_world_gen(seed).into();
 
+        let chunk_format: (Arc<dyn ChunkReader>, Arc<dyn ChunkWriter>) =
+            match ADVANCED_CONFIG.chunk.format {
+                ChunkFormat::Anvil => (Arc::new(AnvilChunkFormat), Arc::new(AnvilChunkFormat)),
+                ChunkFormat::Linear => (Arc::new(LinearChunkFormat), Arc::new(LinearChunkFormat)),
+            };
+
         Self {
             seed,
             world_gen,
             world_info_writer: Arc::new(AnvilLevelInfo),
             level_folder,
-            chunk_reader: Arc::new(AnvilChunkFormat),
-            chunk_writer: Arc::new(AnvilChunkFormat),
+            chunk_reader: chunk_format.0,
+            chunk_writer: chunk_format.1,
             loaded_chunks: Arc::new(DashMap::new()),
             chunk_watchers: Arc::new(DashMap::new()),
             level_info,