azalea/pathfinder/world.rs

use core::f32;
use std::{
    array,
    cell::{RefCell, UnsafeCell},
    mem,
    sync::Arc,
};

use azalea_block::{BlockState, properties};
use azalea_core::{
    bitset::FastFixedBitSet,
    position::{BlockPos, ChunkPos, ChunkSectionBlockPos},
};
use azalea_physics::collision::BlockWithShape;
use azalea_registry::{builtin::BlockKind, tags};
use azalea_world::{World, palette::PalettedContainer};
use parking_lot::RwLock;
use rustc_hash::FxHashMap;

use super::{mining::MiningCache, positions::RelBlockPos};
use crate::pathfinder::positions::SmallChunkSectionPos;

const MAX_VIEW_DISTANCE: usize = 32;

/// An efficient representation of the world used for the pathfinder.
pub struct CachedWorld {
    /// The origin that the [`RelBlockPos`] types will be relative to.
    ///
    /// This is for an optimization that reduces the size of the block positions
    /// that are used by the pathfinder.
    origin: BlockPos,

    min_y: i32,
    world_lock: Arc<RwLock<World>>,

    // we use the bounded cache by default and then switch if it gets too big
    bounded_chunk_cache: RefCell<[(ChunkPos, CachedChunk); MAX_VIEW_DISTANCE * MAX_VIEW_DISTANCE]>,
    unbounded_chunk_cache: RefCell<FxHashMap<ChunkPos, CachedChunk>>,

    cached_blocks: UnsafeCell<CachedSections>,

    #[allow(clippy::type_complexity)]
    cached_mining_costs: UnsafeCell<Option<Box<[(RelBlockPos, f32)]>>>,
}

// we store `PalettedContainer`s instead of `Chunk`s or `Section`s because they
// don't contain any unnecessary data like heightmaps or biomes.
type CachedChunk = Box<[PalettedContainer<BlockState>]>;

#[derive(Default)]
pub struct CachedSections {
    pub last_index: usize,
    pub second_last_index: usize,
    pub sections: Vec<CachedSection>,
}

impl CachedSections {
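    /// Get a mutable reference to the cached section at `pos`, checking the
    /// two most recently returned entries before falling back to a binary
    /// search over the sorted `sections` list.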
    #[inline]
    pub fn get_mut(&mut self, pos: SmallChunkSectionPos) -> Option<&mut CachedSection> {
        if let Some(last_item) = self.sections.get(self.last_index) {
            if last_item.pos == pos {
                return Some(&mut self.sections[self.last_index]);
            } else if let Some(second_last_item) = self.sections.get(self.second_last_index)
                && second_last_item.pos == pos
            {
                return Some(&mut self.sections[self.second_last_index]);
            }
        }

        let index = self
            .sections
            .binary_search_by(|section| section.pos.cmp(&pos))
            .ok();

        if let Some(index) = index {
            self.second_last_index = self.last_index;
            self.last_index = index;
            return Some(&mut self.sections[index]);
        }
        None
    }

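    /// Insert a section while keeping `sections` sorted by position, so
    /// [`CachedSections::get_mut`] can binary search it.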
    #[inline]
    pub fn insert(&mut self, section: CachedSection) {
        // insert in sorted position so `sections` stays ordered (equivalent to
        // pushing and re-sorting, but without sorting the whole list)
        let index = self
            .sections
            .binary_search_by(|s| s.pos.cmp(&section.pos))
            .unwrap_or_else(|e| e);
        self.sections.insert(index, section);
    }
}

pub struct CachedSection {
    pub pos: SmallChunkSectionPos,
    pub bitsets: Box<SectionBitsets>,
}
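/// Cached pathfinding properties for every block in a 16x16x16 chunk section,
/// one bit per block, so repeated queries don't have to go through the block
/// palette again.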
#[derive(Default)]
pub struct SectionBitsets {
    /// Blocks that we can fully pass through (like air).
    pub passable: FastFixedBitSet<4096>,
    /// Blocks that we can stand on and do parkour from.
    pub solid: FastFixedBitSet<4096>,
    /// Blocks that we can stand on but might not be able to parkour from.
    pub standable: FastFixedBitSet<4096>,
    /// Water source blocks.
    pub water: FastFixedBitSet<4096>,
}

impl CachedWorld {
    pub fn new(world_lock: Arc<RwLock<World>>, origin: BlockPos) -> Self {
        let min_y = world_lock.read().chunks.min_y;
        Self {
            origin,
            min_y,
            world_lock,
            bounded_chunk_cache: RefCell::new(array::from_fn(|_| {
                (ChunkPos::new(i32::MAX, i32::MAX), Default::default())
            })),
            unbounded_chunk_cache: Default::default(),
            cached_blocks: Default::default(),
            cached_mining_costs: UnsafeCell::new(None),
        }
    }

    // An example of how `with_section` below can be used to read a block (the
    // real version of this is `get_block_state_at_pos`):
    //
    // ```
    // fn get_block_state(&self, pos: BlockPos) -> Option<BlockState> {
    //     self.with_section(SmallChunkSectionPos::from(pos), |section| {
    //         section.get_at_index(u16::from(ChunkSectionBlockPos::from(pos)) as usize)
    //     })
    // }
    // ```

    fn with_section<T>(
        &self,
        section_pos: SmallChunkSectionPos,
        f: impl FnOnce(&PalettedContainer<BlockState>) -> T,
    ) -> Option<T> {
        if section_pos.y * 16 < self.min_y {
            // y position is out of bounds
            return None;
        }

        let chunk_pos = ChunkPos::new(section_pos.x as i32, section_pos.z as i32);
        let section_index =
            azalea_world::chunk_storage::section_index(section_pos.y * 16, self.min_y) as usize;

        let mut cache_idx = 0;

        let mut unbounded_chunk_cache = self.unbounded_chunk_cache.borrow_mut();
        let mut bounded_chunk_cache = self.bounded_chunk_cache.borrow_mut();
        if unbounded_chunk_cache.is_empty() {
            const D: i32 = MAX_VIEW_DISTANCE as i32;
            let cache_x = i32::rem_euclid(chunk_pos.x, D);
            let cache_z = i32::rem_euclid(chunk_pos.z, D);
            cache_idx = (cache_x * D + cache_z) as usize;
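            // e.g. with D = 32, chunk (-1, 5) maps to slot 31 * 32 + 5 = 997.
            // every chunk has exactly one slot, so if the slot below holds a
            // different chunk then ours isn't in the bounded cache at all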

            // get section from cache
            if !bounded_chunk_cache[cache_idx].1.is_empty() {
                if bounded_chunk_cache[cache_idx].0 == chunk_pos {
                    let sections = &bounded_chunk_cache[cache_idx].1;
                    if section_index >= sections.len() {
                        // y position is out of bounds
                        return None;
                    }
                    let section = &sections[section_index];
                    return Some(f(section));
                }

                // the slot is taken by a different chunk, so the bounded cache
                // is too small; move everything into the unbounded cache :(
                // then fall through and load our chunk from the world
                for (moving_chunk_pos, moving_chunk) in bounded_chunk_cache.iter_mut() {
                    if !moving_chunk.is_empty() {
                        unbounded_chunk_cache.insert(*moving_chunk_pos, mem::take(moving_chunk));
                    }
                }
            }
        } else if let Some(sections) = unbounded_chunk_cache.get(&chunk_pos) {
            if section_index >= sections.len() {
                // y position is out of bounds
                return None;
            }
            let section = &sections[section_index];
            return Some(f(section));
        }

        let world = self.world_lock.read();
        let chunk = world.chunks.get(&chunk_pos)?;
        let chunk = chunk.read();

        let sections = chunk
            .sections
            .iter()
            .map(|section| section.states.clone())
            .collect::<Box<[PalettedContainer<BlockState>]>>();

        if section_index >= sections.len() {
            // y position is out of bounds
            return None;
        }

        let section = &sections[section_index];
        let r = f(section);

        // add the sections to the chunk cache
        if unbounded_chunk_cache.is_empty() {
            bounded_chunk_cache[cache_idx] = (chunk_pos, sections);
        } else {
            unbounded_chunk_cache.insert(chunk_pos, sections);
        }

        Some(r)
    }

    fn calculate_bitsets_for_section(&self, section_pos: SmallChunkSectionPos) -> CachedSection {
        let bitsets = self
            .with_section(section_pos, |section| {
                let mut passable_bitset = FastFixedBitSet::<4096>::new();
                let mut solid_bitset = FastFixedBitSet::<4096>::new();
                let mut standable_bitset = FastFixedBitSet::<4096>::new();
                let mut water_bitset = FastFixedBitSet::<4096>::new();

                for i in 0..4096 {
                    let block_state = section.get_at_index(i);
                    if is_block_state_passable(block_state) {
                        passable_bitset.set(i);
                    }
                    if is_block_state_solid(block_state) {
                        solid_bitset.set(i);
                    }
                    if is_block_state_standable(block_state) {
                        standable_bitset.set(i);
                    }
                    if is_block_state_water(block_state) {
                        water_bitset.set(i);
                    }
                }
                Box::new(SectionBitsets {
                    passable: passable_bitset,
                    solid: solid_bitset,
                    standable: standable_bitset,
                    water: water_bitset,
                })
            })
            .unwrap_or_default();

        CachedSection {
            pos: section_pos,
            bitsets,
        }
    }

    fn check_bitset_for_block(
        &self,
        pos: BlockPos,
        cb: impl FnOnce(&SectionBitsets, usize) -> bool,
    ) -> bool {
        let (section_pos, section_block_pos) = (
            SmallChunkSectionPos::from(pos),
            ChunkSectionBlockPos::from(pos),
        );
        let index = u16::from(section_block_pos) as usize;
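        // the u16 form of a ChunkSectionBlockPos gives every block in the
        // section a unique index in 0..4096, matching the bitset layout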
        // SAFETY: we're only accessing this from one thread
        let cached_blocks = unsafe { &mut *self.cached_blocks.get() };
        if let Some(cached) = cached_blocks.get_mut(section_pos) {
            return cb(&cached.bitsets, index);
        }

        let cached = self.calculate_bitsets_for_section(section_pos);
        let result = cb(&cached.bitsets, index);
        cached_blocks.insert(cached);
        result
    }

    pub fn is_block_passable(&self, pos: RelBlockPos) -> bool {
        self.is_block_pos_passable(pos.apply(self.origin))
    }
    fn is_block_pos_passable(&self, pos: BlockPos) -> bool {
        self.check_bitset_for_block(pos, |bitsets, index| bitsets.passable.index(index))
    }

    pub fn is_block_water(&self, pos: RelBlockPos) -> bool {
        self.is_block_pos_water(pos.apply(self.origin))
    }
    fn is_block_pos_water(&self, pos: BlockPos) -> bool {
        self.check_bitset_for_block(pos, |bitsets, index| bitsets.water.index(index))
    }

    /// Get the block state at the given position.
    ///
    /// This is relatively slow, so you should avoid it whenever possible.
    pub fn get_block_state(&self, pos: RelBlockPos) -> BlockState {
        self.get_block_state_at_pos(pos.apply(self.origin))
    }

    fn get_block_state_at_pos(&self, pos: BlockPos) -> BlockState {
        let (section_pos, section_block_pos) = (
            SmallChunkSectionPos::from(pos),
            ChunkSectionBlockPos::from(pos),
        );
        let index = u16::from(section_block_pos) as usize;

        self.with_section(section_pos, |section| section.get_at_index(index))
            .unwrap_or_default()
    }

    pub fn is_block_solid(&self, pos: RelBlockPos) -> bool {
        self.is_block_pos_solid(pos.apply(self.origin))
    }
    pub fn is_block_standable(&self, pos: RelBlockPos) -> bool {
        self.is_block_pos_standable(pos.apply(self.origin))
    }

    fn is_block_pos_solid(&self, pos: BlockPos) -> bool {
        self.check_bitset_for_block(pos, |bitsets, index| bitsets.solid.index(index))
    }
    fn is_block_pos_standable(&self, pos: BlockPos) -> bool {
        self.check_bitset_for_block(pos, |bitsets, index| bitsets.standable.index(index))
    }

    /// Returns how much it costs to break this block.
    ///
    /// Returns 0 if the block is already passable.
    pub fn cost_for_breaking_block(&self, pos: RelBlockPos, mining_cache: &MiningCache) -> f32 {
        let cached_mining_costs = self.cached_mining_costs();

        let hash_index = calculate_cached_mining_costs_index(pos);
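        // the index is a lossy hash, so the stored position has to be compared
        // against the real one before the cached cost can be trusted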
        let &(cached_pos, potential_cost) =
            unsafe { cached_mining_costs.get_unchecked(hash_index) };
        if cached_pos == pos {
            return potential_cost;
        }

        let cost = self.uncached_cost_for_breaking_block(pos, mining_cache);
        unsafe {
            *cached_mining_costs.get_unchecked_mut(hash_index) = (pos, cost);
        }

        cost
    }

    // this is fine because pathfinding is single-threaded
    #[allow(clippy::mut_from_ref)]
    fn cached_mining_costs(&self) -> &mut [(RelBlockPos, f32)] {
        // SAFETY: again, pathfinding is single-threaded
        let cached_mining_costs = unsafe { &mut *self.cached_mining_costs.get() };
        if let Some(cached_mining_costs) = cached_mining_costs {
            return cached_mining_costs;
        }
        // initialization is delayed so we don't have to allocate this if it's
        // never used

        // this uses about 48 MB of memory, but it *really* helps
        *cached_mining_costs = Some(
            vec![(RelBlockPos::new(i16::MAX, i32::MAX, i16::MAX), 0.); CACHED_MINING_COSTS_SIZE]
                .into(),
        );

        cached_mining_costs.as_mut().unwrap()
    }

    fn uncached_cost_for_breaking_block(
        &self,
        pos: RelBlockPos,
        mining_cache: &MiningCache,
    ) -> f32 {
        if self.is_block_passable(pos) {
            // if the block is passable then it doesn't need to be broken
            return 0.;
        }

        let rel_pos = pos;
        let pos = pos.apply(self.origin);

        let (section_pos, section_block_pos) = (
            SmallChunkSectionPos::from(pos),
            ChunkSectionBlockPos::from(pos),
        );

        // track which neighbors are in the same section, as an optimization so
        // we can usually check them without fetching another section
        let up_is_in_same_section = section_block_pos.y != 15;
        let north_is_in_same_section = section_block_pos.z != 0;
        let east_is_in_same_section = section_block_pos.x != 15;
        let south_is_in_same_section = section_block_pos.z != 15;
        let west_is_in_same_section = section_block_pos.x != 0;

        let mut is_falling_block_above = false;

        let Some(mut mining_cost) = self.with_section(section_pos, |section| {
            let block_state = section.get_at_index(u16::from(section_block_pos) as usize);
            let mining_cost = mining_cache.cost_for(block_state);

            if mining_cost == f32::INFINITY {
                // the block is unbreakable
                return f32::INFINITY;
            }

            // abort if there's liquid above this block; if there's a falling
            // block above, remember it so its breaking cost can be added later
            if up_is_in_same_section {
                let up_block = section.get_at_index(u16::from(section_block_pos.up(1)) as usize);
                if mining_cache.is_liquid(up_block) {
                    return f32::INFINITY;
                }
                if mining_cache.is_falling_block(up_block) {
                    is_falling_block_above = true;
                }
            }

            // if there's a liquid to the north of this block, abort
            if north_is_in_same_section {
                let north_block =
                    section.get_at_index(u16::from(section_block_pos.north(1)) as usize);
                if mining_cache.is_liquid(north_block) {
                    return f32::INFINITY;
                }
            }

            // liquid to the east
            if east_is_in_same_section {
                let east_block =
                    section.get_at_index(u16::from(section_block_pos.east(1)) as usize);
                if mining_cache.is_liquid(east_block) {
                    return f32::INFINITY;
                }
            }

            // liquid to the south
            if south_is_in_same_section {
                let south_block =
                    section.get_at_index(u16::from(section_block_pos.south(1)) as usize);
                if mining_cache.is_liquid(south_block) {
                    return f32::INFINITY;
                }
            }

            // liquid to the west
            if west_is_in_same_section {
                let west_block =
                    section.get_at_index(u16::from(section_block_pos.west(1)) as usize);
                if mining_cache.is_liquid(west_block) {
                    return f32::INFINITY;
                }
            }

            // the block looks safe to break; the adjacent blocks that weren't
            // in the same section still get checked afterwards
            mining_cost
        }) else {
            // the chunk isn't loaded
            return if self.is_block_pos_solid(pos) {
                // assume it's unbreakable if it's solid and out of render distance
                f32::INFINITY
            } else {
                0.
            };
        };

        if mining_cost == f32::INFINITY {
            // the block is unbreakable
            return f32::INFINITY;
        }

        fn check_should_avoid_this_block(
            world: &CachedWorld,
            pos: BlockPos,
            check: impl FnOnce(BlockState) -> bool,
        ) -> bool {
            let block_state = world
                .with_section(SmallChunkSectionPos::from(pos), |section| {
                    section.get_at_index(u16::from(ChunkSectionBlockPos::from(pos)) as usize)
                })
                .unwrap_or_default();
            check(block_state)
        }

        // check the adjacent blocks that weren't in the same section
        if !up_is_in_same_section
            && check_should_avoid_this_block(self, pos.up(1), |b| {
                if mining_cache.is_falling_block(b) {
                    is_falling_block_above = true;
                }
                mining_cache.is_liquid(b)
            })
        {
            return f32::INFINITY;
        }
        if !north_is_in_same_section
            && check_should_avoid_this_block(self, pos.north(1), |b| mining_cache.is_liquid(b))
        {
            return f32::INFINITY;
        }
        if !east_is_in_same_section
            && check_should_avoid_this_block(self, pos.east(1), |b| mining_cache.is_liquid(b))
        {
            return f32::INFINITY;
        }
        if !south_is_in_same_section
            && check_should_avoid_this_block(self, pos.south(1), |b| mining_cache.is_liquid(b))
        {
            return f32::INFINITY;
        }
        if !west_is_in_same_section
            && check_should_avoid_this_block(self, pos.west(1), |b| mining_cache.is_liquid(b))
        {
            return f32::INFINITY;
        }

        if is_falling_block_above {
            mining_cost += self.cost_for_breaking_block(rel_pos.up(1), mining_cache);
        }

        mining_cost
    }

    /// Whether this block and the block above are passable.
    pub fn is_passable(&self, pos: RelBlockPos) -> bool {
        self.is_passable_at_block_pos(pos.apply(self.origin))
    }
    fn is_passable_at_block_pos(&self, pos: BlockPos) -> bool {
        self.is_block_pos_passable(pos) && self.is_block_pos_passable(pos.up(1))
    }

    pub fn cost_for_passing(&self, pos: RelBlockPos, mining_cache: &MiningCache) -> f32 {
        self.cost_for_breaking_block(pos, mining_cache)
            + self.cost_for_breaking_block(pos.up(1), mining_cache)
    }

    /// Whether we can stand in this position.
    ///
    /// Checks if the block below is solid, and that the two blocks above that
    /// are passable.
    pub fn is_standable(&self, pos: RelBlockPos) -> bool {
        self.is_standable_at_block_pos(pos.apply(self.origin))
    }
    fn is_standable_at_block_pos(&self, pos: BlockPos) -> bool {
        self.is_block_pos_standable(pos.down(1)) && self.is_passable_at_block_pos(pos)
    }

    pub fn cost_for_standing(&self, pos: RelBlockPos, mining_cache: &MiningCache) -> f32 {
        if !self.is_block_standable(pos.down(1)) {
            return f32::INFINITY;
        }
        self.cost_for_passing(pos, mining_cache)
    }

    /// Get the number of air/passable blocks below this one until the next
    /// non-passable block.
    ///
    /// Returns [`u32::MAX`] if the fall would continue past the bottom of the
    /// world.
    pub fn fall_distance(&self, pos: RelBlockPos) -> u32 {
        let mut distance = 0;
        let mut current_pos = pos.down(1);
        while self.is_block_passable(current_pos) {
            distance += 1;
            current_pos = current_pos.down(1);

            if current_pos.y < self.min_y {
                return u32::MAX;
            }
        }
        distance
    }

    pub fn origin(&self) -> BlockPos {
        self.origin
    }
}

const CACHED_MINING_COSTS_SIZE: usize = 2usize.pow(22);
fn calculate_cached_mining_costs_index(pos: RelBlockPos) -> usize {
    // create a 22-bit index by taking the bottom bits from each axis

    const X_BITS: usize = 8;
    const Y_BITS: usize = 6;
    const Z_BITS: usize = 8;

    const X_MASK: usize = (1 << X_BITS) - 1;
    const Y_MASK: usize = (1 << Y_BITS) - 1;
    const Z_MASK: usize = (1 << Z_BITS) - 1;

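    // e.g. (x=1, y=2, z=3) packs to (1 << 14) | (3 << 6) | 2 = 16578. distant
    // positions can collide since only the low bits of each axis are kept,
    // which is why `cost_for_breaking_block` compares the stored position
    // before trusting a cached cost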
    let hash_index = ((pos.x as usize & X_MASK) << (Y_BITS + Z_BITS))
        | ((pos.z as usize & Z_MASK) << Y_BITS)
        | (pos.y as usize & Y_MASK);
    debug_assert!(hash_index < CACHED_MINING_COSTS_SIZE);
    hash_index
}

/// Whether our client could pass through this block.
pub fn is_block_state_passable(block_state: BlockState) -> bool {
    // I already tried optimizing this by caching it in an IntMap/FxHashMap,
    // but it wasn't measurably faster

    if block_state.is_air() {
        // fast path
        return true;
    }
    if !block_state.is_collision_shape_empty() {
        return false;
    }
    let registry_block = BlockKind::from(block_state);
    if registry_block == BlockKind::Water {
        return false;
    }
    if block_state
        .property::<properties::Waterlogged>()
        .unwrap_or_default()
    {
        return false;
    }
    if registry_block == BlockKind::Lava {
        return false;
    }
    // the waterlogged check above currently doesn't account for seagrass and
    // some other water blocks
    if block_state == BlockKind::Seagrass.into() {
        return false;
    }

    // don't walk into fire
    if registry_block == BlockKind::Fire || registry_block == BlockKind::SoulFire {
        return false;
    }

    if registry_block == BlockKind::PowderSnow {
        // we can't jump out of powder snow
        return false;
    }

    if registry_block == BlockKind::SweetBerryBush {
        // these hurt us
        return false;
    }

    true
}

/// Whether this block has a solid hitbox at the top (i.e. we can stand on it
/// and do parkour from it).
#[inline]
pub fn is_block_state_solid(block_state: BlockState) -> bool {
    if block_state.is_air() {
        // fast path
        return false;
    }

    // magma is a hazard, so don't treat it as solid even though it technically
    // is
    if block_state == BlockState::from(BlockKind::MagmaBlock) {
        return false;
    }

    if block_state.is_collision_shape_full() {
        return true;
    }

    if matches!(
        block_state.property::<properties::Type>(),
        Some(properties::Type::Top | properties::Type::Double)
    ) {
        // top slabs
        return true;
    }

    let block = BlockKind::from(block_state);
    // these are solid enough to stand on
    if matches!(block, BlockKind::DirtPath | BlockKind::Farmland) {
        return true;
    }

    false
}

/// Whether we can stand on this block (but not necessarily do parkour jumps
/// from it).
pub fn is_block_state_standable(block_state: BlockState) -> bool {
    if is_block_state_solid(block_state) {
        return true;
    }

    let block = BlockKind::from(block_state);
    if tags::blocks::SLABS.contains(&block) || tags::blocks::STAIRS.contains(&block) {
        return true;
    }

    false
}

pub fn is_block_state_water(block_state: BlockState) -> bool {
    // only matches the default block state, since that's what water source
    // blocks are
    block_state == BlockState::from(BlockKind::Water)
}

#[cfg(test)]
mod tests {
    use azalea_world::{Chunk, ChunkStorage, PartialWorld};

    use super::*;

    #[test]
    fn test_is_passable() {
        let mut partial_world = PartialWorld::default();
        let mut world = ChunkStorage::default();

        partial_world
            .chunks
            .set(&ChunkPos { x: 0, z: 0 }, Some(Chunk::default()), &mut world);
        partial_world.chunks.set_block_state(
            BlockPos::new(0, 0, 0),
            BlockKind::Stone.into(),
            &world,
        );
        partial_world
            .chunks
            .set_block_state(BlockPos::new(0, 1, 0), BlockState::AIR, &world);

        let ctx = CachedWorld::new(Arc::new(RwLock::new(world.into())), BlockPos::default());
        assert!(!ctx.is_block_pos_passable(BlockPos::new(0, 0, 0)));
        assert!(ctx.is_block_pos_passable(BlockPos::new(0, 1, 0)));
    }

    #[test]
    fn test_is_solid() {
        let mut partial_world = PartialWorld::default();
        let mut world = ChunkStorage::default();
        partial_world
            .chunks
            .set(&ChunkPos { x: 0, z: 0 }, Some(Chunk::default()), &mut world);
        partial_world.chunks.set_block_state(
            BlockPos::new(0, 0, 0),
            BlockKind::Stone.into(),
            &world,
        );
        partial_world
            .chunks
            .set_block_state(BlockPos::new(0, 1, 0), BlockState::AIR, &world);

        let ctx = CachedWorld::new(Arc::new(RwLock::new(world.into())), BlockPos::default());
        assert!(ctx.is_block_pos_solid(BlockPos::new(0, 0, 0)));
        assert!(!ctx.is_block_pos_solid(BlockPos::new(0, 1, 0)));
    }

    #[test]
    fn test_is_standable() {
        let mut partial_world = PartialWorld::default();
        let mut world = ChunkStorage::default();
        partial_world
            .chunks
            .set(&ChunkPos { x: 0, z: 0 }, Some(Chunk::default()), &mut world);
        partial_world.chunks.set_block_state(
            BlockPos::new(0, 0, 0),
            BlockKind::Stone.into(),
            &world,
        );
        partial_world
            .chunks
            .set_block_state(BlockPos::new(0, 1, 0), BlockState::AIR, &world);
        partial_world
            .chunks
            .set_block_state(BlockPos::new(0, 2, 0), BlockState::AIR, &world);
        partial_world
            .chunks
            .set_block_state(BlockPos::new(0, 3, 0), BlockState::AIR, &world);

        let ctx = CachedWorld::new(Arc::new(RwLock::new(world.into())), BlockPos::default());
        assert!(ctx.is_standable_at_block_pos(BlockPos::new(0, 1, 0)));
        assert!(!ctx.is_standable_at_block_pos(BlockPos::new(0, 0, 0)));
        assert!(!ctx.is_standable_at_block_pos(BlockPos::new(0, 2, 0)));
    }
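
    // A sketch of a sanity check for the mining-cost cache index (not part of
    // the original tests): the packed 22-bit index must always fit in the
    // cache, even for negative or extreme coordinates.
    #[test]
    fn test_cached_mining_costs_index_in_bounds() {
        for &(x, y, z) in &[
            (0, 0, 0),
            (1, 2, 3),
            (-1, -64, -1),
            (i16::MAX, i32::MAX, i16::MAX),
            (i16::MIN, i32::MIN, i16::MIN),
        ] {
            let index = calculate_cached_mining_costs_index(RelBlockPos::new(x, y, z));
            assert!(index < CACHED_MINING_COSTS_SIZE);
        }
    }

    // A sketch of the classifications expected from the block-state
    // predicates, assuming the default states for these registry blocks.
    #[test]
    fn test_block_state_predicates() {
        assert!(is_block_state_passable(BlockState::AIR));
        assert!(!is_block_state_passable(BlockKind::Stone.into()));
        assert!(is_block_state_solid(BlockKind::Stone.into()));
        assert!(!is_block_state_solid(BlockState::AIR));
        assert!(is_block_state_water(BlockKind::Water.into()));
        assert!(!is_block_state_water(BlockState::AIR));
    }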
}