azalea_client/plugins/task_pool.rs

//! Borrowed from `bevy_core`.

use std::marker::PhantomData;

use bevy_app::{App, Last, Plugin};
use bevy_ecs::prelude::*;
use bevy_tasks::{
    AsyncComputeTaskPool, ComputeTaskPool, IoTaskPool, TaskPoolBuilder,
    tick_global_task_pools_on_main_thread,
};

/// Setup of default task pools: `AsyncComputeTaskPool`, `ComputeTaskPool`,
/// `IoTaskPool`.
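///
/// # Example
///
/// A minimal sketch of registering the plugin on a bare `App` (imports
/// omitted; adjust the paths to wherever this plugin is exported from):
///
/// ```ignore
/// App::new().add_plugins(TaskPoolPlugin {
///     task_pool_options: TaskPoolOptions::default(),
/// });
/// ```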
#[derive(Default)]
pub struct TaskPoolPlugin {
    /// Options for the [`TaskPool`](bevy_tasks::TaskPool) created at
    /// application start.
    pub task_pool_options: TaskPoolOptions,
}

impl Plugin for TaskPoolPlugin {
    fn build(&self, app: &mut App) {
        // Setup the default bevy task pools
        self.task_pool_options.create_default_pools();

        #[cfg(not(target_arch = "wasm32"))]
        app.add_systems(Last, tick_global_task_pools);
    }
}

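/// A dummy `!Send` type; requesting it as a [`NonSend`] parameter forces
/// [`tick_global_task_pools`] to run on the main thread.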
pub struct NonSendMarker(PhantomData<*mut ()>);
#[cfg(not(target_arch = "wasm32"))]
fn tick_global_task_pools(_main_thread_marker: Option<NonSend<NonSendMarker>>) {
    tick_global_task_pools_on_main_thread();
}

/// Helper for configuring and creating the default task pools.
///
/// For end-users who want full control, set up [`TaskPoolPlugin`].
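///
/// # Example
///
/// A sketch of overriding the thread totals while keeping the per-pool
/// policies at their defaults (the numbers are illustrative only):
///
/// ```ignore
/// let options = TaskPoolOptions {
///     min_total_threads: 2,
///     max_total_threads: 8,
///     ..Default::default()
/// };
/// options.create_default_pools();
/// ```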
#[derive(Clone, Resource)]
pub struct TaskPoolOptions {
    /// If the number of physical cores is less than min_total_threads, force
    /// using min_total_threads
    pub min_total_threads: usize,
    /// If the number of physical cores is greater than max_total_threads, force
    /// using max_total_threads
    pub max_total_threads: usize,

    /// Used to determine number of IO threads to allocate
    pub io: TaskPoolThreadAssignmentPolicy,
    /// Used to determine number of async compute threads to allocate
    pub async_compute: TaskPoolThreadAssignmentPolicy,
    /// Used to determine number of compute threads to allocate
    pub compute: TaskPoolThreadAssignmentPolicy,
}

impl Default for TaskPoolOptions {
    fn default() -> Self {
        TaskPoolOptions {
            // By default, use however many cores are available on the system
            min_total_threads: 1,
            max_total_threads: usize::MAX,

            // Use 25% of cores for IO, at least 1, no more than 4
            io: TaskPoolThreadAssignmentPolicy {
                min_threads: 1,
                max_threads: 4,
                percent: 0.25,
            },

            // Use 25% of cores for async compute, at least 1, no more than 4
            async_compute: TaskPoolThreadAssignmentPolicy {
                min_threads: 1,
                max_threads: 4,
                percent: 0.25,
            },

            // Use all remaining cores for compute (at least 1)
            compute: TaskPoolThreadAssignmentPolicy {
                min_threads: 1,
                max_threads: usize::MAX,
                percent: 1.0, // This 1.0 here means "whatever is left over"
            },
        }
    }
}

impl TaskPoolOptions {
    /// Initializes the global task pools ([`IoTaskPool`], [`AsyncComputeTaskPool`],
    /// [`ComputeTaskPool`]) based on the configured values.
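    ///
    /// Each pool is created with `get_or_init`, so if a pool was already
    /// initialized elsewhere these options have no effect on it.
    ///
    /// As a rough sketch with the default options on an 8-core machine: IO gets
    /// `round(8 * 0.25) = 2` threads, async compute gets another 2, and compute
    /// takes the remaining 4.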
    pub fn create_default_pools(&self) {
        let total_threads = bevy_tasks::available_parallelism()
            .clamp(self.min_total_threads, self.max_total_threads);

        let mut remaining_threads = total_threads;

        {
            // Determine the number of IO threads we will use
            let io_threads = self
                .io
                .get_number_of_threads(remaining_threads, total_threads);

            remaining_threads = remaining_threads.saturating_sub(io_threads);

            IoTaskPool::get_or_init(|| {
                TaskPoolBuilder::default()
                    .num_threads(io_threads)
                    .thread_name("IO Task Pool".to_string())
                    .build()
            });
        }

        {
            // Determine the number of async compute threads we will use
            let async_compute_threads = self
                .async_compute
                .get_number_of_threads(remaining_threads, total_threads);

            remaining_threads = remaining_threads.saturating_sub(async_compute_threads);

            AsyncComputeTaskPool::get_or_init(|| {
                TaskPoolBuilder::default()
                    .num_threads(async_compute_threads)
                    .thread_name("Async Compute Task Pool".to_string())
                    .build()
            });
        }

        {
            // Determine the number of compute threads we will use
            // This is intentionally last so that an end user can specify 1.0 as the percent
            let compute_threads = self
                .compute
                .get_number_of_threads(remaining_threads, total_threads);

            ComputeTaskPool::get_or_init(|| {
                TaskPoolBuilder::default()
                    .num_threads(compute_threads)
                    .thread_name("Compute Task Pool".to_string())
                    .build()
            });
        }
    }
}

/// Defines a simple way to determine how many threads to use given the number
/// of remaining cores and number of total cores
#[derive(Clone)]
pub struct TaskPoolThreadAssignmentPolicy {
    /// Force using at least this many threads
    pub min_threads: usize,
    /// Under no circumstance use more than this many threads for this pool
    pub max_threads: usize,
    /// Target using this percentage of total cores, clamped by min_threads and
    /// max_threads.
    ///
    /// It is permitted to use 1.0 to try to use all remaining
    /// threads
    pub percent: f32,
}

impl TaskPoolThreadAssignmentPolicy {
    /// Determine the number of threads to use for this task pool
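    ///
    /// Roughly `round(total_threads * percent)`, capped at `remaining_threads`
    /// and then clamped to `min_threads..=max_threads`.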
    fn get_number_of_threads(&self, remaining_threads: usize, total_threads: usize) -> usize {
        assert!(self.percent >= 0.0);
        let mut desired = (total_threads as f32 * self.percent).round() as usize;

        // Limit ourselves to the number of cores available
        desired = desired.min(remaining_threads);

        // Clamp by min_threads and max_threads. (This may result in us using
        // more threads than are available; this is intended. An example case
        // where this might happen is a device with <= 2 threads.)
        desired.clamp(self.min_threads, self.max_threads)
    }
}