azalea_client/task_pool.rs
//! Borrowed from `bevy_core`.

use std::marker::PhantomData;

use bevy_app::{App, Last, Plugin};
use bevy_ecs::system::{NonSend, Resource};
use bevy_tasks::{
    tick_global_task_pools_on_main_thread, AsyncComputeTaskPool, ComputeTaskPool, IoTaskPool,
    TaskPoolBuilder,
};

/// Setup of default task pools: `AsyncComputeTaskPool`, `ComputeTaskPool`,
/// `IoTaskPool`.
#[derive(Default)]
pub struct TaskPoolPlugin {
    /// Options for the [`TaskPool`](bevy_tasks::TaskPool) created at
    /// application start.
    pub task_pool_options: TaskPoolOptions,
}

impl Plugin for TaskPoolPlugin {
    fn build(&self, app: &mut App) {
        // Set up the default bevy task pools
        self.task_pool_options.create_default_pools();

        #[cfg(not(target_arch = "wasm32"))]
        app.add_systems(Last, tick_global_task_pools);
    }
}
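
// A minimal sketch of registering this plugin on a bevy `App`; azalea's client
// setup typically handles this internally, so end users normally don't call it
// themselves. The exact method name depends on the bevy version in use
// (`add_plugins` on recent versions, `add_plugin` on older ones):
//
// App::new()
//     .add_plugins(TaskPoolPlugin::default());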

/// A marker type used as a `NonSend` system parameter, forcing the system that
/// takes it to run on the main thread.
pub struct NonSendMarker(PhantomData<*mut ()>);

/// A system that ticks the global task pools on the main thread; it runs
/// during [`Last`].
#[cfg(not(target_arch = "wasm32"))]
fn tick_global_task_pools(_main_thread_marker: Option<NonSend<NonSendMarker>>) {
    tick_global_task_pools_on_main_thread();
}

/// Helper for configuring and creating the default task pools. For end-users
/// who want full control, set up [`TaskPoolPlugin`].
#[derive(Clone, Resource)]
pub struct TaskPoolOptions {
    /// If the number of physical cores is less than `min_total_threads`,
    /// force using `min_total_threads`.
    pub min_total_threads: usize,
    /// If the number of physical cores is greater than `max_total_threads`,
    /// force using `max_total_threads`.
    pub max_total_threads: usize,

    /// Used to determine the number of IO threads to allocate.
    pub io: TaskPoolThreadAssignmentPolicy,
    /// Used to determine the number of async compute threads to allocate.
    pub async_compute: TaskPoolThreadAssignmentPolicy,
    /// Used to determine the number of compute threads to allocate.
    pub compute: TaskPoolThreadAssignmentPolicy,
}

impl Default for TaskPoolOptions {
    fn default() -> Self {
        TaskPoolOptions {
            // By default, use however many cores are available on the system
            min_total_threads: 1,
            max_total_threads: usize::MAX,

            // Use 25% of cores for IO, at least 1, no more than 4
            io: TaskPoolThreadAssignmentPolicy {
                min_threads: 1,
                max_threads: 4,
                percent: 0.25,
            },

            // Use 25% of cores for async compute, at least 1, no more than 4
            async_compute: TaskPoolThreadAssignmentPolicy {
                min_threads: 1,
                max_threads: 4,
                percent: 0.25,
            },

            // Use all remaining cores for compute (at least 1)
            compute: TaskPoolThreadAssignmentPolicy {
                min_threads: 1,
                max_threads: usize::MAX,
                percent: 1.0, // This 1.0 here means "whatever is left over"
            },
        }
    }
}
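
// A hypothetical example of overriding the defaults above, e.g. to cap the
// total thread count for a lightweight bot (the value 4 here is purely
// illustrative):
//
// let options = TaskPoolOptions {
//     max_total_threads: 4,
//     ..Default::default()
// };
// let plugin = TaskPoolPlugin {
//     task_pool_options: options,
// };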

impl TaskPoolOptions {
    // /// Create a configuration that forces using the given number of threads.
    // pub fn with_num_threads(thread_count: usize) -> Self {
    //     TaskPoolOptions {
    //         min_total_threads: thread_count,
    //         max_total_threads: thread_count,
    //         ..Default::default()
    //     }
    // }

    /// Initializes the default task pools (`IoTaskPool`, `AsyncComputeTaskPool`,
    /// `ComputeTaskPool`) based on the configured values.
    pub fn create_default_pools(&self) {
        let total_threads = bevy_tasks::available_parallelism()
            .clamp(self.min_total_threads, self.max_total_threads);

        let mut remaining_threads = total_threads;

        {
            // Determine the number of IO threads we will use
            let io_threads = self
                .io
                .get_number_of_threads(remaining_threads, total_threads);

            remaining_threads = remaining_threads.saturating_sub(io_threads);

            IoTaskPool::get_or_init(|| {
                TaskPoolBuilder::default()
                    .num_threads(io_threads)
                    .thread_name("IO Task Pool".to_string())
                    .build()
            });
        }

        {
            // Determine the number of async compute threads we will use
            let async_compute_threads = self
                .async_compute
                .get_number_of_threads(remaining_threads, total_threads);

            remaining_threads = remaining_threads.saturating_sub(async_compute_threads);

            AsyncComputeTaskPool::get_or_init(|| {
                TaskPoolBuilder::default()
                    .num_threads(async_compute_threads)
                    .thread_name("Async Compute Task Pool".to_string())
                    .build()
            });
        }

        {
            // Determine the number of compute threads we will use
            // This is intentionally last so that an end user can specify 1.0 as the percent
            let compute_threads = self
                .compute
                .get_number_of_threads(remaining_threads, total_threads);

            ComputeTaskPool::get_or_init(|| {
                TaskPoolBuilder::default()
                    .num_threads(compute_threads)
                    .thread_name("Compute Task Pool".to_string())
                    .build()
            });
        }
    }
}
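
// Once the pools above have been initialized, they can be used from anywhere
// through their global accessors. A minimal usage sketch (the async block is
// just a placeholder):
//
// let task = IoTaskPool::get().spawn(async {
//     // ... some IO-bound work ...
// });
// task.detach();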

/// Defines a simple way to determine how many threads to use given the number
/// of remaining cores and number of total cores.
#[derive(Clone)]
pub struct TaskPoolThreadAssignmentPolicy {
    /// Force using at least this many threads
    pub min_threads: usize,
    /// Under no circumstance use more than this many threads for this pool
    pub max_threads: usize,
    /// Target using this percentage of total cores, clamped by `min_threads`
    /// and `max_threads`. It is permitted to use 1.0 to try to use all
    /// remaining threads.
    pub percent: f32,
}

impl TaskPoolThreadAssignmentPolicy {
    /// Determine the number of threads to use for this task pool
    fn get_number_of_threads(&self, remaining_threads: usize, total_threads: usize) -> usize {
        assert!(self.percent >= 0.0);
        let mut desired = (total_threads as f32 * self.percent).round() as usize;

        // Limit ourselves to the number of cores available
        desired = desired.min(remaining_threads);

        // Clamp by min_threads and max_threads. (This may result in us using
        // more threads than are available; that is intended. An example case
        // where this might happen is a device with <= 2 threads.)
        desired.clamp(self.min_threads, self.max_threads)
    }
}
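
// A minimal sketch of a test for the thread-assignment math above, checking
// the default policies against a hypothetical 8-thread machine (the module and
// test names are illustrative additions, not part of the original file):
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn default_policies_split_eight_threads() {
        let options = TaskPoolOptions::default();
        let total = 8;
        let mut remaining = total;

        // IO: round(8 * 0.25) = 2, clamped to [1, 4] -> 2
        let io = options.io.get_number_of_threads(remaining, total);
        assert_eq!(io, 2);
        remaining -= io;

        // Async compute: round(8 * 0.25) = 2, clamped to [1, 4] -> 2
        let async_compute = options
            .async_compute
            .get_number_of_threads(remaining, total);
        assert_eq!(async_compute, 2);
        remaining -= async_compute;

        // Compute: round(8 * 1.0) = 8, limited to the 4 remaining threads
        let compute = options.compute.get_number_of_threads(remaining, total);
        assert_eq!(compute, 4);
    }
}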