diff --git a/crates/viewer/re_renderer/src/allocator/cpu_write_gpu_read_belt.rs b/crates/viewer/re_renderer/src/allocator/cpu_write_gpu_read_belt.rs
index cca47e9eb8dd..dd492ca0e8df 100644
--- a/crates/viewer/re_renderer/src/allocator/cpu_write_gpu_read_belt.rs
+++ b/crates/viewer/re_renderer/src/allocator/cpu_write_gpu_read_belt.rs
@@ -123,15 +123,31 @@ where
     #[inline]
     pub fn extend(
         &mut self,
-        mut elements: impl Iterator<Item = T>,
+        mut elements: impl ExactSizeIterator<Item = T>,
     ) -> Result<usize, CpuWriteGpuReadError> {
         re_tracing::profile_function!();
 
         // TODO(emilk): optimize the extend function.
         // Right now it is 3-4x faster to collect to a vec first, which is crazy.
-        if true {
-            let vec = elements.collect::<Vec<T>>();
-            self.extend_from_slice(&vec)?;
+        // Still, we use the slow path for single-elements, otherwise we hit some weird compiler bug,
+        // resulting us hitting a debug-assertion in `vec.as_slice()`.
+        // See https://github.com/rerun-io/rerun/pull/7563 for more
+        let i_want_to_crash_in_debug_builds = false;
+        if 1 < elements.len() || i_want_to_crash_in_debug_builds {
+            let vec: Vec<T> = elements.collect();
+
+            #[allow(clippy::dbg_macro)]
+            if i_want_to_crash_in_debug_builds {
+                dbg!(std::any::type_name::<T>());
+                dbg!(std::mem::size_of::<T>());
+                dbg!(std::mem::align_of::<T>());
+                dbg!(vec.len());
+                dbg!(vec.as_ptr());
+                dbg!(vec.as_ptr() as usize % std::mem::align_of::<T>());
+                dbg!(vec.as_slice().len());
+            }
+
+            self.extend_from_slice(vec.as_slice())?;
             Ok(vec.len())
         } else {
             let num_written_before = self.num_written();