diff --git a/Cargo.toml b/Cargo.toml
index 5efa16115826e6..6afa867321b331 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -165,11 +165,19 @@ path = "examples/2d/text2d.rs"
 name = "texture_atlas"
 path = "examples/2d/texture_atlas.rs"
 
+[[example]]
+name = "mouse_tracking"
+path = "examples/2d/mouse_tracking.rs"
+
 # 3D Rendering
 [[example]]
 name = "3d_scene"
 path = "examples/3d/3d_scene.rs"
 
+[[example]]
+name = "screen_to_world"
+path = "examples/3d/screen_to_world.rs"
+
 [[example]]
 name = "lighting"
 path = "examples/3d/lighting.rs"
diff --git a/crates/bevy_render/src/camera/camera.rs b/crates/bevy_render/src/camera/camera.rs
index 07e748fe8211fe..b69632da8a47a5 100644
--- a/crates/bevy_render/src/camera/camera.rs
+++ b/crates/bevy_render/src/camera/camera.rs
@@ -1,6 +1,10 @@
 use crate::{
-    camera::CameraProjection, prelude::Image, render_asset::RenderAssets,
-    render_resource::TextureView, view::ExtractedWindows,
+    camera::CameraProjection,
+    prelude::Image,
+    primitives::{Line, Plane},
+    render_asset::RenderAssets,
+    render_resource::TextureView,
+    view::ExtractedWindows,
 };
 use bevy_asset::{AssetEvent, Assets, Handle};
 use bevy_ecs::{
@@ -12,7 +16,7 @@ use bevy_ecs::{
     reflect::ReflectComponent,
     system::{QuerySet, Res},
 };
-use bevy_math::{Mat4, UVec2, Vec2, Vec3};
+use bevy_math::{Mat4, UVec2, Vec2, Vec3, Vec4};
 use bevy_reflect::{Reflect, ReflectDeserialize};
 use bevy_transform::components::GlobalTransform;
 use bevy_utils::HashSet;
@@ -138,6 +142,74 @@ impl Camera {
             None
         }
     }
+
+    /// Given a position in screen space, compute the world-space line that corresponds to it.
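+    ///
+    /// A minimal usage sketch (a hypothetical system, not part of this patch; it assumes
+    /// a single camera and reads the latest cursor event):
+    ///
+    /// ```ignore
+    /// fn print_cursor_ray(
+    ///     q_camera: Query<(&Camera, &GlobalTransform)>,
+    ///     windows: Res<Windows>,
+    ///     images: Res<Assets<Image>>,
+    ///     mut evr_cursor: EventReader<CursorMoved>,
+    /// ) {
+    ///     let (camera, camera_transform) = q_camera.single();
+    ///     if let Some(cursor) = evr_cursor.iter().next() {
+    ///         let ray =
+    ///             camera.screen_to_world_ray(cursor.position, &windows, &images, camera_transform);
+    ///         println!("origin: {:?}, direction: {:?}", ray.point, ray.direction);
+    ///     }
+    /// }
+    /// ```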
+    pub fn screen_to_world_ray(
+        &self,
+        pos_screen: Vec2,
+        windows: &Windows,
+        images: &Assets<Image>,
+        camera_transform: &GlobalTransform,
+    ) -> Line {
+        let camera_matrix = camera_transform.compute_matrix();
+        let window_size = self
+            .target
+            .get_logical_size(windows, images)
+            .expect("render target should have a logical size");
+        let projection_matrix = self.projection_matrix;
+
+        // Normalized device coordinates: the cursor position mapped from (-1, -1, -1) to (1, 1, 1)
+        let cursor_ndc = (pos_screen / window_size) * 2.0 - Vec2::ONE;
+        let cursor_pos_ndc_near: Vec3 = cursor_ndc.extend(-1.0);
+        let cursor_pos_ndc_far: Vec3 = cursor_ndc.extend(1.0);
+
+        // Use the near and far NDC points to generate a ray in world space. This is more
+        // robust than using the camera's location as the start of the ray, because an
+        // orthographic camera has its focal point at infinity!
+        let ndc_to_world: Mat4 = camera_matrix * projection_matrix.inverse();
+        let cursor_pos_near: Vec3 = ndc_to_world.project_point3(cursor_pos_ndc_near);
+        let cursor_pos_far: Vec3 = ndc_to_world.project_point3(cursor_pos_ndc_far);
+        let ray_direction = cursor_pos_far - cursor_pos_near;
+        Line::from_point_direction(cursor_pos_near, ray_direction)
+    }
+
+    /// Given a position in screen space and a plane in world space, compute the world-space
+    /// point on that plane that the screen-space position corresponds to. Returns `None` if
+    /// the ray is parallel to the plane. In 2D, use `screen_to_point_2d`.
+    pub fn screen_to_point_on_plane(
+        &self,
+        pos_screen: Vec2,
+        plane: Plane,
+        windows: &Windows,
+        images: &Assets<Image>,
+        camera_transform: &GlobalTransform,
+    ) -> Option<Vec3> {
+        let world_ray = self.screen_to_world_ray(pos_screen, windows, images, camera_transform);
+        // The plane is stored as (normal, d), where a point p lies on the plane when
+        // normal.dot(p) + d == 0. Substituting the parametric ray p = point + t * direction
+        // and solving for t gives t = -(normal.dot(point) + d) / normal.dot(direction).
+        let denominator = plane.normal().dot(world_ray.direction);
+        if denominator.abs() < f32::EPSILON {
+            // The ray is parallel to the plane; there is no intersection.
+            None
+        } else {
+            let t = -plane.normal_d().dot(world_ray.point.extend(1.0)) / denominator;
+            Some(world_ray.point + world_ray.direction * t)
+        }
+    }
+
+    /// Computes the world position for a given screen position.
+    /// The output always lies on the XY plane, with Z at zero. This is designed for 2D, but
+    /// also works with a 3D camera. For more flexibility in 3D, consider
+    /// `screen_to_point_on_plane`.
+    pub fn screen_to_point_2d(
+        &self,
+        pos_screen: Vec2,
+        windows: &Windows,
+        images: &Assets<Image>,
+        camera_transform: &GlobalTransform,
+    ) -> Option<Vec3> {
+        self.screen_to_point_on_plane(
+            pos_screen,
+            // The XY plane: normal +Z, passing through the origin.
+            Plane::new(Vec4::new(0., 0., 1., 0.)),
+            windows,
+            images,
+            camera_transform,
+        )
+    }
 }
 
 #[allow(clippy::type_complexity)]
diff --git a/crates/bevy_render/src/primitives/mod.rs b/crates/bevy_render/src/primitives/mod.rs
index 161e8c24fd75ab..2b4f10e87f44c2 100644
--- a/crates/bevy_render/src/primitives/mod.rs
+++ b/crates/bevy_render/src/primitives/mod.rs
@@ -192,6 +192,18 @@ impl CubemapFrusta {
     }
 }
 
+/// A line in 3D space, defined by a point on it and the direction it extends along.
+#[derive(Clone, Copy, Debug, Default)]
+pub struct Line {
+    pub point: Vec3,
+    pub direction: Vec3,
+}
+
+impl Line {
+    pub fn from_point_direction(point: Vec3, direction: Vec3) -> Self {
+        Self { point, direction }
+    }
+}
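+
+// A convenience sketch (a hypothetical addition, not required by anything else in this
+// patch): evaluating the parametric form `point + t * direction` is the same operation
+// `Camera::screen_to_point_on_plane` performs once it has solved for `t`.
+impl Line {
+    /// Returns the point at parameter `t` along the line.
+    pub fn point_at(&self, t: f32) -> Vec3 {
+        self.point + self.direction * t
+    }
+}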
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/examples/2d/mouse_tracking.rs b/examples/2d/mouse_tracking.rs
new file mode 100644
index 00000000000000..7cdffc33ed66f3
--- /dev/null
+++ b/examples/2d/mouse_tracking.rs
@@ -0,0 +1,43 @@
+use bevy::{prelude::*, render::camera::Camera};
+
+fn main() {
+    App::new()
+        .add_plugins(DefaultPlugins)
+        .add_startup_system(setup)
+        .add_system(follow)
+        .run();
+}
+
+#[derive(Component)]
+struct Follow;
+
+fn setup(mut commands: Commands, asset_server: Res<AssetServer>) {
+    let texture_handle = asset_server.load("branding/icon.png");
+    commands.spawn_bundle(OrthographicCameraBundle::new_2d());
+    commands
+        .spawn_bundle(SpriteBundle {
+            texture: texture_handle,
+            ..Default::default()
+        })
+        .insert(Follow);
+}
+
+fn follow(
+    mut q: Query<&mut Transform, With<Follow>>,
+    q_camera: Query<(&Camera, &GlobalTransform)>,
+    windows: Res<Windows>,
+    images: Res<Assets<Image>>,
+    mut evr_cursor: EventReader<CursorMoved>,
+) {
+    let (camera, camera_transform) = q_camera.single();
+    if let Some(cursor) = evr_cursor.iter().next() {
+        for mut transform in q.iter_mut() {
+            let point: Option<Vec3> =
+                camera.screen_to_point_2d(cursor.position, &windows, &images, camera_transform);
+            println!("Point {:?}", point);
+            if let Some(point) = point {
+                transform.translation = point;
+            }
+        }
+    }
+}
diff --git a/examples/3d/screen_to_world.rs b/examples/3d/screen_to_world.rs
new file mode 100644
index 00000000000000..5da3b87bb88f5f
--- /dev/null
+++ b/examples/3d/screen_to_world.rs
@@ -0,0 +1,71 @@
+use bevy::{prelude::*, render::camera::Camera, render::primitives::Plane};
+
+fn main() {
+    App::new()
+        .insert_resource(Msaa { samples: 4 })
+        .add_plugins(DefaultPlugins)
+        .add_startup_system(setup)
+        .add_system(follow)
+        .run();
+}
+
+#[derive(Component)]
+struct Follow;
+
+/// set up a simple 3D scene
+fn setup(
+    mut commands: Commands,
+    mut meshes: ResMut<Assets<Mesh>>,
+    mut materials: ResMut<Assets<StandardMaterial>>,
+) {
+    // plane
+    commands.spawn_bundle(PbrBundle {
+        mesh: meshes.add(Mesh::from(shape::Plane { size: 5.0 })),
+        material: materials.add(Color::rgb(0.3, 0.5, 0.3).into()),
+        ..Default::default()
+    });
+    // cube
+    commands
+        .spawn_bundle(PbrBundle {
+            mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })),
+            material: materials.add(Color::rgb(0.8, 0.7, 0.6).into()),
+            transform: Transform::from_xyz(0.0, 0.5, 0.0),
+            ..Default::default()
+        })
+        .insert(Follow);
+    // light
+    commands.spawn_bundle(PointLightBundle {
+        transform: Transform::from_xyz(4.0, 8.0, 4.0),
+        ..Default::default()
+    });
+    // camera
+    commands.spawn_bundle(PerspectiveCameraBundle {
+        transform: Transform::from_xyz(-2.0, 2.5, 5.0).looking_at(Vec3::ZERO, Vec3::Y),
+        ..Default::default()
+    });
+}
+
+fn follow(
+    mut q: Query<&mut Transform, With<Follow>>,
+    q_camera: Query<(&Camera, &GlobalTransform)>,
+    windows: Res<Windows>,
+    images: Res<Assets<Image>>,
+    mut evr_cursor: EventReader<CursorMoved>,
+) {
+    // Assumes there is at least one camera
+    let (camera, camera_transform) = q_camera.iter().next().unwrap();
+    if let Some(cursor) = evr_cursor.iter().next() {
+        for mut transform in q.iter_mut() {
+            let point: Option<Vec3> = camera.screen_to_point_on_plane(
+                cursor.position,
+                // The ground plane: normal +Y, passing through the origin, matching the
+                // plane mesh spawned in `setup` and the cube's +0.5 offset below.
+                Plane::new(Vec4::new(0., 1., 0., 0.)),
+                &windows,
+                &images,
+                camera_transform,
+            );
+            if let Some(point) = point {
+                transform.translation = point + Vec3::new(0., 0.5, 0.);
+            }
+        }
+    }
+}
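
A quick numerical check of the ray-plane intersection formula used by `screen_to_point_on_plane` (a reviewer sketch that runs against `glam` alone; it is not part of the patch). A ray starting at (0, 0, 5) and pointing down -Z should hit the XY plane used by `screen_to_point_2d` exactly at the origin:

    use glam::{Vec3, Vec4};

    fn main() {
        let (point, direction) = (Vec3::new(0.0, 0.0, 5.0), Vec3::new(0.0, 0.0, -1.0));
        // The XY plane stored as (normal, d): normal +Z, d = 0.
        let normal_d = Vec4::new(0.0, 0.0, 1.0, 0.0);
        let normal = normal_d.truncate();
        // t = -(normal.dot(point) + d) / normal.dot(direction)
        let t = -normal_d.dot(point.extend(1.0)) / normal.dot(direction);
        assert_eq!(point + direction * t, Vec3::ZERO);
        println!("intersection at {:?}", point + direction * t);
    }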