Adding Screen to world function
omarbassam88 committed Mar 10, 2022
1 parent ec5d0ef commit 47a93ca
Showing 5 changed files with 209 additions and 3 deletions.
8 changes: 8 additions & 0 deletions Cargo.toml
@@ -165,11 +165,19 @@ path = "examples/2d/text2d.rs"
name = "texture_atlas"
path = "examples/2d/texture_atlas.rs"

[[example]]
name = "mouse_tracking"
path = "examples/2d/mouse_tracking.rs"

# 3D Rendering
[[example]]
name = "3d_scene"
path = "examples/3d/3d_scene.rs"

[[example]]
name = "screen_to_world"
path = "examples/3d/screen_to_world.rs"

[[example]]
name = "lighting"
path = "examples/3d/lighting.rs"
78 changes: 75 additions & 3 deletions crates/bevy_render/src/camera/camera.rs
@@ -1,6 +1,10 @@
use crate::{
    camera::CameraProjection, prelude::Image, render_asset::RenderAssets,
    render_resource::TextureView, view::ExtractedWindows,
    camera::CameraProjection,
    prelude::Image,
    primitives::{Line, Plane},
    render_asset::RenderAssets,
    render_resource::TextureView,
    view::ExtractedWindows,
};
use bevy_asset::{AssetEvent, Assets, Handle};
use bevy_ecs::{
@@ -12,7 +16,7 @@ use bevy_ecs::{
    reflect::ReflectComponent,
    system::{QuerySet, Res},
};
use bevy_math::{Mat4, UVec2, Vec2, Vec3};
use bevy_math::{Mat4, UVec2, Vec2, Vec3, Vec4};
use bevy_reflect::{Reflect, ReflectDeserialize};
use bevy_transform::components::GlobalTransform;
use bevy_utils::HashSet;
@@ -138,6 +142,74 @@ impl Camera {
            None
        }
    }

    /// Given a position in screen space, compute the world-space line that corresponds to it.
    pub fn screen_to_world_ray(
        &self,
        pos_screen: Vec2,
        windows: &Windows,
        images: &Assets<Image>,
        camera_transform: &GlobalTransform,
    ) -> Line {
        let camera_matrix = camera_transform.compute_matrix();
        let window_size = self.target.get_logical_size(windows, images).unwrap();
        let projection_matrix = self.projection_matrix;

        // Normalized device coordinate cursor position, from (-1, -1) to (1, 1)
        let cursor_ndc = (pos_screen / window_size) * 2.0 - Vec2::ONE;
        let cursor_pos_ndc_near: Vec3 = cursor_ndc.extend(-1.0);
        let cursor_pos_ndc_far: Vec3 = cursor_ndc.extend(1.0);

        // Use the near and far NDC points to generate a ray in world space.
        // This method is more robust than using the location of the camera as the start of
        // the ray, because ortho cameras have a focal point at infinity!
        let ndc_to_world: Mat4 = camera_matrix * projection_matrix.inverse();
        let cursor_pos_near: Vec3 = ndc_to_world.project_point3(cursor_pos_ndc_near);
        let cursor_pos_far: Vec3 = ndc_to_world.project_point3(cursor_pos_ndc_far);
        // Note that the direction is not normalized: it spans from the near point to the far point.
        let ray_direction = cursor_pos_far - cursor_pos_near;
        Line::from_point_direction(cursor_pos_near, ray_direction)
    }
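
The screen-to-NDC step above rescales window coordinates into the [-1, 1] range before un-projecting through the camera and projection matrices. A minimal sanity check of that mapping, assuming a hypothetical 1280x720 window (values chosen only for illustration):

use bevy_math::Vec2;

#[test]
fn window_center_maps_to_ndc_origin() {
    // Hypothetical window size and cursor position, chosen so the expected result is obvious.
    let window_size = Vec2::new(1280.0, 720.0);
    let pos_screen = Vec2::new(640.0, 360.0); // center of the window
    let cursor_ndc = (pos_screen / window_size) * 2.0 - Vec2::ONE;
    assert_eq!(cursor_ndc, Vec2::ZERO); // the window center lands at the middle of NDC space
}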

    /// Given a position in screen space and a plane in world space, compute the world-space point
    /// on that plane which the screen-space position corresponds to.
    /// In 2D, use `screen_to_point_2d`.
    pub fn screen_to_point_on_plane(
        &self,
        pos_screen: Vec2,
        plane: Plane,
        windows: &Windows,
        images: &Assets<Image>,
        camera_transform: &GlobalTransform,
    ) -> Option<Vec3> {
        let world_ray = self.screen_to_world_ray(pos_screen, windows, images, camera_transform);
        // If the ray direction is perpendicular to the plane normal, the ray is parallel
        // to the plane and there is no intersection.
        let denominator = world_ray.direction.dot(plane.normal());
        if denominator == 0. {
            None
        } else {
            // Solve `normal . (point + dist * direction) + d = 0` for `dist`, then walk that far
            // along the ray. `normal_d()` packs the plane as (normal, d).
            let signed_distance = plane.normal_d().dot(world_ray.point.extend(1.0));
            let dist = -signed_distance / denominator;
            Some(world_ray.point + world_ray.direction * dist)
        }
    }
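
The intersection above solves `normal . (point + dist * direction) + d = 0` for `dist`. A small worked check with hand-pickable numbers, sketched with plain `Vec3` math rather than the `Plane` type:

use bevy_math::Vec3;

#[test]
fn ray_straight_down_hits_ground_plane_at_origin() {
    // Plane y = 0: normal (0, 1, 0), d = 0. Ray starts at (0, 5, 0) and points straight down.
    let (normal, d) = (Vec3::Y, 0.0_f32);
    let (point, direction) = (Vec3::new(0.0, 5.0, 0.0), Vec3::new(0.0, -1.0, 0.0));

    let denominator = direction.dot(normal); // -1.0, so the ray is not parallel to the plane
    let dist = -(point.dot(normal) + d) / denominator; // -(5.0 + 0.0) / -1.0 = 5.0
    assert_eq!(point + direction * dist, Vec3::ZERO); // five units along the ray is the origin
}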

    /// Computes the world position for a given screen position.
    /// The output will always be on the XY plane with Z at zero. It is designed for 2D, but also works with a 3D camera.
    /// For more flexibility in 3D, consider `screen_to_point_on_plane`.
    pub fn screen_to_point_2d(
        &self,
        pos_screen: Vec2,
        windows: &Windows,
        images: &Assets<Image>,
        camera_transform: &GlobalTransform,
    ) -> Option<Vec3> {
        self.screen_to_point_on_plane(
            pos_screen,
            Plane::new(Vec4::new(0., 0., 1., 0.)),
            windows,
            images,
            camera_transform,
        )
    }
}

#[allow(clippy::type_complexity)]
12 changes: 12 additions & 0 deletions crates/bevy_render/src/primitives/mod.rs
@@ -192,6 +192,18 @@ impl CubemapFrusta {
    }
}

/// An infinite line in 3D space, defined by a point on the line and its direction.
/// The direction is not required to be normalized.
#[derive(Clone, Copy, Debug, Default)]
pub struct Line {
    pub point: Vec3,
    pub direction: Vec3,
}

impl Line {
    pub fn from_point_direction(point: Vec3, direction: Vec3) -> Self {
        Self { point, direction }
    }
}
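
Any point on a `Line` can be written as `point + t * direction` for a scalar `t`; a minimal illustration with hypothetical values, assuming `Line` is in scope:

use bevy_math::Vec3;

#[test]
fn points_along_a_line() {
    let ray = Line::from_point_direction(Vec3::ZERO, Vec3::X);
    // Two units along the (unnormalized) direction from the line's base point.
    assert_eq!(ray.point + 2.0 * ray.direction, Vec3::new(2.0, 0.0, 0.0));
}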

#[cfg(test)]
mod tests {
    use super::*;
43 changes: 43 additions & 0 deletions examples/2d/mouse_tracking.rs
@@ -0,0 +1,43 @@
use bevy::{prelude::*, render::camera::Camera};

fn main() {
    App::new()
        .add_plugins(DefaultPlugins)
        .add_startup_system(setup)
        .add_system(follow)
        .run();
}

#[derive(Component)]
struct Follow;

fn setup(mut commands: Commands, asset_server: Res<AssetServer>) {
    let texture_handle = asset_server.load("branding/icon.png");
    commands.spawn_bundle(OrthographicCameraBundle::new_2d());
    commands
        .spawn_bundle(SpriteBundle {
            texture: texture_handle,
            ..Default::default()
        })
        .insert(Follow);
}

fn follow(
    mut q: Query<&mut Transform, With<Follow>>,
    q_camera: Query<(&Camera, &GlobalTransform)>,
    windows: Res<Windows>,
    images: Res<Assets<Image>>,
    mut evr_cursor: EventReader<CursorMoved>,
) {
    let (camera, camera_transform) = q_camera.single();
    if let Some(cursor) = evr_cursor.iter().next() {
        for mut transform in q.iter_mut() {
            let point: Option<Vec3> =
                camera.screen_to_point_2d(cursor.position, &windows, &images, camera_transform);
            println!("Point {:?}", point);
            if let Some(point) = point {
                transform.translation = point;
            }
        }
    }
}
71 changes: 71 additions & 0 deletions examples/3d/screen_to_world.rs
@@ -0,0 +1,71 @@
use bevy::{prelude::*, render::camera::Camera, render::primitives::Plane};

fn main() {
    App::new()
        .insert_resource(Msaa { samples: 4 })
        .add_plugins(DefaultPlugins)
        .add_startup_system(setup)
        .add_system(follow)
        .run();
}

#[derive(Component)]
struct Follow;

/// set up a simple 3D scene
fn setup(
    mut commands: Commands,
    mut meshes: ResMut<Assets<Mesh>>,
    mut materials: ResMut<Assets<StandardMaterial>>,
) {
    // plane
    commands.spawn_bundle(PbrBundle {
        mesh: meshes.add(Mesh::from(shape::Plane { size: 5.0 })),
        material: materials.add(Color::rgb(0.3, 0.5, 0.3).into()),
        ..Default::default()
    });
    // cube
    commands
        .spawn_bundle(PbrBundle {
            mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })),
            material: materials.add(Color::rgb(0.8, 0.7, 0.6).into()),
            transform: Transform::from_xyz(0.0, 0.5, 0.0),
            ..Default::default()
        })
        .insert(Follow);
    // light
    commands.spawn_bundle(PointLightBundle {
        transform: Transform::from_xyz(4.0, 8.0, 4.0),
        ..Default::default()
    });
    // camera
    commands.spawn_bundle(PerspectiveCameraBundle {
        transform: Transform::from_xyz(-2.0, 2.5, 5.0).looking_at(Vec3::ZERO, Vec3::Y),
        ..Default::default()
    });
}

fn follow(
    mut q: Query<&mut Transform, With<Follow>>,
    q_camera: Query<(&Camera, &GlobalTransform)>,
    windows: Res<Windows>,
    images: Res<Assets<Image>>,
    mut evr_cursor: EventReader<CursorMoved>,
) {
    // Assumes there is at least one camera
    let (camera, camera_transform) = q_camera.iter().next().unwrap();
    if let Some(cursor) = evr_cursor.iter().next() {
        for mut transform in q.iter_mut() {
            let point: Option<Vec3> = camera.screen_to_point_on_plane(
                cursor.position,
                Plane::new(Vec4::new(0., 1., 0., 1.)),
                &windows,
                &images,
                camera_transform,
            );
            if let Some(point) = point {
                transform.translation = point + Vec3::new(0., 0.5, 0.);
            }
        }
    }
}
