use glfw;
use na::{Point2, Point3, Point4, Vector2, Vector3, Matrix4, Isometry3};
use na;
use resource::ShaderUniform;

/// Trait every camera must implement.
pub trait Camera {
    /*
     * Event handling.
     */
    /// Handle an event from the window (mouse, keyboard, etc.).
    fn handle_event(&mut self, _window: &glfw::Window, _event: &glfw::WindowEvent);

    /*
     * Transformation-related methods.
     */
    /// The camera position.
    fn eye(&self) -> Point3<f32>; // FIXME: should this be here?
    /// The camera view transform.
    fn view_transform(&self) -> Isometry3<f32>;
    /// The transformation applied by the camera to transform a point in world coordinates to
    /// a point in device coordinates.
    fn transformation(&self) -> Matrix4<f32>;
    /// The transformation applied by the camera to transform a point in device coordinates to a
    /// point in world coordinates.
    fn inverse_transformation(&self) -> Matrix4<f32>;
    /// The clipping planes, i.e., (`znear`, `zfar`).
    fn clip_planes(&self) -> (f32, f32); // FIXME: should this be here?

    /*
     * Update & upload
     */
    // FIXME: don't use glfw::Window
    /// Update the camera. This is called once at the beginning of the render loop.
    fn update(&mut self, window: &glfw::Window);

    /// Upload the camera transformation to the GPU. This may be called multiple times during the
    /// render loop.
    #[inline]
    fn upload(&self, _pass: usize, uniform: &mut ShaderUniform<Matrix4<f32>>) {
        uniform.upload(&self.transformation());
    }

    /// The number of passes required by this camera.
    #[inline]
    fn num_passes(&self) -> usize { 1usize }

    /// Indicates that a pass will begin.
    #[inline]
    fn start_pass(&self, _pass: usize, _window: &glfw::Window) { }

    /// Indicates that the scene has been rendered and post-processing is being run.
    #[inline]
    fn render_complete(&self, _window: &glfw::Window) { }

    /// Converts a 3d point to 2d screen coordinates, assuming the screen has the size `size`.
    /// The returned coordinates have their origin at the lower-left corner of the screen.
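    ///
    /// A usage sketch (hypothetical: `cam` is any `Camera` implementor and the window
    /// measures 800x600 pixels):
    ///
    /// ```ignore
    /// let screen = cam.project(&Point3::origin(), &Vector2::new(800.0, 600.0));
    /// ```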
    fn project(&self, world_coord: &Point3<f32>, size: &Vector2<f32>) -> Vector2<f32> {
        // Apply the world-to-device transformation in homogeneous coordinates.
        let h_world_coord      = world_coord.to_homogeneous();
        let h_normalized_coord = self.transformation() * h_world_coord;

        // Perform the perspective division to obtain normalized device coordinates.
        let normalized_coord = Point3::from_homogeneous(h_normalized_coord).unwrap();

        // Map the [-1, 1] normalized range to [0, size] screen coordinates.
        Vector2::new(
            (1.0 + normalized_coord.x) * size.x / 2.0,
            (1.0 + normalized_coord.y) * size.y / 2.0)
    }

    /// Converts a point in 2d screen coordinates to a ray (a 3d position and a direction).
    ///
    /// The screen is assumed to have a size given by `size`.
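    ///
    /// A usage sketch for mouse picking (hypothetical: `cam` is any `Camera` implementor,
    /// `mouse_x` and `mouse_y` are cursor coordinates in pixels):
    ///
    /// ```ignore
    /// let size          = Vector2::new(800.0, 600.0);
    /// let (origin, dir) = cam.unproject(&Point2::new(mouse_x, mouse_y), &size);
    /// // `origin` lies on the near plane; `dir` is a unit vector pointing into the scene.
    /// ```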
    fn unproject(&self, window_coord: &Point2<f32>, size: &Vector2<f32>) -> (Point3<f32>, Vector3<f32>) {
        // Convert the window coordinates (origin at the top-left corner, y pointing
        // down) to normalized device coordinates in [-1, 1].
        let normalized_coord = Point2::new(
            2.0 * window_coord.x  / size.x - 1.0,
            2.0 * -window_coord.y / size.y + 1.0);

        // The same pixel, once on the near plane (z = -1) and once on the far plane (z = 1).
        let normalized_begin = Point4::new(normalized_coord.x, normalized_coord.y, -1.0, 1.0);
        let normalized_end   = Point4::new(normalized_coord.x, normalized_coord.y, 1.0, 1.0);

        let cam = self.inverse_transformation();

        // Transform both points back to world coordinates.
        let h_unprojected_begin = cam * normalized_begin;
        let h_unprojected_end   = cam * normalized_end;

        let unprojected_begin = Point3::from_homogeneous(h_unprojected_begin.coords).unwrap();
        let unprojected_end   = Point3::from_homogeneous(h_unprojected_end.coords).unwrap();

        // The ray starts on the near plane and points toward the far plane.
        (unprojected_begin, na::normalize(&(unprojected_end - unprojected_begin)))
    }
}
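
/// A minimal sketch of a `Camera` implementor, for illustration only: a static
/// camera looking at the origin from a fixed eye position. `FixedCamera`, its
/// field names, and the hard-coded projection parameters are assumptions, not
/// part of this crate; the sketch also assumes a nalgebra version providing
/// `Isometry3::look_at_rh` and `Matrix4::new_perspective`.
pub struct FixedCamera {
    eye:  Point3<f32>,
    view: Isometry3<f32>,
    proj: Matrix4<f32>,
}

impl FixedCamera {
    /// Creates a camera at `eye` looking at the origin, with a 45° vertical
    /// field of view and arbitrary example clip planes.
    pub fn new(eye: Point3<f32>) -> FixedCamera {
        let view = Isometry3::look_at_rh(&eye, &Point3::origin(), &Vector3::y());
        let proj = Matrix4::new_perspective(800.0 / 600.0, 45.0f32.to_radians(), 0.1, 1024.0);

        FixedCamera { eye: eye, view: view, proj: proj }
    }
}

impl Camera for FixedCamera {
    // This camera ignores all input.
    fn handle_event(&mut self, _window: &glfw::Window, _event: &glfw::WindowEvent) { }

    fn eye(&self) -> Point3<f32> {
        self.eye
    }

    fn view_transform(&self) -> Isometry3<f32> {
        self.view
    }

    fn transformation(&self) -> Matrix4<f32> {
        // Compose the projection and the view into one world-to-device matrix.
        self.proj * self.view.to_homogeneous()
    }

    fn inverse_transformation(&self) -> Matrix4<f32> {
        self.transformation().try_inverse().unwrap()
    }

    fn clip_planes(&self) -> (f32, f32) {
        (0.1, 1024.0)
    }

    // Nothing to do: the camera never moves.
    fn update(&mut self, _window: &glfw::Window) { }
}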