use {
    crate::{
        pretty_wrappers::PrettySize, AllocationRequirements, AllocatorError,
        DeviceMemory,
    },
    ash::vk,
};

#[derive(Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Debug, Hash)]
pub(crate) struct AllocationId {
    memory: vk::DeviceMemory,
    offset_in_bytes: vk::DeviceSize,
}

/// A GPU memory allocation.
#[derive(Clone)]
pub struct Allocation {
    parent: Option<AllocationId>,
    device_memory: DeviceMemory,
    offset_in_bytes: vk::DeviceSize,
    size_in_bytes: vk::DeviceSize,
    memory_type_index: usize,
    allocation_requirements: AllocationRequirements,
}

// Public API
// ----------

impl Allocation {
    /// The underlying Vulkan memory handle.
    ///
    /// # Safety
    ///
    /// Unsafe because the allocation logically owns the device memory. It is
    /// incorrect to free the memory by any means other than to return the
    /// full allocation instance to the memory allocator.
    pub unsafe fn memory(&self) -> vk::DeviceMemory {
        self.device_memory.memory()
    }

    /// The offset where this allocation begins in device memory.
    ///
    /// This is needed because some memory allocator implementations will
    /// subdivide big regions of GPU memory into smaller allocations. Therefore
    /// the actual device memory handle can be shared by many allocations.
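    ///
    /// For example, binding a buffer to this allocation must pass the offset
    /// through to Vulkan. This is a minimal sketch; the `device` and `buffer`
    /// values are assumed to exist and are not part of this crate's API.
    ///
    /// ```ignore
    /// unsafe {
    ///     // Bind the buffer at this allocation's offset within the shared
    ///     // device memory handle.
    ///     device.bind_buffer_memory(
    ///         buffer,
    ///         allocation.memory(),
    ///         allocation.offset_in_bytes(),
    ///     )?;
    /// }
    /// ```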
    pub fn offset_in_bytes(&self) -> vk::DeviceSize {
        self.offset_in_bytes
    }

    /// The size of the allocation in bytes.
    pub fn size_in_bytes(&self) -> vk::DeviceSize {
        self.size_in_bytes
    }

    /// The allocation requirements used when acquiring the device memory.
    pub fn allocation_requirements(&self) -> &AllocationRequirements {
        &self.allocation_requirements
    }

    /// Map the allocation into application address space.
    ///
    /// # Safety
    ///
    /// Unsafe because:
    /// - The application must synchronize access to the underlying device
    ///   memory. All previously submitted GPU commands which write to the
    ///   memory owned by this allocation must be finished before the host reads
    ///   or writes from the mapped pointer.
    /// - Synchronization requirements vary depending on the HOST_COHERENT
    ///   memory property. See the Vulkan spec for details.
    ///
    /// For details, see the specification:
    /// <https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/vkMapMemory.html>
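    ///
    /// # Example
    ///
    /// A minimal sketch of writing through a mapped allocation. The
    /// `allocation` and `device` values are assumed to exist, and the memory
    /// is assumed to be HOST_VISIBLE and HOST_COHERENT; real code must also
    /// obey the synchronization rules above.
    ///
    /// ```ignore
    /// let data: [f32; 4] = [1.0, 2.0, 3.0, 4.0];
    /// unsafe {
    ///     // Map the allocation and copy the bytes into it.
    ///     let ptr = allocation.map(&device)? as *mut f32;
    ///     ptr.copy_from_nonoverlapping(data.as_ptr(), data.len());
    ///     // Release the mapping once the host is done with it.
    ///     allocation.unmap(&device)?;
    /// }
    /// ```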
    pub unsafe fn map(
        &self,
        device: &ash::Device,
    ) -> Result<*mut std::ffi::c_void, AllocatorError> {
        // Get the pointer to the start of the mapped device memory.
        let base_ptr = self.device_memory.map(device)?;
        let base_ptr_address = base_ptr as usize;

        // Compute the address for this allocation within the mapping.
        let with_offset = base_ptr_address + self.offset_in_bytes() as usize;

        Ok(with_offset as *mut std::ffi::c_void)
    }

    /// Unmap the allocation.
    ///
    /// # Safety
    ///
    /// Unsafe because:
    /// - The pointer returned by map() must not be used after the call to
    ///   unmap().
    /// - The application must synchronize all host access to the allocation.
    pub unsafe fn unmap(
        &self,
        device: &ash::Device,
    ) -> Result<(), AllocatorError> {
        self.device_memory.unmap(device)
    }
}

impl std::fmt::Debug for Allocation {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Allocation")
            .field("device_memory", &self.device_memory)
            .field("offset_in_bytes", &PrettySize(self.offset_in_bytes))
            .field("size_in_bytes", &PrettySize(self.size_in_bytes))
            .field("allocation_requirements", &self.allocation_requirements)
            .finish()
    }
}

impl std::fmt::Display for Allocation {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!("{:#?}", self))
    }
}

// Private API
// -----------

impl Allocation {
    /// Create a new memory allocation.
    pub(crate) fn new(
        device_memory: DeviceMemory,
        memory_type_index: usize,
        offset_in_bytes: vk::DeviceSize,
        size_in_bytes: vk::DeviceSize,
        allocation_requirements: AllocationRequirements,
    ) -> Self {
        Self {
            parent: None,
            device_memory,
            memory_type_index,
            offset_in_bytes,
            size_in_bytes,
            allocation_requirements,
        }
    }

    /// A unique ID for non-overlapping allocations.
    ///
    /// # Safety
    ///
    /// Unsafe because:
    ///   - IDs may not be unique if there is a bug in a memory allocator.
    pub(crate) unsafe fn id(&self) -> AllocationId {
        AllocationId {
            memory: self.memory(),
            offset_in_bytes: self.offset_in_bytes(),
        }
    }

    /// Returns the Allocation ID for the allocation's parent.
    ///
    /// # Safety
    ///
    /// Unsafe because:
    ///   - There are no lifetime guarantees. The parent may no longer exist
    ///     even if this function returns Some.
    pub(crate) unsafe fn parent_id(&self) -> Option<AllocationId> {
        self.parent
    }

    /// Create an allocation which refers to the same underlying device memory.
    ///
    /// # Params
    ///
    /// * `allocation`: The original memory allocation which will be subdivided.
    /// * `offset`: The offset relative to the original allocation's offset.
    /// * `size_in_bytes`: The size of the suballocation.
    /// * `offset_alignment`: The required alignment for the suballocation's
    ///   absolute offset within the device memory.
    ///
    /// # Safety
    ///
    /// Unsafe because:
    /// * This constructor only checks that the suballocation fits within the
    ///   original allocation. There is nothing to prevent aliasing. The caller
    ///   must have their own strategy for tracking the original allocation's
    ///   usage and synchronizing access.
    /// * Freeing the device memory invalidates every Allocation that still
    ///   refers to it. The caller must have some strategy for tracking
    ///   suballocations and ensuring they're all cleaned up before allowing
    ///   the original allocation to be freed.
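    ///
    /// # Example
    ///
    /// A minimal sketch that carves two non-overlapping regions out of one
    /// parent allocation. The `parent` value and the 256-byte sizes and
    /// alignment are assumptions for illustration (the parent's own offset is
    /// assumed to be 256-byte aligned); the caller remains responsible for
    /// tracking both suballocations as described above.
    ///
    /// ```ignore
    /// let first = unsafe { Allocation::suballocate(&parent, 0, 256, 256) };
    /// let second = unsafe { Allocation::suballocate(&parent, 256, 256, 256) };
    ///
    /// // Distinct, non-overlapping suballocations get distinct IDs.
    /// assert_ne!(unsafe { first.id() }, unsafe { second.id() });
    /// ```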
    pub(crate) unsafe fn suballocate(
        allocation: &Allocation,
        offset: vk::DeviceSize,
        size_in_bytes: vk::DeviceSize,
        offset_alignment: u64,
    ) -> Self {
        let full_offset = allocation.offset_in_bytes() + offset;
        assert!(
            full_offset + size_in_bytes
                <= allocation.offset_in_bytes() + allocation.size_in_bytes(),
            "Attempted to suballocate outside of an allocation's bounds!"
        );
        assert!(
            full_offset % offset_alignment == 0,
            "Attempted to suballocate with invalid alignment!"
        );
        Self {
            parent: Some(allocation.id()),
            device_memory: allocation.device_memory.clone(),
            offset_in_bytes: full_offset,
            size_in_bytes,
            memory_type_index: allocation.memory_type_index(),
            allocation_requirements: AllocationRequirements {
                size_in_bytes,
                alignment: offset_alignment,
                ..allocation.allocation_requirements
            },
        }
    }

    /// The index for the memory type used to allocate this chunk of memory.
    pub(crate) fn memory_type_index(&self) -> usize {
        self.memory_type_index
    }
}