use std::sync::Arc;

use ash::vk;
use gpu_allocator::vulkan::{Allocation, AllocationCreateDesc, AllocationScheme};
use gpu_allocator::MemoryLocation;
use parking_lot::Mutex;

use crate::*;

static DOWNLOAD_NOT_ALLOWED_ERROR: &str = "Download from the GPU buffer is not allowed";
static UPLOAD_NOT_ALLOWED_ERROR: &str = "Upload to the GPU buffer is not allowed";

/// Buffer is a GPU resource that represents a linear memory region.
pub struct Buffer {
    /// Device that owns the buffer.
    device: Arc<Device>,

    /// Vulkan buffer handle.
    vk_buffer: vk::Buffer,

    /// Buffer type. It defines how the buffer can be used.
    buffer_type: BufferType,

    /// Buffer size in bytes.
    size: usize,

    /// GPU memory allocation that backs the buffer.
    allocation: Mutex<Allocation>,
}

/// Buffer type defines how the buffer can be used.
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum BufferType {
    /// Uniform data for a shader.
    Uniform,

    /// Storage buffer can be used as a large read/write buffer.
    Storage,

    /// Host-visible staging buffer for uploads from the CPU. On the GPU side it
    /// can be used only as a source for transfer operations.
    CpuToGpu,

    /// Host-visible readback buffer for downloads to the CPU. On the GPU side it
    /// can be used only as a destination for transfer operations.
    GpuToCpu,
}

// Mark `Buffer` as a GPU resource that should be kept alive while it's in use by the GPU context.
impl Resource for Buffer {}

impl Buffer {
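    /// Create a new GPU buffer of the given type and size (in bytes).
    ///
    /// A minimal usage sketch, assuming an `Arc<Device>` named `device` is already
    /// available from the surrounding GPU context (device setup is not shown here):
    ///
    /// ```ignore
    /// // Host-visible staging buffer the CPU can upload into.
    /// let staging = Buffer::new(device.clone(), "staging", BufferType::CpuToGpu, 1024)?;
    /// // GPU-only storage buffer for shader read/write access.
    /// let storage = Buffer::new(device.clone(), "storage", BufferType::Storage, 1024)?;
    /// ```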
    pub fn new(
        device: Arc<Device>,   // Device that owns the buffer.
        name: impl AsRef<str>, // Name of the buffer for tracking and debugging purposes.
        buffer_type: BufferType,
        size: usize,
    ) -> GpuResult<Arc<Self>> {
        if size == 0 {
            return Err(GpuError::NotSupported(
                "Zero-sized GPU buffers are not supported".to_string(),
            ));
        }

        // The Vulkan API requires buffer usage flags to be specified at buffer creation time.
        let vk_usage_flags = match buffer_type {
            BufferType::Uniform => {
                vk::BufferUsageFlags::UNIFORM_BUFFER // Mark as a uniform buffer.
                    | vk::BufferUsageFlags::TRANSFER_DST // For uploading.
                    | vk::BufferUsageFlags::TRANSFER_SRC // For downloading.
            }
            BufferType::Storage => {
                vk::BufferUsageFlags::STORAGE_BUFFER // Mark as a storage buffer.
                    | vk::BufferUsageFlags::TRANSFER_DST // For uploading.
                    | vk::BufferUsageFlags::TRANSFER_SRC // For downloading.
            }
            // CpuToGpu buffer can be used as a source for transfer operations only.
            BufferType::CpuToGpu => vk::BufferUsageFlags::TRANSFER_SRC,
            // GpuToCpu buffer can be used as a destination for transfer operations only.
            BufferType::GpuToCpu => vk::BufferUsageFlags::TRANSFER_DST,
        };

        // Memory location depends on the buffer type.
        let location = match buffer_type {
            // Allocate Uniform/Storage buffers in GPU memory only.
            BufferType::Uniform => MemoryLocation::GpuOnly,
            BufferType::Storage => MemoryLocation::GpuOnly,
            // Transfer buffers will be visible to both CPU and GPU.
            BufferType::CpuToGpu => MemoryLocation::CpuToGpu,
            BufferType::GpuToCpu => MemoryLocation::GpuToCpu,
        };

        // Create a Vulkan buffer.
        let vk_create_buffer_info = vk::BufferCreateInfo::default()
            .size(size as vk::DeviceSize)
            .usage(vk_usage_flags)
            .sharing_mode(vk::SharingMode::EXCLUSIVE);
        let vk_buffer = unsafe {
            device
                .vk_device()
                .create_buffer(&vk_create_buffer_info, device.cpu_allocation_callbacks())
                .map_err(GpuError::from)? // Propagate the Vulkan error instead of panicking.
        };

        // Allocate memory for the buffer.
        let buffer_allocation_requirements =
            unsafe { device.vk_device().get_buffer_memory_requirements(vk_buffer) };
        let allocation_result = device.allocate(&AllocationCreateDesc {
            name: name.as_ref(),
            requirements: buffer_allocation_requirements,
            location,
            linear: true, // Buffers are always linear.
            allocation_scheme: AllocationScheme::GpuAllocatorManaged,
        });

        // Check if the allocation was successful.
        let allocation = match allocation_result {
            Ok(allocation) => allocation,
            Err(e) => {
                unsafe {
                    // Because the Vulkan buffer's lifetime is managed manually,
                    // we must destroy it if the allocation fails.
                    device
                        .vk_device()
                        .destroy_buffer(vk_buffer, device.cpu_allocation_callbacks());
                }
                return Err(e);
            }
        };

        // Bind the buffer to the allocated memory.
        let bind_result = unsafe {
            device.vk_device().bind_buffer_memory(
                vk_buffer,
                allocation.memory(),
                allocation.offset(),
            )
        };
        if let Err(e) = bind_result {
            // Free the allocated memory in case of an error.
            device.free(allocation);
            unsafe {
                // Destroy the buffer.
                device
                    .vk_device()
                    .destroy_buffer(vk_buffer, device.cpu_allocation_callbacks());
            }
            return Err(GpuError::from(e));
        }

        Ok(Arc::new(Self {
            device,
            vk_buffer,
            buffer_type,
            size,
            allocation: Mutex::new(allocation),
        }))
    }

    pub fn size(&self) -> usize {
        self.size
    }

    pub fn vk_buffer(&self) -> vk::Buffer {
        self.vk_buffer
    }

    pub fn buffer_type(&self) -> BufferType {
        self.buffer_type
    }

    /// Download a single value from the buffer into RAM.
    /// Only allowed for `BufferType::GpuToCpu` buffers.
    pub fn download<T: Sized>(&self, data: &mut T, offset: usize) -> GpuResult<()> {
        if self.buffer_type != BufferType::GpuToCpu {
            return Err(GpuError::Other(DOWNLOAD_NOT_ALLOWED_ERROR.to_string()));
        }

        let bytes = memory::mmap_ops::transmute_to_u8_mut(data);
        self.download_bytes(bytes, offset)
    }

    /// Download a slice of values from the buffer into RAM.
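    ///
    /// A minimal sketch, assuming `readback` is a `BufferType::GpuToCpu` buffer that
    /// the GPU has already written into (e.g. via a transfer command):
    ///
    /// ```ignore
    /// let mut out = [0u32; 4];
    /// readback.download_slice(&mut out[..], 0)?;
    /// ```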
    pub fn download_slice<T: Sized>(&self, data: &mut [T], offset: usize) -> GpuResult<()> {
        if self.buffer_type != BufferType::GpuToCpu {
            return Err(GpuError::Other(DOWNLOAD_NOT_ALLOWED_ERROR.to_string()));
        }

        let bytes = memory::mmap_ops::transmute_to_u8_slice_mut(data);
        self.download_bytes(bytes, offset)
    }

    /// Download raw bytes from the buffer into RAM.
    pub fn download_bytes(&self, data: &mut [u8], offset: usize) -> GpuResult<()> {
        if self.buffer_type != BufferType::GpuToCpu {
            return Err(GpuError::Other(DOWNLOAD_NOT_ALLOWED_ERROR.to_string()));
        }

        if data.len() + offset > self.size {
            return Err(GpuError::OutOfBounds(
                "Out of bounds while downloading from GPU".to_string(),
            ));
        }

        let allocation = self.allocation.lock();
        if let Some(slice) = allocation.mapped_slice() {
            data.copy_from_slice(&slice[offset..offset + data.len()]);
            Ok(())
        } else {
            Err(GpuError::Other(DOWNLOAD_NOT_ALLOWED_ERROR.to_string()))
        }
    }

    /// Upload a single value from RAM to the buffer.
    /// Only allowed for `BufferType::CpuToGpu` buffers.
    pub fn upload<T: Sized>(&self, data: &T, offset: usize) -> GpuResult<()> {
        if self.buffer_type != BufferType::CpuToGpu {
            return Err(GpuError::Other(UPLOAD_NOT_ALLOWED_ERROR.to_string()));
        }

        let bytes = memory::mmap_ops::transmute_to_u8(data);
        self.upload_bytes(bytes, offset)
    }

    /// Upload a slice of values from RAM to the buffer.
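    ///
    /// A minimal sketch, assuming `staging` is a `BufferType::CpuToGpu` buffer with
    /// enough space for the data:
    ///
    /// ```ignore
    /// let data: [u32; 4] = [1, 2, 3, 4];
    /// staging.upload_slice(&data[..], 0)?;
    /// ```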
    pub fn upload_slice<T: Sized>(&self, data: &[T], offset: usize) -> GpuResult<()> {
        if self.buffer_type != BufferType::CpuToGpu {
            return Err(GpuError::Other(UPLOAD_NOT_ALLOWED_ERROR.to_string()));
        }

        let bytes = memory::mmap_ops::transmute_to_u8_slice(data);
        self.upload_bytes(bytes, offset)
    }

    /// Upload raw bytes from RAM to the buffer.
    pub fn upload_bytes(&self, data: &[u8], offset: usize) -> GpuResult<()> {
        if self.buffer_type != BufferType::CpuToGpu {
            return Err(GpuError::Other(UPLOAD_NOT_ALLOWED_ERROR.to_string()));
        }

        if data.len() + offset > self.size {
            return Err(GpuError::OutOfBounds(
                "Out of bounds while uploading to GPU".to_string(),
            ));
        }

        let mut allocation = self.allocation.lock();
        if let Some(slice) = allocation.mapped_slice_mut() {
            slice[offset..offset + data.len()].copy_from_slice(data);
            Ok(())
        } else {
            Err(GpuError::Other(UPLOAD_NOT_ALLOWED_ERROR.to_string()))
        }
    }
}

impl Drop for Buffer {
    fn drop(&mut self) {
        if self.vk_buffer != vk::Buffer::null() {
            // Drop the allocation and free the allocated memory.
            let mut allocation = Mutex::new(Allocation::default());
            std::mem::swap(&mut allocation, &mut self.allocation);
            let allocation = allocation.into_inner();
            self.device.free(allocation);

            // Destroy the buffer.
            unsafe {
                self.device
                    .vk_device()
                    .destroy_buffer(self.vk_buffer, self.device.cpu_allocation_callbacks())
            };
            self.vk_buffer = vk::Buffer::null();
        }
    }
}