use ringkernel_core::error::{Result, RingKernelError};
use ringkernel_core::memory::GpuBuffer;
use std::sync::Arc;
6
/// Host-memory buffer that stands in for a GPU allocation on the CPU backend.
///
/// Clones share the same storage via `Arc`, so a `CpuBuffer` is a cheap,
/// aliasing handle rather than an independent copy.
pub struct CpuBuffer {
    /// Lock-protected backing bytes, shared between clones.
    data: Arc<parking_lot::RwLock<Vec<u8>>>,
    /// Buffer capacity in bytes, fixed at construction.
    size: usize,
}
14
15impl CpuBuffer {
16 pub fn new(size: usize) -> Self {
18 Self {
19 data: Arc::new(parking_lot::RwLock::new(vec![0u8; size])),
20 size,
21 }
22 }
23
24 pub fn from_data(data: Vec<u8>) -> Self {
26 let size = data.len();
27 Self {
28 data: Arc::new(parking_lot::RwLock::new(data)),
29 size,
30 }
31 }
32
33 pub fn read(&self) -> parking_lot::RwLockReadGuard<'_, Vec<u8>> {
35 self.data.read()
36 }
37
38 pub fn write(&self) -> parking_lot::RwLockWriteGuard<'_, Vec<u8>> {
40 self.data.write()
41 }
42}
43
44impl GpuBuffer for CpuBuffer {
45 fn size(&self) -> usize {
46 self.size
47 }
48
49 fn device_ptr(&self) -> usize {
50 self.data.read().as_ptr() as usize
52 }
53
54 fn copy_from_host(&self, data: &[u8]) -> Result<()> {
55 if data.len() > self.size {
56 return Err(RingKernelError::TransferFailed(format!(
57 "Source ({}) larger than buffer ({})",
58 data.len(),
59 self.size
60 )));
61 }
62
63 let mut buf = self.data.write();
64 buf[..data.len()].copy_from_slice(data);
65 Ok(())
66 }
67
68 fn copy_to_host(&self, data: &mut [u8]) -> Result<()> {
69 let buf = self.data.read();
70 let len = data.len().min(buf.len());
71 data[..len].copy_from_slice(&buf[..len]);
72 Ok(())
73 }
74}
75
76impl Clone for CpuBuffer {
77 fn clone(&self) -> Self {
78 Self {
79 data: Arc::clone(&self.data),
80 size: self.size,
81 }
82 }
83}
84
/// Host-memory "device" allocator used by the CPU backend to emulate a
/// fixed GPU memory budget.
#[allow(dead_code)]
pub struct CpuDeviceMemory {
    /// Total bytes this allocator may hand out.
    total: usize,
    /// Bytes currently reserved; updated atomically from `allocate`.
    /// NOTE(review): never decremented — dropped buffers do not return their
    /// bytes to the pool; confirm this is intended.
    allocated: std::sync::atomic::AtomicUsize,
}
93
94impl CpuDeviceMemory {
95 #[allow(dead_code)]
97 pub fn new(total_memory: usize) -> Self {
98 Self {
99 total: total_memory,
100 allocated: std::sync::atomic::AtomicUsize::new(0),
101 }
102 }
103}
104
105impl ringkernel_core::memory::DeviceMemory for CpuDeviceMemory {
106 fn allocate(&self, size: usize) -> Result<Box<dyn GpuBuffer>> {
107 let current = self.allocated.load(std::sync::atomic::Ordering::Relaxed);
108 if current + size > self.total {
109 return Err(RingKernelError::OutOfMemory {
110 requested: size,
111 available: self.total - current,
112 });
113 }
114 self.allocated
115 .fetch_add(size, std::sync::atomic::Ordering::Relaxed);
116 Ok(Box::new(CpuBuffer::new(size)))
117 }
118
119 fn allocate_aligned(&self, size: usize, alignment: usize) -> Result<Box<dyn GpuBuffer>> {
120 let aligned_size = ringkernel_core::memory::align::align_up(size, alignment);
121 self.allocate(aligned_size)
122 }
123
124 fn total_memory(&self) -> usize {
125 self.total
126 }
127
128 fn free_memory(&self) -> usize {
129 let allocated = self.allocated.load(std::sync::atomic::Ordering::Relaxed);
130 self.total.saturating_sub(allocated)
131 }
132}
133
#[cfg(test)]
mod tests {
    use super::*;

    /// Round-trips a small payload through a CPU buffer.
    #[test]
    fn test_cpu_buffer() {
        let buffer = CpuBuffer::new(1024);
        assert_eq!(buffer.size(), 1024);

        let payload = vec![1u8, 2, 3, 4, 5];
        buffer.copy_from_host(&payload).unwrap();

        let mut readback = vec![0u8; 5];
        buffer.copy_to_host(&mut readback).unwrap();
        assert_eq!(readback, payload);
    }

    /// Checks budget accounting after a single allocation.
    #[test]
    fn test_cpu_device_memory() {
        use ringkernel_core::memory::DeviceMemory;

        const TOTAL: usize = 1024 * 1024;
        let mem = CpuDeviceMemory::new(TOTAL);
        assert_eq!(mem.total_memory(), TOTAL);

        let buf = mem.allocate(1024).unwrap();
        assert_eq!(buf.size(), 1024);
        assert_eq!(mem.free_memory(), TOTAL - 1024);
    }
}