// glib/bytes.rs

1// Take a look at the license at the top of the repository in the LICENSE file.
2
3use std::{
4    borrow::Borrow,
5    cmp::Ordering,
6    fmt,
7    hash::{Hash, Hasher},
8    mem,
9    ops::{Bound, Deref, RangeBounds},
10    slice,
11};
12
13use crate::{ffi, translate::*};
14
wrapper! {
    // rustdoc-stripper-ignore-next
    /// A shared immutable byte slice (the equivalent of `Rc<[u8]>`).
    ///
    /// `From` implementations that take references (e.g. `&[u8]`) copy the
    /// data. The `from_static` constructor avoids copying static data.
    ///
    /// ```
    /// use glib::Bytes;
    ///
    /// let v = vec![1, 2, 3];
    /// let b = Bytes::from(&v);
    /// assert_eq!(v, b);
    ///
    /// let s = b"xyz";
    /// let b = Bytes::from_static(s);
    /// assert_eq!(&s[..], b);
    /// ```
    // rustdoc-stripper-ignore-next-stop
    /// A simple reference counted data type representing an immutable sequence of
    /// zero or more bytes from an unspecified origin.
    ///
    /// The purpose of a `GBytes` is to keep the memory region that it holds
    /// alive for as long as anyone holds a reference to the bytes.  When
    /// the last reference count is dropped, the memory is released. Multiple
    /// unrelated callers can use byte data in the `GBytes` without coordinating
    /// their activities, resting assured that the byte data will not change or
    /// move while they hold a reference.
    ///
    /// A `GBytes` can come from many different origins that may have
    /// different procedures for freeing the memory region.  Examples are
    /// memory from `malloc()`, from memory slices, from a
    /// `GLib::MappedFile` or memory from other allocators.
    ///
    /// `GBytes` work well as keys in `GLib::HashTable`. Use
    /// `GLib::Bytes::equal()` and `GLib::Bytes::hash()` as parameters to
    /// `GLib::HashTable::new()` or `GLib::HashTable::new_full()`.
    /// `GBytes` can also be used as keys in a `GLib::Tree` by passing the
    /// `GLib::Bytes::compare()` function to `GLib::Tree::new()`.
    ///
    /// The data pointed to by this bytes must not be modified. For a mutable
    /// array of bytes see [`ByteArray`][crate::ByteArray]. Use
    /// [`unref_to_array()`][Self::unref_to_array()] to create a mutable array for a `GBytes`
    /// sequence. To create an immutable `GBytes` from a mutable
    /// [`ByteArray`][crate::ByteArray], use the [`ByteArray::free_to_bytes()`][crate::ByteArray::free_to_bytes()]
    /// function.
    #[doc(alias = "GBytes")]
    pub struct Bytes(Shared<ffi::GBytes>);

    // Wires the shared-pointer machinery of the `wrapper!` macro to the GLib
    // refcounting and GType entry points for `GBytes`.
    match fn {
        ref => |ptr| ffi::g_bytes_ref(ptr),
        unref => |ptr| ffi::g_bytes_unref(ptr),
        type_ => || ffi::g_bytes_get_type(),
    }
}
70
impl Bytes {
    // rustdoc-stripper-ignore-next
    /// Copies `data` into a new shared slice.
    // rustdoc-stripper-ignore-next-stop
    /// Creates a new [`Bytes`][crate::Bytes] from @data.
    ///
    /// @data is copied. If @size is 0, @data may be `NULL`.
    ///
    /// As an optimization, `GLib::Bytes::new()` may avoid an extra allocation by
    /// copying the data within the resulting bytes structure if sufficiently small
    /// (since GLib 2.84).
    /// ## `data`
    ///
    ///   the data to be used for the bytes
    ///
    /// # Returns
    ///
    /// a new [`Bytes`][crate::Bytes]
    #[doc(alias = "g_bytes_new")]
    #[inline]
    fn new<T: AsRef<[u8]>>(data: T) -> Bytes {
        let data = data.as_ref();
        // SAFETY: `g_bytes_new` copies `data.len()` bytes out of the pointer
        // before returning, so the borrow only needs to live for the call.
        unsafe { from_glib_full(ffi::g_bytes_new(data.as_ptr() as *const _, data.len())) }
    }

    // rustdoc-stripper-ignore-next
    /// Creates a view into static `data` without copying.
    #[doc(alias = "g_bytes_new_static")]
    #[inline]
    pub fn from_static(data: &'static [u8]) -> Bytes {
        // SAFETY: the `'static` bound guarantees the slice outlives any
        // `GBytes` referencing it, which is the contract of
        // `g_bytes_new_static` (no copy, no free function).
        unsafe {
            from_glib_full(ffi::g_bytes_new_static(
                data.as_ptr() as *const _,
                data.len(),
            ))
        }
    }

    // rustdoc-stripper-ignore-next
    /// Takes ownership of `data` and creates a new `Bytes` without copying.
    #[doc(alias = "g_bytes_new")]
    pub fn from_owned<T: AsRef<[u8]> + Send + 'static>(data: T) -> Bytes {
        // Box the value so the byte data lives at a stable heap address that
        // can be handed to C together with a destructor.
        let data: Box<T> = Box::new(data);
        // Capture pointer/length before `Box::into_raw` consumes the box; the
        // heap allocation itself does not move.
        let (size, data_ptr) = {
            let data = (*data).as_ref();
            (data.len(), data.as_ptr())
        };

        // Invoked by GLib once the last reference to the `GBytes` is dropped:
        // reconstitutes the box from the user-data pointer and drops it.
        unsafe extern "C" fn drop_box<T: AsRef<[u8]> + Send + 'static>(b: ffi::gpointer) {
            unsafe {
                let _: Box<T> = Box::from_raw(b as *mut _);
            }
        }

        // SAFETY: `data_ptr`/`size` point into the boxed value, which stays
        // alive (and at the same address) until GLib calls `drop_box` with the
        // raw box pointer passed as user data.
        unsafe {
            from_glib_full(ffi::g_bytes_new_with_free_func(
                data_ptr as *const _,
                size,
                Some(drop_box::<T>),
                Box::into_raw(data) as *mut _,
            ))
        }
    }

    // rustdoc-stripper-ignore-next
    /// Returns the underlying data of the `Bytes`.
    ///
    /// If there is no other reference to `self` then this does not copy the data, otherwise
    /// it is copied into newly allocated heap memory.
    #[doc(alias = "g_bytes_unref_to_data")]
    pub fn into_data(self) -> crate::collections::Slice<u8> {
        // SAFETY: `into_glib_ptr` transfers our reference to
        // `g_bytes_unref_to_data`, which consumes it and returns an owned
        // buffer plus its length (written through `size`); the returned
        // `Slice` takes ownership of that buffer.
        unsafe {
            let mut size = mem::MaybeUninit::uninit();
            let ret = ffi::g_bytes_unref_to_data(self.into_glib_ptr(), size.as_mut_ptr());
            crate::collections::Slice::from_glib_full_num(ret as *mut u8, size.assume_init())
        }
    }

    // rustdoc-stripper-ignore-next
    /// Resolves `range` against `self.len()` into an `(offset, size)` pair
    /// suitable for `g_bytes_new_from_bytes`.
    ///
    /// # Panics
    ///
    /// Panics if either bound lies beyond the end of the data, or if
    /// converting an exclusive/inclusive bound overflows `usize`.
    fn calculate_offset_size(&self, range: impl RangeBounds<usize>) -> (usize, usize) {
        let len = self.len();

        let start_offset = match range.start_bound() {
            Bound::Included(v) => *v,
            Bound::Excluded(v) => v.checked_add(1).expect("Invalid start offset"),
            Bound::Unbounded => 0,
        };
        assert!(start_offset <= len, "Start offset after valid range");

        let end_offset = match range.end_bound() {
            Bound::Included(v) => v.checked_add(1).expect("Invalid end offset"),
            Bound::Excluded(v) => *v,
            Bound::Unbounded => len,
        };
        assert!(end_offset <= len, "End offset after valid range");

        // An inverted range (start > end) yields size 0 here rather than
        // panicking like std slicing would; `saturating_sub` makes that explicit.
        let size = end_offset.saturating_sub(start_offset);

        (start_offset, size)
    }

    // rustdoc-stripper-ignore-next
    /// Creates a new `Bytes` that references the given `range` of `bytes`.
    // rustdoc-stripper-ignore-next-stop
    /// Creates a [`Bytes`][crate::Bytes] which is a subsection of another `GBytes`.
    ///
    /// The @offset + @length may not be longer than the size of @bytes.
    ///
    /// A reference to @bytes will be held by the newly created `GBytes` until
    /// the byte data is no longer needed.
    ///
    /// Since 2.56, if @offset is 0 and @length matches the size of @bytes, then
    /// @bytes will be returned with the reference count incremented by 1. If @bytes
    /// is a slice of another `GBytes`, then the resulting `GBytes` will reference
    /// the same `GBytes` instead of @bytes. This allows consumers to simplify the
    /// usage of `GBytes` when asynchronously writing to streams.
    /// ## `bytes`
    /// a [`Bytes`][crate::Bytes]
    /// ## `offset`
    /// offset which subsection starts at
    /// ## `length`
    /// length of subsection
    ///
    /// # Returns
    ///
    /// a new [`Bytes`][crate::Bytes]
    #[doc(alias = "g_bytes_new_from_bytes")]
    pub fn from_bytes(bytes: &Self, range: impl RangeBounds<usize>) -> Self {
        let (offset, size) = bytes.calculate_offset_size(range);
        // SAFETY: `calculate_offset_size` has validated that `offset + size`
        // stays within `bytes`, as required by `g_bytes_new_from_bytes`.
        unsafe {
            from_glib_full(ffi::g_bytes_new_from_bytes(
                bytes.to_glib_none().0,
                offset,
                size,
            ))
        }
    }
}
208
// SAFETY: `GBytes` data is immutable and the handle is shared purely through
// `g_bytes_ref`/`g_bytes_unref` (see the `wrapper!` block above). This assumes
// GLib's refcounting for `GBytes` is atomic — per the GLib documentation it
// is, so handles may be moved to and shared between threads.
unsafe impl Send for Bytes {}
unsafe impl Sync for Bytes {}
211
212impl<'a, T: ?Sized + Borrow<[u8]> + 'a> From<&'a T> for Bytes {
213    #[inline]
214    fn from(value: &'a T) -> Bytes {
215        Bytes::new(value.borrow())
216    }
217}
218
219impl fmt::Debug for Bytes {
220    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
221        f.debug_struct("Bytes")
222            .field("ptr", &ToGlibPtr::<*const _>::to_glib_none(self).0)
223            .field("data", &&self[..])
224            .finish()
225    }
226}
227
228impl AsRef<[u8]> for Bytes {
229    #[inline]
230    fn as_ref(&self) -> &[u8] {
231        self
232    }
233}
234
235impl Deref for Bytes {
236    type Target = [u8];
237
238    #[inline]
239    fn deref(&self) -> &[u8] {
240        unsafe {
241            let mut len = 0;
242            let ptr = ffi::g_bytes_get_data(self.to_glib_none().0, &mut len);
243            if ptr.is_null() || len == 0 {
244                &[]
245            } else {
246                slice::from_raw_parts(ptr as *const u8, len)
247            }
248        }
249    }
250}
251
252impl PartialEq for Bytes {
253    #[doc(alias = "g_bytes_equal")]
254    #[inline]
255    fn eq(&self, other: &Self) -> bool {
256        unsafe {
257            from_glib(ffi::g_bytes_equal(
258                ToGlibPtr::<*const _>::to_glib_none(self).0 as *const _,
259                ToGlibPtr::<*const _>::to_glib_none(other).0 as *const _,
260            ))
261        }
262    }
263}
264
265impl Eq for Bytes {}
266
267impl PartialOrd for Bytes {
268    #[inline]
269    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
270        Some(self.cmp(other))
271    }
272}
273
274impl Ord for Bytes {
275    #[inline]
276    fn cmp(&self, other: &Self) -> Ordering {
277        unsafe {
278            let ret = ffi::g_bytes_compare(
279                ToGlibPtr::<*const _>::to_glib_none(self).0 as *const _,
280                ToGlibPtr::<*const _>::to_glib_none(other).0 as *const _,
281            );
282            ret.cmp(&0)
283        }
284    }
285}
286
// Generates the four symmetric comparison impls (`PartialEq`/`PartialOrd`,
// both directions) between a `Bytes`-like type and a plain byte container.
// All of them delegate to slice comparison through `Deref`/indexing.
macro_rules! impl_cmp {
    ($lhs:ty, $rhs: ty) => {
        #[allow(clippy::redundant_slicing)]
        #[allow(clippy::extra_unused_lifetimes)]
        impl<'a, 'b> PartialEq<$rhs> for $lhs {
            #[inline]
            fn eq(&self, other: &$rhs) -> bool {
                self[..].eq(&other[..])
            }
        }

        #[allow(clippy::redundant_slicing)]
        #[allow(clippy::extra_unused_lifetimes)]
        impl<'a, 'b> PartialEq<$lhs> for $rhs {
            #[inline]
            fn eq(&self, other: &$lhs) -> bool {
                self[..].eq(&other[..])
            }
        }

        #[allow(clippy::redundant_slicing)]
        #[allow(clippy::extra_unused_lifetimes)]
        impl<'a, 'b> PartialOrd<$rhs> for $lhs {
            #[inline]
            fn partial_cmp(&self, other: &$rhs) -> Option<Ordering> {
                self[..].partial_cmp(&other[..])
            }
        }

        #[allow(clippy::redundant_slicing)]
        #[allow(clippy::extra_unused_lifetimes)]
        impl<'a, 'b> PartialOrd<$lhs> for $rhs {
            #[inline]
            fn partial_cmp(&self, other: &$lhs) -> Option<Ordering> {
                self[..].partial_cmp(&other[..])
            }
        }
    };
}

// Allow comparing `Bytes` (and `&Bytes`) directly against the common
// byte-container types without converting first.
impl_cmp!(Bytes, [u8]);
impl_cmp!(Bytes, &'a [u8]);
impl_cmp!(&'a Bytes, [u8]);
impl_cmp!(Bytes, Vec<u8>);
impl_cmp!(&'a Bytes, Vec<u8>);
332
333impl Hash for Bytes {
334    #[inline]
335    fn hash<H: Hasher>(&self, state: &mut H) {
336        self.len().hash(state);
337        Hash::hash_slice(self, state)
338    }
339}
340
#[cfg(test)]
mod tests {
    use std::collections::HashSet;

    use super::*;

    #[test]
    fn eq() {
        let abc: &[u8] = b"abc";
        let def: &[u8] = b"def";
        let left = Bytes::from(abc);
        let right = Bytes::from(abc);
        let other = Bytes::from(def);
        // Equality works between two `Bytes` and across `Bytes`/slice.
        assert_eq!(left, right);
        assert_eq!(def, other);
        assert_ne!(left, other);
        assert_ne!(left, def);
    }

    #[test]
    fn ord() {
        let abc: &[u8] = b"abc";
        let def: &[u8] = b"def";
        let lower = Bytes::from(abc);
        let upper = Bytes::from(def);
        // Ordering is lexicographic and symmetric between `Bytes` and slices.
        assert!(lower < upper);
        assert!(lower < def);
        assert!(abc < upper);
        assert!(upper > lower);
        assert!(upper > abc);
        assert!(def > lower);
    }

    #[test]
    fn hash() {
        let original = Bytes::from(b"this is a test");
        let same_content = Bytes::from(b"this is a test");
        let different = Bytes::from(b"test");
        let mut set = HashSet::new();
        set.insert(original);
        // Hashing agrees with equality: equal contents are found in the set.
        assert!(set.contains(&same_content));
        assert!(!set.contains(&different));
    }

    #[test]
    fn from_static() {
        let borrowed = Bytes::from_static(b"this is a test");
        let copied = Bytes::from(b"this is a test");
        assert_eq!(borrowed, copied);
    }

    #[test]
    fn from_owned() {
        let bytes = Bytes::from_owned(vec![1, 2, 3]);
        assert_eq!(bytes, [1u8, 2u8, 3u8].as_ref());
    }

    #[test]
    fn from_bytes() {
        let full = Bytes::from_owned(vec![1, 2, 3]);
        // Inclusive range.
        let sub = Bytes::from_bytes(&full, 1..=1);
        assert_eq!(sub, [2u8].as_ref());
        // Open-ended range.
        let sub = Bytes::from_bytes(&full, 1..);
        assert_eq!(sub, [2u8, 3u8].as_ref());
        // Half-open range from the start.
        let sub = Bytes::from_bytes(&full, ..2);
        assert_eq!(sub, [1u8, 2u8].as_ref());
        // Unbounded range covers everything.
        let sub = Bytes::from_bytes(&full, ..);
        assert_eq!(sub, [1u8, 2u8, 3u8].as_ref());
        // Range starting at the end yields an empty slice.
        let sub = Bytes::from_bytes(&full, 3..);
        assert_eq!(sub, [].as_ref());
    }

    #[test]
    pub fn into_data() {
        let bytes = Bytes::from(b"this is a test");
        let data = bytes.into_data();
        assert_eq!(data.as_slice(), b"this is a test");
    }
}
419}