glib/bytes.rs

// Take a look at the license at the top of the repository in the LICENSE file.

use std::{
    borrow::Borrow,
    cmp::Ordering,
    fmt,
    hash::{Hash, Hasher},
    mem,
    ops::{Bound, Deref, RangeBounds},
    slice,
};

use crate::{ffi, translate::*};

wrapper! {
    // rustdoc-stripper-ignore-next
    /// A shared immutable byte slice (the equivalent of `Rc<[u8]>`).
    ///
    /// `From` implementations that take references (e.g. `&[u8]`) copy the
    /// data. The `from_static` constructor avoids copying static data.
    ///
    /// ```
    /// use glib::Bytes;
    ///
    /// let v = vec![1, 2, 3];
    /// let b = Bytes::from(&v);
    /// assert_eq!(v, b);
    ///
    /// let s = b"xyz";
    /// let b = Bytes::from_static(s);
    /// assert_eq!(&s[..], b);
    /// ```
    // rustdoc-stripper-ignore-next-stop
    /// A simple reference counted data type representing an immutable sequence of
    /// zero or more bytes from an unspecified origin.
    ///
    /// The purpose of a `GBytes` is to keep the memory region that it holds
    /// alive for as long as anyone holds a reference to the bytes.  When
    /// the last reference count is dropped, the memory is released. Multiple
    /// unrelated callers can use byte data in the `GBytes` without coordinating
    /// their activities, resting assured that the byte data will not change or
    /// move while they hold a reference.
    ///
    /// A `GBytes` can come from many different origins that may have
    /// different procedures for freeing the memory region.  Examples are
    /// memory from `malloc()`, from memory slices, from a
    /// `GLib::MappedFile` or memory from other allocators.
    ///
    /// `GBytes` work well as keys in `GLib::HashTable`. Use
    /// `GLib::Bytes::equal()` and `GLib::Bytes::hash()` as parameters to
    /// `GLib::HashTable::new()` or `GLib::HashTable::new_full()`.
    /// `GBytes` can also be used as keys in a `GLib::Tree` by passing the
    /// `GLib::Bytes::compare()` function to `GLib::Tree::new()`.
    ///
    /// The data pointed to by this bytes must not be modified. For a mutable
    /// array of bytes see [`ByteArray`][crate::ByteArray]. Use
    /// [`unref_to_array()`][Self::unref_to_array()] to create a mutable array for a `GBytes`
    /// sequence. To create an immutable `GBytes` from a mutable
    /// [`ByteArray`][crate::ByteArray], use the [`ByteArray::free_to_bytes()`][crate::ByteArray::free_to_bytes()]
    /// function.
    #[doc(alias = "GBytes")]
    pub struct Bytes(Shared<ffi::GBytes>);

    match fn {
        ref => |ptr| ffi::g_bytes_ref(ptr),
        unref => |ptr| ffi::g_bytes_unref(ptr),
        type_ => || ffi::g_bytes_get_type(),
    }
}

impl Bytes {
    // rustdoc-stripper-ignore-next
    /// Copies `data` into a new shared slice.
    // rustdoc-stripper-ignore-next-stop
    /// Creates a new [`Bytes`][crate::Bytes] from @data.
    ///
    /// @data is copied. If @size is 0, @data may be `NULL`.
    ///
    /// As an optimization, `GLib::Bytes::new()` may avoid an extra allocation by
    /// copying the data within the resulting bytes structure if sufficiently small
    /// (since GLib 2.84).
    /// ## `data`
    ///
    ///   the data to be used for the bytes
    ///
    /// # Returns
    ///
    /// a new [`Bytes`][crate::Bytes]
    #[doc(alias = "g_bytes_new")]
    #[inline]
    fn new<T: AsRef<[u8]>>(data: T) -> Bytes {
        let data = data.as_ref();
        unsafe { from_glib_full(ffi::g_bytes_new(data.as_ptr() as *const _, data.len())) }
    }

    // rustdoc-stripper-ignore-next
    /// Creates a view into static `data` without copying.
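    ///
    /// Unlike the `From` implementations, the data is not copied. For example:
    ///
    /// ```
    /// use glib::Bytes;
    ///
    /// static PAYLOAD: &[u8] = b"some static data";
    /// let b = Bytes::from_static(PAYLOAD);
    /// assert_eq!(b, PAYLOAD);
    /// ```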
    #[doc(alias = "g_bytes_new_static")]
    #[inline]
    pub fn from_static(data: &'static [u8]) -> Bytes {
        unsafe {
            from_glib_full(ffi::g_bytes_new_static(
                data.as_ptr() as *const _,
                data.len(),
            ))
        }
    }

    // rustdoc-stripper-ignore-next
    /// Takes ownership of `data` and creates a new `Bytes` without copying.
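    ///
    /// The value is kept alive by the returned `Bytes` and dropped once the last
    /// reference is gone. For example:
    ///
    /// ```
    /// use glib::Bytes;
    ///
    /// let v = vec![1u8, 2, 3];
    /// let b = Bytes::from_owned(v);
    /// assert_eq!(b, [1u8, 2, 3].as_ref());
    /// ```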
    #[doc(alias = "g_bytes_new")]
    pub fn from_owned<T: AsRef<[u8]> + Send + 'static>(data: T) -> Bytes {
        let data: Box<T> = Box::new(data);
        let (size, data_ptr) = {
            let data = (*data).as_ref();
            (data.len(), data.as_ptr())
        };

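        // Free function passed to `g_bytes_new_with_free_func()`: it reconstructs
        // the `Box<T>` and drops it once the byte data is no longer needed.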
        unsafe extern "C" fn drop_box<T: AsRef<[u8]> + Send + 'static>(b: ffi::gpointer) {
            let _: Box<T> = Box::from_raw(b as *mut _);
        }

        unsafe {
            from_glib_full(ffi::g_bytes_new_with_free_func(
                data_ptr as *const _,
                size,
                Some(drop_box::<T>),
                Box::into_raw(data) as *mut _,
            ))
        }
    }

    // rustdoc-stripper-ignore-next
    /// Returns the underlying data of the `Bytes`.
    ///
    /// If there is no other reference to `self` then this does not copy the data, otherwise
    /// it is copied into newly allocated heap memory.
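    ///
    /// Example (whether the data is copied depends on the remaining references):
    ///
    /// ```
    /// use glib::Bytes;
    ///
    /// let b = Bytes::from_owned(vec![1u8, 2, 3]);
    /// let data = b.into_data();
    /// assert_eq!(data.as_slice(), &[1u8, 2, 3]);
    /// ```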
    #[doc(alias = "g_bytes_unref_to_data")]
    pub fn into_data(self) -> crate::collections::Slice<u8> {
        unsafe {
            let mut size = mem::MaybeUninit::uninit();
            let ret = ffi::g_bytes_unref_to_data(self.into_glib_ptr(), size.as_mut_ptr());
            crate::collections::Slice::from_glib_full_num(ret as *mut u8, size.assume_init())
        }
    }

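    // Converts a `RangeBounds` over `self` into a validated `(offset, size)` pair,
    // panicking if either bound lies outside of the byte sequence.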
    fn calculate_offset_size(&self, range: impl RangeBounds<usize>) -> (usize, usize) {
        let len = self.len();

        let start_offset = match range.start_bound() {
            Bound::Included(v) => *v,
            Bound::Excluded(v) => v.checked_add(1).expect("Invalid start offset"),
            Bound::Unbounded => 0,
        };
        assert!(start_offset <= len, "Start offset after valid range");

        let end_offset = match range.end_bound() {
            Bound::Included(v) => v.checked_add(1).expect("Invalid end offset"),
            Bound::Excluded(v) => *v,
            Bound::Unbounded => len,
        };
        assert!(end_offset <= len, "End offset after valid range");

        let size = end_offset.saturating_sub(start_offset);

        (start_offset, size)
    }

    // rustdoc-stripper-ignore-next
    /// Creates a new `Bytes` that references the given `range` of `bytes`.
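    ///
    /// Panics if the range lies outside of the bounds of `bytes`. For example:
    ///
    /// ```
    /// use glib::Bytes;
    ///
    /// let b = Bytes::from_owned(vec![1u8, 2, 3]);
    /// let sub = Bytes::from_bytes(&b, 1..);
    /// assert_eq!(sub, [2u8, 3].as_ref());
    /// ```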
    // rustdoc-stripper-ignore-next-stop
    /// Creates a [`Bytes`][crate::Bytes] which is a subsection of another `GBytes`.
    ///
    /// The @offset + @length may not be longer than the size of @bytes.
    ///
    /// A reference to @bytes will be held by the newly created `GBytes` until
    /// the byte data is no longer needed.
    ///
    /// Since 2.56, if @offset is 0 and @length matches the size of @bytes, then
    /// @bytes will be returned with the reference count incremented by 1. If @bytes
    /// is a slice of another `GBytes`, then the resulting `GBytes` will reference
    /// the same `GBytes` instead of @bytes. This allows consumers to simplify the
    /// usage of `GBytes` when asynchronously writing to streams.
    /// ## `bytes`
    /// a [`Bytes`][crate::Bytes]
    /// ## `offset`
    /// offset which subsection starts at
    /// ## `length`
    /// length of subsection
    ///
    /// # Returns
    ///
    /// a new [`Bytes`][crate::Bytes]
    #[doc(alias = "g_bytes_new_from_bytes")]
    pub fn from_bytes(bytes: &Self, range: impl RangeBounds<usize>) -> Self {
        let (offset, size) = bytes.calculate_offset_size(range);
        unsafe {
            from_glib_full(ffi::g_bytes_new_from_bytes(
                bytes.to_glib_none().0,
                offset,
                size,
            ))
        }
    }
}

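// SAFETY: `GBytes` is immutable and its reference count is managed atomically by
// GLib, so it can be sent to and shared between threads.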
unsafe impl Send for Bytes {}
unsafe impl Sync for Bytes {}

impl<'a, T: ?Sized + Borrow<[u8]> + 'a> From<&'a T> for Bytes {
    #[inline]
    fn from(value: &'a T) -> Bytes {
        Bytes::new(value.borrow())
    }
}

impl fmt::Debug for Bytes {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Bytes")
            .field("ptr", &ToGlibPtr::<*const _>::to_glib_none(self).0)
            .field("data", &&self[..])
            .finish()
    }
}

impl AsRef<[u8]> for Bytes {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self
    }
}

impl Deref for Bytes {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        unsafe {
            let mut len = 0;
            let ptr = ffi::g_bytes_get_data(self.to_glib_none().0, &mut len);
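            // `g_bytes_get_data()` may return `NULL` when the length is 0; map that
            // case to an empty slice instead of building one from a null pointer.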
            if ptr.is_null() || len == 0 {
                &[]
            } else {
                slice::from_raw_parts(ptr as *const u8, len)
            }
        }
    }
}

impl PartialEq for Bytes {
    #[doc(alias = "g_bytes_equal")]
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        unsafe {
            from_glib(ffi::g_bytes_equal(
                ToGlibPtr::<*const _>::to_glib_none(self).0 as *const _,
                ToGlibPtr::<*const _>::to_glib_none(other).0 as *const _,
            ))
        }
    }
}

impl Eq for Bytes {}

impl PartialOrd for Bytes {
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Bytes {
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        unsafe {
            let ret = ffi::g_bytes_compare(
                ToGlibPtr::<*const _>::to_glib_none(self).0 as *const _,
                ToGlibPtr::<*const _>::to_glib_none(other).0 as *const _,
            );
            ret.cmp(&0)
        }
    }
}

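// Implements symmetric `PartialEq`/`PartialOrd` between `Bytes` (or `&Bytes`) and
// common byte-slice types by comparing the underlying data.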
macro_rules! impl_cmp {
    ($lhs:ty, $rhs: ty) => {
        #[allow(clippy::redundant_slicing)]
        #[allow(clippy::extra_unused_lifetimes)]
        impl<'a, 'b> PartialEq<$rhs> for $lhs {
            #[inline]
            fn eq(&self, other: &$rhs) -> bool {
                self[..].eq(&other[..])
            }
        }

        #[allow(clippy::redundant_slicing)]
        #[allow(clippy::extra_unused_lifetimes)]
        impl<'a, 'b> PartialEq<$lhs> for $rhs {
            #[inline]
            fn eq(&self, other: &$lhs) -> bool {
                self[..].eq(&other[..])
            }
        }

        #[allow(clippy::redundant_slicing)]
        #[allow(clippy::extra_unused_lifetimes)]
        impl<'a, 'b> PartialOrd<$rhs> for $lhs {
            #[inline]
            fn partial_cmp(&self, other: &$rhs) -> Option<Ordering> {
                self[..].partial_cmp(&other[..])
            }
        }

        #[allow(clippy::redundant_slicing)]
        #[allow(clippy::extra_unused_lifetimes)]
        impl<'a, 'b> PartialOrd<$lhs> for $rhs {
            #[inline]
            fn partial_cmp(&self, other: &$lhs) -> Option<Ordering> {
                self[..].partial_cmp(&other[..])
            }
        }
    };
}

impl_cmp!(Bytes, [u8]);
impl_cmp!(Bytes, &'a [u8]);
impl_cmp!(&'a Bytes, [u8]);
impl_cmp!(Bytes, Vec<u8>);
impl_cmp!(&'a Bytes, Vec<u8>);

impl Hash for Bytes {
    #[inline]
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.len().hash(state);
        Hash::hash_slice(self, state)
    }
}

#[cfg(test)]
mod tests {
    use std::collections::HashSet;

    use super::*;

    #[test]
    fn eq() {
        let abc: &[u8] = b"abc";
        let def: &[u8] = b"def";
        let a1 = Bytes::from(abc);
        let a2 = Bytes::from(abc);
        let d = Bytes::from(def);
        assert_eq!(a1, a2);
        assert_eq!(def, d);
        assert_ne!(a1, d);
        assert_ne!(a1, def);
    }

    #[test]
    fn ord() {
        let abc: &[u8] = b"abc";
        let def: &[u8] = b"def";
        let a = Bytes::from(abc);
        let d = Bytes::from(def);
        assert!(a < d);
        assert!(a < def);
        assert!(abc < d);
        assert!(d > a);
        assert!(d > abc);
        assert!(def > a);
    }

    #[test]
    fn hash() {
        let b1 = Bytes::from(b"this is a test");
        let b2 = Bytes::from(b"this is a test");
        let b3 = Bytes::from(b"test");
        let mut set = HashSet::new();
        set.insert(b1);
        assert!(set.contains(&b2));
        assert!(!set.contains(&b3));
    }

    #[test]
    fn from_static() {
        let b1 = Bytes::from_static(b"this is a test");
        let b2 = Bytes::from(b"this is a test");
        assert_eq!(b1, b2);
    }

    #[test]
    fn from_owned() {
        let b = Bytes::from_owned(vec![1, 2, 3]);
        assert_eq!(b, [1u8, 2u8, 3u8].as_ref());
    }

    #[test]
    fn from_bytes() {
        let b1 = Bytes::from_owned(vec![1, 2, 3]);
        let b2 = Bytes::from_bytes(&b1, 1..=1);
        assert_eq!(b2, [2u8].as_ref());
        let b2 = Bytes::from_bytes(&b1, 1..);
        assert_eq!(b2, [2u8, 3u8].as_ref());
        let b2 = Bytes::from_bytes(&b1, ..2);
        assert_eq!(b2, [1u8, 2u8].as_ref());
        let b2 = Bytes::from_bytes(&b1, ..);
        assert_eq!(b2, [1u8, 2u8, 3u8].as_ref());
        let b2 = Bytes::from_bytes(&b1, 3..);
        assert_eq!(b2, [].as_ref());
    }
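
    #[test]
    #[should_panic(expected = "End offset after valid range")]
    fn from_bytes_out_of_range() {
        // Illustrative: ranges that extend past the end of the data are rejected.
        let b = Bytes::from_owned(vec![1u8, 2, 3]);
        let _ = Bytes::from_bytes(&b, ..4);
    }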

    #[test]
    pub fn into_data() {
        let b = Bytes::from(b"this is a test");
        let d = b.into_data();
        assert_eq!(d.as_slice(), b"this is a test");
    }
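
    #[test]
    fn deref() {
        // Illustrative: `Bytes` derefs to `[u8]`, so slice methods and indexing
        // are available directly.
        let b = Bytes::from(b"abc");
        assert_eq!(b.len(), 3);
        assert_eq!(&b[1..], b"bc");
    }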
}