support 16-bit platforms (except zerocopy doesn't support them)
kadiwa4 committed Mar 5, 2024
1 parent 944536f commit bc10ab1
Showing 5 changed files with 90 additions and 84 deletions.
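The heart of the change is a pointer-width-gated integer alias plus fallible conversions in place of bare `as` casts. A minimal standalone sketch of that pattern, using a hypothetical `TooLarge` error rather than the crate's `BlobError`:

// Sketch of the commit's pointer-width-gated size type. On 16-bit targets a
// valid devicetree can never exceed i16::MAX bytes, so u16 is wide enough.
#[cfg(target_pointer_width = "16")]
type DtUint = u16;
#[cfg(not(target_pointer_width = "16"))]
type DtUint = u32;

// Hypothetical stand-in for the crate's BlobError.
#[derive(Debug)]
struct TooLarge;

// Replace a lossy `as` cast with a fallible conversion, as the diff does for
// header fields read from the blob.
fn read_len(raw_be: u32) -> Result<DtUint, TooLarge> {
    DtUint::try_from(u32::from_be(raw_be)).map_err(|_| TooLarge)
}

fn main() {
    assert_eq!(read_len(16u32.to_be()).unwrap(), 16);
}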
3 changes: 2 additions & 1 deletion src/alloc.rs
@@ -44,7 +44,8 @@ impl<'a> DevicetreeBuilder<'a> {
/// Constructs the devicetree.
///
/// # Errors
/// Throws [`Error::DevicetreeTooLarge`] if it is too large.
/// Throws [`Error::DevicetreeTooLarge`] or runs out of memory if it is too
/// large.
pub fn build(&self) -> Result<Box<Devicetree>> {
let struct_offset = blob::Header::SIZE
+ size_of_val(self.mem_reserve_entries)
51 changes: 32 additions & 19 deletions src/blob.rs
@@ -25,6 +25,11 @@ use crate::{
BlobError, DeserializeNode, DeserializeProperty, MemReserveEntries, NodeContext, Path,
};

// on 16-bit platforms, the maximum valid devicetree size is i16::MAX
#[cfg(target_pointer_width = "16")]
type DtUint = u16;
#[cfg(not(target_pointer_width = "16"))]
type DtUint = u32;
type Result<T, E = BlobError> = core::result::Result<T, E>;

pub(crate) const DTB_OPTIMAL_ALIGN: usize = 8;
@@ -80,10 +85,10 @@ impl Devicetree {
let blob = slice::from_raw_parts(ptr, Header::SIZE / DTB_OPTIMAL_ALIGN);
Self::check_magic(blob)?;
Self::totalsize(blob)
}? as usize;
}?;

if usize::overflowing_add(ptr as usize, size).1 {
// the buffer wraps around
if size > isize::MAX as usize || usize::overflowing_add(ptr as usize, size).1 {
// the buffer occupies more than half of the address space or wraps around
return Err(BlobError::InvalidTotalsize);
}

@@ -166,7 +171,7 @@ impl Devicetree {
let size = unsafe {
Self::check_magic(blob)?;
Self::totalsize(blob)
}? as usize;
}?;
if size_of_val(blob) < size {
return Err(BlobError::InvalidTotalsize);
}
@@ -192,13 +197,13 @@ impl Devicetree {
/// # Safety
/// `size_of_val(blob) >= Header::SIZE`
#[deny(unsafe_op_in_unsafe_fn)]
unsafe fn totalsize(blob: &[u64]) -> Result<u32> {
unsafe fn totalsize(blob: &[u64]) -> Result<usize> {
let header = blob as *const _ as *const Header;
let size = u32::from_be(unsafe { (*header).totalsize });
if size < Header::SIZE as u32 {
return Err(BlobError::InvalidTotalsize);
}
Ok(size)
usize::try_from(size)
.ok()
.filter(|&s| s >= Header::SIZE)
.ok_or(BlobError::InvalidTotalsize)
}

fn late_checks(&self) -> Result<()> {
@@ -207,21 +212,28 @@ impl Devicetree {
return Err(BlobError::IncompatibleVersion);
}

let offset = u32::from_be(header.off_dt_struct) as usize;
let len = u32::from_be(header.size_dt_struct) as usize;
let exact_size = u32::from_be(header.totalsize) as usize;
if offset % STRUCT_BLOCK_OPTIMAL_ALIGN != 0 || len % STRUCT_BLOCK_OPTIMAL_ALIGN != 0 {
let (offset, size) = Option::zip(
usize::try_from(u32::from_be(header.off_dt_struct)).ok(),
usize::try_from(u32::from_be(header.size_dt_struct)).ok(),
)
.filter(|&(o, s)| usize::checked_add(o, s).is_some_and(|e| e <= exact_size))
.ok_or(BlobError::BlockOutOfBounds)?;

if offset % STRUCT_BLOCK_OPTIMAL_ALIGN != 0 || size % STRUCT_BLOCK_OPTIMAL_ALIGN != 0 {
return Err(BlobError::UnalignedBlock);
}
if offset + len > exact_size {
return Err(BlobError::BlockOutOfBounds);
}

let offset = u32::from_be(header.off_dt_strings) as usize;
let len = u32::from_be(header.size_dt_strings) as usize;
if offset + len > exact_size {
if !Option::zip(
usize::try_from(u32::from_be(header.off_dt_strings)).ok(),
usize::try_from(u32::from_be(header.size_dt_strings)).ok(),
)
.and_then(|(o, s)| usize::checked_add(o, s))
.is_some_and(|e| e <= exact_size)
{
return Err(BlobError::BlockOutOfBounds);
}

Ok(())
}

@@ -345,7 +357,8 @@ impl Devicetree {

/// Iterates over the memory reservation block.
pub fn mem_reserve_entries(&self) -> Result<MemReserveEntries<'_>> {
let offset = u32::from_be(self.header().off_mem_rsvmap) as usize;
let offset = usize::try_from(u32::from_be(self.header().off_mem_rsvmap))
.map_err(|_| BlobError::BlockOutOfBounds)?;
if offset % MEM_RESERVE_BLOCK_OPTIMAL_ALIGN != 0 {
return Err(BlobError::UnalignedBlock);
}
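The `late_checks` hunk above replaces unchecked `offset + len > exact_size` arithmetic with `try_from` plus `checked_add`, so neither the conversion nor the addition can silently wrap. A standalone sketch of that bounds check; the `CheckError` enum is a hypothetical stand-in for `BlobError`:

#[derive(Debug, PartialEq)]
enum CheckError {
    BlockOutOfBounds,
}

// Validate that a block described by big-endian (offset, size) header fields
// lies entirely within a blob of `total` bytes, without overflowing usize.
fn block_bounds(off_be: u32, size_be: u32, total: usize) -> Result<(usize, usize), CheckError> {
    Option::zip(
        usize::try_from(u32::from_be(off_be)).ok(),
        usize::try_from(u32::from_be(size_be)).ok(),
    )
    .filter(|&(o, s)| usize::checked_add(o, s).is_some_and(|end| end <= total))
    .ok_or(CheckError::BlockOutOfBounds)
}

fn main() {
    assert_eq!(block_bounds(8u32.to_be(), 16u32.to_be(), 64), Ok((8, 16)));
    assert!(block_bounds(u32::MAX.to_be(), 16u32.to_be(), 64).is_err());
}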
54 changes: 17 additions & 37 deletions src/blob/node.rs
@@ -1,16 +1,11 @@
//! Nodes from the blob’s struct block.
use core::{
fmt::{self, Display, Formatter, Write},
hash::{Hash, Hasher},
};
use core::fmt::{self, Display, Formatter, Write};

use fallible_iterator::FallibleIterator;

use crate::{
blob::{self, Cursor, Devicetree, Item, Property, Token, TOKEN_SIZE},
BlobError, Error, NodeContext, PushDeserializedNode, Result,
};
use super::{Cursor, Devicetree, DtUint, Item, Property, Token, TOKEN_SIZE};
use crate::{blob, BlobError, Error, NodeContext, PushDeserializedNode, Result};

/// A node of the tree structure in a [`Devicetree`] blob's struct block.
/// It contains [`Property`]s and child nodes.
@@ -58,7 +53,7 @@ impl<'dtb> Node<'dtb> {
pub fn start_cursor(&self) -> Cursor {
Cursor {
depth: self.contents.depth - 1,
offset: ((self.contents.offset - self.name.len() as u32 - 1)
offset: ((self.contents.offset - self.name.len() as DtUint - 1)
& TOKEN_SIZE.wrapping_neg())
- TOKEN_SIZE,
}
@@ -156,7 +151,7 @@ impl<'dtb> Node<'dtb> {

impl<'dtb> Display for Node<'dtb> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
fn write_indent(f: &mut Formatter<'_>, depth: u32) -> fmt::Result {
fn write_indent(f: &mut Formatter<'_>, depth: DtUint) -> fmt::Result {
for _ in 0..depth {
f.write_char('\t')?;
}
@@ -218,7 +213,7 @@ impl<'dtb> Display for Node<'dtb> {
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct Items<'dtb> {
dt: &'dtb Devicetree,
at_depth: u32,
at_depth: DtUint,
pub(crate) cursor: Cursor,
}

@@ -412,9 +407,8 @@ pub struct NamedRange<'dtb>(Option<(&'dtb str, BaseRange)>);
impl PartialEq for NamedRange<'_> {
fn eq(&self, other: &Self) -> bool {
if let (Self(Some((name0, base0))), Self(Some((name1, base1)))) = (*self, *other) {
let ret = base0.first_offset == base1.first_offset && base0.len == base1.len;
let ret = base0.first == base1.first && base0.len == base1.len;
if ret {
debug_assert_eq!(base0.depth, base1.depth);
debug_assert_eq!(name0, name1);
}
ret
@@ -433,7 +427,7 @@ impl<'dtb> PushDeserializedNode<'dtb> for NamedRange<'dtb> {
return Ok(());
};
let cursor = node.start_cursor();
debug_assert_eq!(cursor.depth, base.depth);
debug_assert_eq!(cursor.depth, base.first.depth);
base.len += 1;
Ok(())
}
@@ -449,8 +443,7 @@ impl<'dtb> NamedRange<'dtb> {
Ok(Self(Some((
node.split_name()?.0,
BaseRange {
depth: cursor.depth,
first_offset: cursor.offset,
first: cursor,
len: 1,
},
))))
@@ -481,36 +474,23 @@ impl<'dtb> NamedRange<'dtb> {
}
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct BaseRange {
depth: u32,
first_offset: u32,
len: u32,
first: Cursor,
len: DtUint,
}

impl Hash for BaseRange {
fn hash<H: Hasher>(&self, state: &mut H) {
self.first_offset.hash(state);
self.len.hash(state);
}
}
impl BaseRange {
#[inline]
fn first(self) -> Cursor {
Cursor {
depth: self.depth,
offset: self.first_offset,
}
self.first
}

fn to_children(self, dt: &Devicetree) -> Children<'_> {
Children(Items {
dt,
at_depth: self.depth,
cursor: Cursor {
depth: self.depth,
offset: self.first_offset,
},
at_depth: self.first.depth,
cursor: self.first,
})
}
}
@@ -527,7 +507,7 @@ impl<'dtb> NamedRangeIter<'dtb> {

#[inline]
pub fn remaining_len(&self) -> u32 {
self.0.as_ref().map_or(0, |i| i.len)
self.0.as_ref().map_or(0, |i| DtUint::into(i.len))
}
}

@@ -560,5 +540,5 @@ impl<'dtb> FallibleIterator for NamedRangeIter<'dtb> {
struct NamedRangeIterInner<'dtb> {
children: Children<'dtb>,
filter_name: &'dtb str,
len: u32,
len: DtUint,
}
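The node.rs changes above drop `BaseRange`'s manual `Hash` impl in favor of a derive and store a `Cursor` directly, which only works if `Cursor`'s `Hash` agrees with its `PartialEq`. A minimal sketch of that pattern; it assumes (as the new `Hash` impl suggests) that `Cursor` equality ignores `depth` and compares only `offset`, and it uses plain `u32` in place of `DtUint`:

use core::hash::{Hash, Hasher};
use std::collections::HashSet;

#[derive(Clone, Copy, Debug, Eq)]
struct Cursor {
    depth: u32,
    offset: u32,
}

// Assumed equality: cursors at the same offset denote the same token.
impl PartialEq for Cursor {
    fn eq(&self, other: &Self) -> bool {
        self.offset == other.offset
    }
}

// Hash must agree with PartialEq, so it hashes only the offset as well.
impl Hash for Cursor {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.offset.hash(state);
    }
}

// With Cursor consistent, the containing type can simply derive everything.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct BaseRange {
    first: Cursor,
    len: u32,
}

fn main() {
    let a = BaseRange { first: Cursor { depth: 1, offset: 8 }, len: 2 };
    let b = BaseRange { first: Cursor { depth: 3, offset: 8 }, len: 2 };
    assert_eq!(a, b); // depth does not participate in equality
    let set: HashSet<BaseRange> = [a, b].into_iter().collect();
    assert_eq!(set.len(), 1); // and hashing agrees with that equality
}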
48 changes: 30 additions & 18 deletions src/blob/token.rs
@@ -1,13 +1,15 @@
use core::{cmp::Ordering, mem::size_of};
use core::{
cmp::Ordering,
hash::{Hash, Hasher},
mem::size_of,
};

use zerocopy::{FromBytes, FromZeroes};

use crate::{
blob::{BlobError, Devicetree, Item, Node, Property, Result},
util,
};
use super::{BlobError, Devicetree, DtUint, Item, Node, Property, Result};
use crate::util;

pub(super) const TOKEN_SIZE: u32 = 4;
pub(super) const TOKEN_SIZE: DtUint = 4;

/// A parsed token from the [`Devicetree`] blob's struct block.
///
@@ -41,8 +43,8 @@ impl<'dtb> Token<'dtb> {
/// Do not compare cursors from different devicetrees.
#[derive(Clone, Copy, Debug, Eq)]
pub struct Cursor {
pub(super) depth: u32,
pub(super) offset: u32,
pub(super) depth: DtUint,
pub(super) offset: DtUint,
}

impl PartialOrd for Cursor {
Expand Down Expand Up @@ -70,9 +72,18 @@ impl PartialEq for Cursor {
}
}

impl Hash for Cursor {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.offset.hash(state);
}
}

impl Cursor {
fn increase_offset(&mut self, add: u32, blob: &[u8]) -> Result<()> {
let offset = u32::checked_add(self.offset, add)
/// Increases the offset of the cursor by `add`, then rounds it up to the
/// next multiple of `TOKEN_SIZE`.
fn increase_offset(&mut self, add: DtUint, blob: &[u8]) -> Result<()> {
let offset = DtUint::checked_add(self.offset, add)
.and_then(|o| o.checked_next_multiple_of(TOKEN_SIZE))
.ok_or(BlobError::UnexpectedEnd)?;

Expand All @@ -98,7 +109,7 @@ impl Devicetree {
// bounds check was done in Self::late_checks
let mut cursor = Cursor {
depth: 0,
offset: u32::from_be(self.header().off_dt_struct),
offset: u32::from_be(self.header().off_dt_struct) as DtUint,
};
match self.next_token(&mut cursor)? {
Some(Token::BeginNode(node)) if node.name.is_empty() => Ok(node),
@@ -125,7 +136,7 @@ impl Devicetree {
let name = &blob[offset..];
let name = util::get_c_str(name)?;

cursor.increase_offset(name.len() as u32 + 1, blob)?;
cursor.increase_offset(name.len() as DtUint + 1, blob)?;
cursor.depth += 1;
Token::BeginNode(Node {
dt: self,
@@ -134,7 +145,7 @@ impl Devicetree {
})
}
RawToken::EndNode => {
let depth = u32::checked_sub(cursor.depth, 1)
let depth = DtUint::checked_sub(cursor.depth, 1)
.ok_or(BlobError::UnexpectedEndNodeToken)?;
cursor.depth = depth;

@@ -144,15 +155,16 @@
let header = PropHeader::read_from_prefix(&blob[offset..])
.ok_or(BlobError::InvalidPropertyHeader)?;

let name_blob = self
.strings_blob()
.get(u32::from_be(header.nameoff) as usize..)
let name_blob = usize::try_from(u32::from_be(header.nameoff))
.ok()
.and_then(|offset| self.strings_blob().get(offset..))
.ok_or(BlobError::InvalidString)?;

cursor.offset += size_of::<PropHeader>() as u32;
cursor.offset += size_of::<PropHeader>() as DtUint;
let offset = cursor.offset as usize;

let len = u32::from_be(header.len);
let len = DtUint::try_from(u32::from_be(header.len))
.map_err(|_| BlobError::InvalidPropertyHeader)?;
let value = util::slice_get_with_len(blob, offset, len as usize)
.ok_or(BlobError::InvalidPropertyHeader)?;

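`increase_offset` above advances the cursor with checked arithmetic and rounds the result up to the 4-byte token grid. A standalone sketch of that step, using plain `u32` in place of `DtUint`; the final bounds check against a bare length parameter is an assumption here (the real code checks against the blob slice), and the `Error` enum is a stand-in:

const TOKEN_SIZE: u32 = 4;

#[derive(Debug, PartialEq)]
enum Error {
    UnexpectedEnd,
}

// Advance `offset` by `add`, rounded up to the next multiple of TOKEN_SIZE,
// failing instead of wrapping when the arithmetic overflows or the blob ends.
fn increase_offset(offset: u32, add: u32, blob_len: u32) -> Result<u32, Error> {
    u32::checked_add(offset, add)
        .and_then(|o| o.checked_next_multiple_of(TOKEN_SIZE))
        .filter(|&o| o <= blob_len)
        .ok_or(Error::UnexpectedEnd)
}

fn main() {
    assert_eq!(increase_offset(8, 5, 64), Ok(16)); // 13 rounded up to 16
    assert_eq!(increase_offset(60, 8, 64), Err(Error::UnexpectedEnd));
    assert_eq!(increase_offset(u32::MAX - 1, 4, 64), Err(Error::UnexpectedEnd));
}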