Skip to content

Commit

Permalink
Use newer bitflags crate which supports bitflag consts. Closes #166.
Browse files Browse the repository at this point in the history
  • Loading branch information
kevinaboos committed Jul 26, 2019
1 parent 242e3dc commit eefab0c
Show file tree
Hide file tree
Showing 9 changed files with 79 additions and 93 deletions.
16 changes: 8 additions & 8 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion kernel/ata/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ version = "0.1.0"
build = "../../build.rs"

[dependencies]
bitflags = "1.0.4"
bitflags = "1.1.0"
spin = "0.4.10"

[dependencies.log]
Expand Down
2 changes: 1 addition & 1 deletion kernel/gdt/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ spin = "0.4.10"
# x86_64 = { git = "https://github.com/kevinaboos/x86_64" }
x86_64 = { path = "../../libs/x86_64" } # currently using our local copy, forked from Phil Opp's crate
bit_field = "0.7.0"
bitflags = "1.0.4"
bitflags = "1.1.0"

[dependencies.log]
default-features = false
Expand Down
86 changes: 42 additions & 44 deletions kernel/gdt/src/lib.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
#![no_std]
#![feature(const_fn)]

#[macro_use] extern crate lazy_static;
// #[macro_use] extern crate log;
Expand Down Expand Up @@ -210,60 +211,57 @@ pub enum Descriptor {
}

impl Descriptor {
pub fn kernel_code_segment() -> Descriptor {
let flags = DescriptorFlags::LONG_MODE |
DescriptorFlags::PRESENT |
DescriptorFlags::PRIVILEGE_RING0 |
DescriptorFlags::USER_SEGMENT |
DescriptorFlags::EXECUTABLE |
DescriptorFlags::READ_WRITE;
Descriptor::UserSegment(flags.bits())
pub const fn kernel_code_segment() -> Descriptor {
let flags = DescriptorFlags::LONG_MODE.bits() |
DescriptorFlags::PRESENT.bits() |
DescriptorFlags::PRIVILEGE_RING0.bits() |
DescriptorFlags::USER_SEGMENT.bits() |
DescriptorFlags::EXECUTABLE.bits() |
DescriptorFlags::READ_WRITE.bits();
Descriptor::UserSegment(flags)
}

pub fn kernel_data_segment() -> Descriptor {
let flags = DescriptorFlags::PRESENT |
DescriptorFlags::PRIVILEGE_RING0 |
DescriptorFlags::USER_SEGMENT |
DescriptorFlags::READ_WRITE;
// | DescriptorFlags::ACCESSED;
Descriptor::UserSegment(flags.bits())
pub const fn kernel_data_segment() -> Descriptor {
let flags = DescriptorFlags::PRESENT.bits() |
DescriptorFlags::PRIVILEGE_RING0.bits() |
DescriptorFlags::USER_SEGMENT.bits() |
DescriptorFlags::READ_WRITE.bits();
Descriptor::UserSegment(flags)
}

pub fn user_code_32_segment() -> Descriptor {
let flags = DescriptorFlags::SIZE |
DescriptorFlags::PRESENT |
DescriptorFlags::PRIVILEGE_RING3 |
DescriptorFlags::USER_SEGMENT |
DescriptorFlags::EXECUTABLE;
Descriptor::UserSegment(flags.bits())
pub const fn user_code_32_segment() -> Descriptor {
let flags = DescriptorFlags::SIZE.bits() |
DescriptorFlags::PRESENT.bits() |
DescriptorFlags::PRIVILEGE_RING3.bits() |
DescriptorFlags::USER_SEGMENT.bits() |
DescriptorFlags::EXECUTABLE.bits();
Descriptor::UserSegment(flags)
}

pub fn user_data_32_segment() -> Descriptor {
let flags = DescriptorFlags::SIZE |
DescriptorFlags::PRESENT |
DescriptorFlags::PRIVILEGE_RING3 |
DescriptorFlags::USER_SEGMENT |
DescriptorFlags::READ_WRITE;
// | DescriptorFlags::ACCESSED;
Descriptor::UserSegment(flags.bits())
pub const fn user_data_32_segment() -> Descriptor {
let flags = DescriptorFlags::SIZE.bits() |
DescriptorFlags::PRESENT.bits() |
DescriptorFlags::PRIVILEGE_RING3.bits() |
DescriptorFlags::USER_SEGMENT.bits() |
DescriptorFlags::READ_WRITE.bits();
Descriptor::UserSegment(flags)
}

pub fn user_code_64_segment() -> Descriptor {
let flags = DescriptorFlags::LONG_MODE |
DescriptorFlags::PRESENT |
DescriptorFlags::PRIVILEGE_RING3 |
DescriptorFlags::USER_SEGMENT |
DescriptorFlags::EXECUTABLE;
Descriptor::UserSegment(flags.bits())
pub const fn user_code_64_segment() -> Descriptor {
let flags = DescriptorFlags::LONG_MODE.bits() |
DescriptorFlags::PRESENT.bits() |
DescriptorFlags::PRIVILEGE_RING3.bits() |
DescriptorFlags::USER_SEGMENT.bits() |
DescriptorFlags::EXECUTABLE.bits();
Descriptor::UserSegment(flags)
}

pub fn user_data_64_segment() -> Descriptor {
let flags = DescriptorFlags::PRESENT |
DescriptorFlags::PRIVILEGE_RING3 |
DescriptorFlags::USER_SEGMENT |
DescriptorFlags::READ_WRITE;
// | DescriptorFlags::ACCESSED;
Descriptor::UserSegment(flags.bits())
pub const fn user_data_64_segment() -> Descriptor {
let flags = DescriptorFlags::PRESENT.bits() |
DescriptorFlags::PRIVILEGE_RING3.bits() |
DescriptorFlags::USER_SEGMENT.bits() |
DescriptorFlags::READ_WRITE.bits();
Descriptor::UserSegment(flags)
}


Expand Down
2 changes: 1 addition & 1 deletion kernel/memory/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ build = "../../build.rs"

[dependencies]
spin = "0.4.10"
bitflags = "1.0.4"
bitflags = "1.1.0"
multiboot2 = "0.7.1"
# x86_64 = { git = "https://github.com/kevinaboos/x86_64" }
x86_64 = { path = "../../libs/x86_64" } # currently using our local copy, forked from Phil Opp's crate
Expand Down
29 changes: 8 additions & 21 deletions kernel/mod_mgmt/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -97,22 +97,9 @@ pub fn get_namespaces_directory() -> Option<DirRef> {
}


/// This should be a const, but Rust doesn't like OR-ing bitflags as a const expression.
#[allow(non_snake_case)]
pub fn TEXT_SECTION_FLAGS() -> EntryFlags {
EntryFlags::PRESENT
}
/// This should be a const, but Rust doesn't like OR-ing bitflags as a const expression.
#[allow(non_snake_case)]
pub fn RODATA_SECTION_FLAGS() -> EntryFlags {
EntryFlags::PRESENT | EntryFlags::NO_EXECUTE
}
/// This should be a const, but Rust doesn't like OR-ing bitflags as a const expression.
#[allow(non_snake_case)]
pub fn DATA_BSS_SECTION_FLAGS() -> EntryFlags {
EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE
}

const TEXT_SECTION_FLAGS: EntryFlags = EntryFlags::PRESENT;
const RODATA_SECTION_FLAGS: EntryFlags = EntryFlags::from_bits_truncate(EntryFlags::PRESENT.bits() | EntryFlags::NO_EXECUTE.bits());
const DATA_BSS_SECTION_FLAGS: EntryFlags = EntryFlags::from_bits_truncate(EntryFlags::PRESENT.bits() | EntryFlags::NO_EXECUTE.bits() | EntryFlags::WRITABLE.bits());


/// Initializes the module management system based on the bootloader-provided modules, and
Expand Down Expand Up @@ -1943,10 +1930,10 @@ impl CrateNamespace {
// Finally, remap each section's mapped pages to the proper permission bits,
// since we initially mapped them all as writable
if let Some(ref tp) = new_crate.text_pages {
tp.lock().remap(&mut kernel_mmi_ref.lock().page_table, TEXT_SECTION_FLAGS())?;
tp.lock().remap(&mut kernel_mmi_ref.lock().page_table, TEXT_SECTION_FLAGS)?;
}
if let Some(ref rp) = new_crate.rodata_pages {
rp.lock().remap(&mut kernel_mmi_ref.lock().page_table, RODATA_SECTION_FLAGS())?;
rp.lock().remap(&mut kernel_mmi_ref.lock().page_table, RODATA_SECTION_FLAGS)?;
}
// data/bss sections are already mapped properly, since they're supposed to be writable

Expand Down Expand Up @@ -2403,9 +2390,9 @@ fn allocate_section_pages(elf_file: &ElfFile, kernel_mmi_ref: &MmiRef)

// we must allocate these pages separately because they will have different flags later
(
if text_bytecount > 0 { allocate_pages_closure(text_bytecount, TEXT_SECTION_FLAGS()).ok() } else { None },
if rodata_bytecount > 0 { allocate_pages_closure(rodata_bytecount, RODATA_SECTION_FLAGS()).ok() } else { None },
if data_bytecount > 0 { allocate_pages_closure(data_bytecount, DATA_BSS_SECTION_FLAGS()).ok() } else { None }
if text_bytecount > 0 { allocate_pages_closure(text_bytecount, TEXT_SECTION_FLAGS).ok() } else { None },
if rodata_bytecount > 0 { allocate_pages_closure(rodata_bytecount, RODATA_SECTION_FLAGS).ok() } else { None },
if data_bytecount > 0 { allocate_pages_closure(data_bytecount, DATA_BSS_SECTION_FLAGS).ok() } else { None }
)
};

Expand Down
10 changes: 5 additions & 5 deletions kernel/mod_mgmt/src/metadata.rs
Original file line number Diff line number Diff line change
Expand Up @@ -235,15 +235,15 @@ impl LoadedCrate {
// We initially map them as writable because we'll have to copy things into them
let (new_text_pages, new_rodata_pages, new_data_pages) = {
let new_text_pages = match self.text_pages {
Some(ref tp) => Some(tp.lock().deep_copy(Some(TEXT_SECTION_FLAGS() | EntryFlags::WRITABLE), page_table, allocator)?),
Some(ref tp) => Some(tp.lock().deep_copy(Some(TEXT_SECTION_FLAGS | EntryFlags::WRITABLE), page_table, allocator)?),
None => None,
};
let new_rodata_pages = match self.rodata_pages {
Some(ref rp) => Some(rp.lock().deep_copy(Some(RODATA_SECTION_FLAGS() | EntryFlags::WRITABLE), page_table, allocator)?),
Some(ref rp) => Some(rp.lock().deep_copy(Some(RODATA_SECTION_FLAGS | EntryFlags::WRITABLE), page_table, allocator)?),
None => None,
};
let new_data_pages = match self.data_pages {
Some(ref dp) => Some(dp.lock().deep_copy(Some(DATA_BSS_SECTION_FLAGS()), page_table, allocator)?),
Some(ref dp) => Some(dp.lock().deep_copy(Some(DATA_BSS_SECTION_FLAGS), page_table, allocator)?),
None => None,
};
(new_text_pages, new_rodata_pages, new_data_pages)
Expand Down Expand Up @@ -385,10 +385,10 @@ impl LoadedCrate {

// since we mapped all the new MappedPages as writable, we need to properly remap them.
if let Some(ref mut tp) = new_text_pages_locked {
tp.remap(page_table, TEXT_SECTION_FLAGS())?;
tp.remap(page_table, TEXT_SECTION_FLAGS)?;
}
if let Some(ref mut rp) = new_rodata_pages_locked {
rp.remap(page_table, RODATA_SECTION_FLAGS())?;
rp.remap(page_table, RODATA_SECTION_FLAGS)?;
}
// data/bss sections are already mapped properly, since they're supposed to be writable

Expand Down
4 changes: 2 additions & 2 deletions kernel/network_interface_card/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ extern crate nic_buffers;

use alloc::vec::Vec;
use memory::{create_contiguous_mapping};
use nic_initialization::{nic_mapping_flags};
use nic_initialization::NIC_MAPPING_FLAGS;
use intel_ethernet::descriptors:: {TxDescriptor, RxDescriptor};
use nic_queues::{RxQueue, TxQueue};
use nic_buffers::{TransmitBuffer, ReceiveBuffer, ReceivedFrame};
Expand Down Expand Up @@ -79,7 +79,7 @@ pub trait NetworkInterfaceCard {
warn!("NIC RX BUF POOL WAS EMPTY.... reallocating! This means that no task is consuming the accumulated received ethernet frames.");
// if the pool was empty, then we allocate a new receive buffer
let len = rx_buffer_size;
let (mp, phys_addr) = create_contiguous_mapping(len as usize, nic_mapping_flags())?;
let (mp, phys_addr) = create_contiguous_mapping(len as usize, NIC_MAPPING_FLAGS)?;
ReceiveBuffer::new(mp, phys_addr, len, rx_buffer_pool)
}
};
Expand Down
21 changes: 11 additions & 10 deletions kernel/nic_initialization/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,10 +31,12 @@ use volatile::Volatile;


/// The mapping flags used for pages that the NIC will map.
/// This should be a const, but Rust doesn't yet allow constants for the bitflags type
pub fn nic_mapping_flags() -> EntryFlags {
EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_CACHE | EntryFlags::NO_EXECUTE
}
pub const NIC_MAPPING_FLAGS: EntryFlags = EntryFlags::from_bits_truncate(
EntryFlags::PRESENT.bits() |
EntryFlags::WRITABLE.bits() |
EntryFlags::NO_CACHE.bits() |
EntryFlags::NO_EXECUTE.bits()
);


/// Allocates memory for the NIC registers
Expand Down Expand Up @@ -65,14 +67,13 @@ pub fn allocate_memory(mem_base: PhysicalAddress, mem_size_in_bytes: usize) -> R
// set up virtual pages and physical frames to be mapped
let pages_nic = allocate_pages_by_bytes(mem_size_in_bytes).ok_or("NicInit::mem_map(): couldn't allocated virtual page!")?;
let frames_nic = FrameRange::from_phys_addr(mem_base, mem_size_in_bytes);
let flags = nic_mapping_flags();

// debug!("NicInit: memory base: {:#X}, memory size: {}", mem_base, mem_size_in_bytes);

let kernel_mmi_ref = get_kernel_mmi_ref().ok_or("NicInit::mem_map(): KERNEL_MMI was not yet initialized!")?;
let mut kernel_mmi = kernel_mmi_ref.lock();
let mut fa = FRAME_ALLOCATOR.try().ok_or("NicInit::mem_map(): Couldn't get FRAME ALLOCATOR")?.lock();
let nic_mapped_page = kernel_mmi.page_table.map_allocated_pages_to(pages_nic, frames_nic, flags, fa.deref_mut())?;
let nic_mapped_page = kernel_mmi.page_table.map_allocated_pages_to(pages_nic, frames_nic, NIC_MAPPING_FLAGS, fa.deref_mut())?;

Ok(nic_mapped_page)
}
Expand All @@ -86,7 +87,7 @@ pub fn allocate_memory(mem_base: PhysicalAddress, mem_size_in_bytes: usize) -> R
pub fn init_rx_buf_pool(num_rx_buffers: usize, buffer_size: u16, rx_buffer_pool: &'static mpmc::Queue<ReceiveBuffer>) -> Result<(), &'static str> {
let length = buffer_size;
for _i in 0..num_rx_buffers {
let (mp, phys_addr) = create_contiguous_mapping(length as usize, nic_mapping_flags())?;
let (mp, phys_addr) = create_contiguous_mapping(length as usize, NIC_MAPPING_FLAGS)?;
let rx_buf = ReceiveBuffer::new(mp, phys_addr, length, rx_buffer_pool);
if rx_buffer_pool.push(rx_buf).is_err() {
// if the queue is full, it returns an Err containing the object trying to be pushed
Expand Down Expand Up @@ -116,7 +117,7 @@ pub fn init_rx_queue<T: RxDescriptor>(num_desc: usize, rx_buffer_pool: &'static
let size_in_bytes_of_all_rx_descs_per_queue = num_desc * core::mem::size_of::<T>();

// Rx descriptors must be 128 byte-aligned, which is satisfied below because it's aligned to a page boundary.
let (rx_descs_mapped_pages, rx_descs_starting_phys_addr) = create_contiguous_mapping(size_in_bytes_of_all_rx_descs_per_queue, nic_mapping_flags())?;
let (rx_descs_mapped_pages, rx_descs_starting_phys_addr) = create_contiguous_mapping(size_in_bytes_of_all_rx_descs_per_queue, NIC_MAPPING_FLAGS)?;

// cast our physically-contiguous MappedPages into a slice of receive descriptors
let mut rx_descs = BoxRefMut::new(Box::new(rx_descs_mapped_pages)).try_map_mut(|mp| mp.as_slice_mut::<T>(0, num_desc))?;
Expand All @@ -129,7 +130,7 @@ pub fn init_rx_queue<T: RxDescriptor>(num_desc: usize, rx_buffer_pool: &'static
let rx_buf = rx_buffer_pool.pop()
.ok_or("Couldn't obtain a ReceiveBuffer from the pool")
.or_else(|_e| {
create_contiguous_mapping(buffer_size, nic_mapping_flags())
create_contiguous_mapping(buffer_size, NIC_MAPPING_FLAGS)
.map(|(buf_mapped, buf_paddr)|
ReceiveBuffer::new(buf_mapped, buf_paddr, buffer_size as u16, rx_buffer_pool)
)
Expand Down Expand Up @@ -175,7 +176,7 @@ pub fn init_tx_queue<T: TxDescriptor>(num_desc: usize, tdbal: &mut Volatile<Tdba
let size_in_bytes_of_all_tx_descs = num_desc * core::mem::size_of::<T>();

// Tx descriptors must be 128 byte-aligned, which is satisfied below because it's aligned to a page boundary.
let (tx_descs_mapped_pages, tx_descs_starting_phys_addr) = create_contiguous_mapping(size_in_bytes_of_all_tx_descs, nic_mapping_flags())?;
let (tx_descs_mapped_pages, tx_descs_starting_phys_addr) = create_contiguous_mapping(size_in_bytes_of_all_tx_descs, NIC_MAPPING_FLAGS)?;

// cast our physically-contiguous MappedPages into a slice of transmit descriptors
let mut tx_descs = BoxRefMut::new(Box::new(tx_descs_mapped_pages))
Expand Down

0 comments on commit eefab0c

Please sign in to comment.