mirror of https://github.com/ipxe/ipxe
[ioapi] Allow iounmap() to be called for port I/O addresses
Allow code using the combined MMIO and port I/O accessors to safely call iounmap() to unmap the MMIO or port I/O region.

In the virtual offset I/O mapping API as used for UEFI, 32-bit BIOS, and 32-bit RISC-V SBI, iounmap() is a no-op anyway. In 64-bit RISC-V SBI, we have no concept of port I/O and so the issue is moot. This leaves only 64-bit BIOS, where it suffices to simply do nothing for any pages outside of the chosen MMIO virtual address range.

For symmetry, we implement the equivalent change in the very closely related RISC-V page management code.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
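The behaviour described above is easiest to see in isolation. The sketch below is a minimal, self-contained C illustration, not iPXE's actual code: the window base, page size, page count, and all toy_* names are invented for the example. It shows an unmap routine computing a page index from the supplied address and silently ignoring anything outside its chosen I/O window, so that a caller using the combined MMIO and port I/O accessors may pass a port I/O address without harm.

/* Standalone sketch: unmap requests outside the chosen I/O window are
 * silently ignored.  All names and constants here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_IO_BASE       0xffff800000000000ULL  /* hypothetical window base */
#define TOY_IO_PAGE_SIZE  ( 2UL * 1024 * 1024 )  /* hypothetical 2MB pages */
#define TOY_IO_PAGE_COUNT 512                    /* hypothetical page count */

static uint64_t toy_pages[TOY_IO_PAGE_COUNT];    /* toy page table */

static void toy_iounmap ( uint64_t virt ) {
	uint64_t first = ( ( virt - TOY_IO_BASE ) / TOY_IO_PAGE_SIZE );

	/* Ignore unmappings outside of the I/O window: a port I/O
	 * "address" was never mapped here, so its index lands out of
	 * range (the unsigned subtraction wraps) and we simply return.
	 */
	if ( first >= TOY_IO_PAGE_COUNT )
		return;

	/* Clear the (single, for this sketch) page table entry */
	toy_pages[first] = 0;
}

int main ( void ) {
	toy_iounmap ( TOY_IO_BASE + TOY_IO_PAGE_SIZE );	/* mapped address */
	toy_iounmap ( 0x3f8 );				/* port I/O address */
	printf ( "both unmap calls returned safely\n" );
	return 0;
}

The actual change, shown in the diff below, applies the same guard in the RISC-V svpage code and the 64-bit BIOS I/O page code.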
@@ -83,6 +83,10 @@ enum pte_flags {
 /** The page table */
 extern struct page_table page_table;
 
+/** Maximum number of I/O pages */
+#define MAP_PAGE_COUNT \
+	( sizeof ( page_table.pte ) / sizeof ( page_table.pte[0] ) )
+
 /** I/O page size
  *
  * We choose to use 1GB "gigapages", since these are supported by all
@@ -146,17 +150,14 @@ static void * svpage_map ( physaddr_t phys, size_t len, unsigned long attrs ) {
 	/* Calculate number of pages required */
 	count = ( ( offset + len + MAP_PAGE_SIZE - 1 ) / MAP_PAGE_SIZE );
 	assert ( count != 0 );
-	assert ( count < ( sizeof ( page_table.pte ) /
-			   sizeof ( page_table.pte[0] ) ) );
+	assert ( count <= MAP_PAGE_COUNT );
 
 	/* Round up number of pages to a power of two */
 	stride = ( 1 << fls ( count - 1 ) );
 	assert ( count <= stride );
 
 	/* Allocate pages */
-	for ( first = 0 ; first < ( sizeof ( page_table.pte ) /
-				    sizeof ( page_table.pte[0] ) ) ;
-	      first += stride ) {
+	for ( first = 0 ; first < MAP_PAGE_COUNT ; first += stride ) {
 
 		/* Calculate virtual address */
 		virt = ( MAP_BASE + ( first * MAP_PAGE_SIZE ) + offset );
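As an aside on the unchanged context in the hunk above: stride = ( 1 << fls ( count - 1 ) ) rounds the requested page count up to the next power of two, so that the allocation loop can step through the window in equally sized, stride-aligned slots. iPXE provides its own fls(); the standalone sketch below uses a hand-rolled bit scan assumed to behave the same way, purely to demonstrate the arithmetic.

/* Standalone check (hand-rolled fls(), assumed equivalent to iPXE's) of
 * rounding a page count up to the next power of two.
 */
#include <assert.h>
#include <stdio.h>

/* Find last set: 1-based index of the highest set bit, or 0 if none */
static int toy_fls ( unsigned int x ) {
	int bit = 0;
	while ( x ) {
		bit++;
		x >>= 1;
	}
	return bit;
}

int main ( void ) {
	unsigned int count;
	unsigned int stride;

	for ( count = 1 ; count <= 9 ; count++ ) {
		stride = ( 1U << toy_fls ( count - 1 ) );
		assert ( count <= stride );
		printf ( "count=%u stride=%u\n", count, stride );
	}
	return 0;
}

For count values 1 through 9 this prints strides of 1, 2, 4, 4, 8, 8, 8, 8 and 16, and the assertion count <= stride always holds.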
@@ -216,6 +217,10 @@ static void svpage_unmap ( const volatile void *virt ) {
 	/* Calculate first page table entry */
 	first = ( ( virt - MAP_BASE ) / MAP_PAGE_SIZE );
 
+	/* Ignore unmappings outside of the I/O range */
+	if ( first >= MAP_PAGE_COUNT )
+		return;
+
 	/* Clear page table entries */
 	for ( i = first ; ; i++ ) {
 
@@ -346,6 +346,10 @@ enum page_flags {
 /** The I/O space page table */
 extern struct page_table io_pages;
 
+/** Maximum number of I/O pages */
+#define IO_PAGE_COUNT \
+	( sizeof ( io_pages.page ) / sizeof ( io_pages.page[0] ) )
+
 /** I/O page size
  *
  * We choose to use 2MB pages for I/O space, to minimise the number of
@@ -303,17 +303,14 @@ static void * ioremap_pages ( unsigned long bus_addr, size_t len ) {
 	/* Calculate number of pages required */
 	count = ( ( offset + len + IO_PAGE_SIZE - 1 ) / IO_PAGE_SIZE );
 	assert ( count != 0 );
-	assert ( count < ( sizeof ( io_pages.page ) /
-			   sizeof ( io_pages.page[0] ) ) );
+	assert ( count <= IO_PAGE_COUNT );
 
 	/* Round up number of pages to a power of two */
 	stride = ( 1 << fls ( count - 1 ) );
 	assert ( count <= stride );
 
 	/* Allocate pages */
-	for ( first = 0 ; first < ( sizeof ( io_pages.page ) /
-				    sizeof ( io_pages.page[0] ) ) ;
-	      first += stride ) {
+	for ( first = 0 ; first < IO_PAGE_COUNT ; first += stride ) {
 
 		/* Calculate I/O address */
 		io_addr = ( IO_BASE + ( first * IO_PAGE_SIZE ) + offset );
@@ -366,6 +363,10 @@ static void iounmap_pages ( volatile const void *io_addr ) {
 	/* Calculate first page table entry */
 	first = ( ( io_addr - IO_BASE ) / IO_PAGE_SIZE );
 
+	/* Ignore unmappings outside of the I/O range */
+	if ( first >= IO_PAGE_COUNT )
+		return;
+
 	/* Clear page table entries */
 	for ( i = first ; ; i++ ) {
 
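Both new macros use the standard sizeof-based element-count idiom, which lets the assertions and loop bounds track the page table size automatically if the array ever changes. The trivial standalone sketch below shows the same idiom on an invented structure, not iPXE's page table layout.

/* Element-count idiom used by MAP_PAGE_COUNT and IO_PAGE_COUNT, shown on
 * an invented structure.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_page_table {
	uint64_t page[2048];
};

static struct toy_page_table toy_table;

#define TOY_PAGE_COUNT \
	( sizeof ( toy_table.page ) / sizeof ( toy_table.page[0] ) )

int main ( void ) {
	/* Prints 2048: the divisor tracks the element type automatically */
	printf ( "pages in table: %zu\n", TOY_PAGE_COUNT );
	return 0;
}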