mirror of https://github.com/ipxe/ipxe
[riscv] Hold virtual address offset in the thread pointer register
iPXE does not make use of any thread-local storage. Use the otherwise
unused thread pointer register ("tp") to hold the current value of
the virtual address offset, rather than using a global variable.
This ensures that virt_offset can be made valid even during very early
initialisation (when iPXE may be executing directly from read-only
memory and so cannot update a global variable).
Signed-off-by: Michael Brown <mcb30@ipxe.org>
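The idea, in rough C form (a simplified sketch of what the new arch/riscv/include/bits/virt_offset.h below does, not a substitute for reading the patch): an always-inline accessor reads the live tp register, and a macro aliases virt_offset to that accessor, so existing users of virt_offset need no source changes.

/* Sketch only: mirrors the new RISC-V bits/virt_offset.h shown below. */
static inline __attribute__ (( const, always_inline )) unsigned long
tp_virt_offset ( void ) {
        register unsigned long tp asm ( "tp" );

        /* The empty asm marks "tp" as an output, so the compiler
         * fetches the current register contents rather than assuming
         * a value; the "const" attribute still allows repeated reads
         * to be merged.
         */
        __asm__ ( "" : "=r" ( tp ) );
        return tp;
}

/* Every existing reference to virt_offset now compiles down to a tp
 * read, which works even while iPXE's data segment is read-only.
 */
#define virt_offset tp_virt_offset()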
src/arch/riscv/include/bits/virt_offset.h (new file, 33 lines added)
@@ -0,0 +1,33 @@
+#ifndef _BITS_VIRT_OFFSET_H
+#define _BITS_VIRT_OFFSET_H
+
+/** @file
+ *
+ * RISCV-specific virtual address offset
+ *
+ * We use the thread pointer register (tp) to hold the virtual address
+ * offset, so that virtual-to-physical address translations work as
+ * expected even while we are executing directly from read-only memory
+ * (and so cannot store a value in a global virt_offset variable).
+ *
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+/**
+ * Read virtual address offset held in thread pointer register
+ *
+ * @ret virt_offset	Virtual address offset
+ */
+static inline __attribute__ (( const, always_inline )) unsigned long
+tp_virt_offset ( void ) {
+	register unsigned long tp asm ( "tp" );
+
+	__asm__ ( "" : "=r" ( tp ) );
+	return tp;
+}
+
+/** Always read thread pointer register to get virtual address offset */
+#define virt_offset tp_virt_offset()
+
+#endif /* _BITS_VIRT_OFFSET_H */
@@ -41,13 +41,6 @@ prefix_virt:
 	.dword _prefix
 	.size prefix_virt, . - prefix_virt
 
-	/* Current virtual address offset */
-	.section ".data.virt_offset", "aw", @progbits
-	.globl virt_offset
-virt_offset:
-	.space ( __riscv_xlen / 8 )
-	.size virt_offset, . - virt_offset
-
 /*****************************************************************************
  *
  * Print message to debug console
@@ -311,7 +304,7 @@ apply_relocs_done:
  *
  * Returns:
  *
- *   a0 - Virtual address offset
+ *   tp - Virtual address offset
  *   pc - Updated to a virtual address if paging enabled
  *
  */
@@ -418,10 +411,11 @@ paging_mode_names:
  *
  * Parameters:
  *
- *   a0 - Virtual address offset
+ *   tp - Virtual address offset
  *
  * Returns:
  *
+ *   tp - Virtual address offset (zeroed)
  *   pc - Updated to a physical address
  *
  */
@@ -449,7 +443,7 @@ paging_mode_names:
  *
  * Returns:
  *
- *   a0 - Virtual address offset
+ *   tp - Virtual address offset
  *   pc - Updated to a virtual address if paging enabled
  *
  * A 4kB 64-bit page table contains 512 8-byte PTEs. We choose to use
@@ -510,21 +504,20 @@ paging_mode_names:
 enable_paging_64:
 	/* Register usage:
 	 *
-	 * a0 - return value (virtual address offset)
+	 * tp - return value (virtual address offset)
+	 * a0 - page table base address
 	 * a1 - currently attempted paging level
 	 * a2 - enabled paging level
-	 * a3 - page table base address
-	 * a4 - PTE pointer
-	 * a5 - PTE stride
+	 * a3 - PTE pointer
+	 * a4 - PTE stride
 	 */
 	progress " paging:"
-	mv a3, a0
 	li a1, SATP_MODE_SV57
 
 	/* Calculate virtual address offset */
 	LOADN t0, prefix_virt
 	la t1, _prefix
-	sub a0, t1, t0
+	sub tp, t1, t0
 
 enable_paging_64_loop:
 
@@ -534,50 +527,50 @@ enable_paging_64_loop:
 	 * a1 == 9 == Sv48: PPN[3] LSB is PTE bit 37 => stride := 1 << 37
 	 * a1 == 8 == Sv39: PPN[2] LSB is PTE bit 28 => stride := 1 << 28
 	 *
-	 * and so we calculate stride a5 := ( 1 << ( 9 * a1 - 44 ) )
+	 * and so we calculate stride a4 := ( 1 << ( 9 * a1 - 44 ) )
 	 */
-	slli a5, a1, 3
-	add a5, a5, a1
-	addi a5, a5, -44
+	slli a4, a1, 3
+	add a4, a4, a1
+	addi a4, a4, -44
 	li t0, 1
-	sll a5, t0, a5
+	sll a4, t0, a4
 
 	/* Construct PTE[0-255] for identity map */
-	mv a4, a3
+	mv a3, a0
 	li t0, ( PTE_COUNT / 2 )
 	li t1, PTE_LEAF
-1:	STOREN t1, (a4)
-	addi a4, a4, PTE_SIZE
-	add t1, t1, a5
+1:	STOREN t1, (a3)
+	addi a3, a3, PTE_SIZE
+	add t1, t1, a4
 	addi t0, t0, -1
 	bgtz t0, 1b
 
 	/* Zero PTE[256-511] */
 	li t0, ( PTE_COUNT / 2 )
-1:	STOREN zero, (a4)
-	addi a4, a4, PTE_SIZE
+1:	STOREN zero, (a3)
+	addi a3, a3, PTE_SIZE
 	addi t0, t0, -1
 	bgtz t0, 1b
 
 	/* Construct PTE[511] as next level page table pointer */
-	srli t0, a3, PTE_PPN_SHIFT
+	srli t0, a0, PTE_PPN_SHIFT
 	ori t0, t0, PTE_V
-	STOREN t0, -PTE_SIZE(a4)
+	STOREN t0, -PTE_SIZE(a3)
 
 	/* Calculate PTE[x] address for iPXE virtual address map */
 	LOADN t0, prefix_virt
 	srli t0, t0, VPN1_LSB
 	andi t0, t0, ( PTE_COUNT - 1 )
 	slli t0, t0, PTE_SIZE_LOG2
-	add a4, a3, t0
+	add a3, a0, t0
 
 	/* Calculate PTE stride for iPXE virtual address map
 	 *
 	 * PPN[1] LSB is PTE bit 19 in all paging modes, and so the
 	 * stride is always ( 1 << 19 )
 	 */
-	li a5, 1
-	slli a5, a5, PTE_PPN1_LSB
+	li a4, 1
+	slli a4, a4, PTE_PPN1_LSB
 
 	/* Construct PTE[x-y] for iPXE virtual address map */
 	la t0, _prefix
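As an aside (not part of the patch), the per-level PTE stride formula carried through the hunk above, stride = 1 << ( 9 * a1 - 44 ), can be checked numerically against the values quoted in the comment; this is a stand-alone sketch, with SATP mode values 8 (Sv39), 9 (Sv48) and 10 (Sv57) taken from that comment:

/* Stand-alone check of the stride arithmetic quoted above. */
#include <assert.h>
#include <stdint.h>

static uint64_t stride ( unsigned int satp_mode ) {
        return ( ( ( uint64_t ) 1 ) << ( 9 * satp_mode - 44 ) );
}

int main ( void ) {
        assert ( stride ( 8 )  == ( ( ( uint64_t ) 1 ) << 28 ) ); /* Sv39 */
        assert ( stride ( 9 )  == ( ( ( uint64_t ) 1 ) << 37 ) ); /* Sv48 */
        assert ( stride ( 10 ) == ( ( ( uint64_t ) 1 ) << 46 ) ); /* Sv57 */
        return 0;
}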
@@ -585,14 +578,14 @@ enable_paging_64_loop:
 	ori t0, t0, PTE_LEAF
 	la t1, _ebss
 	srli t1, t1, PTE_PPN_SHIFT
-1:	STOREN t0, (a4)
-	addi a4, a4, PTE_SIZE
-	add t0, t0, a5
+1:	STOREN t0, (a3)
+	addi a3, a3, PTE_SIZE
+	add t0, t0, a4
 	ble t0, t1, 1b
 
 	/* Attempt to enable paging, and read back active paging level */
 	slli t0, a1, SATP_MODE_SHIFT
-	srli t1, a3, PAGE_SHIFT
+	srli t1, a0, PAGE_SHIFT
 	or t0, t0, t1
 	csrrw zero, satp, t0
 	sfence.vma
@@ -604,10 +597,10 @@ enable_paging_64_loop:
 	addi a1, a1, -1
 	li t0, SATP_MODE_SV39
 	bge a1, t0, enable_paging_64_loop
-	mv a0, zero
+	mv tp, zero
 1:
 	/* Adjust return address to a virtual address */
-	sub ra, ra, a0
+	sub ra, ra, tp
 
 	/* Return, with or without paging enabled */
 	paging_mode_name a2
@@ -625,10 +618,11 @@ enable_paging_64_loop:
  *
  * Parameters:
  *
- *   a0 - Virtual address offset
+ *   tp - Virtual address offset
  *
  * Returns:
  *
+ *   tp - Virtual address offset (zeroed)
  *   pc - Updated to a physical address
  *
  */
@@ -637,13 +631,13 @@ enable_paging_64_loop:
 disable_paging_64:
 	/* Register usage:
 	 *
-	 * a0 - virtual address offset
+	 * tp - virtual address offset
 	 */
 
 	/* Jump to physical address */
 	la t0, 1f
 	bgez t0, 1f
-	add t0, t0, a0
+	add t0, t0, tp
 	jr t0
 1:
 	/* Disable paging */
@@ -652,9 +646,10 @@ disable_paging_64:
 
 	/* Update return address to a physical address */
 	bgez ra, 1f
-	add ra, ra, a0
+	add ra, ra, tp
 1:
-	/* Return with paging disabled */
+	/* Return with paging disabled and virtual offset zeroed */
+	mv tp, zero
 	ret
 	.size disable_paging_64, . - disable_paging_64
 
@@ -677,7 +672,7 @@ disable_paging_64:
  *
  * Returns:
  *
- *   a0 - Virtual address offset
+ *   tp - Virtual address offset
  *   pc - Updated to a virtual address if paging enabled
  *
  * A 4kB 32-bit page table contains 1024 4-byte PTEs. We choose to
@@ -698,29 +693,28 @@ disable_paging_64:
 enable_paging_32:
 	/* Register usage:
 	 *
-	 * a0 - return value (virtual address offset)
+	 * tp - return value (virtual address offset)
+	 * a0 - page table base address
 	 * a1 - enabled paging level
-	 * a2 - page table base address
-	 * a3 - PTE pointer
-	 * a4 - saved content of temporarily modified PTE
+	 * a2 - PTE pointer
+	 * a3 - saved content of temporarily modified PTE
 	 */
 	progress " paging:"
-	mv a2, a0
 
 	/* Calculate virtual address offset */
 	LOADN t0, prefix_virt
 	la t1, _prefix
-	sub a0, t1, t0
+	sub tp, t1, t0
 
 	/* Construct PTEs for circular map */
-	mv a3, a2
+	mv a2, a0
 	li t0, PTE_COUNT
-	mv t1, a0
+	mv t1, tp
 	ori t1, t1, ( PTE_LEAF << PTE_PPN_SHIFT )
 	li t2, ( 1 << ( PTE_PPN1_LSB + PTE_PPN_SHIFT ) )
 1:	srli t3, t1, PTE_PPN_SHIFT
-	STOREN t3, (a3)
-	addi a3, a3, PTE_SIZE
+	STOREN t3, (a2)
+	addi a2, a2, PTE_SIZE
 	add t1, t1, t2
 	addi t0, t0, -1
 	bgtz t0, 1b
@@ -729,20 +723,20 @@ enable_paging_32:
 	la t0, enable_paging_32_xstart
 	srli t0, t0, VPN1_LSB
 	slli t1, t0, PTE_SIZE_LOG2
-	add a3, a2, t1
-	LOADN a4, (a3)
+	add a2, a0, t1
+	LOADN a3, (a2)
 	slli t0, t0, PTE_PPN1_LSB
 	ori t0, t0, PTE_LEAF
-	STOREN t0, (a3)
+	STOREN t0, (a2)
 
 	/* Adjust PTE pointer to a virtual address */
-	sub a3, a3, a0
+	sub a2, a2, tp
 
 	/* Attempt to enable paging, and read back active paging level */
 	la t0, 1f
-	sub t0, t0, a0
+	sub t0, t0, tp
 	li t1, ( SATP_MODE_SV32 << SATP_MODE_SHIFT )
-	srli t2, a2, PAGE_SHIFT
+	srli t2, a0, PAGE_SHIFT
 	or t1, t1, t2
 	.balign enable_paging_32_xalign
 	/* Start of transition code */
@@ -753,7 +747,7 @@ enable_paging_32_xstart:
 	beqz a1, 2f
 	jr t0
 1:	/* Restore temporarily modified PTE */
-	STOREN a4, (a3)
+	STOREN a3, (a2)
 	sfence.vma
 	/* End of transition code */
 	.equ enable_paging_32_xlen, . - enable_paging_32_xstart
@@ -761,10 +755,10 @@ enable_paging_32_xstart:
 
 	/* Clear virtual address offset if paging is not enabled */
 	bnez a1, 1f
-	mv a0, zero
+	mv tp, zero
 1:
 	/* Adjust return address to a virtual address */
-	sub ra, ra, a0
+	sub ra, ra, tp
 
 	/* Return, with or without paging enabled */
 	paging_mode_name a1
@@ -786,10 +780,11 @@ enable_paging_32_xstart:
  *
  * Parameters:
  *
- *   a0 - Virtual address offset
+ *   tp - Virtual address offset
  *
  * Returns:
  *
+ *   tp - Virtual address offset (zeroed)
  *   pc - Updated to a physical address
  *
  */
@@ -800,34 +795,34 @@ enable_paging_32_xstart:
 disable_paging_32:
 	/* Register usage:
 	 *
-	 * a0 - virtual address offset
-	 * a1 - page table address
-	 * a2 - transition PTE pointer
-	 * a3 - transition PTE content
+	 * tp - virtual address offset
+	 * a0 - page table address
+	 * a1 - transition PTE pointer
+	 * a2 - transition PTE content
 	 */
 
 	/* Get page table address, and exit if paging is already disabled */
-	csrr a1, satp
-	beqz a1, 99f
-	slli a1, a1, PAGE_SHIFT
-	sub a1, a1, a0
+	csrr a0, satp
+	beqz a0, 99f
+	slli a0, a0, PAGE_SHIFT
+	sub a0, a0, tp
 
 	/* Prepare for modifying transition PTE */
 	la t0, disable_paging_32_xstart
-	add t0, t0, a0
+	add t0, t0, tp
 	srli t0, t0, VPN1_LSB
-	slli a2, t0, PTE_SIZE_LOG2
-	add a2, a2, a1
-	slli a3, t0, PTE_PPN1_LSB
-	ori a3, a3, PTE_LEAF
+	slli a1, t0, PTE_SIZE_LOG2
+	add a1, a1, a0
+	slli a2, t0, PTE_PPN1_LSB
+	ori a2, a2, PTE_LEAF
 
 	/* Jump to physical address in transition PTE, and disable paging */
 	la t0, 1f
-	add t0, t0, a0
+	add t0, t0, tp
 	.balign disable_paging_32_xalign
 	/* Start of transition code */
 disable_paging_32_xstart:
-	STOREN a3, (a2)
+	STOREN a2, (a1)
 	sfence.vma
 	jr t0
 1:	csrw satp, zero
@@ -836,9 +831,10 @@ disable_paging_32_xstart:
 	.equ disable_paging_32_xlen, . - disable_paging_32_xstart
 
 	/* Update return address to a physical address */
-	add ra, ra, a0
+	add ra, ra, tp
 
-99:	/* Return with paging disabled */
+99:	/* Return with paging disabled and virtual offset zeroed */
+	mv tp, zero
 	ret
 	.size disable_paging_32, . - disable_paging_32
 
@@ -57,6 +57,9 @@ progress_\@:
 	.org 0
 	.globl _sbi_start
 _sbi_start:
+	/* Initialise virtual address offset */
+	mv tp, zero
+
 	/* Preserve arguments */
 	mv s0, a0
 	mv s1, a1
@@ -29,6 +29,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 #include <byteswap.h>
 #include <ipxe/netdevice.h>
 #include <ipxe/image.h>
+#include <ipxe/uaccess.h>
 #include <ipxe/umalloc.h>
 #include <ipxe/fdt.h>
 
@@ -734,8 +735,9 @@ int fdt_parse ( struct fdt *fdt, struct fdt_header *hdr, size_t max_len ) {
 		       fdt->len, max_len );
 		goto err;
 	}
-	DBGC ( fdt, "FDT version %d at %p+%#04zx\n",
-	       be32_to_cpu ( hdr->version ), fdt->hdr, fdt->len );
+	DBGC ( fdt, "FDT version %d at %p+%#04zx (phys %#08lx)\n",
+	       be32_to_cpu ( hdr->version ), fdt->hdr, fdt->len,
+	       virt_to_phys ( hdr ) );
 
 	/* Check signature */
 	if ( hdr->magic != cpu_to_be32 ( FDT_MAGIC ) ) {
@@ -34,3 +34,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 /* Flat address space user access API */
 PROVIDE_UACCESS_INLINE ( flat, phys_to_virt );
 PROVIDE_UACCESS_INLINE ( flat, virt_to_phys );
+
+/* Virtual address offset user access API */
+PROVIDE_UACCESS_INLINE ( offset, phys_to_virt );
+PROVIDE_UACCESS_INLINE ( offset, virt_to_phys );
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2025 Michael Brown <mbrown@fensystems.co.uk>.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- * You can also choose to distribute this program under the terms of
- * the Unmodified Binary Distribution Licence (as given in the file
- * COPYING.UBDL), provided that you have satisfied its requirements.
- */
-
-FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
-
-#include <ipxe/uaccess.h>
-
-/**
- * @file
- *
- * Virtual offset memory model
- *
- */
-
-PROVIDE_UACCESS_INLINE ( offset, phys_to_virt );
-PROVIDE_UACCESS_INLINE ( offset, virt_to_phys );
src/include/bits/virt_offset.h (new file, 15 lines added)
@@ -0,0 +1,15 @@
+#ifndef _BITS_VIRT_OFFSET_H
+#define _BITS_VIRT_OFFSET_H
+
+/** @file
+ *
+ * Dummy architecture-specific virtual address offset
+ *
+ * This file is included only if the architecture does not provide its
+ * own version of this file.
+ *
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+#endif /* _BITS_VIRT_OFFSET_H */
@@ -54,24 +54,20 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
  * This is defined to be the value to be added to an address within
  * iPXE's own image in order to obtain its physical address, as
  * described above.
- *
- * Note that if iPXE's image is not yet writable (i.e. during early
- * startup, prior to physical relocation), then this value may not yet
- * be valid. Under these circumstances, callers must use
- * offset_phys_to_virt() and offset_virt_to_phys() instead (and so
- * provide the virtual address offset as a function parameter).
  */
 extern const unsigned long virt_offset;
 
+/** Allow for architecture-specific overrides of virt_offset */
+#include <bits/virt_offset.h>
+
 /**
  * Convert physical address to virtual address
  *
  * @v phys		Physical address
- * @v offset		Virtual address offset
  * @ret virt		Virtual address
  */
 static inline __always_inline void *
-offset_phys_to_virt ( unsigned long phys, unsigned long offset ) {
+UACCESS_INLINE ( offset, phys_to_virt ) ( unsigned long phys ) {
@@ -81,18 +77,17 @@ offset_phys_to_virt ( unsigned long phys, unsigned long offset ) {
 	return ( ( void * ) phys );
 
 	/* In a 32-bit build: subtract virt_offset */
-	return ( ( void * ) ( phys - offset ) );
+	return ( ( void * ) ( phys - virt_offset ) );
 }
 
 /**
  * Convert virtual address to physical address
  *
  * @v virt		Virtual address
- * @v offset		Virtual address offset
  * @ret phys		Physical address
  */
 static inline __always_inline physaddr_t
-offset_virt_to_phys ( volatile const void *virt, unsigned long offset ) {
+UACCESS_INLINE ( offset, virt_to_phys ) ( volatile const void *virt ) {
 	physaddr_t addr = ( ( physaddr_t ) virt );
 
 	/* In a 64-bit build, any valid virtual address with the MSB
@@ -110,31 +105,7 @@ offset_virt_to_phys ( volatile const void *virt, unsigned long offset ) {
 	/* In a 32-bit build or in a 64-bit build with a virtual
 	 * address with the MSB set: add virt_offset
 	 */
-	return ( addr + offset );
-}
-
-/**
- * Convert physical address to virtual address
- *
- * @v phys		Physical address
- * @ret virt		Virtual address
- */
-static inline __always_inline void *
-UACCESS_INLINE ( offset, phys_to_virt ) ( unsigned long phys ) {
-
-	return offset_phys_to_virt ( phys, virt_offset );
-}
-
-/**
- * Convert virtual address to physical address
- *
- * @v virt		Virtual address
- * @ret phys		Physical address
- */
-static inline __always_inline physaddr_t
-UACCESS_INLINE ( offset, virt_to_phys ) ( volatile const void *virt ) {
-
-	return offset_virt_to_phys ( virt, virt_offset );
+	return ( addr + virt_offset );
 }
 
 #endif /* _IPXE_VIRT_OFFSET_H */
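For illustration only (not part of the patch), the reworked conversions behave roughly as below: virt_offset now comes either from the generic global variable or, via bits/virt_offset.h, from an architecture-specific override such as the RISC-V tp register. The 64-bit special case for virtual addresses with the MSB set, noted in the hunk above, is omitted here for brevity.

/* Simplified sketch of the offset-based conversions shown above. */
#include <stdint.h>

typedef uintptr_t physaddr_t;

extern unsigned long virt_offset;

static inline void * example_phys_to_virt ( unsigned long phys ) {
        if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) )
                return ( ( void * ) phys );             /* 64-bit: identity */
        return ( ( void * ) ( phys - virt_offset ) );   /* 32-bit */
}

static inline physaddr_t example_virt_to_phys ( volatile const void *virt ) {
        physaddr_t addr = ( ( physaddr_t ) virt );

        if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) )
                return addr;                            /* 64-bit: identity */
        return ( addr + virt_offset );                  /* 32-bit */
}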