Initial revision

This commit is contained in:
Michael Brown
2005-03-08 18:53:11 +00:00
commit 3d6123e69a
373 changed files with 114041 additions and 0 deletions

View File

@@ -0,0 +1,144 @@
/* a.out */

/* Classic a.out executable header, as found at the very start of the
 * image.  Field layout matches the BSD a.out format.
 */
struct exec {
	unsigned long a_midmag;		/* flags<<26 | mid<<16 | magic */
	unsigned long a_text;		/* text segment size */
	unsigned long a_data;		/* initialized data size */
	unsigned long a_bss;		/* uninitialized data size */
	unsigned long a_syms;		/* symbol table size */
	unsigned long a_entry;		/* entry point */
	unsigned long a_trsize;		/* text relocation size */
	unsigned long a_drsize;		/* data relocation size */
};

/* Download state carried between successive calls to aout_download()
 * as the image arrives block by block over the network.
 */
struct aout_state {
	struct exec head;		/* copy of the a.out header */
	unsigned long curaddr;		/* physical address being written */
	int segment;			/* current segment number, -1 for none */
	unsigned long loc;		/* start offset of current block */
	unsigned long skip;		/* padding to be skipped to current segment */
	unsigned long toread;		/* remaining data to be read in the segment */
};

static struct aout_state astate;

static sector_t aout_download(unsigned char *data, unsigned int len, int eof);
/* Probe an incoming image for the a.out format.
 *
 * data/len: the first block of the image.  Returns the download
 * handler (aout_download) if this looks like an a.out image, or 0 if
 * it is not (or if the segment layout cannot be loaded safely).
 */
static inline os_download_t aout_probe(unsigned char *data, unsigned int len)
{
	unsigned long start, mid, end, istart, iend;
	/* Need at least a full header before we can decide anything */
	if (len < sizeof(astate.head)) {
		return 0;
	}
	memcpy(&astate.head, data, sizeof(astate.head));
	/* 0x010B (0413 octal) is the a.out ZMAGIC number — presumably the
	 * only variant supported here; TODO confirm against other magics. */
	if ((astate.head.a_midmag & 0xffff) != 0x010BL) {
		return 0;
	}
	printf("(a.out");
	aout_freebsd_probe();	/* may adjust a_entry for FreeBSD kernels */
	printf(")... ");
	/* Check the aout image: compute the physical extents of text+data
	 * (start..mid) and bss (..end), with segments 4K-aligned, and the
	 * corresponding image-file offsets (istart..iend). */
	start = astate.head.a_entry;
	mid = (((start + astate.head.a_text) + 4095) & ~4095) + astate.head.a_data;
	end = ((mid + 4095) & ~4095) + astate.head.a_bss;
	istart = 4096;
	iend = istart + (mid - start);
	if (!prep_segment(start, mid, end, istart, iend))
		return dead_download;
	/* Reset the per-download state machine */
	astate.segment = -1;
	astate.loc = 0;
	astate.skip = 0;
	astate.toread = 0;
	return aout_download;
}
/* Consume one block of an a.out image.
 *
 * Called repeatedly with successive data blocks; eof is non-zero on
 * the final block.  Drives a small state machine over astate:
 * segment -1 means "header not yet skipped", 0 is text, 1 is data,
 * 2 is bss (zero-filled, then the kernel is started).
 */
static sector_t aout_download(unsigned char *data, unsigned int len, int eof)
{
	unsigned int offset;	/* working offset in the current data block */
	offset = 0;

#ifdef AOUT_LYNX_KDI
	/* LynxOS KDI variant: the whole image is simply copied linearly
	 * to 1MB; the generic segment state machine below is compiled
	 * out (note the unconditional return). */
	astate.segment++;
	if (astate.segment == 0) {
		astate.curaddr = 0x100000;
		astate.head.a_entry = astate.curaddr + 0x20;
	}
	memcpy(phys_to_virt(astate.curaddr), data, len);
	astate.curaddr += len;
	return 0;
#endif

	do {
		if (astate.segment != -1) {
			/* First consume any inter-segment padding */
			if (astate.skip) {
				if (astate.skip >= len - offset) {
					astate.skip -= len - offset;
					break;
				}
				offset += astate.skip;
				astate.skip = 0;
			}
			/* Then copy the segment payload itself */
			if (astate.toread) {
				if (astate.toread >= len - offset) {
					memcpy(phys_to_virt(astate.curaddr), data+offset,
						len - offset);
					astate.curaddr += len - offset;
					astate.toread -= len - offset;
					break;
				}
				memcpy(phys_to_virt(astate.curaddr), data+offset, astate.toread);
				offset += astate.toread;
				astate.toread = 0;
			}
		}

		/* Data left, but current segment finished - look for the next
		 * segment.  This is quite simple for a.out files.  */
		astate.segment++;
		switch (astate.segment) {
		case 0:
			/* read text */
			astate.curaddr = astate.head.a_entry;
			astate.skip = 4096;	/* skip the 4K header page */
			astate.toread = astate.head.a_text;
			break;
		case 1:
			/* read data */
			/* skip and curaddr may be wrong, but I couldn't find
			 * examples where this failed.  There is no reasonable
			 * documentation for a.out available.  */
			astate.skip = ((astate.curaddr + 4095) & ~4095) - astate.curaddr;
			astate.curaddr = (astate.curaddr + 4095) & ~4095;
			astate.toread = astate.head.a_data;
			break;
		case 2:
			/* initialize bss and start kernel */
			astate.curaddr = (astate.curaddr + 4095) & ~4095;
			astate.skip = 0;
			astate.toread = 0;
			memset(phys_to_virt(astate.curaddr), '\0', astate.head.a_bss);
			goto aout_startkernel;
		default:
			break;
		}
	} while (offset < len);

	astate.loc += len;

	if (eof) {
		unsigned long entry;
aout_startkernel:
		entry = astate.head.a_entry;
		done(1);
		/* For FreeBSD kernels this does not return */
		aout_freebsd_boot();
#ifdef AOUT_LYNX_KDI
		xstart32(entry);
#endif
		printf("unexpected a.out variant\n");
		longjmp(restart_etherboot, -2);
	}
	return 0;
}

View File

@@ -0,0 +1,107 @@
/* Callout/callback interface for Etherboot
*
* This file provides the mechanisms for making calls from Etherboot
* to external programs and vice-versa.
*
* Initial version by Michael Brown <mbrown@fensystems.co.uk>, January 2004.
*/
#include "etherboot.h"
#include "callbacks.h"
#include "realmode.h"
#include "segoff.h"
#include <stdarg.h>
/* Maximum amount of stack data that prefix may request to be passed
 * to its exit routine
 */
#define MAX_PREFIX_STACK_DATA 16

/* Prefix exit routine is defined in prefix object.  prefix_exit_end
 * marks the end of that routine. */
extern void prefix_exit ( void );
extern void prefix_exit_end ( void );
/*****************************************************************************
*
* IN_CALL INTERFACE
*
*****************************************************************************
*/
/* in_call(): entry point for calls in to Etherboot from external code.
*
* Parameters: some set up by assembly code _in_call(), others as
* passed from external code.
*/
/* i386-specific wrapper around the generic in_call() routine.
 *
 * ap:      variadic arguments as pushed by the external caller
 * pm_data: protected-mode register state set up by _in_call()
 * opcode:  in-call opcode; EB_CALL_FROM_REAL_MODE flag indicates the
 *          caller was in real mode, in which case a real-mode data
 *          block follows in ap.
 *
 * Returns the value to hand back to the external caller.
 */
uint32_t i386_in_call ( va_list ap, i386_pm_in_call_data_t pm_data,
			uint32_t opcode ) {
	uint32_t ret;
	i386_rm_in_call_data_t rm_data;
	in_call_data_t in_call_data = { &pm_data, NULL };
	/* Local copy of any stack data the prefix wants passed to its
	 * exit routine (at most MAX_PREFIX_STACK_DATA bytes). */
	struct {
		int data[MAX_PREFIX_STACK_DATA/4];
	} in_stack;

	/* Fill out rm_data if we were called from real mode */
	if ( opcode & EB_CALL_FROM_REAL_MODE ) {
		in_call_data.rm = &rm_data;
		rm_data = va_arg ( ap, typeof(rm_data) );
		/* Null return address indicates to use the special
		 * prefix exit mechanism, and that there are
		 * parameters on the stack that the prefix wants
		 * handed to its exit routine.
		 */
		if ( rm_data.ret_addr.offset == 0 ) {
			/* First word is the byte count, then the data */
			int n = va_arg ( ap, int ) / 4;
			int i;
			for ( i = 0; i < n; i++ ) {
				in_stack.data[i] = va_arg ( ap, int );
			}
		}
	}

	/* Hand off to main in_call() routine */
	ret = in_call ( &in_call_data, opcode, ap );

	/* If real-mode return address is null, it means that we
	 * should exit via the prefix's exit path, which is part of
	 * our image.  (This arrangement is necessary since the prefix
	 * code itself may have been vapourised by the time we want to
	 * return.)
	 */
	if ( ( opcode & EB_CALL_FROM_REAL_MODE ) &&
	     ( rm_data.ret_addr.offset == 0 ) ) {
		real_call ( prefix_exit, &in_stack, NULL );
		/* Should never return */
	}

	return ret;
}
#ifdef CODE16
/* install_rm_callback_interface(): install real-mode callback
* interface at specified address.
*
* Real-mode code may then call to this address (or lcall to this
* address plus RM_IN_CALL_FAR) in order to make an in_call() to
* Etherboot.
*
* Returns the size of the installed code, or 0 if the code could not
* be installed.
*/
/* install_rm_callback_interface(): install real-mode callback
 * interface at the specified address.
 *
 * address:   where to copy the callback stub
 * available: space available at address; 0 means "no limit given"
 *
 * Returns the size of the installed code, or 0 if it does not fit.
 */
int install_rm_callback_interface ( void *address, size_t available ) {
	/* Refuse if a size limit was supplied and it is too small */
	if ( ( available != 0 ) &&
	     ( available < rm_callback_interface_size ) ) {
		return 0;
	}
	/* Inform RM code where to find Etherboot */
	rm_etherboot_location = virt_to_phys ( _text );
	/* Copy the callback stub into place */
	memcpy ( address, &rm_callback_interface,
		 rm_callback_interface_size );
	return rm_callback_interface_size;
}
#endif /* CODE16 */

86
src/arch/i386/core/cpu.c Normal file
View File

@@ -0,0 +1,86 @@
#ifdef CONFIG_X86_64
#include "stdint.h"
#include "string.h"
#include "bits/cpu.h"
/* Standard macro to see if a specific flag is changeable */
/* Standard macro to see if a specific flag is changeable.
 *
 * Saves EFLAGS, toggles the given flag bit, re-reads EFLAGS and
 * checks whether the bit actually changed, then restores the original
 * EFLAGS.  Used to detect CPUID support via the ID flag.
 */
static inline int flag_is_changeable_p(uint32_t flag)
{
	uint32_t f1, f2;
	asm("pushfl\n\t"		/* preserve original EFLAGS */
	    "pushfl\n\t"
	    "popl %0\n\t"		/* f1 = EFLAGS */
	    "movl %0,%1\n\t"		/* f2 = f1 */
	    "xorl %2,%0\n\t"		/* toggle the flag bit */
	    "pushl %0\n\t"
	    "popfl\n\t"			/* write modified EFLAGS */
	    "pushfl\n\t"
	    "popl %0\n\t"		/* read back the result */
	    "popfl\n\t"			/* restore original EFLAGS */
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));
	return ((f1^f2) & flag) != 0;
}
/* Probe for the CPUID instruction */
/* Probe for the CPUID instruction: it exists iff the ID bit in
 * EFLAGS can be toggled. */
static inline int have_cpuid_p(void)
{
	int id_flag_toggles = flag_is_changeable_p(X86_EFLAGS_ID);
	return id_flag_toggles;
}
/* Fill in *c with the CPU's family/model/stepping, vendor string and
 * capability flags, using CPUID where available.
 */
static void identify_cpu(struct cpuinfo_x86 *c)
{
	unsigned xlvl;
	c->cpuid_level = -1;		/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0';	/* Unset */
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/* CPU doesn't have CPUID */
		/* If there are any capabilities, they're vendor-specific */
		/* enable_cpuid() would have set c->x86 for us. */
	}
	else {
		/* CPU does have CPUID */
		/* Get vendor name: leaf 0 returns the 12-byte vendor
		 * string in EBX:EDX:ECX order, hence the 0/8/4 offsets. */
		cpuid(0x00000000, &c->cpuid_level,
		      (int *)&c->x86_vendor_id[0],
		      (int *)&c->x86_vendor_id[8],
		      (int *)&c->x86_vendor_id[4]);

		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */

		/* Intel-defined flags: level 0x00000001 */
		if ( c->cpuid_level >= 0x00000001 ) {
			unsigned tfms, junk;
			cpuid(0x00000001, &tfms, &junk, &junk,
			      &c->x86_capability[0]);
			c->x86 = (tfms >> 8) & 15;	/* family */
			c->x86_model = (tfms >> 4) & 15;
			c->x86_mask = tfms & 15;	/* stepping */
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
			if ( xlvl >= 0x80000001 )
				c->x86_capability[1] = cpuid_edx(0x80000001);
		}
	}
}
/* Global CPU description, filled in once by cpu_setup(). */
struct cpuinfo_x86 cpu_info;

/* Probe and record the CPU's identity and capabilities. */
void cpu_setup(void)
{
	identify_cpu(&cpu_info);
}
#endif /* CONFIG_X86_64 */

135
src/arch/i386/core/elf.c Normal file
View File

@@ -0,0 +1,135 @@
#include "etherboot.h"
#include "elf.h"
/* Bootloader identity strings passed to the loaded image */
#define NAME "Etherboot"
#if defined(PCBIOS)
#define FIRMWARE "PCBIOS"
#endif
#if defined(LINUXBIOS)
#define FIRMWARE "LinuxBIOS"
#endif
#if !defined(FIRMWARE)
#error "No BIOS selected"
#endif

/* Round a string's storage size up to a 4-byte boundary, as required
 * for ELF note alignment. */
#define SZ(X) ((sizeof(X)+3) & ~3)
/* Copy S into D (both fixed-size objects) */
#define CP(D,S) (memcpy(&(D), &(S), sizeof(S)))
/* The complete ELF boot-notes block handed to the loaded image.
 * Laid out as one contiguous struct so it can be checksummed and
 * passed by a single pointer.
 */
struct elf_notes {
	/* The note header */
	struct Elf_Bhdr hdr;

	/* First the Fixed sized entries that must be well aligned */
	/* Pointer to bootp data */
	Elf_Nhdr nf1;
	char nf1_name[SZ(EB_PARAM_NOTE)];
	uint32_t nf1_bootp_data;
	/* Pointer to ELF header */
	Elf_Nhdr nf2;
	char nf2_name[SZ(EB_PARAM_NOTE)];
	uint32_t nf2_header;
	/* A copy of the i386 memory map */
	Elf_Nhdr nf3;
	char nf3_name[SZ(EB_PARAM_NOTE)];
	struct meminfo nf3_meminfo;

	/* Then the variable sized data string data where alignment does not matter */
	/* The bootloader name */
	Elf_Nhdr nv1;
	char nv1_desc[SZ(NAME)];
	/* The bootloader version */
	Elf_Nhdr nv2;
	char nv2_desc[SZ(VERSION)];
	/* The firmware type */
	Elf_Nhdr nv3;
	char nv3_desc[SZ(FIRMWARE)];
	/* Name of the loaded image */
	Elf_Nhdr nv4;
	char nv4_loaded_image[128];
	/* An empty command line */
	Elf_Nhdr nv5;
	char nv5_cmdline[SZ("")];
};

/* 3 fixed entries (nf1..nf3) + 5 variable entries (nv1..nv5) */
#define ELF_NOTE_COUNT (3 + 5)

static struct elf_notes notes;
/* Build the ELF boot-notes block describing this boot (bootp data,
 * memory map, loader identity) and return a pointer to it.
 *
 * header: virtual address of the loaded image's ELF header.
 */
struct Elf_Bhdr *prepare_boot_params(void *header)
{
	memset(&notes, 0, sizeof(notes));
	notes.hdr.b_signature = ELF_BHDR_MAGIC;
	notes.hdr.b_size = sizeof(notes);
	notes.hdr.b_checksum = 0;	/* must be 0 while checksumming */
	notes.hdr.b_records = ELF_NOTE_COUNT;

	/* Initialize the fixed length entries. */
	notes.nf1.n_namesz = sizeof(EB_PARAM_NOTE);
	notes.nf1.n_descsz = sizeof(notes.nf1_bootp_data);
	notes.nf1.n_type = EB_BOOTP_DATA;
	CP(notes.nf1_name, EB_PARAM_NOTE);
	notes.nf1_bootp_data = virt_to_phys(BOOTP_DATA_ADDR);

	notes.nf2.n_namesz = sizeof(EB_PARAM_NOTE);
	notes.nf2.n_descsz = sizeof(notes.nf2_header);
	notes.nf2.n_type = EB_HEADER;
	CP(notes.nf2_name, EB_PARAM_NOTE);
	notes.nf2_header = virt_to_phys(header);

	notes.nf3.n_namesz = sizeof(EB_PARAM_NOTE);
	notes.nf3.n_descsz = sizeof(notes.nf3_meminfo);
	notes.nf3.n_type = EB_I386_MEMMAP;
	CP(notes.nf3_name, EB_PARAM_NOTE);
	memcpy(&notes.nf3_meminfo, &meminfo, sizeof(meminfo));

	/* Initialize the variable length entries */
	notes.nv1.n_namesz = 0;
	notes.nv1.n_descsz = sizeof(NAME);
	notes.nv1.n_type = EBN_BOOTLOADER_NAME;
	CP(notes.nv1_desc, NAME);

	notes.nv2.n_namesz = 0;
	notes.nv2.n_descsz = sizeof(VERSION);
	notes.nv2.n_type = EBN_BOOTLOADER_VERSION;
	CP(notes.nv2_desc, VERSION);

	notes.nv3.n_namesz = 0;
	notes.nv3.n_descsz = sizeof(FIRMWARE);
	notes.nv3.n_type = EBN_FIRMWARE_TYPE;
	CP(notes.nv3_desc, FIRMWARE);

	/* Attempt to pass the name of the loaded image */
	notes.nv4.n_namesz = 0;
	notes.nv4.n_descsz = sizeof(notes.nv4_loaded_image);
	notes.nv4.n_type = EBN_LOADED_IMAGE;
	memcpy(&notes.nv4_loaded_image, KERNEL_BUF, sizeof(notes.nv4_loaded_image));

	/* Pass an empty command line for now */
	notes.nv5.n_namesz = 0;
	notes.nv5.n_descsz = sizeof("");
	notes.nv5.n_type = EBN_COMMAND_LINE;
	CP(notes.nv5_cmdline, "");

	notes.hdr.b_checksum = ipchksum(&notes, sizeof(notes));
	/* Like UDP invert a 0 checksum to show that a checksum is present */
	if (notes.hdr.b_checksum == 0) {
		notes.hdr.b_checksum = 0xffff;
	}
	return &notes.hdr;
}
/* Transfer control to a loaded ELF image.
 *
 * machine: ELF machine type (e.g. EM_X86_64); only consulted when
 *          built with CONFIG_X86_64, hence __unused_i386.
 * entry:   physical entry point
 * params:  physical address of the boot parameter block
 */
int elf_start(unsigned long machine __unused_i386, unsigned long entry, unsigned long params)
{
	int rc;
#if defined(CONFIG_X86_64)
	/* x86-64 images need the long-mode trampoline */
	if (machine == EM_X86_64) {
		rc = xstart_lm(entry, params);
		return rc;
	}
#endif
	/* Everything else starts in 32-bit protected mode */
	rc = xstart32(entry, params);
	return rc;
}

View File

@@ -0,0 +1,90 @@
/* Linker script for the main Etherboot image (i386 ELF). */
OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_ARCH(i386)
ENTRY(_text)
SECTIONS {
	. = ALIGN(16);
	/* Start address of Etherboot in the virtual address space */
	_virt_start = 0;
	_text = . ;
	/* Code that must remain uncompressed (run before decompression);
	 * 0x9090 pads gaps with NOP NOP. */
	.text.nocompress : {
		*(.text*.nocompress)
		. = ALIGN(16);
	} = 0x9090
	/* 16-bit real-mode code, bracketed by _text16/_etext16 */
	.text16 : {
		_text16 = .;
		*(.text16)
		*(.text16.*)
		_etext16 = . ;
	}
	/* Ordinary (compressible) 32-bit code */
	.text.compress : {
		*(.text)
		*(.text.*)
	} = 0x9090
	.rodata : {
		. = ALIGN(4);
		*(.rodata)
		*(.rodata.*)
	}
	/* Driver tables, bracketed by start/end symbols for iteration */
	. = ALIGN(4);
	.drivers.pci : {
		pci_drivers = . ;
		*(.drivers.pci);
		pci_drivers_end = . ;
	}
	. = ALIGN(4);
	.drivers.isa : {
		isa_drivers = . ;
		*(.drivers.isa);
		isa_drivers_end = .;
	}
	_etext = . ;
	_data = . ;
	.data : {
		*(.data)
		*(.data.*)
	}
	_edata = . ;
	_uncompressed_verbatim_end = . ;
	/* BSS that must survive relocation/decompression */
	. = ALIGN(16);
	.bss.preserve : {
		*(.bss.preserve)
		*(.bss.preserve.*)
	}
	_bss = . ;
	.bss : {
		*(.bss)
		*(.bss.*)
	}
	. = ALIGN(16);
	_ebss = .;
	_stack = . ;
	.stack : {
		_stack_start = . ;
		*(.stack)
		*(.stack.*)
		_stack_end = . ;
	}
	/* Derived sizes/offsets used by startup code (pgh = paragraphs) */
	_bss_size = _ebss - _bss;
	_stack_offset = _stack - _text ;
	_stack_offset_pgh = _stack_offset / 16 ;
	_stack_size = _stack_end - _stack_start ;
	. = ALIGN(16);
	_end = . ;
	/DISCARD/ : {
		*(.comment)
		*(.note)
	}
	/* PXE-specific symbol calculations.  The results of these are
	 * needed in romprefix.S, which is why they must be calculated
	 * here.
	 */
	_pxe_stack_size = _pxe_stack_t_size
			+ _pxe_callback_interface_size
			+ _rm_callback_interface_size
			+ _e820mangler_size + 15 ;
}

View File

@@ -0,0 +1,100 @@
/* Linker script for the Etherboot prefix (loader stub) image. */
OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_ARCH(i386)
ENTRY(_prefix_start)
SECTIONS {
	/* Prefix */
	.prefix : {
		_verbatim_start = . ;
		_prefix_start = . ;
		*(.prefix)
		. = ALIGN(16);
		_prefix_end = . ;
	} = 0x9090
	/* NOTE(review): _prefix_size is assigned again, identically,
	 * further below — harmless but redundant. */
	_prefix_size = _prefix_end - _prefix_start;
	.text.nocompress : {
		*(.prefix.udata)
	} = 0x9090
	decompress_to = . ;
	/* The compressed payload, bracketed for size calculation */
	.prefix.zdata : {
		_compressed = . ;
		*(.prefix.zdata)
		_compressed_end = . ;
	}
	_compressed_size = _compressed_end - _compressed;
	. = ALIGN(16);
	_verbatim_end = . ;
	/* Size of the core of etherboot in memory */
	_base_size = _end - _text;
	/* _prefix_size is the length of the non-core etherboot prefix */
	_prefix_size = _prefix_end - _prefix_start;
	/* _verbatim_size is the actual amount that has to be copied to base memory */
	_verbatim_size = _verbatim_end - _verbatim_start;
	/* _image_size is the amount of base memory needed to run */
	_image_size = _base_size + _prefix_size;
	/* Standard sizes rounded up to paragraphs */
	_prefix_size_pgh = (_prefix_size + 15) / 16;
	_verbatim_size_pgh = (_verbatim_size + 15) / 16;
	_image_size_pgh = (_image_size + 15) / 16 ;
	/* Standard sizes in sectors */
	_prefix_size_sct = (_prefix_size + 511) / 512;
	_verbatim_size_sct = (_verbatim_size + 511) / 512;
	_image_size_sct = (_image_size + 511) / 512;
	/* Symbol offsets and sizes for the exe prefix */
	_exe_hdr_size = 32;
	_exe_size = _verbatim_size; /* Should this be - 32 to exclude the header? */
	_exe_size_tail = (_exe_size) % 512;
	_exe_size_pages = ((_exe_size) + 511) / 512;
	_exe_bss_size = ((_image_size - _verbatim_size) + 15) / 16;
	_exe_ss_offset = (_stack_offset + _prefix_size - _exe_hdr_size + 15) / 16 ;
	/* This is where we copy the compressed image before decompression.
	 * Prepare to decompress in place.  The end mark is about 8.25 bytes long,
	 * and the worst case symbol is about 16.5 bytes long.  Therefore
	 * We need to reserve at least 25 bytes of slack here.
	 * Currently I reserve 2048 bytes of just slack to be safe :)
	 * 2048 bytes easily falls within the BSS (the default stack is 4096 bytes)
	 * so we really are decompressing in place.
	 *
	 * Hmm.  I missed a trick.  In the very worst case (no compression)
	 * the encoded data is 9/8 the size as it started out so to be completely
	 * safe I need to be 1/8 of the uncompressed code size past the end.
	 * This will still fit comfortably into our bss in any conceivable scenario.
	 */
	_compressed_copy = _edata + _prefix_size - _compressed_size +
		/* The amount to overflow _edata */
		MAX( ((_edata - _text + 7) / 8) , 2016 ) + 32;
	_assert = ASSERT( ( _compressed_copy - _prefix_size ) < _ebss , "Cannot decompress in place" ) ;
	decompress = DEFINED(decompress) ? decompress : 0;
	/DISCARD/ : {
		*(.comment)
		*(.note)
	}
	/* Symbols used by the prefixes whose addresses are inconvenient
	 * to compute, at runtime in the code.
	 */
	image_basemem_size = DEFINED(image_basemem_size)? image_basemem_size : 65536;
	image_basemem = DEFINED(image_basemem)? image_basemem : 65536;
	_prefix_real_to_prot = _real_to_prot + _prefix_size ;
	_prefix_prot_to_real = _prot_to_real + _prefix_size ;
	_prefix_image_basemem_size = image_basemem_size + _prefix_size ;
	_prefix_image_basemem = image_basemem + _prefix_size ;
	_prefix_rm_in_call = _rm_in_call + _prefix_size ;
	_prefix_in_call = _in_call + _prefix_size ;
	_prefix_rom = rom + _prefix_size ;
	_prefix_rm_etherboot_location = rm_etherboot_location + _prefix_size ;
	_prefix_stack_end = _stack_end + _prefix_size ;
}

View File

@@ -0,0 +1,377 @@
/* bootinfo: the parameter structure handed to a FreeBSD kernel. */
#define BOOTINFO_VERSION 1
#define NODEV (-1)		/* non-existent device */
#define PAGE_SHIFT 12		/* LOG2(PAGE_SIZE) */
#define PAGE_SIZE (1<<PAGE_SHIFT)	/* bytes/page */
#define PAGE_MASK (PAGE_SIZE-1)
#define N_BIOS_GEOM 8

/* Layout must match the FreeBSD kernel's struct bootinfo exactly. */
struct bootinfo {
	unsigned int bi_version;
	const unsigned char *bi_kernelname;
	struct nfs_diskless *bi_nfs_diskless;
	/* End of fields that are always present. */
#define bi_endcommon bi_n_bios_used
	unsigned int bi_n_bios_used;
	unsigned long bi_bios_geom[N_BIOS_GEOM];
	unsigned int bi_size;
	unsigned char bi_memsizes_valid;
	unsigned char bi_pad[3];
	unsigned long bi_basemem;
	unsigned long bi_extmem;
	unsigned long bi_symtab;
	unsigned long bi_esymtab;
	/* Note that these are in the FreeBSD headers but were not here... */
	unsigned long bi_kernend;	/* end of kernel space */
	unsigned long bi_envp;		/* environment */
	unsigned long bi_modulep;	/* preloaded modules */
};

static struct bootinfo bsdinfo;

#ifdef ELF_IMAGE
/* State for the FreeBSD debug-symbol loading kludge */
static Elf32_Shdr *shdr;	/* To support the FreeBSD kludge! */
static Address symtab_load;	/* where the symbol table was loaded */
static Address symstr_load;	/* where the symbol strings were loaded */
static int symtabindex;		/* section index of the symbol table */
static int symstrindex;		/* section index of the string table */
#endif

/* What kind of image the probe routines detected */
static enum {
	Unknown, Tagged, Aout, Elf, Aout_FreeBSD, Elf_FreeBSD,
} image_type = Unknown;

/* Relocation offset applied to FreeBSD entry/load addresses */
static unsigned int off;
#ifdef ELF_IMAGE
/* Detect a FreeBSD ELF kernel and prepare its relocation.
 *
 * FreeBSD kernels are linked at a high virtual address; strip the
 * top address bits off the entry point and remember the offset so
 * the program headers can be fixed up to physical addresses.
 * Also resets the debug-symbol loading state.
 */
static void elf_freebsd_probe(void)
{
	image_type = Elf;
	if ( (estate.e.elf32.e_entry & 0xf0000000) &&
	     (estate.e.elf32.e_type == ET_EXEC))
	{
		image_type = Elf_FreeBSD;
		printf("/FreeBSD");
		off = -(estate.e.elf32.e_entry & 0xff000000);
		estate.e.elf32.e_entry += off;
	}
	/* Make sure we have a null to start with... */
	shdr = 0;

	/* Clear the symbol index values... */
	symtabindex = -1;
	symstrindex = -1;

	/* ...and the load addresses of the symbols  */
	symtab_load = 0;
	symstr_load = 0;
}
/* Relocate the current program header's physical load address by the
 * offset computed at probe time (FreeBSD kernels only). */
static void elf_freebsd_fixup_segment(void)
{
	if (image_type != Elf_FreeBSD)
		return;
	estate.p.phdr32[estate.segment].p_paddr += off;
}
/* Count the bytes read even for the last block
 * as we will need to know where the last block
 * ends in order to load the symbols correctly.
 * (plus it could be useful elsewhere...)
 * Note that we need to count the actual size,
 * not just the end of the disk image size.
 */
static void elf_freebsd_find_segment_end(void)
{
	/* Advance over the zero-filled tail: memsz - filesz bytes
	 * beyond what the file actually contains. */
	unsigned long zero_fill =
		estate.p.phdr32[estate.segment].p_memsz -
		estate.p.phdr32[estate.segment].p_filesz;
	estate.curaddr += zero_fill;
}
/* Drive the FreeBSD debug-symbol loading state machine.
 *
 * offset: current offset within the in-flight data block.
 * Returns 1 if another read (shdr, symtab or strtab) has been set up
 * via estate.skip/estate.toread, 0 when symbol loading is complete
 * (or abandoned).
 */
static int elf_freebsd_debug_loader(unsigned int offset)
{
	/* No more segments to be loaded - time to start the
	 * nasty state machine to support the loading of
	 * FreeBSD debug symbols due to the fact that FreeBSD
	 * uses/exports the kernel's debug symbols in order
	 * to make much of the system work!  Amazing (arg!)
	 *
	 * We depend on the fact that for the FreeBSD kernel,
	 * there is only one section of debug symbols and that
	 * the section is after all of the loaded sections in
	 * the file.  This assumes a lot but is somewhat required
	 * to make this code not be too annoying.  (Where do you
	 * load symbols when the code has not loaded yet?)
	 * Since this function is actually just a callback from
	 * the network data transfer code, we need to be able to
	 * work with the data as it comes in.  There is no chance
	 * for doing a seek other than forwards.
	 *
	 * The process we use is to first load the section
	 * headers.  Once they are loaded (shdr != 0) we then
	 * look for where the symbol table and symbol table
	 * strings are and setup some state that we found
	 * them and fall into processing the first one (which
	 * is the symbol table) and after that has been loaded,
	 * we try the symbol strings.  Note that the order is
	 * actually required as the memory image depends on
	 * the symbol strings being loaded starting at the
	 * end of the symbol table.  The kernel assumes this
	 * layout of the image.
	 *
	 * At any point, if we get to the end of the load file
	 * or the section requested is earlier in the file than
	 * the current file pointer, we just end up falling
	 * out of this and booting the kernel without this
	 * information.
	 */

	/* Make sure that the next address is long aligned... */
	/* Assumes size of long is a power of 2... */
	estate.curaddr = (estate.curaddr + sizeof(long) - 1) & ~(sizeof(long) - 1);

	/* If we have not yet gotten the shdr loaded, try that */
	if (shdr == 0)
	{
		estate.toread = estate.e.elf32.e_shnum * estate.e.elf32.e_shentsize;
		estate.skip = estate.e.elf32.e_shoff - (estate.loc + offset);
		if (estate.toread)
		{
#if ELF_DEBUG
			printf("shdr *, size %lX, curaddr %lX\n",
			       estate.toread, estate.curaddr);
#endif

			/* Start reading at the curaddr and make that the shdr */
			shdr = (Elf32_Shdr *)phys_to_virt(estate.curaddr);

			/* Start to read... */
			return 1;
		}
	}
	else
	{
		/* We have the shdr loaded, check if we have found
		 * the indexs where the symbols are supposed to be */
		if ((symtabindex == -1) && (symstrindex == -1))
		{
			int i;

			/* Make sure that the address is page aligned... */
			/* Symbols need to start in their own page(s)... */
			estate.curaddr = (estate.curaddr + 4095) & ~4095;

			/* Need to make new indexes... */
			for (i=0; i < estate.e.elf32.e_shnum; i++)
			{
				if (shdr[i].sh_type == SHT_SYMTAB)
				{
					int j;
					for (j=0; j < estate.e.elf32.e_phnum; j++)
					{
						/* Check only for loaded sections */
						if ((estate.p.phdr32[j].p_type | 0x80) == (PT_LOAD | 0x80))
						{
							/* Only the extra symbols */
							if ((shdr[i].sh_offset >= estate.p.phdr32[j].p_offset) &&
							    ((shdr[i].sh_offset + shdr[i].sh_size) <=
							     (estate.p.phdr32[j].p_offset + estate.p.phdr32[j].p_filesz)))
							{
								/* Symtab lies inside an already-loaded
								 * segment - ignore it */
								shdr[i].sh_offset=0;
								shdr[i].sh_size=0;
								break;
							}
						}
					}
					if ((shdr[i].sh_offset != 0) && (shdr[i].sh_size != 0))
					{
						symtabindex = i;
						symstrindex = shdr[i].sh_link;
					}
				}
			}
		}

		/* Check if we have a symbol table index and have not loaded it */
		if ((symtab_load == 0) && (symtabindex >= 0))
		{
			/* No symbol table yet?  Load it first... */

			/* This happens to work out in a strange way.
			 * If we are past the point in the file already,
			 * we will skip a *large* number of bytes which
			 * ends up bringing us to the end of the file and
			 * an old (default) boot.  Less code and lets
			 * the state machine work in a cleaner way but this
			 * is a nasty side-effect trick... */
			estate.skip = shdr[symtabindex].sh_offset - (estate.loc + offset);

			/* And we need to read this many bytes... */
			estate.toread = shdr[symtabindex].sh_size;

			if (estate.toread)
			{
#if ELF_DEBUG
				printf("db sym, size %lX, curaddr %lX\n",
				       estate.toread, estate.curaddr);
#endif
				/* Save where we are loading this... */
				symtab_load = phys_to_virt(estate.curaddr);

				/* The kernel expects the size as a leading long */
				*((long *)phys_to_virt(estate.curaddr)) = estate.toread;
				estate.curaddr += sizeof(long);

				/* Start to read... */
				return 1;
			}
		}
		else if ((symstr_load == 0) && (symstrindex >= 0))
		{
			/* We have already loaded the symbol table, so
			 * now on to the symbol strings... */

			/* Same nasty trick as above... */
			estate.skip = shdr[symstrindex].sh_offset - (estate.loc + offset);

			/* And we need to read this many bytes... */
			estate.toread = shdr[symstrindex].sh_size;

			if (estate.toread)
			{
#if ELF_DEBUG
				printf("db str, size %lX, curaddr %lX\n",
				       estate.toread, estate.curaddr);
#endif
				/* Save where we are loading this... */
				symstr_load = phys_to_virt(estate.curaddr);

				/* Again, size goes first as a long */
				*((long *)phys_to_virt(estate.curaddr)) = estate.toread;
				estate.curaddr += sizeof(long);

				/* Start to read... */
				return 1;
			}
		}
	}
	/* all done */
	return 0;
}
/* Launch a FreeBSD ELF kernel: fill in struct bootinfo, build the
 * preloaded-module metadata describing the debug symbols (if loaded),
 * then jump to the kernel entry point.  Does not return for FreeBSD
 * images; does nothing for any other image type.
 */
static void elf_freebsd_boot(unsigned long entry)
{
	if (image_type != Elf_FreeBSD)
		return;

	memset(&bsdinfo, 0, sizeof(bsdinfo));
	bsdinfo.bi_basemem = meminfo.basememsize;
	bsdinfo.bi_extmem = meminfo.memsize;
	bsdinfo.bi_memsizes_valid = 1;
	bsdinfo.bi_version = BOOTINFO_VERSION;
	bsdinfo.bi_kernelname = virt_to_phys(KERNEL_BUF);
	bsdinfo.bi_nfs_diskless = NULL;
	bsdinfo.bi_size = sizeof(bsdinfo);
#define RB_BOOTINFO     0x80000000      /* have `struct bootinfo *' arg */
	if(freebsd_kernel_env[0] != '\0'){
		freebsd_howto |= RB_BOOTINFO;
		bsdinfo.bi_envp = (unsigned long)freebsd_kernel_env;
	}

	/* Check if we have symbols loaded, and if so,
	 * made the meta_data needed to pass those to
	 * the kernel. */
	if ((symtab_load !=0) && (symstr_load != 0))
	{
		unsigned long *t;

		bsdinfo.bi_symtab = symtab_load;

		/* End of symbols (long aligned...) */
		/* Assumes size of long is a power of 2... */
		bsdinfo.bi_esymtab = (symstr_load +
				      sizeof(long) +
				      *((long *)symstr_load) +
				      sizeof(long) - 1) & ~(sizeof(long) - 1);

		/* Where we will build the meta data... */
		t = phys_to_virt(bsdinfo.bi_esymtab);

#if ELF_DEBUG
		printf("Metadata at %lX\n",t);
#endif

		/* Set up the pointer to the memory... */
		bsdinfo.bi_modulep = virt_to_phys(t);

		/* The metadata structure is an array of 32-bit
		 * words where we store some information about the
		 * system.  This is critical, as FreeBSD now looks
		 * only for the metadata for the extended symbol
		 * information rather than in the bootinfo.
		 */
		/* First, do the kernel name and the kernel type */
		/* Note that this assumed x86 byte order... */

		/* 'kernel\0\0' */
		*t++=MODINFO_NAME; *t++= 7; *t++=0x6E72656B; *t++=0x00006C65;

		/* 'elf kernel\0\0' */
		*t++=MODINFO_TYPE; *t++=11; *t++=0x20666C65; *t++=0x6E72656B; *t++ = 0x00006C65;

		/* Now the symbol start/end - note that they are
		 * here in local/physical address - the Kernel
		 * boot process will relocate the addresses. */
		*t++=MODINFOMD_SSYM | MODINFO_METADATA; *t++=sizeof(*t); *t++=bsdinfo.bi_symtab;
		*t++=MODINFOMD_ESYM | MODINFO_METADATA; *t++=sizeof(*t); *t++=bsdinfo.bi_esymtab;

		*t++=MODINFO_END; *t++=0; /* end of metadata */

		/* Since we have symbols we need to make
		 * sure that the kernel knows its own end
		 * of memory...  It is not _end but after
		 * the symbols and the metadata... */
		bsdinfo.bi_kernend = virt_to_phys(t);

		/* Signal locore.s that we have a valid bootinfo
		 * structure that was completely filled in. */
		freebsd_howto |= 0x80000000;
	}

	xstart32(entry, freebsd_howto, NODEV, 0, 0, 0,
		 virt_to_phys(&bsdinfo), 0, 0, 0);
	longjmp(restart_etherboot, -2);
}
#endif
#ifdef AOUT_IMAGE
/* Detect a FreeBSD a.out kernel (machine id 0 in a_midmag) and strip
 * the high bits from its entry point, recording the offset in `off`. */
static void aout_freebsd_probe(void)
{
	unsigned int mid = (astate.head.a_midmag >> 16) & 0xffff;

	image_type = Aout;
	if (mid != 0)
		return;
	/* Some other a.out variants have a different
	 * value, and use other alignments (e.g. 1K),
	 * not the 4K used by FreeBSD.  */
	image_type = Aout_FreeBSD;
	printf("/FreeBSD");
	off = -(astate.head.a_entry & 0xff000000);
	astate.head.a_entry += off;
}
/* Launch a FreeBSD a.out kernel: fill in struct bootinfo and jump to
 * the entry point.  Does not return for FreeBSD images; does nothing
 * for any other image type.
 */
static void aout_freebsd_boot(void)
{
	if (image_type == Aout_FreeBSD) {
		memset(&bsdinfo, 0, sizeof(bsdinfo));
		bsdinfo.bi_basemem = meminfo.basememsize;
		bsdinfo.bi_extmem = meminfo.memsize;
		bsdinfo.bi_memsizes_valid = 1;
		bsdinfo.bi_version = BOOTINFO_VERSION;
		bsdinfo.bi_kernelname = virt_to_phys(KERNEL_BUF);
		bsdinfo.bi_nfs_diskless = NULL;
		bsdinfo.bi_size = sizeof(bsdinfo);
		xstart32(astate.head.a_entry, freebsd_howto, NODEV, 0, 0, 0,
			 virt_to_phys(&bsdinfo), 0, 0, 0);
		/* If the kernel returns, restart Etherboot */
		longjmp(restart_etherboot, -2);
	}
}
#endif

View File

@@ -0,0 +1,35 @@
#include "etherboot.h"
#include "callbacks.h"
#include <stdarg.h>
/* Architecture-specific hook run when Etherboot's main() starts.
 * On PCBIOS builds, releases the base memory the prefix occupied.
 */
void arch_main ( in_call_data_t *data __unused, va_list params __unused )
{
#ifdef PCBIOS
	/* Deallocate base memory used for the prefix, if applicable
	 */
	forget_prefix_base_memory();
#endif
}
void arch_relocated_from (unsigned long old_addr )
{
#ifdef PCBIOS
/* Deallocate base memory used for the Etherboot runtime,
* if applicable
*/
forget_runtime_base_memory( old_addr );
#endif
}
/* Architecture-specific hook run when Etherboot exits.
 * On PCBIOS builds, releases the real-mode stack allocation.
 */
void arch_on_exit ( int exit_status __unused )
{
#ifdef PCBIOS
	/* Deallocate the real-mode stack now.  We will reallocate
	 * the stack if are going to use it after this point.
	 */
	forget_real_mode_stack();
#endif
}

View File

@@ -0,0 +1,191 @@
/* A couple of routines to implement a low-overhead timer for drivers */
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2, or (at
* your option) any later version.
*/
#include "etherboot.h"
#include "timer.h"
#include "latch.h"
/* Program PIT channel 2 to count down `ticks` once (mode 0).
 * Completion is detected by polling __timer2_running().
 * NOTE(review): "PPC" in the comments below appears to refer to the
 * PC's 8255 PPI port B / PIT channel 2 pair, not PowerPC.
 */
void __load_timer2(unsigned int ticks)
{
	/*
	 * Now let's take care of PPC channel 2
	 *
	 * Set the Gate high, program PPC channel 2 for mode 0,
	 * (interrupt on terminal count mode), binary count,
	 * load 5 * LATCH count, (LSB and MSB) to begin countdown.
	 *
	 * Note some implementations have a bug where the high bits byte
	 * of channel 2 is ignored.
	 */
	/* Set up the timer gate, turn off the speaker */
	/* Set the Gate high, disable speaker */
	outb((inb(PPC_PORTB) & ~PPCB_SPKR) | PPCB_T2GATE, PPC_PORTB);
	/* binary, mode 0, LSB/MSB, Ch 2 */
	outb(TIMER2_SEL|WORD_ACCESS|MODE0|BINARY_COUNT, TIMER_MODE_PORT);
	/* LSB of ticks */
	outb(ticks & 0xFF, TIMER2_PORT);
	/* MSB of ticks */
	outb(ticks >> 8, TIMER2_PORT);
}
/* Return non-zero while PIT channel 2 is still counting down
 * (OUT2 goes high once the terminal count is reached). */
static int __timer2_running(void)
{
	int out2_high = inb(PPC_PORTB) & PPCB_T2OUT;
	return !out2_high;
}
#if !defined(CONFIG_TSC_CURRTICKS)
/* Nothing to calibrate when driving the PIT directly. */
void setup_timers(void)
{
}
/* Public wrapper: start a PIT channel-2 countdown of `ticks`. */
void load_timer2(unsigned int ticks)
{
	__load_timer2(ticks);
}
/* Public wrapper: is the PIT channel-2 countdown still in progress? */
int timer2_running(void)
{
	int running = __timer2_running();
	return running;
}
/* Busy-wait for approximately nsecs nanoseconds using PIT channel 2. */
void ndelay(unsigned int nsecs)
{
	unsigned int pit_ticks = (nsecs * CLOCK_TICK_RATE) / 1000000000;
	waiton_timer2(pit_ticks);
}
/* Busy-wait for approximately usecs microseconds using PIT channel 2. */
void udelay(unsigned int usecs)
{
	unsigned int pit_ticks = (usecs * TICKS_PER_MS) / 1000;
	waiton_timer2(pit_ticks);
}
#endif /* !defined(CONFIG_TSC_CURRTICKS) */
#if defined(CONFIG_TSC_CURRTICKS)
#define rdtsc(low,high) \
__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
#define rdtscll(val) \
__asm__ __volatile__ ("rdtsc" : "=A" (val))
/* Number of clock ticks to time with the rtc */
#define LATCH 0xFF
#define LATCHES_PER_SEC ((CLOCK_TICK_RATE + (LATCH/2))/LATCH)
#define TICKS_PER_LATCH ((LATCHES_PER_SEC + (TICKS_PER_SEC/2))/TICKS_PER_SEC)
/* Busy-wait for exactly one LATCH-tick PIT channel-2 countdown. */
static void sleep_latch(void)
{
	__load_timer2(LATCH);
	for (;;) {
		if (!__timer2_running())
			break;
	}
}
/* ------ Calibrate the TSC -------
 * Time how long it takes to execute a loop that runs in known time.
 * And find the conversion needed to get to CLOCK_TICK_RATE
 */
static unsigned long long calibrate_tsc(void)
{
	unsigned long startlow, starthigh;
	unsigned long endlow, endhigh;

	/* TSC delta across one known-duration PIT countdown */
	rdtsc(startlow,starthigh);
	sleep_latch();
	rdtsc(endlow,endhigh);

	/* 64-bit subtract - gcc just messes up with long longs */
	__asm__("subl %2,%0\n\t"
		"sbbl %3,%1"
		:"=a" (endlow), "=d" (endhigh)
		:"g" (startlow), "g" (starthigh),
		 "0" (endlow), "1" (endhigh));

	/* Error: ECPUTOOFAST — delta must fit in 32 bits */
	if (endhigh)
		goto bad_ctc;

	/* Scale from one LATCH interval up to one system tick */
	endlow *= TICKS_PER_LATCH;
	return endlow;

	/*
	 * The CTC wasn't reliable: we got a hit on the very first read,
	 * or the CPU was so fast/slow that the quotient wouldn't fit in
	 * 32 bits..
	 */
bad_ctc:
	printf("bad_ctc\n");
	return 0;
}
/* TSC clocks per system tick, established once by setup_timers(). */
static unsigned long clocks_per_tick;

/* Calibrate the TSC on first call; later calls are no-ops. */
void setup_timers(void)
{
	if (clocks_per_tick)
		return;
	clocks_per_tick = calibrate_tsc();
	/* Display the CPU Mhz to easily test if the calibration was bad */
	printf("CPU %ld Mhz\n", (clocks_per_tick/1000 * TICKS_PER_SEC)/1000);
}
/* Return the current time in system ticks, derived from the TSC.
 * NOTE(review): the divl raises a divide fault if the 64-bit quotient
 * does not fit in 32 bits (very long uptime or uncalibrated
 * clocks_per_tick == 0) — presumably acceptable for a boot loader;
 * confirm if reused elsewhere.
 */
unsigned long currticks(void)
{
	unsigned long clocks_high, clocks_low;
	unsigned long currticks;

	/* Read the Time Stamp Counter */
	rdtsc(clocks_low, clocks_high);

	/* currticks = clocks / clocks_per_tick; */
	__asm__("divl %1"
		:"=a" (currticks)
		:"r" (clocks_per_tick), "0" (clocks_low), "d" (clocks_high));

	return currticks;
}
/* TSC value at which the current software timer expires. */
static unsigned long long timer_timeout;

/* Return non-zero while the TSC has not yet reached the deadline. */
static int __timer_running(void)
{
	unsigned long long now;
	rdtscll(now);
	return now < timer_timeout;
}
void udelay(unsigned int usecs)
{
unsigned long long now;
rdtscll(now);
timer_timeout = now + usecs * ((clocks_per_tick * TICKS_PER_SEC)/(1000*1000));
while(__timer_running());
}
/* Delay for at least nsecs nanoseconds by spinning on the TSC. */
void ndelay(unsigned int nsecs)
{
unsigned long long now;
rdtscll(now);
/* Scale by TSC-clocks-per-microsecond and divide the product by
 * 1000, instead of pre-computing clocks-per-nanosecond: the latter
 * truncates to zero for any CPU slower than ~1GHz, which made
 * ndelay() return immediately without delaying at all.
 */
timer_timeout = now + ((unsigned long long)nsecs *
((clocks_per_tick * TICKS_PER_SEC)/(1000*1000)))/1000;
while(__timer_running());
}
/* Emulate the 8254 timer 2 countdown using the TSC: arrange for
 * timer2_running() to return true for timer2_ticks periods of the
 * CTC clock (CLOCK_TICK_RATE Hz).
 */
void load_timer2(unsigned int timer2_ticks)
{
unsigned long long now;
unsigned long clocks;
rdtscll(now);
/* (clocks_per_tick * TICKS_PER_SEC)/CLOCK_TICK_RATE is the number
 * of TSC clocks per CTC tick.
 */
clocks = timer2_ticks * ((clocks_per_tick * TICKS_PER_SEC)/CLOCK_TICK_RATE);
timer_timeout = now + clocks;
}
/* Public wrapper: report whether the emulated timer 2 countdown set up
 * by load_timer2() is still in progress.
 */
int timer2_running(void)
{
if (__timer_running())
return 1;
return 0;
}
#endif /* RTC_CURRTICKS */

305
src/arch/i386/core/init.S Normal file
View File

@@ -0,0 +1,305 @@
#include "callbacks.h"
.equ CR0_PE, 1
.text
.arch i386
.section ".prefix", "ax", @progbits
#undef CODE16
#if defined(PCBIOS)
#define CODE16
#endif
/* We have two entry points: "conventional" (at the start of the file)
* and "callback" (at _entry, 2 bytes in). The "callback" entry
* should be used if the caller wishes to provide a specific opcode.
* It is equivalent to a call to in_call. Using the "conventional"
* entry point is equivalent to using the "callback" entry point with
* an opcode of EB_OPCODE_MAIN.
*
* Both entry points can be called in either 16-bit real or 32-bit
* protected mode with flat physical addresses. We detect which mode
* the processor is in and call either in_call or rm_in_call as
* appropriate. Note that the mode detection code must therefore be
* capable of doing the same thing in either mode, even though the
* machine code instructions will be interpreted differently.
*
* The decompressor will be invoked if necessary to decompress
* Etherboot before attempting to jump to it.
*/
/******************************************************************************
* Entry points and mode detection code
******************************************************************************
*/
.code32
/* "Conventional" entry point: caller provides no opcode */
.globl _start
_start:
/* Set flag to indicate conventional entry point used */
pushl $0 /* "pushw $0" in 16-bit code */
/* Fall through to "callback" entry point */
/* "Callback" entry point */
.globl _entry
_entry:
#ifdef CODE16
/* CPU mode detection code */
/* This sequence is a polyglot: each push is decoded with the opposite
 * operand size in the other mode, but every push is matched by the
 * corresponding pop, so the stack stays balanced in both 16-bit and
 * 32-bit mode while CR0.PE is tested.
 */
pushl %eax /* "pushw %ax" in 16-bit code */
pushw %ax /* "pushl %eax" in 16-bit code */
movl %cr0, %eax /* Test protected mode bit */
testb $CR0_PE, %al
popw %ax /* "popl %eax" in 16-bit code */
popl %eax /* "popw %eax" in 16-bit code */
jz rmode
#endif /* CODE16 */
/******************************************************************************
 * Entered in protected mode
 ******************************************************************************
 */
.code32
pmode:
cmpl $0, 0(%esp) /* Conventional entry point used? */
jne 1f
/* Entered via conventional entry point: set up stack */
/* Rewrite the stack from [flag][return addr] to [return addr][opcode]
 * so it matches what the "callback" entry point receives.
 */
xchgl %eax, 4(%esp) /* %eax = return addr, store %eax */
movl %eax, 0(%esp) /* 0(%esp) = return address */
movl $(EB_OPCODE_MAIN|EB_USE_INTERNAL_STACK|EB_SKIP_OPCODE), %eax
xchgl %eax, 4(%esp) /* 4(%esp) = opcode, restore %eax */
1:
/* Run decompressor if necessary */
/* "decompress" presumably resolves to 0 when no decompressor is
 * linked in; the zero test skips the call in that case.
 */
pushl %eax
movl $decompress, %eax
testl %eax, %eax
jz 1f
call decompress
1: popl %eax
/* Make in_call to Etherboot */
jmp _prefix_in_call
/******************************************************************************
* Entered in real mode
******************************************************************************
*/
#ifdef CODE16
.code16
rmode:
/* Stack on entry: [flag-or-opcode][return address].  After the two
 * pushes below, 6(%bp) addresses the flag/opcode slot.
 */
pushw %ax /* Padding */
pushw %bp
movw %sp, %bp
cmpw $0, 6(%bp) /* Conventional entry point used? */
jne 1f
/* Entered via conventional entry point: set up stack */
pushw %ax
movw 6(%bp), %ax
movw %ax, 2(%bp) /* Move return address down */
movl $(EB_OPCODE_MAIN|EB_USE_INTERNAL_STACK|EB_SKIP_OPCODE), 4(%bp)
popw %ax
popw %bp
jmp 2f
1: /* Entered via callback entry point: do nothing */
popw %bp
popw %ax
2:
/* Preserve registers */
pushw %ds
pushl %eax
/* Run decompressor if necessary.  Decompressor is 32-bit
 * code, so we must switch to pmode first.  Save and restore
 * GDT over transition to pmode.
 */
movl $decompress, %eax
testl %eax, %eax
jz 1f
pushw %ds
pushw %es
pushw %fs
pushw %gs
subw $8, %sp /* Room for the 6-byte descriptor stored by sgdt */
pushw %bp
movw %sp, %bp
sgdt 2(%bp)
pushw %ss /* Store params for _prot_to_real */
pushw %cs
call _prefix_real_to_prot
.code32
call decompress
call _prefix_prot_to_real
.code16
popw %ax /* skip */
popw %ax /* skip */
lgdt 2(%bp)
popw %bp
addw $8, %sp
popw %gs
popw %fs
popw %es
popw %ds
1:
/* Set rm_etherboot_location */
/* Physical address of the runtime image = (%cs << 4) + prefix size */
xorl %eax, %eax
movw %cs, %ax
movw %ax, %ds
shll $4, %eax
addl $_prefix_size, %eax
movl %eax, _prefix_rm_etherboot_location
/* Restore registers */
popl %eax
popw %ds
/* Make real-mode in_call to Etherboot */
jmp _prefix_rm_in_call
#endif /* CODE16 */
/******************************************************************************
* Utility routines that can be called by the "prefix".
******************************************************************************
*/
#ifdef CODE16
/* Prelocate code: either to an area at the top of free base memory.
* Switch stacks to use the stack within the resulting
* Etherboot image.
*
* On entry, %cs:0000 must be the start of the prefix: this is used to
* locate the code to be copied.
*
* This routine takes a single word parameter: the number of bytes to
* be transferred from the old stack to the new stack (excluding the
* return address and this parameter itself, which will always be
* copied). If this value is negative, the stacks will not be
* switched.
*
* Control will "return" to the appropriate point in the relocated
* image.
*/
/* Stack frame layout inside prelocate, as offsets from %sp once all
 * registers below have been preserved:
 *   0..19   saved %es, %ds and the pushaw block (PRELOC_PRESERVE)
 *   20..23  return address: pad word + original near return address,
 *           rewritten below into a far offset:segment pair for lret
 *   24..25  word parameter: bytes of old stack to copy
 */
#define PRELOC_PRESERVE ( 20 )
#define PRELOC_OFFSET_RETADDR ( PRELOC_PRESERVE )
#define PRELOC_OFFSET_RETADDR_E ( PRELOC_OFFSET_RETADDR + 4 )
#define PRELOC_OFFSET_COPY ( PRELOC_OFFSET_RETADDR_E )
#define PRELOC_OFFSET_COPY_E ( PRELOC_OFFSET_COPY + 2 )
#define PRELOC_ALWAYS_COPY ( PRELOC_OFFSET_COPY_E )
.code16
.globl prelocate
prelocate:
/* Pad to allow for expansion of return address */
pushw %ax
/* Preserve registers */
pushaw
pushw %ds
pushw %es
/* Claim an area of base memory from the BIOS and put the
 * payload there.
 */
movw $0x40, %bx
movw %bx, %es
movw %es:(0x13), %bx /* FBMS in kb to %ax */
shlw $6, %bx /* ... in paragraphs */
subw $_image_size_pgh, %bx /* Subtract space for image */
shrw $6, %bx /* Round down to nearest kb */
movw %bx, %es:(0x13) /* ...and claim memory from BIOS */
shlw $6, %bx
/* At this point %bx contains the segment address for the
 * start of the image (image = prefix + runtime).
 */
/* Switch stacks */
movw %ss, %ax
movw %ax, %ds
movw %sp, %si /* %ds:si = current %ss:sp */
movw %ss:PRELOC_OFFSET_COPY(%si), %cx
testw %cx, %cx
js 1f /* Negative parameter: do not switch stacks */
leaw _stack_offset_pgh(%bx), %ax /* %ax = new %ss */
movw %ax, %es
movw $_stack_size, %di
addw $PRELOC_ALWAYS_COPY, %cx
subw %cx, %di /* %es:di = new %ss:sp */
movw %ax, %ss /* Set new %ss:sp */
movw %di, %sp
cld
rep movsb /* Copy stack contents */
1:
/* Do the image copy backwards, since if there's overlap with
 * a forward copy then it means we're going to get trashed
 * during the copy anyway...
 */
pushal /* Preserve 32-bit registers */
movw %bx, %es /* Destination base for copy */
pushw %cs
popw %ds /* Source base for copy */
movl $_verbatim_size-1, %ecx /* Offset to last byte */
movl %ecx, %esi
movl %ecx, %edi
incl %ecx /* Length */
std /* Backwards copy of binary */
ADDR32 rep movsb
cld
popal /* Restore 32-bit registers */
/* Store (%bx<<4) as image_basemem to be picked up by
 * basemem.c.  Also store image_size, since there's no other
 * way that we can later know how much memory we allocated.
 * (_zfile_size is unavailable when rt2 is linked).
 */
pushl %eax
xorl %eax, %eax
movw %bx, %ax
shll $4, %eax
movl %eax, %es:_prefix_image_basemem
movl $_image_size, %es:_prefix_image_basemem_size
popl %eax
/* Expand original near return address into far return to new
 * code location.
 */
movw %sp, %bp
xchgw %bx, (PRELOC_OFFSET_RETADDR+2)(%bp)
movw %bx, (PRELOC_OFFSET_RETADDR+0)(%bp)
/* Restore registers and return */
popw %es
popw %ds
popaw
lret /* Jump to relocated code */
/* Utility routine to free base memory allocated by prelocate.
* Ensure that said memory is not in use (e.g. for the CPU
* stack) before calling this routine.
*/
.globl deprelocate
deprelocate:
/* Return the base memory claimed by prelocate to the BIOS by
 * raising the free-base-memory count at 40:13 again.
 */
pushw %ax
pushw %es
movw $0x40, %ax
movw %ax, %es
movw %es:(0x13), %ax /* FBMS in kb to %ax */
shlw $6, %ax /* ... in paragraphs */
addw $_image_size_pgh+0x40-1, %ax /* Add space for image and... */
shrw $6, %ax /* ...round up to nearest kb */
movw %ax, %es:(0x13) /* Give memory back to BIOS */
popw %es
popw %ax
ret
#endif /* CODE16 */

View File

@@ -0,0 +1,143 @@
/* Multiboot support
*
* 2003-07-02 mmap fix and header probe by SONE Takeshi
*/
/* Descriptor for one boot module passed to the loaded kernel */
struct multiboot_mods {
unsigned mod_start; /* physical start address of module */
unsigned mod_end; /* physical end address of module */
unsigned char *string; /* module command line / name */
unsigned reserved;
};
/* One entry of the e820-style memory map handed to the kernel */
struct multiboot_mmap {
unsigned int size; /* size of this entry, excluding this field */
unsigned int base_addr_low;
unsigned int base_addr_high;
unsigned int length_low;
unsigned int length_high;
unsigned int type; /* e820 region type */
};
/* The structure of a Multiboot 0.6 parameter block. */
struct multiboot_info {
unsigned int flags; /* which of the fields below are valid */
#define MULTIBOOT_MEM_VALID 0x01
#define MULTIBOOT_BOOT_DEV_VALID 0x02
#define MULTIBOOT_CMDLINE_VALID 0x04
#define MULTIBOOT_MODS_VALID 0x08
#define MULTIBOOT_AOUT_SYMS_VALID 0x10
#define MULTIBOOT_ELF_SYMS_VALID 0x20
#define MULTIBOOT_MMAP_VALID 0x40
unsigned int memlower; /* lower memory size (from meminfo.basememsize) */
unsigned int memupper; /* upper memory size (from meminfo.memsize) */
unsigned int bootdev;
unsigned int cmdline; /* physical address of the command line */
unsigned mods_count;
struct multiboot_mods *mods_addr;
unsigned syms_num;
unsigned syms_size;
unsigned syms_addr;
unsigned syms_shndx;
unsigned mmap_length; /* length in bytes of the memory map */
unsigned mmap_addr; /* physical address of the memory map */
/* The structure actually ends here, so I might as well put
 * the ugly e820 parameters here...
 */
struct multiboot_mmap mmap[E820MAX];
};
/* Multiboot image header (minimal part) */
struct multiboot_header {
unsigned int magic; /* must be MULTIBOOT_HEADER_MAGIC */
#define MULTIBOOT_HEADER_MAGIC 0x1BADB002
unsigned int flags;
unsigned int checksum; /* magic + flags + checksum must equal 0 */
};
static struct multiboot_header *mbheader;
static struct multiboot_info mbinfo;
static void multiboot_probe(unsigned char *data, int len)
{
int offset;
struct multiboot_header *h;
/* Multiboot spec requires the header to be in first 8KB of the image */
if (len > 8192)
len = 8192;
for (offset = 0; offset < len; offset += 4) {
h = (struct multiboot_header *) (data + offset);
if (h->magic == MULTIBOOT_HEADER_MAGIC
&& h->magic + h->flags + h->checksum == 0) {
printf("/Multiboot");
mbheader = h;
return;
}
}
mbheader = 0;
}
/* Boot a previously-probed Multiboot image: build the command line and
 * the multiboot_info block, then jump to the image entry point with the
 * Multiboot magic in %eax and the info pointer in %ebx.  If the kernel
 * returns (via the -retaddr address), restart Etherboot via longjmp.
 * Does nothing if multiboot_probe() found no header.
 */
static inline void multiboot_boot(unsigned long entry)
{
unsigned char cmdline[512], *c;
int i;
if (!mbheader)
return;
/* Etherboot limits the command line to the kernel name,
 * default parameters and user prompted parameters.  All of
 * them are shorter than 256 bytes.  As the kernel name and
 * the default parameters come from the same BOOTP/DHCP entry
 * (or if they don't, the parameters are empty), only two
 * strings of the maximum size are possible.  Note this buffer
 * can overrun if a stupid file name is chosen.  Oh well. */
c = cmdline;
/* Copy the kernel name, backslash-escaping spaces, backslashes
 * and double quotes */
for (i = 0; KERNEL_BUF[i] != 0; i++) {
switch (KERNEL_BUF[i]) {
case ' ':
case '\\':
case '"':
*c++ = '\\';
break;
default:
break;
}
*c++ = KERNEL_BUF[i];
}
/* Append " -retaddr <addr>": the physical address the OS can jump
 * back to in order to return control to Etherboot */
(void)sprintf(c, " -retaddr %#lX", virt_to_phys(xend32));
mbinfo.flags = MULTIBOOT_MMAP_VALID | MULTIBOOT_MEM_VALID |MULTIBOOT_CMDLINE_VALID;
mbinfo.memlower = meminfo.basememsize;
mbinfo.memupper = meminfo.memsize;
mbinfo.bootdev = 0; /* not booted from disk */
mbinfo.cmdline = virt_to_phys(cmdline);
/* Translate the e820 map gathered at startup into Multiboot format */
for (i = 0; i < (int) meminfo.map_count; i++) {
mbinfo.mmap[i].size = sizeof(struct multiboot_mmap)
- sizeof(unsigned int);
mbinfo.mmap[i].base_addr_low =
(unsigned int) meminfo.map[i].addr;
mbinfo.mmap[i].base_addr_high =
(unsigned int) (meminfo.map[i].addr >> 32);
mbinfo.mmap[i].length_low =
(unsigned int) meminfo.map[i].size;
mbinfo.mmap[i].length_high =
(unsigned int) (meminfo.map[i].size >> 32);
mbinfo.mmap[i].type = meminfo.map[i].type;
}
mbinfo.mmap_length = meminfo.map_count * sizeof(struct multiboot_mmap);
mbinfo.mmap_addr = virt_to_phys(mbinfo.mmap);
/* The Multiboot 0.6 spec requires all segment registers to be
 * loaded with an unrestricted, writeable segment.
 * xstart32 does this for us.
 */
/* Start the kernel, passing the Multiboot information record
 * and the magic number. */
os_regs.eax = 0x2BADB002;
os_regs.ebx = virt_to_phys(&mbinfo);
xstart32(entry);
longjmp(restart_etherboot, -2);
}

352
src/arch/i386/core/pci_io.c Normal file
View File

@@ -0,0 +1,352 @@
/*
** Support for NE2000 PCI clones added David Monro June 1997
** Generalised to other NICs by Ken Yap July 1997
**
** Most of this is taken from:
**
** /usr/src/linux/drivers/pci/pci.c
** /usr/src/linux/include/linux/pci.h
** /usr/src/linux/arch/i386/bios32.c
** /usr/src/linux/include/linux/bios32.h
** /usr/src/linux/drivers/net/ne.c
*/
#ifdef CONFIG_PCI
#include "etherboot.h"
#include "pci.h"
#ifdef CONFIG_PCI_DIRECT
#define PCIBIOS_SUCCESSFUL 0x00
#define DEBUG 0
/*
 * Functions for accessing PCI configuration space with type 1 accesses
 */
/* Type-1 access: write the bus/device/register address to port 0xCF8,
 * then read or write the data through port 0xCFC.
 */
#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (bus << 16) | (device_fn << 8) | (where & ~3))
/* Read one byte of config space; the low two bits of 'where' select
 * the byte lane within the 32-bit data port. */
int pcibios_read_config_byte(unsigned int bus, unsigned int device_fn,
unsigned int where, uint8_t *value)
{
outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
*value = inb(0xCFC + (where&3));
return PCIBIOS_SUCCESSFUL;
}
/* Read one 16-bit word of config space ('where' should be 2-aligned) */
int pcibios_read_config_word (unsigned int bus,
unsigned int device_fn, unsigned int where, uint16_t *value)
{
outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
*value = inw(0xCFC + (where&2));
return PCIBIOS_SUCCESSFUL;
}
/* Read one 32-bit dword of config space ('where' should be 4-aligned) */
int pcibios_read_config_dword (unsigned int bus, unsigned int device_fn,
unsigned int where, uint32_t *value)
{
outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
*value = inl(0xCFC);
return PCIBIOS_SUCCESSFUL;
}
/* Write one byte of config space */
int pcibios_write_config_byte (unsigned int bus, unsigned int device_fn,
unsigned int where, uint8_t value)
{
outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
outb(value, 0xCFC + (where&3));
return PCIBIOS_SUCCESSFUL;
}
/* Write one 16-bit word of config space */
int pcibios_write_config_word (unsigned int bus,
unsigned int device_fn, unsigned int where, uint16_t value)
{
outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
outw(value, 0xCFC + (where&2));
return PCIBIOS_SUCCESSFUL;
}
/* Write one 32-bit dword of config space */
int pcibios_write_config_dword (unsigned int bus, unsigned int device_fn, unsigned int where, uint32_t value)
{
outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
outl(value, 0xCFC);
return PCIBIOS_SUCCESSFUL;
}
#undef CONFIG_CMD
#else /* CONFIG_PCI_DIRECT not defined */
#if !defined(PCBIOS)
#error "The pcibios can only be used when the PCBIOS support is compiled in"
#endif
/* Macro for calling the BIOS32 service. This replaces the old
* bios32_call function. Use in a statement such as
* __asm__ ( BIOS32_CALL,
* : <output registers>
* : "S" ( bios32_entry ), <other input registers> );
*/
#define BIOS32_CALL "call _virt_to_phys\n\t" \
"pushl %%cs\n\t" \
"call *%%esi\n\t" \
"cli\n\t" \
"cld\n\t" \
"call _phys_to_virt\n\t"
/* Physical entry point of the BIOS32 Service Directory (0 = unknown) */
static unsigned long bios32_entry;
/* Physical entry point of the PCI BIOS, found via the directory */
static unsigned long pcibios_entry;
/* Look up a service in the BIOS32 Service Directory.
 * Returns the physical entry address of the service, or 0 if the
 * service is absent or the directory returned an error code.
 */
static unsigned long bios32_service(unsigned long service)
{
unsigned char return_code; /* %al */
unsigned long address; /* %ebx */
unsigned long length; /* %ecx */
unsigned long entry; /* %edx */
__asm__(BIOS32_CALL
: "=a" (return_code),
"=b" (address),
"=c" (length),
"=d" (entry)
: "0" (service),
"1" (0),
"S" (bios32_entry));
switch (return_code) {
case 0:
/* Service entry = base address + entry offset */
return address + entry;
case 0x80: /* Not present */
printf("bios32_service(%d) : not present\n", service);
return 0;
default: /* Shouldn't happen */
printf("bios32_service(%d) : returned %#X????\n",
service, return_code);
return 0;
}
}
/* PCI config space access via the PCI BIOS (INT 1Ah interface, entered
 * by far call through pcibios_entry).  The BIOS returns its status
 * code in %ah; each routine returns that status (0 on success).  On
 * the carry-clear (success) path %ah is explicitly zeroed first.
 */
int pcibios_read_config_byte(unsigned int bus,
unsigned int device_fn, unsigned int where, uint8_t *value)
{
unsigned long ret;
unsigned long bx = (bus << 8) | device_fn;
__asm__(BIOS32_CALL
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
: "=c" (*value),
"=a" (ret)
: "1" (PCIBIOS_READ_CONFIG_BYTE),
"b" (bx),
"D" ((long) where),
"S" (pcibios_entry));
return (int) (ret & 0xff00) >> 8;
}
int pcibios_read_config_word(unsigned int bus,
unsigned int device_fn, unsigned int where, uint16_t *value)
{
unsigned long ret;
unsigned long bx = (bus << 8) | device_fn;
__asm__(BIOS32_CALL
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
: "=c" (*value),
"=a" (ret)
: "1" (PCIBIOS_READ_CONFIG_WORD),
"b" (bx),
"D" ((long) where),
"S" (pcibios_entry));
return (int) (ret & 0xff00) >> 8;
}
int pcibios_read_config_dword(unsigned int bus,
unsigned int device_fn, unsigned int where, uint32_t *value)
{
unsigned long ret;
unsigned long bx = (bus << 8) | device_fn;
__asm__(BIOS32_CALL
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
: "=c" (*value),
"=a" (ret)
: "1" (PCIBIOS_READ_CONFIG_DWORD),
"b" (bx),
"D" ((long) where),
"S" (pcibios_entry));
return (int) (ret & 0xff00) >> 8;
}
int pcibios_write_config_byte (unsigned int bus,
unsigned int device_fn, unsigned int where, uint8_t value)
{
unsigned long ret;
unsigned long bx = (bus << 8) | device_fn;
__asm__(BIOS32_CALL
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
: "=a" (ret)
: "0" (PCIBIOS_WRITE_CONFIG_BYTE),
"c" (value),
"b" (bx),
"D" ((long) where),
"S" (pcibios_entry));
return (int) (ret & 0xff00) >> 8;
}
int pcibios_write_config_word (unsigned int bus,
unsigned int device_fn, unsigned int where, uint16_t value)
{
unsigned long ret;
unsigned long bx = (bus << 8) | device_fn;
__asm__(BIOS32_CALL
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
: "=a" (ret)
: "0" (PCIBIOS_WRITE_CONFIG_WORD),
"c" (value),
"b" (bx),
"D" ((long) where),
"S" (pcibios_entry));
return (int) (ret & 0xff00) >> 8;
}
int pcibios_write_config_dword (unsigned int bus,
unsigned int device_fn, unsigned int where, uint32_t value)
{
unsigned long ret;
unsigned long bx = (bus << 8) | device_fn;
__asm__(BIOS32_CALL
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
: "=a" (ret)
: "0" (PCIBIOS_WRITE_CONFIG_DWORD),
"c" (value),
"b" (bx),
"D" ((long) where),
"S" (pcibios_entry));
return (int) (ret & 0xff00) >> 8;
}
/* Locate the PCI BIOS via the BIOS32 Service Directory and verify its
 * "PCI " signature via the PCI_BIOS_PRESENT call.  On success
 * pcibios_entry is left non-zero; on failure it is reset to 0.
 */
static void check_pcibios(void)
{
unsigned long signature;
unsigned char present_status;
unsigned char major_revision;
unsigned char minor_revision;
int pack;
if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
/* PCI_BIOS_PRESENT returns status in %ah, revision in %bx,
 * signature in %edx; pack status/major/minor into one int.
 */
__asm__(BIOS32_CALL
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:\tshl $8, %%eax\n\t"
"movw %%bx, %%ax"
: "=d" (signature),
"=a" (pack)
: "1" (PCIBIOS_PCI_BIOS_PRESENT),
"S" (pcibios_entry)
: "bx", "cx");
present_status = (pack >> 16) & 0xff;
major_revision = (pack >> 8) & 0xff;
minor_revision = pack & 0xff;
if (present_status || (signature != PCI_SIGNATURE)) {
printf("ERROR: BIOS32 says PCI BIOS, but no PCI "
"BIOS????\n");
pcibios_entry = 0;
}
#if DEBUG
if (pcibios_entry) {
printf ("pcibios_init : PCI BIOS revision %hhX.%hhX"
" entry at %#X\n", major_revision,
minor_revision, pcibios_entry);
}
#endif
}
}
/* Find the BIOS32 Service Directory and, via it, the PCI BIOS.
 * Scans 0xe0000..0xffff0 for a BIOS32 structure with a valid
 * checksum, then calls check_pcibios() to locate the PCI BIOS.
 */
static void pcibios_init(void)
{
union bios32 *check;
unsigned char sum;
int i, length;
bios32_entry = 0;
/*
 * Follow the standard procedure for locating the BIOS32 Service
 * directory by scanning the permissible address range from
 * 0xe0000 through 0xfffff for a valid BIOS32 structure.
 *
 */
for (check = phys_to_virt(0xe0000); (void *)check <= phys_to_virt(0xffff0); ++check) {
if (check->fields.signature != BIOS32_SIGNATURE)
continue;
/* Structure length is given in 16-byte paragraphs */
length = check->fields.length * 16;
if (!length)
continue;
/* All bytes of the structure must sum to zero (mod 256) */
sum = 0;
for (i = 0; i < length ; ++i)
sum += check->chars[i];
if (sum != 0)
continue;
if (check->fields.revision != 0) {
printf("pcibios_init : unsupported revision %d at %#X, mail drew@colorado.edu\n",
check->fields.revision, check);
continue;
}
#if DEBUG
printf("pcibios_init : BIOS32 Service Directory "
"structure at %#X\n", check);
#endif
if (!bios32_entry) {
if (check->fields.entry >= 0x100000) {
printf("pcibios_init: entry in high "
"memory, giving up\n");
return;
} else {
bios32_entry = check->fields.entry;
#if DEBUG
printf("pcibios_init : BIOS32 Service Directory"
" entry at %#X\n", bios32_entry);
#endif
}
}
}
if (bios32_entry)
check_pcibios();
}
#endif /* CONFIG_PCI_DIRECT not defined*/
/* Physical address offset of a PCI bus's memory space. */
unsigned long pcibios_bus_base(unsigned int bus __unused)
{
/* architecturally this must be 0 */
return 0;
}
/* Scan the PCI bus for a device of the given type, lazily locating the
 * PCI BIOS first when direct (type-1) access is not configured.
 */
void find_pci(int type, struct pci_device *dev)
{
#ifndef CONFIG_PCI_DIRECT
if (!pcibios_entry) {
pcibios_init();
}
if (!pcibios_entry) {
printf("pci_init: no BIOS32 detected\n");
return;
}
#endif
return scan_pci_bus(type, dev);
}
#endif /* CONFIG_PCI */

View File

@@ -0,0 +1,331 @@
/*
* Basic support for controlling the 8259 Programmable Interrupt Controllers.
*
* Initially written by Michael Brown (mcb30).
*/
#include <etherboot.h>
#include "pic8259.h"
#include "realmode.h"
#ifdef DEBUG_IRQ
#define DBG(...) printf ( __VA_ARGS__ )
#else
#define DBG(...)
#endif
/* State of trivial IRQ handler */
irq_t trivial_irq_installed_on = IRQ_NONE;
static uint16_t trivial_irq_previous_trigger_count = 0;
/* The actual trivial IRQ handler
*
* Note: we depend on the C compiler not realising that we're putting
* variables in the ".text16" section and therefore not forcing them
* back to the ".data" section. I don't see any reason to expect this
* behaviour to change.
*
* These must *not* be the first variables to appear in this file; the
* first variable to appear gets the ".data" directive.
*/
RM_FRAGMENT(_trivial_irq_handler,
"pushw %bx\n\t"
"call 1f\n1:\tpopw %bx\n\t" /* PIC access to variables */
"incw %cs:(_trivial_irq_trigger_count-1b)(%bx)\n\t"
"popw %bx\n\t"
"iret\n\t"
"\n\t"
".globl _trivial_irq_trigger_count\n\t"
"_trivial_irq_trigger_count: .short 0\n\t"
"\n\t"
".globl _trivial_irq_chain_to\n\t"
"_trivial_irq_chain_to: .short 0,0\n\t"
"\n\t"
".globl _trivial_irq_chain\n\t"
"_trivial_irq_chain: .byte 0\n\t"
);
extern volatile uint16_t _trivial_irq_trigger_count;
extern segoff_t _trivial_irq_chain_to;
extern int8_t _trivial_irq_chain;
/* Current locations of trivial IRQ handler. These will change at
* runtime when relocation is used; the handler needs to be copied to
* base memory before being installed.
*/
void (*trivial_irq_handler)P((void)) = _trivial_irq_handler;
uint16_t volatile *trivial_irq_trigger_count = &_trivial_irq_trigger_count;
segoff_t *trivial_irq_chain_to = &_trivial_irq_chain_to;
uint8_t *trivial_irq_chain = &_trivial_irq_chain;
/* Install a handler for the specified IRQ. Address of previous
* handler will be stored in previous_handler. Enabled/disabled state
* of IRQ will be preserved across call, therefore if the handler does
* chaining, ensure that either (a) IRQ is disabled before call, or
* (b) previous_handler points directly to the place that the handler
* picks up its chain-to address.
*/
/* Install a handler for the specified IRQ (see block comment above).
 * Returns 1 on success, 0 on invalid IRQ number.
 */
int install_irq_handler ( irq_t irq, segoff_t *handler,
uint8_t *previously_enabled,
segoff_t *previous_handler ) {
segoff_t *irq_vector;
/* Validate irq before using it to index the vector table */
if ( irq > IRQ_MAX ) {
/* Fix: the original DBG format had no argument for %d */
DBG ( "Invalid IRQ number %d\n", irq );
return 0;
}
irq_vector = IRQ_VECTOR ( irq );
*previously_enabled = irq_enabled ( irq );
previous_handler->segment = irq_vector->segment;
previous_handler->offset = irq_vector->offset;
if ( *previously_enabled ) disable_irq ( irq );
DBG ( "Installing handler at %hx:%hx for IRQ %d (vector 0000:%hx),"
" leaving %s\n",
handler->segment, handler->offset, irq, virt_to_phys(irq_vector),
( *previously_enabled ? "enabled" : "disabled" ) );
DBG ( "...(previous handler at %hx:%hx)\n",
previous_handler->segment, previous_handler->offset );
irq_vector->segment = handler->segment;
irq_vector->offset = handler->offset;
if ( *previously_enabled ) enable_irq ( irq );
return 1;
}
/* Remove handler for the specified IRQ. Routine checks that another
* handler has not been installed that chains to handler before
* uninstalling handler. Enabled/disabled state of the IRQ will be
* restored to that specified by previously_enabled.
*/
/* Remove a previously-installed handler for the specified IRQ (see
 * block comment above).  Returns 1 on success, 0 if the IRQ number is
 * invalid or another handler has since been installed over ours.
 */
int remove_irq_handler ( irq_t irq, segoff_t *handler,
uint8_t *previously_enabled,
segoff_t *previous_handler ) {
segoff_t *irq_vector;
if ( irq > IRQ_MAX ) {
/* Fix: the original DBG format had no argument for %d */
DBG ( "Invalid IRQ number %d\n", irq );
return 0;
}
irq_vector = IRQ_VECTOR ( irq );
if ( ( irq_vector->segment != handler->segment ) ||
( irq_vector->offset != handler->offset ) ) {
/* Fix: the original DBG format had no argument for %d */
DBG ( "Cannot remove handler for IRQ %d\n", irq );
return 0;
}
DBG ( "Removing handler for IRQ %d\n", irq );
disable_irq ( irq );
irq_vector->segment = previous_handler->segment;
irq_vector->offset = previous_handler->offset;
if ( *previously_enabled ) enable_irq ( irq );
return 1;
}
/* Install the trivial IRQ handler. This routine installs the
* handler, tests it and enables the IRQ.
*/
/* Install the trivial IRQ handler on the given IRQ, test that it fires
 * via fake_irq(), then enable the IRQ.  Only one instance may be
 * installed at a time.  Returns 1 on success, 0 on failure.
 */
int install_trivial_irq_handler ( irq_t irq ) {
segoff_t trivial_irq_handler_segoff = SEGOFF(trivial_irq_handler);
if ( trivial_irq_installed_on != IRQ_NONE ) {
DBG ( "Can install trivial IRQ handler only once\n" );
return 0;
}
/* The handler is reached via a real-mode vector, so it must live
 * below 1MB; copy_trivial_irq_handler() can relocate it there.
 */
if ( SEGMENT(trivial_irq_handler) > 0xffff ) {
DBG ( "Trivial IRQ handler not in base memory\n" );
return 0;
}
DBG ( "Installing trivial IRQ handler on IRQ %d\n", irq );
if ( ! install_irq_handler ( irq, &trivial_irq_handler_segoff,
trivial_irq_chain,
trivial_irq_chain_to ) )
return 0;
trivial_irq_installed_on = irq;
DBG ( "Testing trivial IRQ handler\n" );
disable_irq ( irq );
*trivial_irq_trigger_count = 0;
trivial_irq_previous_trigger_count = 0;
fake_irq ( irq );
if ( ! trivial_irq_triggered ( irq ) ) {
DBG ( "Installation of trivial IRQ handler failed\n" );
remove_trivial_irq_handler ( irq );
return 0;
}
/* Send EOI just in case there was a leftover interrupt */
send_specific_eoi ( irq );
DBG ( "Trivial IRQ handler installed successfully\n" );
enable_irq ( irq );
return 1;
}
/* Remove the trivial IRQ handler.
*/
/* Remove the trivial IRQ handler from the IRQ it was installed on.
 * Returns 1 on success (including the not-installed no-op case),
 * 0 if asked to remove from a different IRQ or removal fails.
 */
int remove_trivial_irq_handler ( irq_t irq ) {
segoff_t trivial_irq_handler_segoff = SEGOFF(trivial_irq_handler);
if ( trivial_irq_installed_on == IRQ_NONE ) return 1;
if ( irq != trivial_irq_installed_on ) {
DBG ( "Cannot uninstall trivial IRQ handler from IRQ %d; "
"is installed on IRQ %d\n", irq,
trivial_irq_installed_on );
return 0;
}
if ( ! remove_irq_handler ( irq, &trivial_irq_handler_segoff,
trivial_irq_chain,
trivial_irq_chain_to ) )
return 0;
/* Acknowledge any trigger that arrived before removal */
if ( trivial_irq_triggered ( trivial_irq_installed_on ) ) {
DBG ( "Sending EOI for unwanted trivial IRQ\n" );
send_specific_eoi ( trivial_irq_installed_on );
}
trivial_irq_installed_on = IRQ_NONE;
return 1;
}
/* Safe method to detect whether or not trivial IRQ has been
* triggered. Using this call avoids potential race conditions. This
* call will return success only once per trigger.
*/
/* Return 1 exactly once per trigger of the trivial IRQ handler, by
 * comparing the handler's counter against the value seen last call.
 */
int trivial_irq_triggered ( irq_t irq ) {
uint16_t trivial_irq_this_trigger_count = *trivial_irq_trigger_count;
/* uint16_t difference makes counter wraparound harmless */
int triggered = ( trivial_irq_this_trigger_count -
trivial_irq_previous_trigger_count );
/* irq is not used at present, but we have it in the API for
 * future-proofing; in case we want the facility to have
 * multiple trivial IRQ handlers installed simultaneously.
 * (void) cast is the standard unused-parameter idiom, replacing
 * the original "if ( irq == IRQ_NONE ) {};" empty statement.
 */
( void ) irq;
trivial_irq_previous_trigger_count = trivial_irq_this_trigger_count;
return triggered ? 1 : 0;
}
/* Copy trivial IRQ handler to a new location. Typically used to copy
* the handler into base memory; when relocation is being used we need
* to do this before installing the handler.
*
* Call with target=NULL in order to restore the handler to its
* original location.
*/
/* Copy the trivial IRQ handler to target (NULL restores the original
 * location), temporarily uninstalling it if currently installed, and
 * rebase all pointers into the handler accordingly.  Returns 1 on
 * success, 0 on insufficient space or install/remove failure.
 */
int copy_trivial_irq_handler ( void *target, size_t target_size ) {
irq_t currently_installed_on = trivial_irq_installed_on;
/* Offset of the copy relative to the link-time location; 0 means
 * "back at the original location".
 */
uint32_t offset = ( target == NULL ? 0 :
target - (void*)_trivial_irq_handler );
if (( target != NULL ) && ( target_size < TRIVIAL_IRQ_HANDLER_SIZE )) {
DBG ( "Insufficient space to copy trivial IRQ handler\n" );
return 0;
}
if ( currently_installed_on != IRQ_NONE ) {
DBG ("WARNING: relocating trivial IRQ handler while in use\n");
if ( ! remove_trivial_irq_handler ( currently_installed_on ) )
return 0;
}
/* Do the actual copy */
if ( target != NULL ) {
DBG ( "Copying trivial IRQ handler to %hx:%hx\n",
SEGMENT(target), OFFSET(target) );
memcpy ( target, _trivial_irq_handler,
TRIVIAL_IRQ_HANDLER_SIZE );
} else {
DBG ( "Restoring trivial IRQ handler to original location\n" );
}
/* Update all the pointers to structures within the handler */
trivial_irq_handler = ( void (*)P((void)) )
( (void*)_trivial_irq_handler + offset );
trivial_irq_trigger_count = (uint16_t*)
( (void*)&_trivial_irq_trigger_count + offset );
trivial_irq_chain_to = (segoff_t*)
( (void*)&_trivial_irq_chain_to + offset );
trivial_irq_chain = (uint8_t*)
( (void*)&_trivial_irq_chain + offset );
/* Re-install at the new location if it was installed before */
if ( currently_installed_on != IRQ_NONE ) {
if ( ! install_trivial_irq_handler ( currently_installed_on ) )
return 0;
}
return 1;
}
/* Send non-specific EOI(s). This seems to be inherently unsafe.
*/
/* Send non-specific EOI(s).  This seems to be inherently unsafe.
 * For IRQs on the slave PIC, an EOI must go to both PICs.
 */
void send_nonspecific_eoi ( irq_t irq ) {
DBG ( "Sending non-specific EOI for IRQ %d\n", irq );
if ( irq >= IRQ_PIC_CUTOFF ) {
outb ( ICR_EOI_NON_SPECIFIC, PIC2_ICR );
}
outb ( ICR_EOI_NON_SPECIFIC, PIC1_ICR );
}
/* Send specific EOI(s): acknowledge the IRQ at its own PIC, and for
 * slave-PIC IRQs also acknowledge the cascade IRQ at the master.
 */
void send_specific_eoi ( irq_t irq ) {
DBG ( "Sending specific EOI for IRQ %d\n", irq );
outb ( ICR_EOI_SPECIFIC | ICR_VALUE(irq), ICR_REG(irq) );
if ( irq >= IRQ_PIC_CUTOFF ) {
outb ( ICR_EOI_SPECIFIC | ICR_VALUE(CHAINED_IRQ),
ICR_REG(CHAINED_IRQ) );
}
}
/* Fake an IRQ
*/
/* Fake an IRQ: invoke the interrupt vector corresponding to the given
 * IRQ line by executing a self-modified "int $N" in real mode.
 */
void fake_irq ( irq_t irq ) {
struct {
uint16_t int_number;
} PACKED in_stack;
/* Convert IRQ to INT number:
 *
 * subb $0x08,%cl Invert bit 3, set bits 4-7 iff irq < 8
 * xorb $0x70,%cl Invert bits 4-6
 * andb $0x7f,%cl Clear bit 7
 *
 * No, it's not the most intuitive method, but I was proud to
 * get it down to three lines of assembler when this routine
 * was originally implemented in pcbios.S.
 */
in_stack.int_number = ( ( irq - 8 ) ^ 0x70 ) & 0x7f;
/* Real-mode fragment: pops the INT number from the stack and
 * patches it into the immediate byte of the following int
 * instruction before executing it (self-modifying code).
 */
RM_FRAGMENT(rm_fake_irq,
"popw %ax\n\t" /* %ax = INT number */
"call 1f\n1:\tpop %bx\n\t"
"movb %al, %cs:(2f-1b+1)(%bx)\n\t" /* Overwrite INT number..*/
"\n2:\tint $0x00\n\t" /* ..in this instruction */
);
real_call ( rm_fake_irq, &in_stack, NULL );
}
/* Dump current 8259 status: enabled IRQs and handler addresses.
*/
#ifdef DEBUG_IRQ
/* Debug helper: list every enabled IRQ (0-15) together with the
 * segment:offset of its installed interrupt service routine.
 */
void dump_irq_status ( void ) {
int irq;
for ( irq = 0; irq < 16; irq++ ) {
if ( ! irq_enabled ( irq ) )
continue;
printf ( "IRQ%d enabled, ISR at %hx:%hx\n", irq,
IRQ_VECTOR(irq)->segment,
IRQ_VECTOR(irq)->offset );
}
}
#endif

View File

@@ -0,0 +1,8 @@
OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_ARCH(i386)
/* Collect every input section into a single .prefix.udata output
 * section (uncompressed payload data for the prefix).
 */
SECTIONS {
.prefix.udata : {
*(*)
}
}

View File

@@ -0,0 +1,8 @@
OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_ARCH(i386)
/* Collect every input section into a single .prefix.zdata output
 * section (compressed payload data for the prefix).
 */
SECTIONS {
.prefix.zdata : {
*(*)
}
}

View File

@@ -0,0 +1,364 @@
/* PXE callback mechanisms. This file contains only the portions
* specific to i386: i.e. the low-level mechanisms for calling in from
* an NBP to the PXE stack and for starting an NBP from the PXE stack.
*/
#ifdef PXE_EXPORT
#include "etherboot.h"
#include "callbacks.h"
#include "realmode.h"
#include "pxe.h"
#include "pxe_callbacks.h"
#include "pxe_export.h"
#include "hidemem.h"
#include <stdarg.h>
#define INSTALLED(x) ( (typeof(&x)) ( (void*)(&x) \
- &pxe_callback_interface \
+ (void*)&pxe_stack->arch_data ) )
#define pxe_intercept_int1a INSTALLED(_pxe_intercept_int1a)
#define pxe_intercepted_int1a INSTALLED(_pxe_intercepted_int1a)
#define pxe_pxenv_location INSTALLED(_pxe_pxenv_location)
#define INT1A_VECTOR ( (segoff_t*) ( phys_to_virt( 4 * 0x1a ) ) )
/* The overall size of the PXE stack is ( sizeof(pxe_stack_t) +
* pxe_callback_interface_size + rm_callback_interface_size ).
* Unfortunately, this isn't a compile-time constant, since
* {pxe,rm}_callback_interface_size depend on the length of the
* assembly code in these interfaces.
*
* We used to have a function pxe_stack_size() which returned this
* value. However, it actually needs to be a link-time constant, so
* that it can appear in the UNDIROMID structure in romprefix.S. We
* therefore export the three component sizes as absolute linker
* symbols, get the linker to add them together and generate a new
* absolute symbol _pxe_stack_size. We then import this value into a
* C variable pxe_stack_size, for access from C code.
*/
/* gcc won't let us use extended asm outside a function (compiler
 * bug), so we have to put these asm statements inside a dummy
* function.
*/
/* Dummy function whose only purpose is to host the extended-asm
 * statements below (see comment above); it is never called.
 */
static void work_around_gcc_bug ( void ) __attribute__ ((used));
static void work_around_gcc_bug ( void ) {
/* Export sizeof(pxe_stack_t) as absolute linker symbol */
__asm__ ( ".globl _pxe_stack_t_size" );
__asm__ ( ".equ _pxe_stack_t_size, %c0"
: : "i" (sizeof(pxe_stack_t)) );
}
/* Import _pxe_stack_size absolute linker symbol into C variable */
extern int pxe_stack_size;
__asm__ ( "pxe_stack_size: .long _pxe_stack_size" );
/* Utility routine: byte checksum
*/
/* Compute the 8-bit checksum of a memory region: the low byte of the
 * sum of all bytes in [address, address+size).
 */
uint8_t byte_checksum ( void *address, size_t size ) {
	uint8_t *bytes = address;
	unsigned int total = 0;
	size_t idx;

	for ( idx = 0 ; idx < size ; idx++ )
		total += bytes[idx];
	return (uint8_t) total;
}
/* install_pxe_stack(): install PXE stack.
*
* Use base = NULL for auto-allocation of base memory
*
* IMPORTANT: no further allocation of base memory should take place
* before the PXE stack is removed. This is to work around a small
* but important deficiency in the PXE specification.
*/
pxe_stack_t * install_pxe_stack ( void *base ) {
	pxe_t *pxe;
	pxenv_t *pxenv;
	void *pxe_callback_code;
	void (*pxe_in_call_far)(void);
	void (*pxenv_in_call_far)(void);
	void *rm_callback_code;
	void *e820mangler_code;
	void *end;

	/* If already installed, just return */
	if ( pxe_stack != NULL ) return pxe_stack;

	/* Allocate base memory if requested to do so
	 */
	if ( base == NULL ) {
		base = allot_base_memory ( pxe_stack_size );
		if ( base == NULL ) return NULL;
	}

	/* Round address up to 16-byte physical alignment */
	pxe_stack = (pxe_stack_t *)
		( phys_to_virt ( ( virt_to_phys(base) + 0xf ) & ~0xf ) );
	/* Zero out allocated stack */
	memset ( pxe_stack, 0, sizeof(*pxe_stack) );

	/* Calculate addresses for portions of the stack.
	 * Layout: pxe_stack_t | PXE callback code | RM callback code |
	 * (16-byte aligned) E820 mangler code.
	 */
	pxe = &(pxe_stack->pxe);
	pxenv = &(pxe_stack->pxenv);
	pxe_callback_code = &(pxe_stack->arch_data);
	/* Entry points move along with the relocated callback blob */
	pxe_in_call_far = _pxe_in_call_far +
		( pxe_callback_code - &pxe_callback_interface );
	pxenv_in_call_far = _pxenv_in_call_far +
		( pxe_callback_code - &pxe_callback_interface );
	rm_callback_code = pxe_callback_code + pxe_callback_interface_size;
	e820mangler_code = (void*)(((int)rm_callback_code +
				    rm_callback_interface_size + 0xf ) & ~0xf);
	end = e820mangler_code + e820mangler_size;

	/* Initialise !PXE data structures */
	memcpy ( pxe->Signature, "!PXE", 4 );
	pxe->StructLength = sizeof(*pxe);
	pxe->StructRev = 0;
	pxe->reserved_1 = 0;
	/* We don't yet have an UNDI ROM ID structure */
	pxe->UNDIROMID.segment = 0;
	pxe->UNDIROMID.offset = 0;
	/* or a BC ROM ID structure */
	pxe->BaseROMID.segment = 0;
	pxe->BaseROMID.offset = 0;
	pxe->EntryPointSP.segment = SEGMENT(pxe_stack);
	pxe->EntryPointSP.offset = (void*)pxe_in_call_far - (void*)pxe_stack;
	/* No %esp-compatible entry point yet */
	pxe->EntryPointESP.segment = 0;
	pxe->EntryPointESP.offset = 0;
	pxe->StatusCallout.segment = -1;
	pxe->StatusCallout.offset = -1;
	pxe->reserved_2 = 0;
	pxe->SegDescCn = 7;
	pxe->FirstSelector = 0;
	/* PXE specification doesn't say anything about when the stack
	 * space should get freed.  We work around this by claiming it
	 * as our data segment as well.
	 */
	pxe->Stack.Seg_Addr = pxe->UNDIData.Seg_Addr = real_mode_stack >> 4;
	pxe->Stack.Phy_Addr = pxe->UNDIData.Phy_Addr = real_mode_stack;
	pxe->Stack.Seg_Size = pxe->UNDIData.Seg_Size = real_mode_stack_size;
	/* Code segment has to be the one containing the data structures... */
	pxe->UNDICode.Seg_Addr = SEGMENT(pxe_stack);
	pxe->UNDICode.Phy_Addr = virt_to_phys(pxe_stack);
	pxe->UNDICode.Seg_Size = end - (void*)pxe_stack;
	/* No base code loaded */
	pxe->BC_Data.Seg_Addr = 0;
	pxe->BC_Data.Phy_Addr = 0;
	pxe->BC_Data.Seg_Size = 0;
	pxe->BC_Code.Seg_Addr = 0;
	pxe->BC_Code.Phy_Addr = 0;
	pxe->BC_Code.Seg_Size = 0;
	pxe->BC_CodeWrite.Seg_Addr = 0;
	pxe->BC_CodeWrite.Phy_Addr = 0;
	pxe->BC_CodeWrite.Seg_Size = 0;
	/* StructCksum is zero after the memset above; subtracting the
	 * byte checksum makes the structure's bytes sum to zero. */
	pxe->StructCksum -= byte_checksum ( pxe, sizeof(*pxe) );

	/* Initialise PXENV+ data structures */
	memcpy ( pxenv->Signature, "PXENV+", 6 );
	pxenv->Version = 0x201;
	pxenv->Length = sizeof(*pxenv);
	pxenv->RMEntry.segment = SEGMENT(pxe_stack);
	pxenv->RMEntry.offset = (void*)pxenv_in_call_far - (void*)pxe_stack;
	pxenv->PMOffset = 0; /* "Do not use" says the PXE spec */
	pxenv->PMSelector = 0; /* "Do not use" says the PXE spec */
	pxenv->StackSeg = pxenv->UNDIDataSeg = real_mode_stack >> 4;
	pxenv->StackSize = pxenv->UNDIDataSize = real_mode_stack_size;
	pxenv->BC_CodeSeg = 0;
	pxenv->BC_CodeSize = 0;
	pxenv->BC_DataSeg = 0;
	pxenv->BC_DataSize = 0;
	/* UNDIData{Seg,Size} set above */
	pxenv->UNDICodeSeg = SEGMENT(pxe_stack);
	pxenv->UNDICodeSize = end - (void*)pxe_stack;
	pxenv->PXEPtr.segment = SEGMENT(pxe);
	pxenv->PXEPtr.offset = OFFSET(pxe);
	pxenv->Checksum -= byte_checksum ( pxenv, sizeof(*pxenv) );

	/* Mark stack as inactive */
	pxe_stack->state = CAN_UNLOAD;

	/* Install PXE and RM callback code and E820 mangler */
	memcpy ( pxe_callback_code, &pxe_callback_interface,
		 pxe_callback_interface_size );
	install_rm_callback_interface ( rm_callback_code, 0 );
	install_e820mangler ( e820mangler_code );

	return pxe_stack;
}
/* Use the UNDI data segment as our real-mode stack. This is for when
* we have been loaded via the UNDI loader
*/
void use_undi_ds_for_rm_stack ( uint16_t ds ) {
	/* Discard any previously allocated real-mode stack */
	forget_real_mode_stack();
	/* Point the real-mode stack at the UNDI data segment and pin
	 * it there so it cannot be reallocated elsewhere. */
	real_mode_stack = virt_to_phys ( VIRTUAL ( ds, 0 ) );
	lock_real_mode_stack = 1;
}
/* Activate PXE stack (i.e. hook interrupt vectors). The PXE stack
 * *can* be used before it is activated, but it really shouldn't.
*/
int hook_pxe_stack ( void ) {
	if ( pxe_stack == NULL ) return 0;
	if ( pxe_stack->state >= MIDWAY ) return 1;	/* already hooked */

	/* Hook INT15 handler */
	hide_etherboot();

	/* Hook INT1A handler: save the original vector, record where
	 * the PXENV+ structure lives (relative to pxe_stack), then
	 * point INT1A at the installed intercept code. */
	*pxe_intercepted_int1a = *INT1A_VECTOR;
	pxe_pxenv_location->segment = SEGMENT(pxe_stack);
	pxe_pxenv_location->offset = (void*)&pxe_stack->pxenv
		- (void*)pxe_stack;
	INT1A_VECTOR->segment = SEGMENT(&pxe_stack->arch_data);
	INT1A_VECTOR->offset = (void*)pxe_intercept_int1a
		- (void*)&pxe_stack->arch_data;

	/* Mark stack as active */
	pxe_stack->state = MIDWAY;
	return 1;
}
/* Deactivate the PXE stack (i.e. unhook interrupt vectors).
*/
int unhook_pxe_stack ( void ) {
	if ( pxe_stack == NULL ) return 0;
	if ( pxe_stack->state <= CAN_UNLOAD ) return 1;	/* already unhooked */

	/* Restore original INT15 and INT1A handlers */
	*INT1A_VECTOR = *pxe_intercepted_int1a;
	if ( !unhide_etherboot() ) {
		/* Cannot unhook INT15.  We're up the creek without
		 * even a suitable log out of which to fashion a
		 * paddle.  There are some very badly behaved NBPs
		 * that will ignore plaintive pleas such as
		 * PXENV_KEEP_UNDI and just zero out our code anyway.
		 * This means they end up vapourising an active INT15
		 * handler, which is generally not a good thing to do.
		 */
		return 0;
	}

	/* Mark stack as inactive */
	pxe_stack->state = CAN_UNLOAD;
	return 1;
}
/* remove_pxe_stack(): remove PXE stack installed by install_pxe_stack()
*/
/* remove_pxe_stack(): remove the PXE stack previously installed by
 * install_pxe_stack(), releasing its base memory.
 */
void remove_pxe_stack ( void ) {
	/* Refuse to free the memory while the stack cannot be unloaded */
	if ( ! ensure_pxe_state ( CAN_UNLOAD ) ) {
		printf ( "Cannot remove PXE stack!\n" );
		return;
	}
	forget_base_memory ( pxe_stack, pxe_stack_size );
	pxe_stack = NULL;
}
/* xstartpxe(): start up a PXE image
*/
int xstartpxe ( void ) {
	int nbp_exit;
	/* Parameters handed to the NBP on its real-mode stack:
	 * %bx/%es (popped by the trampoline below) and a far pointer
	 * to the !PXE structure left on the stack for the NBP. */
	struct {
		reg16_t bx;
		reg16_t es;
		segoff_t pxe;
	} PACKED in_stack;
	
	/* Set up registers and stack parameters to pass to PXE NBP */
	in_stack.es.word = SEGMENT(&(pxe_stack->pxenv));
	in_stack.bx.word = OFFSET(&(pxe_stack->pxenv));
	in_stack.pxe.segment = SEGMENT(&(pxe_stack->pxe));
	in_stack.pxe.offset = OFFSET(&(pxe_stack->pxe));

	/* Real-mode trampoline fragment used to jump to PXE NBP
	 */
	RM_FRAGMENT(jump_to_pxe_nbp,
		"popw %bx\n\t"
		"popw %es\n\t"
		"lcall $" RM_STR(PXE_LOAD_SEGMENT) ", $" RM_STR(PXE_LOAD_OFFSET) "\n\t"
	);

	/* Call to PXE image, with gate A20 disabled around the call */
	gateA20_unset();
	nbp_exit = real_call ( jump_to_pxe_nbp, &in_stack, NULL );
	gateA20_set();

	return nbp_exit;
}
/* Dispatch an incoming PXE API call to pxe_api_call(), decoding the
 * opcode and parameter-block pointer according to the caller's API.
 */
int pxe_in_call ( in_call_data_t *in_call_data, va_list params ) {
	/* i386 calling conventions; the only two defined by Intel's
	 * PXE spec.
	 *
	 * Assembly code must pass a long containing the PXE version
	 * code (i.e. 0x201 for !PXE, 0x200 for PXENV+) as the first
	 * parameter after the in_call opcode.  This is used to decide
	 * whether to take parameters from the stack (!PXE) or from
	 * registers (PXENV+).
	 */
	uint32_t api_version = va_arg ( params, typeof(api_version) );
	uint16_t opcode;
	segoff_t segoff;
	t_PXENV_ANY *structure;

	if ( api_version >= 0x201 ) {
		/* !PXE calling convention: opcode and far pointer to
		 * the parameter block arrive on the caller's stack */
		pxe_call_params_t pxe_params
			= va_arg ( params, typeof(pxe_params) );
		opcode = pxe_params.opcode;
		segoff = pxe_params.segoff;
	} else {
		/* PXENV+ calling convention: opcode in %bx, parameter
		 * block at %es:%di */
		opcode = in_call_data->pm->regs.bx;
		segoff.segment = in_call_data->rm->seg_regs.es;
		segoff.offset = in_call_data->pm->regs.di;
	}
	structure = VIRTUAL ( segoff.segment, segoff.offset );
	return pxe_api_call ( opcode, structure );
}
#ifdef TEST_EXCLUDE_ALGORITHM
/* This code retained because it's a difficult algorithm to tweak with
* confidence
*/
/* ___test_exclude() packs its result: new start in the high 16 bits,
 * new length in the low 16 bits.
 */
int ___test_exclude ( int start, int len, int estart, int elen, int fixbase );
void __test_exclude ( int start, int len, int estart, int elen, int fixbase ) {
	int newrange = ___test_exclude ( start, len, estart, elen, fixbase );
	int newstart = ( newrange >> 16 ) & 0xffff;	/* unpack start */
	int newlen = ( newrange & 0xffff );		/* unpack length */
	printf ( "[%x,%x): excluding [%x,%x) %s gives [%x,%x)\n",
		 start, start + len,
		 estart, estart + elen,
		 ( fixbase == 0 ) ? " " : "fb",
		 newstart, newstart + newlen );
}
/* Exercise the exclusion algorithm both without and with base fixup. */
void _test_exclude ( int start, int len, int estart, int elen ) {
	int fixbase;

	for ( fixbase = 0 ; fixbase <= 1 ; fixbase++ )
		__test_exclude ( start, len, estart, elen, fixbase );
}
/* Drive the exclusion algorithm through every interesting interval
 * relationship between [0x8000,0x9000) and the excluded range.
 */
void test_exclude ( void ) {
	_test_exclude ( 0x8000, 0x1000, 0x0400, 0x200 );  /* before */
	_test_exclude ( 0x8000, 0x1000, 0x9000, 0x200 );  /* after */
	_test_exclude ( 0x8000, 0x1000, 0x7f00, 0x200 );  /* before overlap */
	_test_exclude ( 0x8000, 0x1000, 0x8f00, 0x200 );  /* after overlap */
	_test_exclude ( 0x8000, 0x1000, 0x8000, 0x200 );  /* align start */
	_test_exclude ( 0x8000, 0x1000, 0x8e00, 0x200 );  /* align end */
	_test_exclude ( 0x8000, 0x1000, 0x8100, 0x200 );  /* early overlap */
	_test_exclude ( 0x8000, 0x1000, 0x8d00, 0x200 );  /* late overlap */
	_test_exclude ( 0x8000, 0x1000, 0x7000, 0x3000 ); /* total overlap */
	_test_exclude ( 0x8000, 0x1000, 0x8000, 0x1000 ); /* exact overlap */
}
#endif /* TEST_EXCLUDE_ALGORITHM */
#else /* PXE_EXPORT */
/* Define symbols used by the linker scripts, to prevent link errors */
__asm__ ( ".globl _pxe_stack_t_size" );
__asm__ ( ".equ _pxe_stack_t_size, 0" );
#endif /* PXE_EXPORT */

View File

@@ -0,0 +1,94 @@
/*
* PXE image loader for Etherboot.
*
* Note: There is no signature check for PXE images because there is
* no signature. Well done, Intel! Consequently, pxe_probe() must be
* called last of all the image_probe() routines, because it will
* *always* claim the image.
*/
#ifndef PXE_EXPORT
#error PXE_IMAGE requires PXE_EXPORT
#endif
#include "etherboot.h"
#include "pxe_callbacks.h"
#include "pxe_export.h"
#include "pxe.h"
unsigned long pxe_load_offset;
static sector_t pxe_download ( unsigned char *data,
unsigned int len, int eof );
/* Claim the image unconditionally: PXE NBPs carry no header or
 * signature, so this probe must be tried after all other formats.
 */
static inline os_download_t pxe_probe ( unsigned char *data __unused,
					unsigned int len __unused ) {
	printf("(PXE)");
	pxe_load_offset = 0;	/* restart accumulation at the load base */
	return pxe_download;
}
/* Accumulate the headerless PXE image at PXE_LOAD_ADDRESS block by
 * block; on eof, install the PXE stack, run the NBP, and restart
 * Etherboot via longjmp (this function never returns after eof).
 */
static sector_t pxe_download ( unsigned char *data,
			       unsigned int len, int eof ) {
	unsigned long block_address = PXE_LOAD_ADDRESS + pxe_load_offset;
	PXENV_STATUS_t nbp_exit;

	/* Check segment will fit.  We can't do this in probe()
	 * because there's nothing in the non-existent header to tell
	 * us how long the image is.
	 */
	if ( ! prep_segment ( block_address, block_address + len,
			      block_address + len,
			      pxe_load_offset, pxe_load_offset + len ) ) {
		longjmp ( restart_etherboot, -2 );
	}

	/* Load block into memory, continue loading until eof */
	memcpy ( phys_to_virt ( block_address ), data, len );
	pxe_load_offset += len;
	if ( ! eof ) {
		return 0;
	}

	/* Start up PXE NBP */
	done ( 0 );
	/* Install and activate a PXE stack */
	pxe_stack = install_pxe_stack ( NULL );
	if ( ensure_pxe_state ( READY ) ) {
		/* Invoke the NBP */
		nbp_exit = xstartpxe();
	} else {
		/* Fake success so we tear down the stack */
		nbp_exit = PXENV_STATUS_SUCCESS;
	}

	/* NBP has three exit codes:
	 *   PXENV_STATUS_KEEP_UNDI : keep UNDI and boot next device
	 *   PXENV_STATUS_KEEP_ALL  : keep all and boot next device
	 *   anything else          : remove all and boot next device
	 *
	 * Strictly, we're meant to hand back to the BIOS, but this
	 * would prevent the useful combination of "PXE NBP fails, so
	 * let Etherboot try to boot its next device".  We therefore
	 * take liberties.
	 */
	if ( nbp_exit != PXENV_STATUS_KEEP_UNDI &&
	     nbp_exit != PXENV_STATUS_KEEP_ALL ) {
		/* Tear down PXE stack */
		remove_pxe_stack();
	}

	/* Boot next device.  Under strict PXE compliance, exit back
	 * to the BIOS, otherwise let Etherboot move to the next
	 * device.
	 */
#ifdef PXE_STRICT
	longjmp ( restart_etherboot, 255 );
#else
	longjmp ( restart_etherboot, 4 );
#endif
	/* Never reached; avoid compiler warning */
	return ( 0 );
}

View File

@@ -0,0 +1,148 @@
/* Real-mode interface: C portions.
*
* Initial version by Michael Brown <mbrown@fensystems.co.uk>, January 2004.
*/
#include "etherboot.h"
#include "realmode.h"
#include "segoff.h"
#define RM_STACK_SIZE ( 0x1000 )
/* gcc won't let us use extended asm outside a function (compiler
 * bug), so we have to put these asm statements inside a dummy
* function.
*/
/* Never called at runtime: exists solely to host file-scope extended asm. */
static void work_around_gcc_bug ( void ) __attribute__ ((used));
static void work_around_gcc_bug ( void ) {
	/* Export _real_mode_stack_size as absolute linker symbol */
	__asm__ ( ".globl _real_mode_stack_size" );
	/* %c0 emits the constant without the '$' prefix, as .equ needs */
	__asm__ ( ".equ _real_mode_stack_size, %c0" : : "i" (RM_STACK_SIZE) );
}
/* While Etherboot remains in base memory the real-mode stack is
* placed in the Etherboot main stack. The first allocation or
* deallocation of base memory will cause a 'proper' real-mode stack
* to be allocated. This will happen before Etherboot is relocated to
* high memory.
*/
uint32_t real_mode_stack = 0;
size_t real_mode_stack_size = RM_STACK_SIZE;
int lock_real_mode_stack = 0; /* Set to make stack immobile */
/* Make a call to a real-mode code block.
*/
/* This is the structure that exists on the stack as the parameters
* passed in to _real_call. We pass a pointer to this struct to
* prepare_real_call(), to save stack space.
*/
typedef struct {
	void *fragment;		/* real-mode code fragment to execute */
	int fragment_len;	/* length of the fragment in bytes */
	void *in_stack;		/* data to copy onto the real-mode stack */
	int in_stack_len;	/* length of in_stack data */
	void *out_stack;	/* buffer for data returned on the stack */
	int out_stack_len;	/* length of out_stack buffer */
} real_call_params_t;
/* Compile a self-contained real-mode trampoline (input stack data +
 * prot_to_real prefix + fragment + real_to_prot suffix) into a stack
 * in base memory, and return the physical address at which to start
 * executing it.
 */
uint32_t prepare_real_call ( real_call_params_t *p,
			     int local_stack_len, char *local_stack ) {
	char *stack_base;
	char *stack_end;
	char *stack;
	char *s;
	prot_to_real_params_t *p2r_params;
	real_to_prot_params_t *r2p_params;

	/* Work out where we're putting the stack */
	if ( virt_to_phys(local_stack) < 0xa0000 ) {
		/* Current stack is in base memory.  We can therefore
		 * use it directly, with a constraint on the size that
		 * we don't know; assume that we can use up to
		 * real_mode_stack_size.  (Not a valid assumption, but
		 * it will do).
		 */
		stack_end = local_stack + local_stack_len;
		stack_base = stack_end - real_mode_stack_size;
	} else {
		if (!real_mode_stack) {
			allot_real_mode_stack();
		}
		/* Use the allocated real-mode stack in base memory.
		 * This has fixed start and end points.
		 */
		stack_base = phys_to_virt(real_mode_stack);
		stack_end = stack_base + real_mode_stack_size;
	}
	s = stack = stack_end - local_stack_len;

	/* Compile input stack and trampoline code to stack */
	if ( p->in_stack_len ) {
		memcpy ( s, p->in_stack, p->in_stack_len );
		s += p->in_stack_len;
	}
	memcpy ( s, _prot_to_real_prefix, prot_to_real_prefix_size );
	s += prot_to_real_prefix_size;
	/* Each fragment's parameter block sits at its tail end */
	p2r_params = (prot_to_real_params_t*) ( s - sizeof(*p2r_params) );
	memcpy ( s, p->fragment, p->fragment_len );
	s += p->fragment_len;
	memcpy ( s, _real_to_prot_suffix, real_to_prot_suffix_size );
	s += real_to_prot_suffix_size;
	r2p_params = (real_to_prot_params_t*) ( s - sizeof(*r2p_params) );

	/* Set parameters within compiled stack */
	p2r_params->ss = p2r_params->cs = SEGMENT ( stack_base );
	p2r_params->esp = virt_to_phys ( stack );
	p2r_params->r2p_params = virt_to_phys ( r2p_params );
	r2p_params->out_stack = ( p->out_stack == NULL ) ?
		0 : virt_to_phys ( p->out_stack );
	r2p_params->out_stack_len = p->out_stack_len;

	/* Entry point: the trampoline code just past the input data */
	return virt_to_phys ( stack + p->in_stack_len );
}
/* Parameters are not genuinely unused; they are passed to
* prepare_real_call() as part of a real_call_params_t struct.
*/
/* Execute a real-mode code fragment via a compiled trampoline and
 * return the 16-bit value it leaves in %ax.  Parameters are not
 * genuinely unused; they are passed to prepare_real_call() as part of
 * a real_call_params_t struct (a pointer to &fragment).
 */
uint16_t _real_call ( void *fragment, int fragment_len,
		      void *in_stack __unused, int in_stack_len,
		      void *out_stack __unused, int out_stack_len __unused ) {
	uint16_t retval;

	/* This code is basically equivalent to
	 *
	 *	uint32_t trampoline;
	 *	char local_stack[ in_stack_len + prot_to_real_prefix_size +
	 *			  fragment_len + real_to_prot_suffix_size ];
	 *	trampoline = prepare_real_call ( &fragment, local_stack );
	 *	__asm__ ( "call _virt_to_phys\n\t"
	 *		  "call %%eax\n\t"
	 *		  "call _phys_to_virt\n\t"
	 *		  : "=a" (retval) : "0" (trampoline) );
	 *
	 * but implemented in assembly to avoid problems with not
	 * being certain exactly how gcc handles %esp.
	 */
	__asm__ ( "pushl %%ebp\n\t"
		  "movl %%esp, %%ebp\n\t"	/* %esp preserved via %ebp */
		  "subl %%ecx, %%esp\n\t"	/* space for inline RM stack */
		  "pushl %%esp\n\t"		/* set up RM stack */
		  "pushl %%ecx\n\t"
		  "pushl %%eax\n\t"
		  "call prepare_real_call\n\t"	/* %eax = trampoline addr */
		  "addl $12, %%esp\n\t"
		  "call _virt_to_phys\n\t"	/* switch to phys addr */
		  "call *%%eax\n\t"		/* call to trampoline */
		  "call _phys_to_virt\n\t"	/* switch to virt addr */
		  "movl %%ebp, %%esp\n\t"	/* restore %esp & %ebp */
		  "popl %%ebp\n\t"
		  : "=a" ( retval )
		  : "0" ( &fragment )
		  /* %ecx = total trampoline size, rounded up to a
		   * 4-byte boundary */
		  , "c" ( ( ( in_stack_len + prot_to_real_prefix_size +
			      fragment_len + real_to_prot_suffix_size )
			    + 0x3 ) & ~0x3 ) );
	return retval;
}

View File

@@ -0,0 +1,695 @@
/* Real-mode interface: assembly-language portions.
*
* Initial version by Michael Brown <mbrown@fensystems.co.uk>, January 2004.
*/
#include "realmode.h"
#include "callbacks.h"
#if 1 /* CODE16 */
#define BOCHSBP xchgw %bx,%bx
#define NUM_PUSHA_REGS (8)
#define NUM_SEG_REGS (6)
.text
.arch i386
.section ".text16.nocompress", "ax", @progbits
.code16
.equ CR0_PE,1
#ifdef GAS291
#define DATA32 data32;
#define ADDR32 addr32;
#define LJMPI(x) ljmp x
#else
#define DATA32 data32
#define ADDR32 addr32
/* newer GAS295 require #define LJMPI(x) ljmp *x */
#define LJMPI(x) ljmp x
#endif
/****************************************************************************
* REAL-MODE CALLBACK INTERFACE
*
* This must be copied down to base memory in order for external
* programs to be able to make calls in to Etherboot. Store the
* current physical address of Etherboot (i.e. virt_to_phys(_text)) in
* (uint32_t)rm_etherboot_location, then copy
* (uint16_t)rm_callback_interface_size bytes starting at
* &((void)rm_callback_interface).
*
* There are two defined entry points:
* Offset RM_IN_CALL = 0 Near call entry point
* Offset RM_IN_CALL_FAR = 2 Far call entry point
*
* Note that the routines _prot_to_real and _real_to_prot double as
* trampoline fragments for external calls (calls from Etherboot to
* real-mode code). _prot_to_real does not automatically re-enable
* interrupts; this is to allow for the potential of using Etherboot
* code as an ISR. _real_to_prot does automatically disable
* interrupts, since we don't have a protected-mode IDT.
****************************************************************************
*/
.globl rm_callback_interface
.code16
rm_callback_interface:
.globl _rm_in_call
_rm_in_call:
jmp _real_in_call
.globl _rm_in_call_far
_rm_in_call_far:
jmp _real_in_call_far
/****************************************************************************
* _real_in_call
*
* Parameters:
* 16-bit real-mode near/far return address (implicit from [l]call
* to routine) Other parameters as for _in_call_far().
*
* This routine will convert the 16-bit real-mode far return address
* to a 32-bit real-mode far return address, switch to protected mode
* using _real_to_prot and call in to _in_call_far.
****************************************************************************
*/
/* Stack-frame offsets (from %esp, after the temporaries are popped)
 * for the expanded in_call frame built below.
 */
#define RIC_PRESERVE ( 8 )
#define RIC_OFFSET_CALLADDR ( RIC_PRESERVE )
#define RIC_OFFSET_CALLADDR_E ( RIC_OFFSET_CALLADDR + 4 )
#define RIC_OFFSET_CONTADDR ( RIC_OFFSET_CALLADDR_E )
#define RIC_OFFSET_CONTADDR_E ( RIC_OFFSET_CONTADDR + 4 )
#define RIC_OFFSET_OPCODE ( RIC_OFFSET_CONTADDR_E )
#define RIC_OFFSET_OPCODE_E ( RIC_OFFSET_OPCODE + 4 )
#define RIC_OFFSET_SEG_REGS ( RIC_OFFSET_OPCODE_E )
#define RIC_OFFSET_SEG_REGS_E ( RIC_OFFSET_SEG_REGS + ( NUM_SEG_REGS * 2 ) )
#define RIC_OFFSET_PAD ( RIC_OFFSET_SEG_REGS_E )
#define RIC_OFFSET_PAD_E ( RIC_OFFSET_PAD + 2 )
#define RIC_OFFSET_FLAGS ( RIC_OFFSET_PAD_E )
#define RIC_OFFSET_FLAGS_E ( RIC_OFFSET_FLAGS + 2 )
#define RIC_OFFSET_RETADDR ( RIC_OFFSET_FLAGS_E )
#define RIC_OFFSET_RETADDR_E ( RIC_OFFSET_RETADDR + 4 )
#define RIC_OFFSET_ORIG_OPCODE ( RIC_OFFSET_RETADDR_E )
#define RIC_INSERT_LENGTH ( RIC_OFFSET_OPCODE_E - RIC_OFFSET_CALLADDR )
	
	.code16
_real_in_call:
	/* Expand near return address to far return address
	 */
	pushw	%ax		/* Extend stack, store %ax */
	pushfw
	pushw	%bp
	movw	%sp, %bp
	movw	%cs, %ax
	xchgw	%ax, 6(%bp)
	xchgw	%ax, 4(%bp)	/* also restores %ax */
	popw	%bp
	popfw
	/* Fall through to _real_in_call_far */
_real_in_call_far:
	/* Store flags and pad */
	pushfw
	pushw	%ax
	/* Store segment registers.  Order matches that of seg_regs_t */
	pushw	%gs
	pushw	%fs
	pushw	%es
	pushw	%ds
	pushw	%ss
	pushw	%cs
	/* Switch to protected mode */
	call	_real_to_prot
	.code32
	/* Allow space for expanded stack */
	subl	$RIC_INSERT_LENGTH, %esp
	/* Store temporary registers */
	pushl	%ebp
	pushl	%eax
	/* Copy opcode, set EB_CALL_FROM_REAL_MODE and EB_SKIP_OPCODE.
	 * Copy it because _in_call() and i386_in_call() expect it at
	 * a fixed position, not as part of the va_list.
	 */
	movl	RIC_OFFSET_ORIG_OPCODE(%esp), %eax
	orl	$(EB_CALL_FROM_REAL_MODE|EB_SKIP_OPCODE), %eax
	movl	%eax, RIC_OFFSET_OPCODE(%esp)
	/* Set up call and return addresses (position-independent:
	 * locate ourselves via call/pop, then use the stored physical
	 * location of Etherboot to find _in_call). */
	call	1f
1:	popl	%ebp
	subl	$1b, %ebp		/* %ebp = offset */
	movl	rm_etherboot_location(%ebp), %eax /* Etherboot phys addr */
	subl	$_text, %eax
	addl	$_in_call, %eax		/* _in_call phys addr */
	movl	%eax, RIC_OFFSET_CALLADDR(%esp)
	leal	2f(%ebp), %eax		/* continuation address */
	movl	%eax, RIC_OFFSET_CONTADDR(%esp)
	/* Restore temporary registers */
	popl	%eax
	popl	%ebp
	/* Call to _in_call */
	ret
	/* opcode will be popped automatically thanks to EB_SKIP_OPCODE */
2:	/* Continuation point */
	call	_prot_to_real	/* Return to real mode */
	/* Note: the first two words of our segment register store
	 * happens to be exactly what we need to pass as parameters to
	 * _prot_to_real.
	 */
	.code16
	popw	%ds	/* Restore segment registers */
	popw	%ds	/* (skip cs&ss since these */
	popw	%ds	/* have already been set by */
	popw	%es	/* _prot_to_real */
	popw	%fs
	popw	%gs
	addw	$2, %sp	/* skip pad */
	/* Check for EB_SKIP_OPCODE */
	pushw	%bp
	movw	%sp, %bp
	testl	$EB_SKIP_OPCODE, 6(%bp)
	popw	%bp
	jnz	1f
	/* Normal return */
	popfw	/* Restore interrupt status */
	lret	/* Back to caller */
1:	/* Return and skip opcode */
	popfw
	lret	$4
/****************************************************************************
* rm_etherboot_location: the current physical location of Etherboot.
* Needed so that real-mode callback routines can locate Etherboot.
****************************************************************************
*/
.globl rm_etherboot_location
rm_etherboot_location: .long 0
/****************************************************************************
* _prot_to_real_prefix
*
* Trampoline fragment. Switch from 32-bit protected mode with flat
* physical addresses to 16-bit real mode. Store registers in the
* trampoline for restoration by _real_to_prot_suffix. Switch to
* stack in base memory.
****************************************************************************
*/
	.globl _prot_to_real_prefix
	.code32
_prot_to_real_prefix:
	/* Registers to preserve */
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	pushl	%ebp
	/* Calculate offset (position-independent addressing) */
	call	1f
1:	popl	%ebp
	subl	$1b, %ebp	/* %ebp = offset for labels in p2r*/
	/* Preserve registers and return address in r2p_params */
	movl	p2r_r2p_params(%ebp), %ebx
	subl	$r2p_params, %ebx /* %ebx = offset for labels in r2p */
	popl	r2p_ebp(%ebx)
	popl	r2p_edi(%ebx)
	popl	r2p_esi(%ebx)
	popl	r2p_ebx(%ebx)
	popl	r2p_ret_addr(%ebx)
	movl	%esp, r2p_esp(%ebx)
	/* Switch stacks */
	movl	p2r_esp(%ebp), %esp
	/* Switch to real mode */
	pushl	p2r_segments(%ebp)
	call	_prot_to_real
	.code16
	addw	$4, %sp
	/* Fall through to next trampoline fragment */
	jmp	_prot_to_real_prefix_end
/****************************************************************************
* _prot_to_real
*
* Switch from 32-bit protected mode with flat physical addresses to
* 16-bit real mode. Stack and code must be in base memory when
* called. %cs, %ss, %eip, %esp are changed to real-mode values,
* other segment registers are destroyed, all other registers are
* preserved. Interrupts are *not* enabled.
*
* Parameters:
* %cs Real-mode code segment (word)
* %ss Real-mode stack segment (word)
****************************************************************************
*/
/* Stack offsets (from %esp after the three preserved registers) */
#define P2R_PRESERVE ( 12 )
#define P2R_OFFSET_RETADDR ( P2R_PRESERVE )
#define P2R_OFFSET_RETADDR_E ( P2R_OFFSET_RETADDR + 4 )
#define P2R_OFFSET_CS ( P2R_OFFSET_RETADDR_E )
#define P2R_OFFSET_CS_E ( P2R_OFFSET_CS + 2 )
#define P2R_OFFSET_SS ( P2R_OFFSET_CS_E )
#define P2R_OFFSET_SS_E ( P2R_OFFSET_SS + 2 )
	
	.globl _prot_to_real
	.code32
_prot_to_real:
	/* Preserve registers */
	pushl	%ebp
	pushl	%ebx
	pushl	%eax
	/* Calculate offset (position-independent addressing) */
	call	1f
1:	popl	%ebp
	subl	$1b, %ebp	/* %ebp = offset for labels in p2r*/
	/* Set up GDT with real-mode limits and appropriate bases for
	 * real-mode %cs and %ss.  Set up protected-mode continuation
	 * point on stack.
	 */
	/* Fixup GDT */
	leal	p2r_gdt(%ebp), %eax
	movl	%eax, p2r_gdt_addr(%ebp)
	/* Calculate CS base address: set GDT code segment, adjust
	 * return address, set up continuation address on stack.
	 */
	movzwl	P2R_OFFSET_CS(%esp), %eax
	shll	$4, %eax
	/* Change return address to real-mode far address */
	subl	%eax, P2R_OFFSET_RETADDR(%esp)
	movl	%eax, %ebx
	shrl	$4, %ebx
	movw	%bx, (P2R_OFFSET_RETADDR+2)(%esp)
	/* First real mode address */
	movl	%eax, %ebx
	shrl	$4, %ebx
	pushw	%bx
	leal	3f(%ebp), %ebx
	subl	%eax, %ebx
	pushw	%bx
	/* Continuation address */
	pushl	$(p2r_rmcs - p2r_gdt)
	leal	2f(%ebp), %ebx
	subl	%eax, %ebx
	pushl	%ebx
	/* Code segment in GDT */
	movw	%ax, (p2r_rmcs+2)(%ebp)
	shrl	$16, %eax	/* Remainder of cs base addr */
	movb	%al, (p2r_rmcs+4)(%ebp)
	movb	%ah, (p2r_rmcs+7)(%ebp)
	/* Calculate SS base address: set GDT data segment, retain to
	 * use for adjusting %esp.
	 */
	movzwl	(12+P2R_OFFSET_SS)(%esp), %eax	/* Set ss base address */
	shll	$4, %eax
	movw	%ax, (p2r_rmds+2)(%ebp)
	movl	%eax, %ebx
	shrl	$16, %ebx
	movb	%bl, (p2r_rmds+4)(%ebp)
	movb	%bh, (p2r_rmds+7)(%ebp)
	/* Load GDT */
	lgdt	p2r_gdt(%ebp)
	/* Reload all segment registers and adjust %esp */
	movw	$(p2r_rmds - p2r_gdt), %bx	/* Pmode DS */
	movw	%bx, %ss
	subl	%eax, %esp	/* %esp now less than 0x10000 */
	movw	%bx, %ds
	movw	%bx, %es
	movw	%bx, %fs
	movw	%bx, %gs
	lret	/* %cs:eip */
2:	/* Segment registers now have 16-bit limits. */
	.code16
	/* Switch to real mode: clear the PE bit of %cr0.
	 * NOTE(review): in GAS expression syntax `!' is the bitwise
	 * or-not operator, so $0!CR0_PE == $(0 | ~CR0_PE) == $0xfe;
	 * this appears intentional, not a typo -- confirm against the
	 * GNU as manual for the assembler version in use.
	 */
	movl	%cr0, %ebx
	andb	$0!CR0_PE, %bl
	movl	%ebx, %cr0
	/* Make intersegment jmp to flush the processor pipeline
	 * and reload %cs:%eip (to clear upper 16 bits of %eip).
	 */
	lret
3:
	/* Load real-mode segment value to %ss.  %sp already OK */
	shrl	$4, %eax
	movw	%ax, %ss
	/* Restore registers */
	popl	%eax
	popl	%ebx
	popl	%ebp
	/* Return to caller in real-mode */
	lret
#ifdef FLATTEN_REAL_MODE
#define RM_LIMIT_16_19__AVL__SIZE__GRANULARITY 0x8f
#else
#define RM_LIMIT_16_19__AVL__SIZE__GRANULARITY 0x00
#endif
p2r_gdt:
p2r_gdtarg:
p2r_gdt_limit: .word p2r_gdt_end - p2r_gdt - 1
p2r_gdt_addr: .long 0
p2r_gdt_padding: .word 0
p2r_rmcs:
/* 16 bit real mode code segment */
.word 0xffff,(0&0xffff)
.byte (0>>16),0x9b,RM_LIMIT_16_19__AVL__SIZE__GRANULARITY,(0>>24)
p2r_rmds:
/* 16 bit real mode data segment */
.word 0xffff,(0&0xffff)
.byte (0>>16),0x93,RM_LIMIT_16_19__AVL__SIZE__GRANULARITY,(0>>24)
p2r_gdt_end:
/* This is the end of the trampoline prefix code. When used
* as a prefix, fall through to the following code in the
* trampoline.
*/
p2r_params: /* Structure must match prot_to_real_params_t in realmode.h */
p2r_esp: .long 0
p2r_segments:
p2r_cs: .word 0
p2r_ss: .word 0
p2r_r2p_params: .long 0
.globl _prot_to_real_prefix_end
_prot_to_real_prefix_end:
.globl _prot_to_real_prefix_size
.equ _prot_to_real_prefix_size, _prot_to_real_prefix_end - _prot_to_real_prefix
.globl prot_to_real_prefix_size
prot_to_real_prefix_size:
.word _prot_to_real_prefix_size
/****************************************************************************
* _real_to_prot_suffix
*
* Trampoline fragment. Switch from 16-bit real-mode to 32-bit
* protected mode with flat physical addresses. Copy returned stack
* parameters to output_stack. Restore registers preserved by
* _prot_to_real_prefix. Restore stack to previous location.
****************************************************************************
*/
	.globl _real_to_prot_suffix
	.code16
_real_to_prot_suffix:
	/* Switch to protected mode */
	call	_real_to_prot
	.code32
	/* Calculate offset (position-independent addressing) */
	call	1f
1:	popl	%ebp
	subl	$1b, %ebp	/* %ebp = offset for labels in r2p */
	/* Copy stack to out_stack */
	movl	r2p_out_stack(%ebp), %edi
	movl	r2p_out_stack_len(%ebp), %ecx
	movl	%esp, %esi
	cld
	rep movsb
	/* Switch back to original stack */
	movl	r2p_esp(%ebp), %esp
	/* Restore registers and return */
	pushl	r2p_ret_addr(%ebp)	/* Set up return address on stack */
	movl	r2p_ebx(%ebp), %ebx
	movl	r2p_esi(%ebp), %esi
	movl	r2p_edi(%ebp), %edi
	movl	r2p_ebp(%ebp), %ebp
	ret
/****************************************************************************
* _real_to_prot
*
* Switch from 16-bit real-mode to 32-bit protected mode with flat
* physical addresses. All segment registers are destroyed, %eip and
* %esp are changed to flat physical values, all other registers are
* preserved. Interrupts are disabled.
*
* Parameters: none
****************************************************************************
*/
/* Stack offsets (from %sp after the three preserved registers) */
#define R2P_PRESERVE ( 12 )
#define R2P_OFFSET_RETADDR ( R2P_PRESERVE )
#define R2P_OFFSET_ORIG_RETADDR ( R2P_OFFSET_RETADDR + 2 )
	
	.globl _real_to_prot
	.code16
_real_to_prot:
	/* Disable interrupts */
	cli
	/* zero extend the return address */
	pushw	$0
	/* Preserve registers */
	pushl	%ebp
	pushl	%ebx
	pushl	%eax
	/* Convert 16-bit real-mode near return address to
	 * 32-bit pmode physical near return address
	 */
	movw	%sp, %bp
	xorl	%ebx, %ebx
	push	%cs
	popw	%bx
	movw	%bx, %ds
	shll	$4, %ebx
	movzwl	%ss:R2P_OFFSET_ORIG_RETADDR(%bp), %eax
	addl	%ebx, %eax
	movl	%eax, %ss:(R2P_OFFSET_RETADDR)(%bp)
	/* Store the code segment physical base address in %ebp */
	movl	%ebx, %ebp
	/* Find the offset within the code segment that I am running at */
	xorl	%ebx, %ebx
	call	1f
1:	popw	%bx
	/* Set up GDT */
	leal	(r2p_gdt-1b)(%bx), %eax	/* %ds:ebx = %ds:bx = &(r2p_gdt) */
	addl	%ebp, %eax		/* %eax = &r2p_gdt (physical) */
	movl	%eax, %ds:(r2p_gdt-1b+2)(%bx) /* Set phys. addr. in r2p_gdt */
	/* Compute the first protected mode physical address */
	leal	(2f-1b)(%bx), %eax
	addl	%ebp, %eax
	movl	%eax, %ds:(r2p_paddr-1b)(%bx)
	/* Calculate new %esp */
	xorl	%eax, %eax
	push	%ss
	popw	%ax
	shll	$4, %eax
	movzwl	%sp, %ebp
	addl	%eax, %ebp	/* %ebp = new %esp */
	/* Load GDT */
	DATA32 lgdt %ds:(r2p_gdt-1b)(%bx)	/* Load GDT */
	/* Switch to protected mode: set the PE bit of %cr0 */
	movl	%cr0, %eax
	orb	$CR0_PE, %al
	movl	%eax, %cr0
	/* flush prefetch queue, and reload %cs:%eip */
	DATA32 ljmp %ds:(r2p_paddr-1b)(%bx)
	.code32
2:
	/* Load segment registers, adjust %esp */
	movw	$(r2p_pmds-r2p_gdt), %ax
	movw	%ax, %ss
	movl	%ebp, %esp
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	/* Restore registers */
	popl	%eax
	popl	%ebx
	popl	%ebp
	/* return to caller */
	ret
r2p_gdt:
.word r2p_gdt_end - r2p_gdt - 1 /* limit */
.long 0 /* addr */
.word 0
r2p_pmcs:
/* 32 bit protected mode code segment, physical addresses */
.word 0xffff, 0
.byte 0, 0x9f, 0xcf, 0
r2p_pmds:
/* 32 bit protected mode data segment, physical addresses */
.word 0xffff,0
.byte 0,0x93,0xcf,0
r2p_gdt_end:
r2p_paddr:
.long 2b
.word r2p_pmcs - r2p_gdt, 0
/* This is the end of the trampoline suffix code.
*/
r2p_params: /* Structure must match real_to_prot_params_t in realmode.h */
r2p_ret_addr: .long 0
r2p_esp: .long 0
r2p_ebx: .long 0
r2p_esi: .long 0
r2p_edi: .long 0
r2p_ebp: .long 0
r2p_out_stack: .long 0
r2p_out_stack_len: .long 0
.globl _real_to_prot_suffix_end
_real_to_prot_suffix_end:
.globl _real_to_prot_suffix_size
.equ _real_to_prot_suffix_size, _real_to_prot_suffix_end - _real_to_prot_suffix
.globl real_to_prot_suffix_size
real_to_prot_suffix_size:
.word _real_to_prot_suffix_size
rm_callback_interface_end:
.globl _rm_callback_interface_size
.equ _rm_callback_interface_size, rm_callback_interface_end - rm_callback_interface
.globl rm_callback_interface_size
rm_callback_interface_size:
.word _rm_callback_interface_size
/****************************************************************************
* END OF REAL-MODE CALLBACK INTERFACE
****************************************************************************
*/
#ifdef PXE_EXPORT
/****************************************************************************
* PXE CALLBACK INTERFACE
*
* Prepend this to rm_callback_interface to create a real-mode PXE
* callback interface.
****************************************************************************
*/
.section ".text16", "ax", @progbits
.globl pxe_callback_interface
.code16
pxe_callback_interface:
/* Macro to calculate offset of labels within code segment in
* installed copy of code.
*/
#define INSTALLED(x) ( (x) - pxe_callback_interface )
/****************************************************************************
* PXE entry points (!PXE and PXENV+ APIs)
****************************************************************************
*/
/* in_call mechanism for !PXE API calls.
 *
 * Far entry point reached from the !PXE structure's EntryPointSP.
 * Pushes the PXE API version, then falls through to the shared tail
 * at 1: which tags the call with EB_OPCODE_PXE and dispatches to the
 * generic real-mode in_call mechanism.
 */
.globl _pxe_in_call_far
_pxe_in_call_far:
/* Prepend "PXE API call" and "API version 0x201" to stack */
pushl $0x201
jmp 1f
/* in_call mechanism for PXENV+ API calls.
 *
 * Same as above, but reached via the PXENV+ structure and therefore
 * tagged with API version 0x200 instead of 0x201.
 */
.globl _pxenv_in_call_far
_pxenv_in_call_far:
/* Prepend "PXE API call" and "API version 0x200" to stack */
pushl $0x200
1: pushl $EB_OPCODE_PXE
/* Perform real-mode in_call */
call pxe_rm_in_call
/* Return */
/* Discard the two dwords (opcode + version) pushed above, then far
 * return to the PXE caller.
 */
addw $8, %sp
lret
/****************************************************************************
* PXE installation check (INT 1A) code
****************************************************************************
*/
/* INT 1A interceptor: answers the PXE installation check (AX=5650h)
 * by returning AX=564Eh and ES:BX pointing at the PXENV+ structure;
 * any other AX value is chained to the original INT 1A vector saved
 * in _pxe_intercepted_int1a.
 */
.globl _pxe_intercept_int1a
_pxe_intercept_int1a:
pushfw
cmpw $0x5650, %ax
jne 2f
1: /* INT 1A,5650 - Intercept */
popfw
/* Set up return values according to PXE spec: */
movw $0x564e, %ax /* AX := 564Eh (VN) */
pushw %cs:INSTALLED(_pxe_pxenv_segment)
popw %es /* ES:BX := &(PXENV+ structure) */
movw %cs:INSTALLED(_pxe_pxenv_offset), %bx
clc /* CF is cleared */
lret $2 /* 'iret' without reloading flags */
2: /* INT 1A,other - Do not intercept */
popfw
/* Chain to the previous INT 1A handler (far indirect jump through
 * the saved vector).
 */
ljmp %cs:*INSTALLED(_pxe_intercepted_int1a)
/* Original INT 1A vector (offset, segment), filled in at install time */
.globl _pxe_intercepted_int1a
_pxe_intercepted_int1a: .word 0,0
/* Far pointer (offset, segment) to the PXENV+ structure, filled in at
 * install time and handed out by the installation check above.
 */
.globl _pxe_pxenv_location
_pxe_pxenv_location:
_pxe_pxenv_offset: .word 0
_pxe_pxenv_segment: .word 0
pxe_rm_in_call:
pxe_attach_rm:
/* rm_callback_interface must be appended here */
pxe_callback_interface_end:
.globl _pxe_callback_interface_size
.equ _pxe_callback_interface_size, pxe_callback_interface_end - pxe_callback_interface
.globl pxe_callback_interface_size
pxe_callback_interface_size:
.word _pxe_callback_interface_size
#else /* PXE_EXPORT */
/* Define symbols used by the linker scripts, to prevent link errors */
.globl _pxe_callback_interface_size
.equ _pxe_callback_interface_size, 0
#endif /* PXE_EXPORT */
#else /* CODE16 */
/* Define symbols used by the linker scripts, to prevent link errors */
.globl _rm_callback_interface_size
.equ _rm_callback_interface_size, 0
.globl _pxe_callback_interface_size
.equ _pxe_callback_interface_size, 0
#endif /* CODE16 */

View File

@@ -0,0 +1,285 @@
/*****************************************************************************
*
* THIS FILE IS NOW OBSOLETE.
*
* The functions of this file are now placed in init.S.
*
*****************************************************************************
*/
#ifndef PCBIOS
#error "16bit code is only supported with the PCBIOS"
#endif
#define CODE_SEG 0x08
#define DATA_SEG 0x10
#define EXEC_IN_SITU_MAGIC 0x45524548 /* 'HERE' */
.equ CR0_PE, 1
#ifdef GAS291
#define DATA32 data32;
#define ADDR32 addr32;
#define LJMPI(x) ljmp x
#else
#define DATA32 data32
#define ADDR32 addr32
/* newer GAS295 require #define LJMPI(x) ljmp *x */
#define LJMPI(x) ljmp x
#endif
/*****************************************************************************
*
* start16 : move payload to desired area of memory, set up for exit
* back to prefix, set up for 32-bit code.
*
* Enter (from prefix) with es:di = 0x4552:0x4548 if you want to
* prevent start16 from moving the payload. There are two
* motivations for moving the payload:
*
* 1. It may be in ROM, in which case we need to move it to RAM.
* 2. Whatever loaded us probably didn't know about our memory usage
* beyond the end of the image file. We should claim this memory
* before using it.
*
* Unless the prefix instructs us otherwise we will move the payload to:
*
* An area of memory claimed from the BIOS via 40:13.
*
* We use the main Etherboot stack (within the image target) as our
* stack; we don't rely on the prefix passing us a stack usable for
* anything other than the prefix's return address. The (first 512
* bytes of the) prefix code segment is copied to a safe archive
* location.
*
* When we return to the prefix (from start32), we copy this code back
* to a new area of memory, restore the prefix's ss:sp and ljmp back
* to the copy of the prefix. The prefix will see a return from
* start16 *but* may be executing at a new location. Code following
* the lcall to start16 must therefore be position-independent and
* must also be within [cs:0000,cs:01ff]. We make absolutely no
* guarantees about the stack contents when the prefix regains
* control.
*
* Trashes just about all registers, including all the segment
* registers.
*
*****************************************************************************
*/
.text
.code16
.arch i386
.org 0
.globl _start16
_start16:
/*****************************************************************************
* Work out where we are going to place our image (image = optional
* decompressor + runtime). Exit this stage with %ax containing the
* runtime target address divided by 16 (i.e. a real-mode segment
* address).
*****************************************************************************
*/
movw %es, %ax
cmpw $(EXEC_IN_SITU_MAGIC >> 16), %ax
jne exec_moved
cmpw $(EXEC_IN_SITU_MAGIC & 0xffff), %di
jne exec_moved
exec_in_situ:
/* Prefix has warned us not to move the payload. Simply
* calculate where the image is going to end up, so we can
* work out where to put our stack.
*/
movw %cs, %ax
addw $((payload-_start16)/16), %ax
jmp 99f
exec_moved:
/* Claim an area of base memory from the BIOS and put the
* payload there. arch_relocated_to() will deal with freeing
* up this memory once we've relocated to high memory.
*/
movw $0x40, %ax
movw %ax, %es
movw %es:(0x13), %ax /* FBMS in kb to %ax */
shlw $6, %ax /* ... in paragraphs */
subw $__image_size_pgh, %ax /* Subtract space for image */
shrw $6, %ax /* Round down to nearest kb */
movw %ax, %es:(0x13) /* ...and claim memory from BIOS */
shlw $6, %ax
99:
/* At this point %ax contains the segment address for the
* start of the image (image = optional decompressor + runtime).
*/
/*****************************************************************************
* Set up stack in start32's stack space within the place we're going
* to copy Etherboot to, reserve space for GDT, copy return address
* from prefix stack, store prefix stack address
*****************************************************************************
*/
popl %esi /* Return address */
mov %ss, %bx /* %es:di = prefix stack address */
mov %bx, %es /* (*after* pop of return address) */
movw %sp, %di
movw $__offset_stack_pgh, %bx /* Set up Etherboot stack */
addw %ax, %bx
movw %bx, %ss
movw $__stack_size, %sp
subw $(_gdt_end - _gdt), %sp /* Reserve space for GDT */
movw %sp, %bp /* Record GDT location */
/* Set up i386_rm_in_call_data_t structure on stack. This is
* the same structure as is set up by rm_in_call.
*/
pushl $0 /* Dummy opcode */
pushl %esi /* Prefix return address */
pushfw /* Flags */
pushw %di /* Prefix %sp */
pushw %gs /* Segment registers */
pushw %fs
pushw %es
pushw %ds
pushw %es /* Prefix %ss */
pushw %cs
/* Stack is now 32-bit aligned */
/* %ax still contains image target segment address */
/*****************************************************************************
* Calculate image target and prefix code physical addresses, store on stack
* for use in copy routine.
*****************************************************************************
*/
movzwl %es:-2(%di), %ebx /* Prefix code segment */
shll $4, %ebx
pushl %ebx /* Prefix code physical address */
movzwl %ax, %edi /* Image target segment */
shll $4, %edi
pushl %edi /* Image target physical address */
/*****************************************************************************
* Transition to 32-bit protected mode. Set up all segment
* descriptors to use flat physical addresses.
*****************************************************************************
*/
/* Copy gdt to area reserved on stack
*/
push %cs /* GDT source location -> %ds:%si */
pop %ds
mov $(_gdt - _start16), %si
push %ss /* GDT target location -> %es:%di */
pop %es
mov %bp, %di
mov $(_gdt_end - _gdt), %cx
cld
rep movsb /* Copy GDT to stack */
movl %ss, %eax
shll $4, %eax
movzwl %bp, %ebx
addl %eax, %ebx /* Physical addr of GDT copy -> %ebx */
movl %ebx, 2(%bp) /* Fill in addr field in GDT */
/* Compute the offset I am running at.
*/
movl %cs, %ebx
shll $4, %ebx /* %ebx = offset for start16 symbols */
/* Switch to 32bit protected mode.
*/
cli /* Disable interrupts */
lgdt (%bp) /* Load GDT from stack */
movl %cr0, %eax /* Set protected mode bit */
orb $CR0_PE, %al
movl %eax, %cr0
movl %ss, %eax /* Convert stack pointer to 32bit */
shll $4, %eax
movzwl %sp, %esp
addl %eax, %esp
movl $DATA_SEG, %eax /* Reload the segment registers */
movl %eax, %ds
movl %eax, %es
movl %eax, %ss
movl %eax, %fs
movl %eax, %gs
/* Flush prefetch queue, and reload %cs:%eip by effectively ljmping
* to code32_start. Do the jump via pushl and lret because the text
* may not be writable.
*/
pushl $CODE_SEG
ADDR32 leal (code32_start-_start16)(%ebx), %eax
pushl %eax
DATA32 lret /* DATA32 needed, because we're still in 16-bit mode */
_gdt:
gdtarg:
.word _gdt_end - _gdt - 1 /* limit */
.long 0 /* addr */
.word 0
_pmcs:
/* 32 bit protected mode code segment */
.word 0xffff, 0
.byte 0, 0x9f, 0xcf, 0
_pmds:
/* 32 bit protected mode data segment */
.word 0xffff,0
.byte 0,0x93,0xcf,0
_gdt_end:
.code32
code32_start:
/*****************************************************************************
* Copy payload to target location. Do the copy backwards, since if
* there's overlap with a forward copy then it means start16 is going
* to get trashed during the copy anyway...
*****************************************************************************
*/
popl %edi /* Image target physical address */
pushl %edi
leal (payload-_start16)(%ebx), %esi /* Image source physical addr */
movl $__payload_size, %ecx /* Payload size (not image size) */
addl %ecx, %edi /* Start at last byte (length - 1) */
decl %edi
addl %ecx, %esi
decl %esi
std /* Backward copy of image */
rep movsb
cld
popl %edi /* Restore image target physical address */
leal __decompressor_uncompressed(%edi), %ebx
subl $_text, %ebx /* %ebx = offset for runtime symbols */
/*****************************************************************************
* Copy prefix to storage area within Etherboot image.
*****************************************************************************
*/
popl %esi /* Prefix source physical address */
pushl %edi
leal _prefix_copy(%ebx), %edi /* Prefix copy phys. addr. */
leal _eprefix_copy(%ebx), %ecx
subl %edi, %ecx /* Prefix copy size */
rep movsb /* Forward copy of prefix */
popl %edi /* Restore image target physical address */
/*****************************************************************************
* Record base memory used by Etherboot image
*****************************************************************************
*/
movl %edi, _prefix_image_basemem (%ebx)
/*****************************************************************************
* Jump to start of the image (i.e. the decompressor, or start32 if
* non-compressed).
*****************************************************************************
*/
pushl $0 /* Inform start32 that exit path is 16-bit */
jmpl *%edi /* Jump to image */
.balign 16
/* Etherboot needs to be 16byte aligned or data that
* is virtually aligned is no longer physically aligned
* which is just nasty in general. 16byte alignment
* should be sufficient though.
*/
payload:

View File

@@ -0,0 +1,8 @@
/* When linking with an uncompressed image, these symbols are not
* defined so we provide them here.
*/
__decompressor_uncompressed = 0 ;
__decompressor__start = 0 ;
INCLUDE arch/i386/core/start16z.lds

View File

@@ -0,0 +1,65 @@
OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_ARCH(i386)
/* Linker-generated symbols are prefixed with a double underscore.
* Decompressor symbols are prefixed with __decompressor_. All other
* symbols are the same as in the original object file, i.e. the
* runtime addresses.
*/
ENTRY(_start16)
SECTIONS {
.text : {
*(.text)
}
/* The payload (the image appended to start16) is placed in its own
 * output section so its extent can be measured below.
 */
.payload : {
__payload_start = .;
*(.data)
__payload_end = .;
}
/* __payload_size is the size of the binary image appended to
* start16, in bytes.
*/
__payload_size = __payload_end - __payload_start ;
/* __size is the size of the runtime image
* (start32 + the C code), in bytes.
*/
__size = _end - _start ;
/* __decompressor_size is the size of the decompressor, in
* bytes. For a non-compressed image, start16.lds sets
* __decompressor_uncompressed = __decompressor__start = 0.
*/
__decompressor_size = __decompressor_uncompressed - __decompressor__start ;
/* __image_size is the total size of the image, after
* decompression and including the decompressor if applicable.
* It is therefore the amount of memory that start16's payload
* needs in order to execute, in bytes.
*/
__image_size = __size + __decompressor_size ;
/* Amount to add to runtime symbols to obtain the offset of
* that symbol within the image.
*/
__offset_adjust = __decompressor_size - _start ;
/* Calculations for the stack
*/
__stack_size = _estack - _stack ;
__offset_stack = _stack + __offset_adjust ;
/* Some symbols will be larger than 16 bits but guaranteed to
* be multiples of 16. We calculate them in paragraphs and
* export these symbols which can be used in 16-bit code
* without risk of overflow.
*/
__image_size_pgh = ( __image_size / 16 );
__start_pgh = ( _start / 16 );
__decompressor_size_pgh = ( __decompressor_size / 16 );
__offset_stack_pgh = ( __offset_stack / 16 );
}

View File

@@ -0,0 +1,767 @@
/* #defines because ljmp wants a number, probably gas bug */
/* .equ KERN_CODE_SEG,_pmcs-_gdt */
#define KERN_CODE_SEG 0x08
.equ KERN_DATA_SEG,_pmds-_gdt
/* .equ REAL_CODE_SEG,_rmcs-_gdt */
#define REAL_CODE_SEG 0x18
.equ REAL_DATA_SEG,_rmds-_gdt
.equ FLAT_CODE_SEG,_pmcs2-_gdt
.equ FLAT_DATA_SEG,_pmds2-_gdt
.equ CR0_PE,1
#ifdef CONFIG_X86_64
.equ LM_CODE_SEG, _lmcs-_gdt
.equ LM_DATA_SEG, _lmds-_gdt
#endif
.equ MSR_K6_EFER, 0xC0000080
.equ EFER_LME, 0x00000100
.equ X86_CR4_PAE, 0x00000020
.equ CR0_PG, 0x80000000
#ifdef GAS291
#define DATA32 data32;
#define ADDR32 addr32;
#define LJMPI(x) ljmp x
#else
#define DATA32 data32
#define ADDR32 addr32
/* newer GAS295 require #define LJMPI(x) ljmp *x */
#define LJMPI(x) ljmp x
#endif
#define BOCHSBP xchgw %bx, %bx
#include "callbacks.h"
#define NUM_PUSHA_REGS (8)
#define NUM_SEG_REGS (6)
/*
* NOTE: if you write a subroutine that is called from C code (gcc/egcs),
* then you only have to take care of %ebx, %esi, %edi and %ebp. These
* registers must not be altered under any circumstance. All other registers
* may be clobbered without any negative side effects. If you don't follow
* this rule then you'll run into strange effects that only occur on some
* gcc versions (because the register allocator may use different registers).
*
* All the data32 prefixes for the ljmp instructions are necessary, because
* the assembler emits code with a relocation address of 0. This means that
* all destinations are initially negative, which the assembler doesn't grok,
* because for some reason negative numbers don't fit into 16 bits. The addr32
* prefixes are there for the same reasons, because otherwise the memory
* references are only 16 bit wide. Theoretically they are all superfluous.
* One last note about prefixes: the data32 prefixes on all call _real_to_prot
* instructions could be removed if the _real_to_prot function is changed to
* deal correctly with 16 bit return addresses. I tried it, but failed.
*/
/**************************************************************************
* START
*
* This file is no longer entered from the top. init.S will jump to
* either _in_call or _rm_in_call, depending on the processor mode
* when init.S was entered.
**************************************************************************/
.text
.arch i386
.code32
/**************************************************************************
_IN_CALL - make a call in to Etherboot.
**************************************************************************/
/* There are two 32-bit entry points: _in_call and _in_call_far, for
* near calls and far calls respectively. Both should be called with
* flat physical addresses. They will result in a call to the C
* routine in_call(); see there for API details.
*
* Note that this routine makes fairly heavy use of the stack and no
* use of fixed data areas. This is because it must be re-entrant;
* there may be more than one concurrent call in to Etherboot.
*/
#define IC_OFFSET_VA_LIST_PTR ( 0 )
#define IC_OFFSET_VA_LIST_PTR_E ( IC_OFFSET_VA_LIST_PTR + 4 )
#define IC_OFFSET_REGISTERS ( IC_OFFSET_VA_LIST_PTR_E )
#define IC_OFFSET_REGISTERS_E ( IC_OFFSET_REGISTERS + ( NUM_PUSHA_REGS * 4 ) )
#define IC_OFFSET_SEG_REGS ( IC_OFFSET_REGISTERS_E )
#define IC_OFFSET_SEG_REGS_E ( IC_OFFSET_SEG_REGS + ( NUM_SEG_REGS * 2 ) )
#define IC_OFFSET_GDT ( IC_OFFSET_SEG_REGS_E )
#define IC_OFFSET_GDT_E ( IC_OFFSET_GDT + 8 )
#define IC_OFFSET_FLAGS ( IC_OFFSET_GDT_E )
#define IC_OFFSET_FLAGS_E ( IC_OFFSET_FLAGS + 4 )
#define IC_OFFSET_RETADDR ( IC_OFFSET_FLAGS_E )
#define IC_OFFSET_RETADDR_E ( IC_OFFSET_RETADDR + 8 )
#define IC_OFFSET_ORIG_STACK ( IC_OFFSET_RETADDR )
#define IC_OFFSET_OPCODE ( IC_OFFSET_ORIG_STACK + 8 )
#define IC_OFFSET_OPCODE_E ( IC_OFFSET_OPCODE + 4 )
#define IC_OFFSET_VA_LIST ( IC_OFFSET_OPCODE_E )
.code32
.globl _in_call
.globl _in_call_far
_in_call:
/* Expand to far return address */
pushl %eax /* Store %eax */
xorl %eax, %eax
movw %cs, %ax
xchgl %eax, 4(%esp) /* 4(%esp) = %cs, %eax = ret addr */
xchgl %eax, 0(%esp) /* 0(%esp) = ret addr, restore %eax */
_in_call_far:
/* Store flags */
pushfl
/* Store the GDT */
subl $8, %esp
sgdt 0(%esp)
/* Store segment register values */
pushw %gs
pushw %fs
pushw %es
pushw %ds
pushw %ss
pushw %cs
/* Store general-purpose register values */
pushal
/* Replace %esp in store with physical %esp value on entry */
leal (IC_OFFSET_ORIG_STACK - IC_OFFSET_REGISTERS)(%esp), %eax
movl %eax, (IC_OFFSET_REGISTERS - IC_OFFSET_REGISTERS + 12)(%esp)
/* Store va_list pointer (physical address) */
leal (IC_OFFSET_VA_LIST - IC_OFFSET_VA_LIST_PTR_E)(%esp), %eax
pushl %eax
/* IC_OFFSET_*(%esp) are now valid */
/* Switch to virtual addresses */
call _phys_to_virt
/* Fixup the va_list pointer */
movl virt_offset, %ebp
subl %ebp, IC_OFFSET_VA_LIST_PTR(%esp)
/* Check opcode for EB_USE_INTERNAL_STACK flag */
movl IC_OFFSET_OPCODE(%esp), %eax
testl $EB_USE_INTERNAL_STACK, %eax
je 2f
/* Use internal stack flag set */
/* Check %esp is not already in internal stack range */
leal _stack, %esi /* %esi = bottom of internal stack */
leal _estack, %edi /* %edi = top of internal stack */
cmpl %esi, %esp
jb 1f
cmpl %edi, %esp
jbe 2f
1: /* %esp not currently in internal stack range */
movl %esp, %esi /* %esi = original stack */
movl $IC_OFFSET_OPCODE_E, %ecx /* %ecx = length to transfer */
subl %ecx, %edi /* %edi = internal stack pos */
movl %edi, %esp /* = new %esp */
rep movsb /* Copy data to internal stack */
2:
/* Call to C code */
call i386_in_call
/* Set %eax (return code from C) in registers structure on
* stack, so that we return it to the caller.
*/
movl %eax, (IC_OFFSET_REGISTERS + 28)(%esp)
/* Calculate physical continuation address */
movl virt_offset, %ebp
movzwl (IC_OFFSET_SEG_REGS + 0)(%esp), %eax /* %cs */
movzwl (IC_OFFSET_SEG_REGS + 2)(%esp), %ebx /* %ss */
pushl %eax /* Continuation segment */
leal 1f(%ebp), %eax
pushl %eax /* Continuation offset */
/* Restore caller's GDT */
cli /* Temporarily disable interrupts */
lgdt (8+IC_OFFSET_GDT)(%esp)
/* Reset %ss and adjust %esp */
movw %bx, %ss
addl %ebp, %esp
lret /* Reload %cs:eip, flush prefetch */
1:
/* Skip va_list ptr */
popl %eax
/* Reload general-purpose registers to be returned */
popal
/* Reload segment registers as passed in from caller */
popw %gs
popw %fs
popw %es
popw %ds
addl $(4+8), %esp /* Skip %cs, %ss and GDT (already reloaded) */
/* Restore flags (including revert of interrupt status) */
popfl
/* Restore physical %esp from entry. It will only be
* different if EB_USE_INTERNAL_STACK was specified.
*/
movl ( 12 + IC_OFFSET_REGISTERS - IC_OFFSET_RETADDR )(%esp), %esp
/* Check for EB_SKIP_OPCODE */
pushfl
testl $EB_SKIP_OPCODE, 12(%esp)
jnz 1f
/* Normal return */
popfl
lret
1: /* Return and skip opcode */
popfl
lret $4
/**************************************************************************
RELOCATE_TO - relocate etherboot to the specified address
**************************************************************************/
/* relocate_to(new_phys_addr)
 *
 * C-callable. Copies the whole Etherboot runtime image (_text.._end)
 * to the physical address passed on the stack, updates virt_offset
 * and the GDT code/data segment bases to match, then reloads the
 * segment registers so execution continues in the relocated copy.
 * Callee-save registers are preserved per the C ABI.
 */
.globl relocate_to
relocate_to:
/* Save the callee save registers */
pushl %ebp
pushl %esi
pushl %edi
/* Compute the virtual destination address */
/* 16(%esp): first C argument, after 3 pushes + return address */
movl 16(%esp), %edi # dest
subl virt_offset, %edi
/* Compute the new value of virt_offset */
movl 16(%esp), %ebp # virt_offset
subl $_text, %ebp
/* Fixup the gdt: rebase the code/data descriptors at _pmcs so that
 * virtual addresses continue to map to the new physical location.
 */
pushl $_pmcs
pushl %ebp # virt_offset
call set_seg_base
addl $8, %esp
/* Fixup gdtarg: store the new physical address of the GDT into the
 * 32-bit base field of the lgdt argument (offset +2 past the limit).
 */
leal _gdt(%ebp), %eax
movl %eax, gdtarg +2
/* Fixup virt_offset */
movl %ebp, virt_offset
/* Load the move parameters */
movl $_text, %esi
movl $_end, %ecx
subl %esi, %ecx
/* Move Etherboot (rep movsb consumes %esi, %edi, %ecx) */
rep
movsb
/* Reload the gdt (explicit %cs override: the descriptor-relative
 * data segments may be stale at this point).
 */
cs
lgdt gdtarg
/* Reload %cs */
ljmp $KERN_CODE_SEG, $1f
1:
/* reload other segment registers */
movl $KERN_DATA_SEG, %eax
movl %eax,%ds
movl %eax,%es
movl %eax,%ss
movl %eax,%fs
movl %eax,%gs
/* Restore the callee save registers */
popl %edi
popl %esi
popl %ebp
/* return */
ret
/**************************************************************************
XSTART32 - Transfer control to the kernel just loaded
**************************************************************************/
.globl xstart32
xstart32:
/* Save the callee save registers */
movl %ebp, os_regs + 32
movl %esi, os_regs + 36
movl %edi, os_regs + 40
movl %ebx, os_regs + 44
/* save the return address */
popl %eax
movl %eax, os_regs + 48
/* save the stack pointer */
movl %esp, os_regs + 52
/* Get the new destination address */
popl %ecx
/* Store the physical address of xend on the stack */
movl $xend32, %ebx
addl virt_offset, %ebx
pushl %ebx
/* Store the destination address on the stack */
pushl $FLAT_CODE_SEG
pushl %ecx
/* Cache virt_offset */
movl virt_offset, %ebp
/* Switch to using physical addresses */
call _virt_to_phys
/* Save the target stack pointer */
movl %esp, os_regs + 12(%ebp)
leal os_regs(%ebp), %esp
/* Store the pointer to os_regs */
movl %esp, os_regs_ptr(%ebp)
/* Load my new registers */
popal
movl (-32 + 12)(%esp), %esp
/* Jump to the new kernel
* The lret switches to a flat code segment
*/
lret
.balign 4
.globl xend32
xend32:
/* Fixup %eflags */
nop
cli
cld
/* Load %esp with &os_regs + virt_offset */
.byte 0xbc /* movl $0, %esp */
os_regs_ptr:
.long 0
/* Save the result registers */
addl $32, %esp
pushal
/* Compute virt_offset */
movl %esp, %ebp
subl $os_regs, %ebp
/* Load the stack pointer */
movl 52(%esp), %esp
/* Enable the virtual addresses */
leal _phys_to_virt(%ebp), %eax
call *%eax
/* Restore the callee save registers */
movl os_regs + 32, %ebp
movl os_regs + 36, %esi
movl os_regs + 40, %edi
movl os_regs + 44, %ebx
movl os_regs + 48, %edx
movl os_regs + 52, %esp
/* Get the C return value */
movl os_regs + 28, %eax
jmpl *%edx
#ifdef CONFIG_X86_64
.arch sledgehammer
/**************************************************************************
XSTART_lm - Transfer control to the kernel just loaded in long mode
**************************************************************************/
.globl xstart_lm
xstart_lm:
/* Save the callee save registers */
pushl %ebp
pushl %esi
pushl %edi
pushl %ebx
/* Cache virt_offset && (virt_offset & 0xfffff000) */
movl virt_offset, %ebp
movl %ebp, %ebx
andl $0xfffff000, %ebx
/* Switch to using physical addresses */
call _virt_to_phys
/* Initialize the page tables */
/* Level 4 */
leal 0x23 + pgt_level3(%ebx), %eax
leal pgt_level4(%ebx), %edi
movl %eax, (%edi)
/* Level 3 */
leal 0x23 + pgt_level2(%ebx), %eax
leal pgt_level3(%ebx), %edi
movl %eax, 0x00(%edi)
addl $4096, %eax
movl %eax, 0x08(%edi)
addl $4096, %eax
movl %eax, 0x10(%edi)
addl $4096, %eax
movl %eax, 0x18(%edi)
/* Level 2 */
movl $0xe3, %eax
leal pgt_level2(%ebx), %edi
leal 16384(%edi), %esi
pgt_level2_loop:
movl %eax, (%edi)
addl $8, %edi
addl $0x200000, %eax
cmp %esi, %edi
jne pgt_level2_loop
/* Point at the x86_64 page tables */
leal pgt_level4(%ebx), %edi
movl %edi, %cr3
/* Setup for the return from 64bit mode */
/* 64bit align the stack */
movl %esp, %ebx /* original stack pointer + 16 */
andl $0xfffffff8, %esp
/* Save original stack pointer + 16 */
pushl %ebx
/* Save virt_offset */
pushl %ebp
/* Setup for the jmp to 64bit long mode */
leal start_lm(%ebp), %eax
movl %eax, 0x00 + start_lm_addr(%ebp)
movl $LM_CODE_SEG, %eax
movl %eax, 0x04 + start_lm_addr(%ebp)
/* Setup for the jump out of 64bit long mode */
leal end_lm(%ebp), %eax
movl %eax, 0x00 + end_lm_addr(%ebp)
movl $FLAT_CODE_SEG, %eax
movl %eax, 0x04 + end_lm_addr(%ebp)
/* Enable PAE mode */
movl %cr4, %eax
orl $X86_CR4_PAE, %eax
movl %eax, %cr4
/* Enable long mode */
movl $MSR_K6_EFER, %ecx
rdmsr
orl $EFER_LME, %eax
wrmsr
/* Start paging, entering 32bit compatibility mode */
movl %cr0, %eax
orl $CR0_PG, %eax
movl %eax, %cr0
/* Enter 64bit long mode */
ljmp *start_lm_addr(%ebp)
.code64
start_lm:
/* Load 64bit data segments */
movl $LM_DATA_SEG, %eax
movl %eax, %ds
movl %eax, %es
movl %eax, %ss
andq $0xffffffff, %rbx
/* Get the address to jump to */
movl 20(%rbx), %edx
andq $0xffffffff, %rdx
/* Get the argument pointer */
movl 24(%rbx), %ebx
andq $0xffffffff, %rbx
/* Jump to the 64bit code */
call *%rdx
/* Preserve the result */
movl %eax, %edx
/* Fixup %eflags */
cli
cld
/* Switch to 32bit compatibility mode */
ljmp *end_lm_addr(%rip)
.code32
end_lm:
/* Disable paging */
movl %cr0, %eax
andl $~CR0_PG, %eax
movl %eax, %cr0
/* Disable long mode */
movl $MSR_K6_EFER, %ecx
rdmsr
andl $~EFER_LME, %eax
wrmsr
/* Disable PAE */
movl %cr4, %eax
andl $~X86_CR4_PAE, %eax
movl %eax, %cr4
/* Compute virt_offset */
popl %ebp
/* Compute the original stack pointer + 16 */
popl %ebx
movl %ebx, %esp
/* Enable the virtual addresses */
leal _phys_to_virt(%ebp), %eax
call *%eax
/* Restore the callee save registers */
popl %ebx
popl %esi
popl %edi
popl %ebp
/* Get the C return value */
movl %edx, %eax
/* Return */
ret
.arch i386
#endif /* CONFIG_X86_64 */
/**************************************************************************
SETJMP - Save stack context for non-local goto
**************************************************************************/
/* setjmp(jmp_buf) - save the callee-save register context.
 *
 * Stores return address, %ebx, %esp, %ebp, %esi and %edi into the
 * 6-dword jmp_buf passed as the first argument, and returns 0.
 * A later longjmp on the same buffer returns here with a non-zero
 * value in %eax.
 */
.globl setjmp
setjmp:
movl 4(%esp),%ecx /* jmpbuf */
movl 0(%esp),%edx /* return address */
movl %edx,0(%ecx)
movl %ebx,4(%ecx)
movl %esp,8(%ecx)
movl %ebp,12(%ecx)
movl %esi,16(%ecx)
movl %edi,20(%ecx)
movl $0,%eax /* direct invocation returns 0 */
ret
/**************************************************************************
LONGJMP - Non-local jump to a saved stack context
**************************************************************************/
/* longjmp(jmp_buf, result) - restore a context saved by setjmp.
 *
 * Reloads %ebx/%esp/%ebp/%esi/%edi from the buffer and "returns"
 * to the saved return address with %eax = result. Per the C
 * standard, a result of 0 is forced to 1 so the resumed setjmp
 * call never appears to return 0.
 */
.globl longjmp
longjmp:
movl 4(%esp),%edx /* jumpbuf */
movl 8(%esp),%eax /* result */
movl 0(%edx),%ecx /* saved return address */
movl 4(%edx),%ebx
movl 8(%edx),%esp /* switch to the saved stack */
movl 12(%edx),%ebp
movl 16(%edx),%esi
movl 20(%edx),%edi
cmpl $0,%eax
jne 1f
movl $1,%eax /* never return 0 from the resumed setjmp */
1: movl %ecx,0(%esp) /* overwrite return slot, then ret to it */
ret
/**************************************************************************
_VIRT_TO_PHYS - Transition from virtual to physical addresses
Preserves all preservable registers and flags
**************************************************************************/
/* _VIRT_TO_PHYS - switch this CPU from virtual to flat physical
 * addressing by loading the flat (base 0) code/data descriptors and
 * rebasing the return address and stack pointer by virt_offset.
 * Preserves all preservable registers and flags.
 */
.globl _virt_to_phys
_virt_to_phys:
pushfl
pushl %ebp
pushl %eax
movl virt_offset, %ebp /* Load virt_offset */
/* 12(%esp): caller's return address, above the 3 dwords pushed */
addl %ebp, 12(%esp) /* Adjust the return address */
/* reload the code segment (far return via pushed seg:offset pair,
 * with the continuation offset rebased to a physical address)
 */
pushl $FLAT_CODE_SEG
leal 1f(%ebp), %eax
pushl %eax
lret
1:
/* reload other segment registers */
movl $FLAT_DATA_SEG, %eax
movl %eax, %ds
movl %eax, %es
movl %eax, %ss
addl %ebp, %esp /* Adjust the stack pointer */
movl %eax, %fs
movl %eax, %gs
popl %eax
popl %ebp
popfl
ret
/**************************************************************************
_PHYS_TO_VIRT - Transition from using physical to virtual addresses
Preserves all preservable registers and flags
**************************************************************************/
/* _PHYS_TO_VIRT - switch this CPU from flat physical to virtual
 * addressing. Discovers the current load offset with a call/pop
 * trick, records it in virt_offset, rebases the GDT code/data
 * descriptors accordingly, reloads the GDT and segment registers,
 * and un-rebases the stack pointer and return address.
 * Preserves all preservable registers and flags.
 */
.globl _phys_to_virt
_phys_to_virt:
pushfl
pushl %ebp
pushl %eax
/* call/pop yields the physical address of 1:; subtracting the
 * link-time address of 1b leaves the load offset in %ebp.
 */
call 1f
1: popl %ebp
subl $1b, %ebp
movl %ebp, virt_offset(%ebp) /* store via physical address */
/* Fixup the gdt */
leal _pmcs(%ebp), %eax
pushl %eax
pushl %ebp
call set_seg_base
addl $8, %esp
/* Fixup gdtarg (base field at offset +2 past the limit word) */
leal _gdt(%ebp), %eax
movl %eax, (gdtarg+2)(%ebp)
/* Load the global descriptor table */
cli
lgdt %cs:gdtarg(%ebp)
ljmp $KERN_CODE_SEG, $1f
1:
/* reload other segment registers */
movl $KERN_DATA_SEG, %eax
movl %eax, %ds
movl %eax, %es
movl %eax, %ss
subl %ebp, %esp /* Adjust the stack pointer */
movl %eax, %fs
movl %eax, %gs
subl %ebp, 12(%esp) /* Adjust the return address */
popl %eax
popl %ebp
popfl
ret
/**************************************************************************
SET_SEG_BASE - Set the base address of a segment register
**************************************************************************/
/* set_seg_base(base, &code_descriptor)
 *
 * C-callable. Writes the 32-bit base address into a pair of adjacent
 * 8-byte GDT descriptors: the code descriptor at the given pointer
 * and the data descriptor immediately following it (+8). The base is
 * split across descriptor bytes 2-4 and 7 as the i386 format requires.
 */
.globl set_seg_base
set_seg_base:
pushl %eax
pushl %ebx
movl 12(%esp), %eax /* %eax = base address */
movl 16(%esp), %ebx /* %ebx = &code_descriptor */
movw %ax, (0+2)(%ebx) /* CS base bits 0-15 */
movw %ax, (8+2)(%ebx) /* DS base bits 0-15 */
shrl $16, %eax
movb %al, (0+4)(%ebx) /* CS base bits 16-23 */
movb %al, (8+4)(%ebx) /* DS base bits 16-23 */
movb %ah, (0+7)(%ebx) /* CS base bits 24-31 */
movb %ah, (8+7)(%ebx) /* DS base bits 24-31 */
popl %ebx
popl %eax
ret
/**************************************************************************
GLOBAL DESCRIPTOR TABLE
**************************************************************************/
.data
.align 4
.globl _gdt
.globl gdtarg
_gdt:
gdtarg:
.word _gdt_end - _gdt - 1 /* limit */
.long _gdt /* addr */
.word 0
.globl _pmcs
_pmcs:
/* 32 bit protected mode code segment */
.word 0xffff,0
.byte 0,0x9f,0xcf,0
_pmds:
/* 32 bit protected mode data segment */
.word 0xffff,0
.byte 0,0x93,0xcf,0
_rmcs:
/* 16 bit real mode code segment */
.word 0xffff,(0&0xffff)
.byte (0>>16),0x9b,0x00,(0>>24)
_rmds:
/* 16 bit real mode data segment */
.word 0xffff,(0&0xffff)
.byte (0>>16),0x93,0x00,(0>>24)
_pmcs2:
/* 32 bit protected mode code segment, base 0 */
.word 0xffff,0
.byte 0,0x9f,0xcf,0
_pmds2:
/* 32 bit protected mode data segment, base 0 */
.word 0xffff,0
.byte 0,0x93,0xcf,0
#ifdef CONFIG_X86_64
_lmcs:
/* 64bit long mode code segment, base 0 */
.word 0xffff, 0
.byte 0x00, 0x9f, 0xaf , 0x00
_lmds:
/* 64bit long mode data segment, base 0 */
.word 0xffff, 0
.byte 0x00, 0x93, 0xcf, 0x00
#endif
_gdt_end:
/* The initial register contents */
.balign 4
.globl initial_regs
initial_regs:
.fill 8, 4, 0
/* The virtual address offset */
.globl virt_offset
virt_offset:
.long 0
.section ".stack"
.p2align 3
/* allocate a 4K stack in the stack segment */
.globl _stack
_stack:
.space 4096
.globl _estack
_estack:
#ifdef CONFIG_X86_64
.section ".bss"
.p2align 12
/* Include a dummy space in case we are loaded badly aligned */
.space 4096
/* Reserve enough space for a page table covering 4GB with 2MB pages */
pgt_level4:
.space 4096
pgt_level3:
.space 4096
pgt_level2:
.space 16384
start_lm_addr:
.space 8
end_lm_addr:
.space 8
#endif

View File

@@ -0,0 +1,201 @@
#include "realmode.h"
#include "segoff.h"
/* NBI segment descriptor.  'length' nibble-encodes the descriptor's own
 * size in bytes as ((length & 0x0F) << 2) + ((length & 0xF0) >> 2)
 * (see the advancing code in tagged_probe/tagged_download). */
struct segheader
{
unsigned char length;    /* descriptor size, nibble-encoded as above */
unsigned char vendortag;
unsigned char reserved;
unsigned char flags;     /* bits 0-1: load-address mode; bit 2: last segment */
unsigned long loadaddr;  /* load address; interpretation depends on flags bits 0-1 */
unsigned long imglength; /* bytes of this segment present in the image */
unsigned long memlength; /* bytes this segment occupies in memory */
};
/* NBI image header, found at the start of the first 512-byte block.
 * The magic value 0x1B031336 is checked in tagged_probe. */
struct imgheader
{
unsigned long magic;
unsigned long length;	/* nibble-encoded header size, plus flag bits
				 * (bit 8: program returns, bit 31: linear
				 * exec address -- see macros below) */
union
{
segoff_t segoff;          /* real-mode segment:offset load location */
unsigned long location;   /* linear load location */
} u;
unsigned long execaddr;   /* entry point (segoff or linear, per bit 31) */
};
/* Keep all context about the loaded image in one place.
 * This state is shared between tagged_probe (which fills it in) and
 * tagged_download (which consumes it as data blocks arrive). */
static struct tagged_context
{
struct imgheader img;	/* copy of header */
unsigned long linlocation;	/* linear addr of header */
unsigned long last0, last1;	/* start/end of previous segment, used for
				 * the relative load-address modes */
unsigned long segaddr, seglen;	/* descriptor addr / bytes left of current segment */
unsigned char segflags;	/* flags byte of the current segment descriptor */
unsigned char first;	/* nonzero: first data block (512-byte header) still pending */
unsigned long curaddr;	/* physical address the next bytes are copied to */
} tctx;
/* Flag bits carried in imgheader.length (see struct above). */
#define TAGGED_PROGRAM_RETURNS (tctx.img.length & 0x00000100) /* bit 8 */
#define LINEAR_EXEC_ADDR (tctx.img.length & 0x80000000) /* bit 31 */
static sector_t tagged_download(unsigned char *data, unsigned int len, int eof);
void xstart16 (unsigned long execaddr, segoff_t location,
void *bootp);
/* Probe for an NBI (tagged) image.  Returns the download callback when
 * the magic number matches, 0 when this is not an NBI image, or
 * dead_download when the image is recognised but cannot be loaded.
 * On success all loader context in tctx is initialised. */
static inline os_download_t tagged_probe(unsigned char *data, unsigned int len)
{
struct segheader *sh;
unsigned long loc;
if (*((uint32_t *)data) != 0x1B031336L) {
return 0;
}
printf("(NBI)");
/* If we don't have enough data give up */
if (len < 512)
return dead_download;
/* Zero all context info */
memset(&tctx, 0, sizeof(tctx));
/* Copy first 4 longwords */
memcpy(&tctx.img, data, sizeof(tctx.img));
/* Memory location where we are supposed to save it */
tctx.segaddr = tctx.linlocation =
((tctx.img.u.segoff.segment) << 4) + tctx.img.u.segoff.offset;
/* Reserve the 512-byte header block itself */
if (!prep_segment(tctx.segaddr, tctx.segaddr + 512, tctx.segaddr + 512,
0, 512)) {
return dead_download;
}
/* Now verify the segments we are about to load: walk the descriptor
 * chain inside the first 512 bytes, nibble-decoding each descriptor's
 * size to find the next one.  Bit 2 of flags marks the last segment. */
loc = 512;
for(sh = (struct segheader *)(data
+ ((tctx.img.length & 0x0F) << 2)
+ ((tctx.img.length & 0xF0) >> 2) );
(sh->length > 0) && ((unsigned char *)sh < data + 512);
sh = (struct segheader *)((unsigned char *)sh
+ ((sh->length & 0x0f) << 2) + ((sh->length & 0xf0) >> 2)) ) {
if (!prep_segment(
sh->loadaddr,
sh->loadaddr + sh->imglength,
sh->loadaddr + sh->imglength,
loc, loc + sh->imglength)) {
return dead_download;
}
loc = loc + sh->imglength;
if (sh->flags & 0x04)
break;
}
/* NOTE(review): if the loop exits because sh stepped past data+512,
 * this reads flags just outside the verified region -- confirm the
 * descriptor chain is always terminated within the first block. */
if (!(sh->flags & 0x04))
return dead_download;
/* Grab a copy of the header block at its final location */
memcpy(phys_to_virt(tctx.segaddr), data, 512);
/* Advance to first segment descriptor */
tctx.segaddr += ((tctx.img.length & 0x0F) << 2)
+ ((tctx.img.length & 0xF0) >> 2);
/* Remember to skip the first 512 data bytes */
tctx.first = 1;
return tagged_download;
}
/* Consume one data block of an NBI image.  Copies payload bytes into the
 * segments described by the descriptor chain (state in tctx), and when
 * the final segment has been filled, transfers control to the loaded
 * program via xstart32 (linear) or xstart16 (real mode); in that case
 * this function does not return normally (longjmp back to the
 * restart point). */
static sector_t tagged_download(unsigned char *data, unsigned int len, int eof)
{
int i;
if (tctx.first) {
/* The first block contains the 512-byte header, already copied
 * by tagged_probe; skip it and process any remaining payload. */
tctx.first = 0;
if (len > 512) {
len -= 512;
data += 512;
/* and fall through to deal with rest of block */
} else
return 0;
}
for (;;) {
/* NOTE(review): comment said "Detect truncated files", but this
 * clears eof on an empty block -- confirm intent. */
if (len == 0)
eof = 0;
/* Current segment exhausted: advance through descriptors.  The
 * loop also handles zero-length segments. */
while (tctx.seglen == 0) {
struct segheader sh;
if (tctx.segflags & 0x04) {
/* Last segment already filled: start the image. */
done(1);
if (LINEAR_EXEC_ADDR) {
int result;
/* no gateA20_unset for PM call */
result = xstart32(tctx.img.execaddr,
virt_to_phys(&loaderinfo),
tctx.linlocation,
virt_to_phys(BOOTP_DATA_ADDR));
printf("Secondary program returned %d\n",
result);
if (!TAGGED_PROGRAM_RETURNS) {
/* We shouldn't have returned */
result = -2;
}
if (result == 0)
result = -2;
longjmp(restart_etherboot, result);
} else {
gateA20_unset();
xstart16(tctx.img.execaddr,
tctx.img.u.segoff,
BOOTP_DATA_ADDR);
longjmp(restart_etherboot, -2);
}
}
/* Read next descriptor (already copied to its final home) */
sh = *((struct segheader *)phys_to_virt(tctx.segaddr));
tctx.seglen = sh.imglength;
/* Bits 0-1 of flags select how loadaddr is interpreted:
 * 0: absolute; 1: relative to end of previous segment;
 * 2: down from top of memory; 3: down from previous start. */
if ((tctx.segflags = sh.flags & 0x03) == 0)
tctx.curaddr = sh.loadaddr;
else if (tctx.segflags == 0x01)
tctx.curaddr = tctx.last1 + sh.loadaddr;
else if (tctx.segflags == 0x02)
tctx.curaddr = (Address)(meminfo.memsize * 1024L
+ 0x100000L)
- sh.loadaddr;
else
tctx.curaddr = tctx.last0 - sh.loadaddr;
tctx.last1 = (tctx.last0 = tctx.curaddr) + sh.memlength;
tctx.segflags = sh.flags;
tctx.segaddr += ((sh.length & 0x0F) << 2)
+ ((sh.length & 0xF0) >> 2);
/* Avoid lock-up on a corrupt zero-length descriptor */
if ( sh.length == 0 ) longjmp(restart_etherboot, -2);
}
if ((len <= 0) && !eof)
break;
/* Copy as much of this block as the current segment accepts */
i = (tctx.seglen > len) ? len : tctx.seglen;
memcpy(phys_to_virt(tctx.curaddr), data, i);
tctx.seglen -= i;
tctx.curaddr += i;
len -= i;
data += i;
}
return 0;
}
/* Start a real-mode NBI program.  Builds a small parameter frame
 * (entry segoff, header location, bootp pointer as segment:offset) and
 * executes a real-mode fragment that performs a calculated far call to
 * the entry point.  Returns when/if the called program lretss back. */
void xstart16 (unsigned long execaddr, segoff_t location,
void *bootp) {
struct {
segoff_t execaddr;
segoff_t location;
segoff_t bootp;
} PACKED in_stack;
/* AFAICT, execaddr is actually already a segment:offset */
*((unsigned long *)&in_stack.execaddr) = execaddr;
in_stack.location = location;
in_stack.bootp.segment = SEGMENT(bootp);
in_stack.bootp.offset = OFFSET(bootp);
/* The fragment pops the entry segoff into %eax, pushes a return
 * address (label 2) and the entry address, then lrets to it --
 * i.e. a far call through a register value. */
RM_FRAGMENT(rm_xstart16,
"popl %eax\n\t" /* Calculated lcall */
"pushw %cs\n\t"
"call 1f\n1:\tpopw %bp\n\t"
"leaw (2f-1b)(%bp), %bx\n\t"
"pushw %bx\n\t"
"pushl %eax\n\t"
"lret\n2:\n\t"
);
real_call ( rm_xstart16, &in_stack, NULL );
}

View File

@@ -0,0 +1,94 @@
/*
*
* modified from linuxbios code
* by Cai Qiang <rimy2000@hotmail.com>
*
*/
#ifdef CONSOLE_DIRECT_VGA
#include <etherboot.h>
#include <vga.h>
static char *vidmem;		/* The video buffer (mapped text-mode memory) */
static int video_line, video_col;	/* current cursor position */
#define VIDBUFFER 0xB8000	/* physical address of VGA colour text buffer */
/* Fill n 16-bit words starting at s with the value c.
 * Word-wise analogue of memset(): the VGA text buffer is an array of
 * (character, attribute) word pairs, so it is cleared a word at a time.
 *
 * Fix: the loop index was a signed int compared against the unsigned
 * count n (sign-compare hazard); use an unsigned index.
 */
static void memsetw(void *s, int c, unsigned int n)
{
	unsigned int i;
	u16 *ss = (u16 *) s;

	for (i = 0; i < n; i++) {
		ss[i] = (u16) c;
	}
}
/* Initialise the direct-VGA console: map the text buffer and, on the
 * first call only, home the cursor and clear the screen. */
void video_init(void)
{
static int inited=0;
vidmem = (unsigned char *)phys_to_virt(VIDBUFFER);
if (!inited) {
video_line = 0;
video_col = 0;
/* NOTE(review): fills 2*1024 words (4096 bytes) with the raw
 * value VGA_ATTR_CLR_WHT -- that puts the attribute value in the
 * character byte and covers slightly more than an 80x25 screen
 * (4000 bytes).  Looks intentional-enough to leave alone, but
 * confirm the desired clear pattern. */
memsetw(vidmem, VGA_ATTR_CLR_WHT, 2*1024);
inited=1;
}
}
/* Scroll the text screen up by one line and blank the bottom line.
 *
 * Fix: the original used memcpy() on overlapping source/destination
 * regions, which is undefined behaviour.  Since the destination is
 * below the source, a forward byte-by-byte copy is well defined and
 * needs no memmove().  Only the character bytes (even offsets) of the
 * bottom line are blanked; its attribute bytes keep their values, as
 * before.
 */
static void video_scroll(void)
{
	int i;

	/* Shift lines 1..LINES-1 up to lines 0..LINES-2 */
	for (i = 0; i < (LINES - 1) * COLS * 2; i++)
		vidmem[i] = vidmem[i + COLS * 2];
	/* Blank the character cells of the last line */
	for (i = (LINES - 1) * COLS * 2; i < LINES * COLS * 2; i += 2)
		vidmem[i] = ' ';
}
/* Emit one character on the VGA text console.
 * Handles the control characters \n, \r, \b, \t and \a, clamps the
 * column into range, scrolls when the bottom line is passed, and
 * reprograms the hardware cursor to the new position. */
void vga_putc(unsigned char byte)
{
	int pos;

	switch (byte) {
	case '\n':
		video_line++;
		video_col = 0;
		break;
	case '\r':
		video_col = 0;
		break;
	case '\b':
		video_col--;
		break;
	case '\t':
		video_col += 4;
		break;
	case '\a':
		/* bell: beep not implemented */
		break;
	default:
		/* Printable: store character and white attribute */
		pos = (video_col + video_line * COLS) * 2;
		vidmem[pos] = byte;
		vidmem[pos + 1] = VGA_ATTR_CLR_WHT;
		video_col++;
		break;
	}

	/* Clamp/wrap the cursor position */
	if (video_col < 0)
		video_col = 0;
	if (video_col >= COLS) {
		video_line++;
		video_col = 0;
	}
	if (video_line >= LINES) {
		video_scroll();
		video_line--;
	}

	/* Move the hardware cursor */
	pos = video_col + video_line * COLS;
	write_crtc(pos >> 8, CRTC_CURSOR_HI);
	write_crtc(pos & 0x0ff, CRTC_CURSOR_LO);
}
#endif

View File

@@ -0,0 +1,273 @@
/* Windows CE .bin image loader: shared state and the BOOT_ARGS block
 * that is handed to the CE kernel via a fixed pointer location. */
#define LOAD_DEBUG	0
static int get_x_header(unsigned char *data, unsigned long now);
static void jump_2ep();
/* File signature of a CE .bin image: the bytes "B000FF\n" */
static unsigned char ce_signature[] = {'B', '0', '0', '0', 'F', 'F', '\n',};
static char ** ep;	/* points at BOOT_ARG_PTR_LOCATION */
/* Fixed physical address where CE expects a pointer to BOOT_ARGS */
#define BOOT_ARG_PTR_LOCATION 0x001FFFFC
typedef struct _BOOT_ARGS{
unsigned char ucVideoMode;
unsigned char ucComPort;
unsigned char ucBaudDivisor;
unsigned char ucPCIConfigType;
unsigned long dwSig;	/* must hold BOOTARG_SIG ("BOOT") */
#define BOOTARG_SIG 0x544F4F42
unsigned long dwLen;
unsigned char ucLoaderFlags;
unsigned char ucEshellFlags;
unsigned char ucEdbgAdapterType;
unsigned char ucEdbgIRQ;
unsigned long dwEdbgBaseAddr;
unsigned long dwEdbgDebugZone;
unsigned long dwDHCPLeaseTime;
unsigned long dwEdbgFlags;
unsigned long dwEBootFlag;
unsigned long dwEBootAddr;
unsigned long dwLaunchAddr;
unsigned long pvFlatFrameBuffer;
unsigned short vesaMode;
unsigned short cxDisplayScreen;
unsigned short cyDisplayScreen;
unsigned short cxPhysicalScreen;
unsigned short cyPhysicalScreen;
unsigned short cbScanLineLength;
unsigned short bppScreen;
unsigned char RedMaskSize;
unsigned char REdMaskPosition;
unsigned char GreenMaskSize;
unsigned char GreenMaskPosition;
unsigned char BlueMaskSize;
unsigned char BlueMaskPosition;
} BOOT_ARGS;
BOOT_ARGS BootArgs;
/* Header of the section currently being loaded */
static struct segment_info{
unsigned long addr; // Section Address
unsigned long size; // Section Size
unsigned long checksum; // Section CheckSum
} X;
#define PSIZE (1500) // Max Packet Size
#define DSIZE (PSIZE+12)
static unsigned long dbuffer_available =0;	/* bytes buffered in dbuffer */
static unsigned long not_loadin =0;	/* bytes still owed to current section */
static unsigned long d_now =0;	/* read offset inside dbuffer */
unsigned long entry;	/* kernel entry point, set by get_x_header */
static unsigned long ce_curaddr;	/* physical load address of current section */
static sector_t ce_loader(unsigned char *data, unsigned int len, int eof);
/* Probe for a Windows CE .bin image by its 7-byte "B000FF\n" signature.
 * Returns the loader callback on a match, 0 otherwise.
 *
 * Fixes: guard against packets shorter than the signature before
 * comparing, and use memcmp() -- the signature is a fixed byte sequence
 * (no NUL terminator involved), and strncmp() on an unsigned char
 * buffer also provoked a pointer-signedness mismatch.  The result is
 * identical for any input that matched/failed before.
 */
static os_download_t wince_probe(unsigned char *data, unsigned int len)
{
	if (len < sizeof(ce_signature) ||
	    memcmp(ce_signature, data, sizeof(ce_signature)) != 0) {
		return 0;
	}
	printf("(WINCE)");
	return ce_loader;
}
/* Consume one data block of a Windows CE .bin image.
 * Packets are appended to a static staging buffer (dbuffer); complete
 * section headers are parsed with get_x_header(), and section payload
 * is copied to its physical load address.  When the terminating
 * section (addr == 0) is parsed, get_x_header() jumps to the entry
 * point and never returns here.
 * NOTE(review): eof is never examined, and the append below does not
 * bound-check against DSIZE -- relies on callers delivering at most
 * PSIZE bytes per packet; confirm. */
static sector_t ce_loader(unsigned char *data, unsigned int len, int eof)
{
static unsigned char dbuffer[DSIZE];
int this_write = 0;
static int firsttime = 1;
/*
 * New packet in; we have to
 * [1] copy its data into dbuffer, and
 * [2] update dbuffer_available.
 */
memcpy( (dbuffer+dbuffer_available), data, len); //[1]
dbuffer_available += len; // [2]
len = 0;
d_now = 0;
#if 0
printf("dbuffer_available =%ld \n", dbuffer_available);
#endif
if (firsttime)
{
/* First packet: skip the signature, print the two header
 * longwords (string physical address and image size), then
 * parse the first section header. */
d_now = sizeof(ce_signature);
printf("String Physical Address = %lx \n",
*(unsigned long *)(dbuffer+d_now));
d_now += sizeof(unsigned long);
printf("Image Size = %ld [%lx]\n",
*(unsigned long *)(dbuffer+d_now),
*(unsigned long *)(dbuffer+d_now));
d_now += sizeof(unsigned long);
dbuffer_available -= d_now;
d_now = (unsigned long)get_x_header(dbuffer, d_now);
firsttime = 0;
}
if (not_loadin == 0)
{
/* Previous section complete: parse the next header */
d_now = get_x_header(dbuffer, d_now);
}
while ( not_loadin > 0 )
{
#if LOAD_DEBUG
printf("[0] not_loadin = [%ld], dbuffer_available = [%ld] \n",
not_loadin, dbuffer_available);
printf("[0] d_now = [%ld] \n", d_now);
#endif
/* dbuffer does not have enough data to finish the section: copy all */
if( dbuffer_available <= not_loadin)
{
this_write = dbuffer_available ;
memcpy(phys_to_virt(ce_curaddr), (dbuffer+d_now), this_write );
ce_curaddr += this_write;
not_loadin -= this_write;
/* reset index and available in the dbuffer */
dbuffer_available = 0;
d_now = 0;
#if LOAD_DEBUG
printf("[1] not_loadin = [%ld], dbuffer_available = [%ld] \n",
not_loadin, dbuffer_available);
printf("[1] d_now = [%ld], this_write = [%d] \n",
d_now, this_write);
#endif
// get the next packet...
return (0);
}
/* dbuffer has more data than the section needs: copy partially */
else
{
this_write = not_loadin;
memcpy(phys_to_virt(ce_curaddr), (dbuffer+d_now), this_write);
ce_curaddr += this_write;
not_loadin = 0;
/* reset index and available in the dbuffer */
dbuffer_available -= this_write;
d_now += this_write;
#if LOAD_DEBUG
printf("[2] not_loadin = [%ld], dbuffer_available = [%ld] \n",
not_loadin, dbuffer_available);
printf("[2] d_now = [%ld], this_write = [%d] \n\n",
d_now, this_write);
#endif
/* dbuffer not empty, proceed processing... */
// not enough data buffered for the next section header
if ( dbuffer_available < (sizeof(unsigned long) * 3) )
{
/* compact the remainder to the front and wait for more */
memcpy( (dbuffer+0), (dbuffer+d_now), dbuffer_available);
return (0);
}
else
{
#if LOAD_DEBUG
printf("with remaining data to call get_x \n");
printf("dbuffer available = %ld , d_now = %ld\n",
dbuffer_available, d_now);
#endif
d_now = get_x_header(dbuffer, d_now);
}
}
}
return (0);
}
/* Parse the next section header (three unsigned longs: address, size,
 * checksum) from dbuffer at offset 'now', and set up the shared loader
 * state (ce_curaddr, not_loadin, dbuffer_available) for the section.
 * A section with addr == 0 terminates the image: its size field holds
 * the entry point and control is transferred via jump_2ep(), never
 * returning here.  Returns the updated buffer offset.
 * NOTE(review): return type is int while offsets are unsigned long;
 * harmless for the small offsets used here, but worth confirming.
 *
 * Fix: the Size printf used "%d" for an unsigned long argument, which
 * is undefined varargs behaviour; use %ld like the neighbouring lines.
 */
static int get_x_header(unsigned char *dbuffer, unsigned long now)
{
	X.addr = *(unsigned long *)(dbuffer + now);
	X.size = *(unsigned long *)(dbuffer + now + sizeof(unsigned long));
	X.checksum = *(unsigned long *)(dbuffer + now + sizeof(unsigned long)*2);

	if (X.addr == 0)
	{
		/* Terminating record: size is the entry point */
		entry = X.size;
		done(1);
		printf("Entry Point Address = [%lx] \n", entry);
		jump_2ep();
	}

	if (!prep_segment(X.addr, X.addr + X.size, X.addr + X.size, 0, 0)) {
		longjmp(restart_etherboot, -2);
	}

	ce_curaddr = X.addr;
	now += sizeof(unsigned long)*3;

	/* re-calculate dbuffer available... */
	dbuffer_available -= sizeof(unsigned long)*3;

	/* reset index of this section */
	not_loadin = X.size;

#if 1
	printf("\n");
	printf("\t Section Address = [%lx] \n", X.addr);
	printf("\t Size = %ld [%lx]\n", X.size, X.size);
	printf("\t Checksum = %ld [%lx]\n", X.checksum, X.checksum);
#endif
#if LOAD_DEBUG
	printf("____________________________________________\n");
	printf("\t dbuffer_now = %ld \n", now);
	printf("\t dbuffer available = %ld \n", dbuffer_available);
	printf("\t not_loadin = %ld \n", not_loadin);
#endif
	return now;
}
/* Fill in the BOOT_ARGS block, publish a pointer to it at the fixed
 * location CE expects (BOOT_ARG_PTR_LOCATION), and jump to the loaded
 * kernel's entry point.  Does not return. */
static void jump_2ep()
{
BootArgs.ucVideoMode = 1;
BootArgs.ucComPort = 1;
BootArgs.ucBaudDivisor = 1;
BootArgs.ucPCIConfigType = 1; // do not fill with 0
BootArgs.dwSig = BOOTARG_SIG;
BootArgs.dwLen = sizeof(BootArgs);
/* NOTE(review): ucVideoMode is hard-coded to 1 above, so this
 * 640x480 branch is currently dead -- presumably kept for when the
 * mode becomes configurable; confirm. */
if(BootArgs.ucVideoMode == 0)
{
BootArgs.cxDisplayScreen = 640;
BootArgs.cyDisplayScreen = 480;
BootArgs.cxPhysicalScreen = 640;
BootArgs.cyPhysicalScreen = 480;
BootArgs.bppScreen = 16;
BootArgs.cbScanLineLength = 1024;
BootArgs.pvFlatFrameBuffer = 0x800a0000; // ollie say 0x98000000
}
else if(BootArgs.ucVideoMode != 0xFF)
{
/* Unknown mode: zero the display description */
BootArgs.cxDisplayScreen = 0;
BootArgs.cyDisplayScreen = 0;
BootArgs.cxPhysicalScreen = 0;
BootArgs.cyPhysicalScreen = 0;
BootArgs.bppScreen = 0;
BootArgs.cbScanLineLength = 0;
BootArgs.pvFlatFrameBuffer = 0;
}
/* Store the physical address of BootArgs at the well-known slot.
 * NOTE(review): ep is char** but receives a physical address value;
 * type-sketchy but deliberate -- CE reads a raw 32-bit pointer. */
ep = phys_to_virt(BOOT_ARG_PTR_LOCATION);
*ep= virt_to_phys(&BootArgs);
xstart32(entry);
}