[xen] Update to latest stable release headers

Update to the headers from the latest Xen stable release, and mark all
imported headers as permitted for UEFI Secure Boot.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
Author: Michael Brown
Date:   2026-01-28 14:27:17 +00:00
Parent: 8e31ac9fc3
Commit: f1bcd160ac
20 changed files with 545 additions and 81 deletions


@@ -9,6 +9,9 @@
FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
/* Provide stub definitions if no platform-specific header exists */
#ifndef XEN_GUEST_HANDLE
#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
@@ -32,6 +35,8 @@ struct arch_shared_info {};
#define XEN_LEGACY_MAX_VCPUS 0
#endif
struct xen_hypervisor;
static inline __attribute__ (( always_inline )) unsigned long


@@ -11,6 +11,7 @@
#define __XEN_PUBLIC_ARCH_ARM_H__
FILE_LICENCE ( MIT );
FILE_SECBOOT ( PERMITTED );
/*
* `incontents 50 arm_abi Hypercall Calling Convention
@@ -106,6 +107,7 @@ FILE_LICENCE ( MIT );
* Exactly these sub-operations are supported:
* * HVMOP_set_param
* * HVMOP_get_param
* * HVMOP_guest_request_vm_event
*
* HYPERVISOR_grant_table_op
* All generic sub-operations
@@ -118,6 +120,32 @@ FILE_LICENCE ( MIT );
* HYPERVISOR_argo_op
* All generic sub-operations
*
* HYPERVISOR_hypfs_op
* All generic sub-operations
*
* HYPERVISOR_platform_op
* Exactly these sub-operations are supported:
* * XENPF_settime64
*
* HYPERVISOR_vm_assist
* All generic sub-operations
*
* HYPERVISOR_dm_op
* Exactly these sub-operations are supported:
* * XEN_DMOP_create_ioreq_server
* * XEN_DMOP_get_ioreq_server_info
* * XEN_DMOP_map_io_range_to_ioreq_server
* * XEN_DMOP_unmap_io_range_from_ioreq_server
* * XEN_DMOP_set_ioreq_server_state
* * XEN_DMOP_destroy_ioreq_server
* * XEN_DMOP_set_irq_level
* * XEN_DMOP_nr_vcpus
*
* HYPERVISOR_xsm_op
* All generic sub-operations
*
* HYPERVISOR_multicall
*
* Other notes on the ARM ABI:
*
* - struct start_info is not exported to ARM guests.
@@ -154,8 +182,10 @@ FILE_LICENCE ( MIT );
#define XEN_HYPERCALL_TAG 0XEA1
#define int64_aligned_t int64_t __attribute__((aligned(8)))
#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
#if defined(__XEN__) || defined(__XEN_TOOLS__) || defined(__GNUC__)
#define int64_aligned_t int64_t __attribute__((__aligned__(8)))
#define uint64_aligned_t uint64_t __attribute__((__aligned__(8)))
#endif
#ifndef __ASSEMBLY__
#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
@@ -182,7 +212,7 @@ FILE_LICENCE ( MIT );
do { \
__typeof__(&(hnd)) _sxghr_tmp = &(hnd); \
_sxghr_tmp->q = 0; \
_sxghr_tmp->p = val; \
_sxghr_tmp->p = (val); \
} while ( 0 )
#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
@@ -298,10 +328,16 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
#define XEN_DOMCTL_CONFIG_TEE_NONE 0
#define XEN_DOMCTL_CONFIG_TEE_OPTEE 1
#define XEN_DOMCTL_CONFIG_TEE_FFA 2
#define XEN_DOMCTL_CONFIG_ARM_SCI_NONE 0
#define XEN_DOMCTL_CONFIG_ARM_SCI_SCMI_SMC 1
struct xen_arch_domainconfig {
/* IN/OUT */
uint8_t gic_version;
/* IN - Contains SVE vector length divided by 128 */
uint8_t sve_vl;
/* IN */
uint16_t tee_type;
/* IN */
@@ -320,6 +356,8 @@ struct xen_arch_domainconfig {
*
*/
uint32_t clock_frequency;
/* IN */
uint8_t arm_sci_type;
};
#endif /* __XEN__ || __XEN_TOOLS__ */
@@ -338,36 +376,36 @@ typedef uint64_t xen_callback_t;
/* PSR bits (CPSR, SPSR) */
#define PSR_THUMB (1<<5) /* Thumb Mode enable */
#define PSR_FIQ_MASK (1<<6) /* Fast Interrupt mask */
#define PSR_IRQ_MASK (1<<7) /* Interrupt mask */
#define PSR_ABT_MASK (1<<8) /* Asynchronous Abort mask */
#define PSR_BIG_ENDIAN (1<<9) /* arm32: Big Endian Mode */
#define PSR_DBG_MASK (1<<9) /* arm64: Debug Exception mask */
#define PSR_IT_MASK (0x0600fc00) /* Thumb If-Then Mask */
#define PSR_JAZELLE (1<<24) /* Jazelle Mode */
#define PSR_Z (1<<30) /* Zero condition flag */
#define PSR_THUMB (1U <<5) /* Thumb Mode enable */
#define PSR_FIQ_MASK (1U <<6) /* Fast Interrupt mask */
#define PSR_IRQ_MASK (1U <<7) /* Interrupt mask */
#define PSR_ABT_MASK (1U <<8) /* Asynchronous Abort mask */
#define PSR_BIG_ENDIAN (1U << 9) /* arm32: Big Endian Mode */
#define PSR_DBG_MASK (1U << 9) /* arm64: Debug Exception mask */
#define PSR_IT_MASK (0x0600fc00U) /* Thumb If-Then Mask */
#define PSR_JAZELLE (1U << 24) /* Jazelle Mode */
#define PSR_Z (1U << 30) /* Zero condition flag */
/* 32 bit modes */
#define PSR_MODE_USR 0x10
#define PSR_MODE_FIQ 0x11
#define PSR_MODE_IRQ 0x12
#define PSR_MODE_SVC 0x13
#define PSR_MODE_MON 0x16
#define PSR_MODE_ABT 0x17
#define PSR_MODE_HYP 0x1a
#define PSR_MODE_UND 0x1b
#define PSR_MODE_SYS 0x1f
#define PSR_MODE_USR 0x10U
#define PSR_MODE_FIQ 0x11U
#define PSR_MODE_IRQ 0x12U
#define PSR_MODE_SVC 0x13U
#define PSR_MODE_MON 0x16U
#define PSR_MODE_ABT 0x17U
#define PSR_MODE_HYP 0x1aU
#define PSR_MODE_UND 0x1bU
#define PSR_MODE_SYS 0x1fU
/* 64 bit modes */
#define PSR_MODE_BIT 0x10 /* Set iff AArch32 */
#define PSR_MODE_EL3h 0x0d
#define PSR_MODE_EL3t 0x0c
#define PSR_MODE_EL2h 0x09
#define PSR_MODE_EL2t 0x08
#define PSR_MODE_EL1h 0x05
#define PSR_MODE_EL1t 0x04
#define PSR_MODE_EL0t 0x00
#define PSR_MODE_BIT 0x10U /* Set iff AArch32 */
#define PSR_MODE_EL3h 0x0dU
#define PSR_MODE_EL3t 0x0cU
#define PSR_MODE_EL2h 0x09U
#define PSR_MODE_EL2t 0x08U
#define PSR_MODE_EL1h 0x05U
#define PSR_MODE_EL1t 0x04U
#define PSR_MODE_EL0t 0x00U
/*
* We set PSR_Z to be able to boot Linux kernel versions with an invalid
@@ -457,7 +495,7 @@ typedef uint64_t xen_callback_t;
#define GUEST_RAM0_SIZE xen_mk_ullong(0xc0000000)
/* 4GB @ 4GB Prefetch Memory for VPCI */
#define GUEST_VPCI_ADDR_TYPE_PREFETCH_MEM xen_mk_ullong(0x42000000)
#define GUEST_VPCI_ADDR_TYPE_PREFETCH_MEM xen_mk_ullong(0x43000000)
#define GUEST_VPCI_PREFETCH_MEM_ADDR xen_mk_ullong(0x100000000)
#define GUEST_VPCI_PREFETCH_MEM_SIZE xen_mk_ullong(0x100000000)
@@ -475,6 +513,7 @@ typedef uint64_t xen_callback_t;
#define GUEST_MAX_VCPUS 128
/* Interrupts */
#define GUEST_TIMER_VIRT_PPI 27
#define GUEST_TIMER_PHYS_S_PPI 29
#define GUEST_TIMER_PHYS_NS_PPI 30
@@ -485,6 +524,19 @@ typedef uint64_t xen_callback_t;
#define GUEST_VIRTIO_MMIO_SPI_FIRST 33
#define GUEST_VIRTIO_MMIO_SPI_LAST 43
/*
* SGI is the preferred delivery mechanism for FF-A pending notifications and
* the schedule receive interrupt. SGIs 8-15 are normally not used by a guest,
* as in a non-virtualized system they are typically assigned to the secure
* world. Here we are free to use SGIs 8-15 since they are virtual and have
* nothing to do with the secure world.
*
* For partitioning of SGIs see also Arm Base System Architecture v1.0C,
* https://developer.arm.com/documentation/den0094/
*/
#define GUEST_FFA_NOTIF_PEND_INTR_ID 8
#define GUEST_FFA_SCHEDULE_RECV_INTR_ID 9
/* PSCI functions */
#define PSCI_cpu_suspend 0
#define PSCI_cpu_off 1

src/include/xen/arch-ppc.h

@@ -0,0 +1,110 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) IBM Corp. 2005, 2006
* Copyright (C) Raptor Engineering, LLC 2023
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
* Timothy Pearson <tpearson@raptorengineering.com>
* Shawn Anastasio <sanastasio@raptorengineering.com>
*/
#ifndef __XEN_PUBLIC_ARCH_PPC_H__
#define __XEN_PUBLIC_ARCH_PPC_H__
FILE_LICENCE ( MIT );
FILE_SECBOOT ( PERMITTED );
#if defined(__XEN__) || defined(__XEN_TOOLS__)
#define int64_aligned_t int64_t __attribute__((__aligned__(8)))
#define uint64_aligned_t uint64_t __attribute__((__aligned__(8)))
#endif
#ifndef __ASSEMBLY__
#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef union { type *p; unsigned long q; } \
__guest_handle_ ## name; \
typedef union { type *p; uint64_aligned_t q; } \
__guest_handle_64_ ## name
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
___DEFINE_XEN_GUEST_HANDLE(name, type); \
___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
#define __XEN_GUEST_HANDLE(name) __guest_handle_64_ ## name
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
#define XEN_GUEST_HANDLE_PARAM(name) __guest_handle_ ## name
#define set_xen_guest_handle_raw(hnd, val) \
do { \
__typeof__(&(hnd)) sxghr_tmp_ = &(hnd); \
sxghr_tmp_->q = 0; \
sxghr_tmp_->p = (val); \
} while ( 0 )
#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
#endif
typedef uint64_t xen_pfn_t;
#define PRI_xen_pfn PRIx64
#define PRIu_xen_pfn PRIu64
/*
* Maximum number of virtual CPUs in legacy multi-processor guests.
* Only one. All other VCPUS must use VCPUOP_register_vcpu_info.
*/
#define XEN_LEGACY_MAX_VCPUS 1
typedef uint64_t xen_ulong_t;
#define PRI_xen_ulong PRIx64
/*
* User-accessible registers: most of these need to be saved/restored
* for every nested Xen invocation.
*/
struct vcpu_guest_core_regs
{
uint64_t gprs[32];
uint64_t lr;
uint64_t ctr;
uint64_t srr0;
uint64_t srr1;
uint64_t pc;
uint64_t msr;
uint64_t fpscr; /* XXX Is this necessary */
uint64_t xer;
uint64_t hid4; /* debug only */
uint64_t dar; /* debug only */
uint32_t dsisr; /* debug only */
uint32_t cr;
uint32_t __pad; /* good spot for another 32bit reg */
uint32_t entry_vector;
};
typedef struct vcpu_guest_core_regs vcpu_guest_core_regs_t;
typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* XXX timebase */
/* ONLY used to communicate with dom0! See also struct exec_domain. */
struct vcpu_guest_context {
vcpu_guest_core_regs_t user_regs; /* User-level CPU registers */
uint64_t sdr1; /* Pagetable base */
/* XXX etc */
};
typedef struct vcpu_guest_context vcpu_guest_context_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
struct arch_shared_info {
uint64_t boot_timebase;
};
struct arch_vcpu_info {
};
struct xen_arch_domainconfig {
};
typedef struct xen_pmu_arch { uint8_t dummy; } xen_pmu_arch_t;
#endif /* !__ASSEMBLY__ */
#endif /* __XEN_PUBLIC_ARCH_PPC_H__ */


@@ -0,0 +1,96 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Guest OS interface to RISC-V Xen.
* Initially based on the ARM implementation.
*/
#ifndef __XEN_PUBLIC_ARCH_RISCV_H__
#define __XEN_PUBLIC_ARCH_RISCV_H__
FILE_LICENCE ( MIT );
FILE_SECBOOT ( PERMITTED );
#if defined(__XEN__) || defined(__XEN_TOOLS__) || defined(__GNUC__)
#define int64_aligned_t int64_t __attribute__((__aligned__(8)))
#define uint64_aligned_t uint64_t __attribute__((__aligned__(8)))
#endif
#ifndef __ASSEMBLY__
#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef union { type *p; unsigned long q; } \
__guest_handle_ ## name; \
typedef union { type *p; uint64_aligned_t q; } \
__guest_handle_64_ ## name
/*
* XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
* in a struct in memory. On RISC-V it is always 8 bytes in size and 8-byte
* aligned.
* XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as a
* hypercall argument. It is 4 bytes on riscv32 and 8 bytes on riscv64.
*/
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
___DEFINE_XEN_GUEST_HANDLE(name, type); \
___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
#define __XEN_GUEST_HANDLE(name) __guest_handle_64_ ## name
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
#define XEN_GUEST_HANDLE_PARAM(name) __guest_handle_ ## name
#define set_xen_guest_handle_raw(hnd, val) \
do { \
typeof(&(hnd)) sxghr_tmp_ = &(hnd); \
sxghr_tmp_->q = 0; \
sxghr_tmp_->p = (val); \
} while ( 0 )
#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
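As a usage illustration (editorial note, not part of the imported header; the example_req type and example_fill_handle() helper are hypothetical), the handle macros above combine roughly as follows:

#include <stdint.h>

/* Hypothetical request type, introduced only for this illustration */
typedef struct {
        uint64_t gfn;
} example_req;
DEFINE_XEN_GUEST_HANDLE(example_req);

/* Store a guest pointer into a handle embedded in a hypercall argument.
 * set_xen_guest_handle_raw() zeroes the 64-bit q member first, so the
 * handle is fully defined even where pointers are only 32 bits wide. */
static void example_fill_handle ( example_req *req,
                                  XEN_GUEST_HANDLE(example_req) *hnd ) {
        set_xen_guest_handle ( *hnd, req );
}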
typedef uint64_t xen_pfn_t;
#define PRI_xen_pfn PRIx64
#define PRIu_xen_pfn PRIu64
typedef uint64_t xen_ulong_t;
#define PRI_xen_ulong PRIx64
#if defined(__XEN__) || defined(__XEN_TOOLS__)
struct vcpu_guest_context {
};
typedef struct vcpu_guest_context vcpu_guest_context_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
struct xen_arch_domainconfig {
};
#endif
/* TODO: add a placeholder entry if no real ones surface */
struct arch_vcpu_info {
};
typedef struct arch_vcpu_info arch_vcpu_info_t;
/* TODO: add a placeholder entry if no real ones surface */
struct arch_shared_info {
};
typedef struct arch_shared_info arch_shared_info_t;
/*
* Maximum number of virtual CPUs in legacy multi-processor guests.
* Only one. All other VCPUS must use VCPUOP_register_vcpu_info.
*/
#define XEN_LEGACY_MAX_VCPUS 1
/* Stub definition of PMU structure */
typedef struct xen_pmu_arch { uint8_t dummy; } xen_pmu_arch_t;
#endif
#endif /* __XEN_PUBLIC_ARCH_RISCV_H__ */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/


@@ -11,10 +11,11 @@
#define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
FILE_LICENCE ( MIT );
FILE_SECBOOT ( PERMITTED );
/*
* Hypercall interface:
* Input: %ebx, %ecx, %edx, %esi, %edi, %ebp (arguments 1-6)
* Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5)
* Output: %eax
* Access is via hypercall page (set up by guest loader or via a Xen MSR):
* call hypercall_page + hypercall-number * 32
@@ -116,6 +117,10 @@ FILE_LICENCE ( MIT );
#define __DECL_REG_LO16(name) uint32_t e ## name
#endif
#ifdef __XEN__
#define cpu_user_regs guest_user_regs
#endif
struct cpu_user_regs {
__DECL_REG_LO8(b);
__DECL_REG_LO8(c);
@@ -138,8 +143,13 @@ struct cpu_user_regs {
uint16_t fs, _pad4;
uint16_t gs, _pad5;
};
#ifdef __XEN__
#undef cpu_user_regs
#else
typedef struct cpu_user_regs cpu_user_regs_t;
DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
#endif
#undef __DECL_REG_LO8
#undef __DECL_REG_LO16


@@ -15,7 +15,7 @@ FILE_SECBOOT ( PERMITTED );
/*
* Hypercall interface:
* Input: %rdi, %rsi, %rdx, %r10, %r8, %r9 (arguments 1-6)
* Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5)
* Output: %rax
* Access is via hypercall page (set up by guest loader or via a Xen MSR):
* call hypercall_page + hypercall-number * 32
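As a rough sketch of this calling convention (editorial illustration, not part of the header; the xen_hypercall_2() helper is hypothetical, and a hypercall_page set up by the guest loader or via the Xen MSR is assumed), a two-argument hypercall looks approximately like:

#include <stdint.h>

extern uint8_t hypercall_page[];        /* set up by loader or Xen MSR */

/* Each hypercall has a 32-byte stub at hypercall_page + number * 32 */
static inline long xen_hypercall_2 ( unsigned int nr, unsigned long arg1,
                                     unsigned long arg2 ) {
        register unsigned long rdi asm ( "rdi" ) = arg1;
        register unsigned long rsi asm ( "rsi" ) = arg2;
        long ret;

        __asm__ __volatile__ ( "call *%[stub]"
                               : "=a" ( ret ), "+r" ( rdi ), "+r" ( rsi )
                               : [stub] "r" ( &hypercall_page[ nr * 32 ] )
                               : "rcx", "r11", "memory" );
        return ret;
}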
@@ -162,6 +162,10 @@ struct iret_context {
#define __DECL_REG_HI(num) uint64_t r ## num
#endif
#ifdef __XEN__
#define cpu_user_regs guest_user_regs
#endif
struct cpu_user_regs {
__DECL_REG_HI(15);
__DECL_REG_HI(14);
@@ -192,8 +196,13 @@ struct cpu_user_regs {
uint16_t fs, _pad5[3];
uint16_t gs, _pad6[3];
};
#ifdef __XEN__
#undef cpu_user_regs
#else
typedef struct cpu_user_regs cpu_user_regs_t;
DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
#endif
#undef __DECL_REG
#undef __DECL_REG_LOHI


@@ -7,8 +7,6 @@
* Copyright (c) 2004-2006, K A Fraser
*/
#include "../xen.h"
#ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__
#define __XEN_PUBLIC_ARCH_X86_XEN_H__
@@ -39,7 +37,7 @@ FILE_SECBOOT ( PERMITTED );
#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
#define XEN_GUEST_HANDLE_PARAM(name) XEN_GUEST_HANDLE(name)
#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0)
#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = (val); } while (0)
#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
#if defined(__i386__)
@@ -178,7 +176,18 @@ struct vcpu_guest_context {
#define _VGCF_online 5
#define VGCF_online (1<<_VGCF_online)
unsigned long flags; /* VGCF_* flags */
/*
* Outside of Xen, regs type stays named cpu_user_regs for backwards
* compatibility. Inside Xen, the type called cpu_user_regs is different,
* and the public API type is renamed to guest_user_regs.
*/
#ifdef __XEN__
struct guest_user_regs user_regs; /* User-level CPU registers */
#else
struct cpu_user_regs user_regs; /* User-level CPU registers */
#endif
struct trap_info trap_ctxt[256]; /* Virtual IDT */
unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */


@@ -117,10 +117,10 @@ typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t;
* EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
* vcpu.
* NOTES:
* 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list
* in xen.h for the classification of each VIRQ.
* 2. Global VIRQs must be allocated on VCPU0 but can subsequently be
* re-bound via EVTCHNOP_bind_vcpu.
* 1. Virtual IRQs are classified as per-vcpu, per-domain or global. See the
* VIRQ list in xen.h for the classification of each VIRQ.
* 2. Per-domain and global VIRQs must be allocated on vCPU0 but can
* subsequently be re-bound via EVTCHNOP_bind_vcpu.
* 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu.
* The allocated event channel is bound to the specified vcpu and the
* binding cannot be changed.
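A brief sketch of note 2 in practice (editorial illustration; assumes a HYPERVISOR_event_channel_op() wrapper, which these headers do not themselves provide, and the hypothetical helper name example_bind_console_virq):

/* Bind the global VIRQ_CONSOLE; per note 2 above this must initially be
 * done on vCPU 0, and may later be re-bound via EVTCHNOP_bind_vcpu. */
static int example_bind_console_virq ( evtchn_port_t *port ) {
        struct evtchn_bind_virq bind = {
                .virq = VIRQ_CONSOLE,
                .vcpu = 0,
        };
        int rc;

        rc = HYPERVISOR_event_channel_op ( EVTCHNOP_bind_virq, &bind );
        if ( rc == 0 )
                *port = bind.port;      /* OUT: newly allocated port */
        return rc;
}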


@@ -11,6 +11,7 @@
#define __XEN_PUBLIC_FEATURES_H__
FILE_LICENCE ( MIT );
FILE_SECBOOT ( PERMITTED );
/*
* `incontents 200 elfnotes_features XEN_ELFNOTE_FEATURES
@@ -113,6 +114,23 @@ FILE_LICENCE ( MIT );
#define XENFEAT_not_direct_mapped 16
#define XENFEAT_direct_mapped 17
/*
* Signal whether the domain is able to use the following hypercalls:
*
* VCPUOP_register_runstate_phys_area
* VCPUOP_register_vcpu_time_phys_area
*/
#define XENFEAT_runstate_phys_area 18
#define XENFEAT_vcpu_time_phys_area 19
/*
* If set, Xen will pass through all MSI-X vector ctrl writes to the device
* model, not only those unmasking an entry. This allows the device model to
* properly keep track of the MSI-X table without having to read it from the
* device behind Xen's back. This information is relevant only for device
* models.
*/
#define XENFEAT_dm_msix_all_writes 20
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */


@@ -7,6 +7,7 @@
#define __XEN_PUBLIC_HVM_HVM_OP_H__
FILE_LICENCE ( MIT );
FILE_SECBOOT ( PERMITTED );
#include "../xen.h"
#include "../trace.h"


@@ -7,6 +7,7 @@
#define __XEN_PUBLIC_HVM_PARAMS_H__
FILE_LICENCE ( MIT );
FILE_SECBOOT ( PERMITTED );
#include "hvm_op.h"
@@ -248,16 +249,11 @@ FILE_LICENCE ( MIT );
#define HVM_PARAM_VM_GENERATION_ID_ADDR 34
/*
* Set mode for altp2m:
* disabled: don't activate altp2m (default)
* Get mode for altp2m:
* disabled: altp2m not active (default)
* mixed: allow access to all altp2m ops for both in-guest and external tools
* external: allow access to external privileged tools only
* limited: guest only has limited access (ie. control VMFUNC and #VE)
*
* Note that 'mixed' mode has not been evaluated for safety from a
* security perspective. Before using this mode in a
* security-critical environment, each subop should be evaluated for
* safety, with unsafe subops blacklisted in XSM.
*/
#define HVM_PARAM_ALTP2M 35
#define XEN_ALTP2M_disabled 0


@@ -70,7 +70,9 @@ sub try_import_file {
if ( /^\#define\s+_+${maybe_guard}_H_*$/ ) {
die "Duplicate header guard detected in $infile\n" if $guard;
$guard = $maybe_guard;
print $outfh "\nFILE_LICENCE ( MIT );\n";
print $outfh "\n";
print $outfh "FILE_LICENCE ( MIT );\n";
print $outfh "FILE_SECBOOT ( PERMITTED );\n";
}
undef $maybe_guard;
}


@@ -28,8 +28,16 @@ FILE_SECBOOT ( PERMITTED );
* and grant_table.h from the Xen public headers.
*/
#include "../xen.h"
#include "../xen-compat.h"
/* Some PV I/O interfaces need a compatibility variant. */
#if __XEN_INTERFACE_VERSION__ < 0x00041300
#define XENPV_FLEX_ARRAY_DIM 1 /* variable size */
#else
#define XENPV_FLEX_ARRAY_DIM XEN_FLEX_ARRAY_DIM
#endif
#if __XEN_INTERFACE_VERSION__ < 0x00030208
#define xen_mb() mb()
#define xen_rmb() rmb()
@@ -39,11 +47,11 @@ FILE_SECBOOT ( PERMITTED );
typedef unsigned int RING_IDX;
/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
#define __RD2(x) (((x) & 0x00000002U) ? 0x2 : ((x) & 0x1))
#define __RD4(x) (((x) & 0x0000000cU) ? __RD2((x) >> 2) << 2 : __RD2(x))
#define __RD8(x) (((x) & 0x000000f0U) ? __RD4((x) >> 4) << 4 : __RD4(x))
#define __RD16(x) (((x) & 0x0000ff00U) ? __RD8((x) >> 8) << 8 : __RD8(x))
#define __RD32(x) (((x) & 0xffff0000U) ? __RD16((x) >> 16) << 16 : __RD16(x))
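A quick compile-time illustration of the rounding helpers (editorial note, not part of the header): __RD32() yields the largest power of two not exceeding its non-zero argument, which is what the ring-size calculation below relies on.

/* C11 static assertions documenting the round-down behaviour */
_Static_assert ( __RD32 ( 0x1234U ) == 0x1000U,
                 "rounds down to the nearest power of two" );
_Static_assert ( __RD32 ( 0x00010000U ) == 0x00010000U,
                 "powers of two are left unchanged" );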
/*
* Calculate size of a shared ring, given the total available space for the
@@ -113,7 +121,7 @@ struct __name##_sring { \
uint8_t pvt_pad[4]; \
} pvt; \
uint8_t __pad[44]; \
union __name##_sring_entry ring[1]; /* variable-length */ \
union __name##_sring_entry ring[XENPV_FLEX_ARRAY_DIM]; \
}; \
\
/* "Front" end's private variables */ \
@@ -482,7 +490,7 @@ struct name##_data_intf { \
uint8_t pad2[56]; \
\
RING_IDX ring_order; \
grant_ref_t ref[]; \
grant_ref_t ref[XEN_FLEX_ARRAY_DIM]; \
}; \
DEFINE_XEN_FLEX_RING(name)


@@ -38,16 +38,16 @@ enum xsd_sockmsg_type
/* XS_RESTRICT has been removed */
XS_RESET_WATCHES = XS_SET_TARGET + 2,
XS_DIRECTORY_PART,
XS_GET_FEATURE,
XS_SET_FEATURE,
XS_GET_QUOTA,
XS_SET_QUOTA,
XS_TYPE_COUNT, /* Number of valid types. */
XS_INVALID = 0xffff /* Guaranteed to remain an invalid type */
};
#define XS_WRITE_NONE "NONE"
#define XS_WRITE_CREATE "CREATE"
#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
/* We hand errors as strings, for portability. */
struct xsd_errors
{
@@ -113,6 +113,7 @@ struct xenstore_domain_interface {
uint32_t server_features; /* Bitmap of features supported by the server */
uint32_t connection;
uint32_t error;
uint32_t evtchn_port;
};
/* Violating this is very bad. See docs/misc/xenstore.txt. */
@@ -137,6 +138,12 @@ struct xenstore_domain_interface {
#define XENSTORE_ERROR_RINGIDX 2 /* Invalid ring index */
#define XENSTORE_ERROR_PROTO 3 /* Protocol violation (payload too long) */
/*
* The evtchn_port field is the domain's event channel for xenstored to signal.
* It is filled in by Xen for dom0less/Hyperlaunch domains. It is only used
* when non-zero. Otherwise the event channel from XS_INTRODUCE is used.
*/
#endif /* _XS_WIRE_H */
/*


@@ -11,6 +11,7 @@
#define __XEN_PUBLIC_MEMORY_H__
FILE_LICENCE ( MIT );
FILE_SECBOOT ( PERMITTED );
#include "xen.h"
#include "physdev.h"
@@ -236,7 +237,7 @@ struct xen_add_to_physmap {
unsigned int space; /* => enum phys_map_space */
#define XENMAPIDX_grant_table_status 0x80000000
#define XENMAPIDX_grant_table_status 0x80000000U
/* Index into space being mapped. */
xen_ulong_t idx;
@@ -428,6 +429,15 @@ typedef enum {
* pausing the vcpu
*/
XENMEM_access_n2rwx,
/*
* Same as XENMEM_access_r, but on processors with
* the TERTIARY_EXEC_EPT_PAGING_WRITE support,
* CPU-initiated page-table walks can still
* write to it (e.g., update A/D bits)
*/
XENMEM_access_r_pw,
/* Take the domain default */
XENMEM_access_default
} xenmem_access_t;


@@ -7,6 +7,7 @@
#define __XEN_PUBLIC_PHYSDEV_H__
FILE_LICENCE ( MIT );
FILE_SECBOOT ( PERMITTED );
#include "xen.h"
@@ -89,6 +90,9 @@ DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t);
/*
* Set the current VCPU's I/O-port permissions bitmap.
* @arg == pointer to physdev_set_iobitmap structure.
*
* When @nr_ports is non-zero, Xen, like real CPUs and the TSS IOPB, always
* reads 2 bytes from @bitmap, which might be one byte beyond @nr_ports.
*/
#define PHYSDEVOP_set_iobitmap 7
struct physdev_set_iobitmap {
@@ -298,6 +302,13 @@ DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_add_t);
*/
#define PHYSDEVOP_prepare_msix 30
#define PHYSDEVOP_release_msix 31
/*
* Notify the hypervisor that a PCI device has been reset, so that any
* internally cached state is regenerated. Should be called after any
* device reset performed by the hardware domain.
*/
#define PHYSDEVOP_pci_device_reset 32
struct physdev_pci_device {
/* IN */
uint16_t seg;
@@ -307,6 +318,16 @@ struct physdev_pci_device {
typedef struct physdev_pci_device physdev_pci_device_t;
DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_t);
struct pci_device_reset {
physdev_pci_device_t dev;
#define PCI_DEVICE_RESET_COLD 0x0
#define PCI_DEVICE_RESET_WARM 0x1
#define PCI_DEVICE_RESET_HOT 0x2
#define PCI_DEVICE_RESET_FLR 0x3
#define PCI_DEVICE_RESET_MASK 0x3
uint32_t flags;
};
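A minimal usage sketch of the new sub-operation (editorial illustration; assumes a HYPERVISOR_physdev_op() wrapper and the bus/devfn fields of physdev_pci_device_t that are elided from the hunk above; example_notify_pci_reset is a hypothetical name):

/* Tell Xen that device 0000:03:00.0 has just been reset via FLR, so
 * that any internally cached state for it is regenerated. */
static int example_notify_pci_reset ( void ) {
        struct pci_device_reset reset = {
                .dev = {
                        .seg = 0x0000,
                        .bus = 0x03,
                        .devfn = 0x00,  /* device 0, function 0 */
                },
                .flags = PCI_DEVICE_RESET_FLR,
        };

        return HYPERVISOR_physdev_op ( PHYSDEVOP_pci_device_reset, &reset );
}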
#define PHYSDEVOP_DBGP_RESET_PREPARE 1
#define PHYSDEVOP_DBGP_RESET_DONE 2


@@ -10,6 +10,7 @@
#define __XEN_PUBLIC_TRACE_H__
FILE_LICENCE ( MIT );
FILE_SECBOOT ( PERMITTED );
#define TRACE_EXTRA_MAX 7
#define TRACE_EXTRA_SHIFT 28
@@ -69,7 +70,7 @@ FILE_LICENCE ( MIT );
#define TRC_SCHED_CLASS_EVT(_c, _e) \
( ( TRC_SCHED_CLASS | \
((TRC_SCHED_##_c << TRC_SCHED_ID_SHIFT) & TRC_SCHED_ID_MASK) ) + \
(_e & TRC_SCHED_EVT_MASK) )
((_e) & TRC_SCHED_EVT_MASK) )
/* Trace classes for DOM0 operations */
#define TRC_DOM0_DOMOPS 0x00041000 /* Domains manipulations */
@@ -180,8 +181,10 @@ FILE_LICENCE ( MIT );
/* trace events per subclass */
#define TRC_HVM_NESTEDFLAG (0x400)
#define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01)
#define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02)
#define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02)
#define TRC_HVM_VMX_EXIT (TRC_HVM_ENTRYEXIT + 0x02)
#define TRC_HVM_VMX_EXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02)
#define TRC_HVM_SVM_EXIT (TRC_HVM_ENTRYEXIT + 0x03)
#define TRC_HVM_SVM_EXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x03)
#define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01)
#define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x01)
#define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02)


@@ -12,6 +12,7 @@
#define __XEN_PUBLIC_VERSION_H__
FILE_LICENCE ( MIT );
FILE_SECBOOT ( PERMITTED );
#include "xen.h"
@@ -21,12 +22,20 @@ FILE_LICENCE ( MIT );
/* arg == NULL; returns major:minor (16:16). */
#define XENVER_version 0
/* arg == xen_extraversion_t. */
/*
* arg == xen_extraversion_t.
*
* This API/ABI is broken. Use XENVER_extraversion2 where possible.
*/
#define XENVER_extraversion 1
typedef char xen_extraversion_t[16];
#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t))
/* arg == xen_compile_info_t. */
/*
* arg == xen_compile_info_t.
*
* This API/ABI is broken and truncates data.
*/
#define XENVER_compile_info 2
struct xen_compile_info {
char compiler[64];
@@ -36,14 +45,51 @@ struct xen_compile_info {
};
typedef struct xen_compile_info xen_compile_info_t;
/*
* arg == xen_capabilities_info_t.
*
* This API/ABI is broken. Use XENVER_capabilities2 where possible.
*/
#define XENVER_capabilities 3
typedef char xen_capabilities_info_t[1024];
#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t))
/*
* arg == xen_changeset_info_t.
*
* This API/ABI is broken. Use XENVER_changeset2 where possible.
*/
#define XENVER_changeset 4
typedef char xen_changeset_info_t[64];
#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t))
/*
* This API is problematic.
*
* It is only applicable to guests which share pagetables with Xen (x86 PV
* guests), but unfortunately has leaked into other guest types and
* architectures with an expectation of never failing.
*
* It is intended to identify the virtual address split between guest kernel
* and Xen.
*
* For 32bit PV guests, there is a split, and it is variable (between two
* fixed bounds), and this boundary is reported to guests. The detail missing
* from the hypercall is that the second boundary is the 32bit architectural
* boundary at 4G.
*
* For 64bit PV guests, Xen lives at the bottom of the upper canonical range.
* This hypercall happens to report the architectural boundary, not the one
* which would be necessary to make a variable split work. As such, this
* hypercall is entirely useless for 64bit PV guests, and all inspected
* implementations at the time of writing were found to have compile time
* expectations about the split.
*
* For architectures where this hypercall is implemented, for backwards
* compatibility with the expectation of the hypercall never failing, Xen will
* return 0 instead of failing with -ENOSYS in cases where the guest should
* not be making the hypercall.
*/
#define XENVER_platform_parameters 5
struct xen_platform_parameters {
xen_ulong_t virt_start;
@@ -70,14 +116,21 @@ typedef struct xen_feature_info xen_feature_info_t;
*/
#define XENVER_guest_handle 8
/*
* arg == xen_commandline_t.
*
* This API/ABI is broken. Use XENVER_commandline2 where possible.
*/
#define XENVER_commandline 9
typedef char xen_commandline_t[1024];
/*
* Return value is the number of bytes written, or XEN_Exx on error.
* Calling with empty parameter returns the size of build_id.
*
* Note: structure only kept for backwards compatibility. Xen operates in
* terms of xen_varbuf_t.
*/
#define XENVER_build_id 10
struct xen_build_id {
uint32_t len; /* IN: size of buf[]. */
unsigned char buf[XEN_FLEX_ARRAY_DIM];
@@ -85,6 +138,43 @@ struct xen_build_id {
};
typedef struct xen_build_id xen_build_id_t;
/*
* Container for an arbitrary variable length buffer.
*/
struct xen_varbuf {
uint32_t len; /* IN: size of buf[] in bytes. */
unsigned char buf[XEN_FLEX_ARRAY_DIM]; /* OUT: requested data. */
};
typedef struct xen_varbuf xen_varbuf_t;
/*
* arg == xen_varbuf_t
*
* Equivalent to the original ops, but with a non-truncating API/ABI.
*
* These hypercalls can fail for a number of reasons. All callers must handle
* -XEN_xxx return values appropriately.
*
* Passing arg == NULL is a request for size, which will be signalled with a
* non-negative return value. Note: a return size of 0 may be legitimate for
* the requested subop.
*
* Otherwise, the input xen_varbuf_t provides the size of the following
* buffer. Xen will fill the buffer, and return the number of bytes written
* (e.g. if the input buffer was longer than necessary).
*
* Some subops may return binary data. Some subops may be expected to return
* textual data. These are returned without a NUL terminator, and while the
* contents are expected to be ASCII/UTF-8, Xen makes no guarantees to this
* effect. e.g. Xen has no control over the formatting used for the command
* line.
*/
#define XENVER_extraversion2 11
#define XENVER_capabilities2 12
#define XENVER_changeset2 13
#define XENVER_commandline2 14
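The size-then-fill protocol described above can be sketched as follows (editorial illustration; assumes a HYPERVISOR_xen_version() wrapper plus malloc()/free(), with error handling abbreviated; example_extraversion2 is a hypothetical name):

#include <stdlib.h>
#include <string.h>

/* Fetch XENVER_extraversion2 as a NUL-terminated string, or NULL on error */
static char * example_extraversion2 ( void ) {
        struct xen_varbuf *vb;
        char *str = NULL;
        long len;

        /* arg == NULL is a request for size (0 is a legitimate answer) */
        len = HYPERVISOR_xen_version ( XENVER_extraversion2, NULL );
        if ( len < 0 )
                return NULL;

        vb = malloc ( sizeof ( *vb ) + len );
        if ( ! vb )
                return NULL;
        vb->len = len;
        len = HYPERVISOR_xen_version ( XENVER_extraversion2, vb );
        if ( len >= 0 ) {
                /* Returned data is not NUL-terminated; add one for C use */
                str = malloc ( len + 1 );
                if ( str ) {
                        memcpy ( str, vb->buf, len );
                        str[len] = '\0';
                }
        }
        free ( vb );
        return str;
}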
#endif /* __XEN_PUBLIC_VERSION_H__ */
/*


@@ -13,7 +13,7 @@
FILE_LICENCE ( MIT );
FILE_SECBOOT ( PERMITTED );
#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040e00
#define __XEN_LATEST_INTERFACE_VERSION__ 0x00041300
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Xen is built with matching headers and implements the latest interface. */


@@ -19,6 +19,10 @@ FILE_SECBOOT ( PERMITTED );
#include "arch-x86/xen.h"
#elif defined(__arm__) || defined (__aarch64__)
#include "arch-arm.h"
#elif defined(__powerpc64__)
#include "arch-ppc.h"
#elif defined(__riscv)
#include "arch-riscv.h"
#else
#include <bits/xen.h>
#endif
@@ -158,25 +162,34 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
*
* Virtual interrupts that a guest OS may receive from Xen.
*
* In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
* global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
* The latter can be allocated only once per guest: they must initially be
* allocated to VCPU0 but can subsequently be re-bound.
* There are three types:
*
* 1. (V) Per-vcpu:
* These can be bound once per vCPU, each using a different evtchn port.
* An evtchn for one vCPU cannot be rebound to a different vCPU.
*
* 2. (D) Per-domain:
* These can be bound once per domain. They must be bound on vCPU 0 first,
* but can be rebound to other vCPUs afterwards.
*
* 3. (G) Global:
* Like per-domain, but can only be bound to a single domain at a time.
* The owning domain must unbind before a new domain can bind.
*/
/* ` enum virq { */
#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
#define VIRQ_CONSOLE 2 /* G. Bytes received on emergency console. */
#define VIRQ_DOM_EXC 3 /* G. Exceptional event for some domain. */
#define VIRQ_TBUF 4 /* G. Trace buffer has records available. */
#define VIRQ_DEBUGGER 6 /* G. A domain has paused for debugging. */
#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
#define VIRQ_PCPU_STATE 9 /* G. (DOM0) PCPU state changed */
#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occurred */
#define VIRQ_ARGO 11 /* G. Argo interdomain message notification */
#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
#define VIRQ_XENPMU 13 /* V. PMC interrupt */
#define VIRQ_CON_RING 8 /* G. Bytes received on console */
#define VIRQ_PCPU_STATE 9 /* G. PCPU state changed */
#define VIRQ_MEM_EVENT 10 /* G. A memory event has occurred */
#define VIRQ_ARGO 11 /* D. Argo interdomain message notification */
#define VIRQ_ENOMEM 12 /* G. Low on heap memory */
#define VIRQ_XENPMU 13 /* V. PMC interrupt */
/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0 16
@@ -622,7 +635,7 @@ DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
/*
* ` enum neg_errnoval
* ` HYPERVISOR_multicall(multicall_entry_t call_list[],
* ` uint32_t nr_calls);
* ` unsigned long nr_calls);
*
* NB. The fields are logically the natural register size for this
* architecture. In cases where xen_ulong_t is larger than this then
@@ -630,7 +643,11 @@ DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
*/
struct multicall_entry {
xen_ulong_t op, result;
#ifndef __XEN__
xen_ulong_t args[6];
#else /* Only 5 arguments are supported in reality. */
xen_ulong_t args[5], unused;
#endif
};
typedef struct multicall_entry multicall_entry_t;
DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
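A short sketch of batching with HYPERVISOR_multicall (editorial illustration; assumes a wrapper for the multicall hypercall itself plus the XENVER_* definitions from version.h; example_multicall is a hypothetical name; each entry's own return value lands in its result field):

/* Issue two version queries in a single trip into the hypervisor */
static int example_multicall ( void ) {
        static xen_extraversion_t extra;
        multicall_entry_t calls[2] = {
                { .op = __HYPERVISOR_xen_version,
                  .args = { XENVER_version, 0 } },
                { .op = __HYPERVISOR_xen_version,
                  .args = { XENVER_extraversion, ( xen_ulong_t ) extra } },
        };
        int rc;

        rc = HYPERVISOR_multicall ( calls, 2 );
        if ( rc != 0 )
                return rc;

        /* Each entry carries its own (possibly negative) status */
        if ( ( ( long ) calls[0].result < 0 ) ||
             ( ( long ) calls[1].result < 0 ) )
                return -1;
        return 0;
}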