Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions xen/arch/x86/boot/head.S
Original file line number Diff line number Diff line change
Expand Up @@ -136,12 +136,12 @@ SYM(mle_header, DATA, LOCAL, 16)
.long 0xa2555c0f /* UUID2 */
.long 0x42b651cb /* UUID3 */
.long (.Lmle_header_end - mle_header) /* MLE header size */
.long 0x00020002 /* MLE version 2.2 */
.long 0x00020003 /* MLE version 2.3 */
.long (slaunch_stub_entry - start) /* Linear entry point of MLE (SINIT virt. address) */
.long 0x00000000 /* First valid page of MLE */
.long 0x00000000 /* Offset within binary of first byte of MLE */
.long (_end - start) /* Offset within binary of last byte + 1 of MLE */
.long 0x00000723 /* Bit vector of MLE-supported capabilities */
.long 0x00004723 /* Bit vector of MLE-supported capabilities */
.long 0x00000000 /* Starting linear address of command line (unused) */
.long 0x00000000 /* Ending linear address of command line (unused) */
.Lmle_header_end:
Expand Down
4 changes: 2 additions & 2 deletions xen/arch/x86/boot/slaunch-early.c
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,6 @@ void asmlinkage slaunch_early_init(uint32_t load_base_addr,

result->mbi_pa = intel_info->boot_params_base;

txt_verify_pmr_ranges(os_mle, os_sinit, intel_info,
load_base_addr, tgt_base_addr, size);
txt_verify_dma_protection(os_mle, os_sinit, intel_info,
load_base_addr, tgt_base_addr, size);
}
5 changes: 3 additions & 2 deletions xen/arch/x86/efi/efi-boot.h
Original file line number Diff line number Diff line change
Expand Up @@ -264,8 +264,9 @@ void __init asmlinkage noreturn start_xen_from_efi(void)
const struct txt_os_sinit_data *os_sinit =
txt_start(txt_heap, TXT_OS2SINIT);

txt_verify_pmr_ranges(os_mle, os_sinit, intel_info, xen_phys_start,
xen_phys_start, xen_image_size);
txt_verify_dma_protection(os_mle, os_sinit, intel_info,
xen_phys_start, xen_phys_start,
xen_image_size);
}
}

Expand Down
195 changes: 150 additions & 45 deletions xen/arch/x86/include/asm/intel-txt.h
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,9 @@
#define SLAUNCH_ERROR_NO_VENDOR_INFO 0xc0008009U
#define SLAUNCH_ERROR_BAD_VENDOR_INFO 0xc000800AU
#define SLAUNCH_ERROR_BAD_SLRT_ADDRESS 0xc000800BU
#define SLAUNCH_ERROR_TPR_INVALID 0xc000800CU
#define SLAUNCH_ERROR_TPR_UNSUPPORTED 0xc000800DU
#define SLAUNCH_ERROR_TPR_NOT_FOUND 0xc000800EU

#define TXT_AP_BOOT_CS 0x0030
#define TXT_AP_BOOT_DS 0x0038
Expand All @@ -74,6 +77,9 @@
/* Intel SDM: GETSEC Capability Result Encoding */
#define GETSEC_CAP_TXT_CHIPSET 1

/* SINIT/MLE capability bit for TPR (TXT Protected Range) DMA protection. */
#define TXT_SINIT_MLE_CAP_TPR_SUPPORT 14

#ifndef __ASSEMBLER__

#include <xen/multiboot2.h>
Expand Down Expand Up @@ -259,6 +265,19 @@ struct heap_event_log_pointer_element2_1 {
uint32_t next_record_offset;
} __packed;

/*
 * Extended data describing TPR (TXT Protected Range) DMA protection ranges.
 *
 * Each range is one contiguous span of physical memory that the chipset
 * protects against DMA.  The request element carries 'count' such ranges
 * immediately after the header.
 */
struct txt_heap_tpr_range {
    uint64_t base;   /* Physical base address of the protected range. */
    uint64_t size;   /* Length of the range in bytes. */
} __packed;

struct txt_heap_tpr_req_element {
    uint32_t count;  /* Number of entries in ranges[]. */
    /* C99 flexible array member (preferred over GNU zero-length array). */
    struct txt_heap_tpr_range ranges[];
} __packed;

/*
* Functions to extract data from the Intel TXT Heap Memory.
*
Expand Down Expand Up @@ -327,67 +346,153 @@ static inline void *txt_init(void)
return txt_heap;
}

static inline bool is_in_pmr(const struct txt_os_sinit_data *os_sinit,
uint64_t base, uint32_t size, bool check_high)
/*
 * Locate the TPR request element in the extended data that follows the
 * OS-SINIT table on the TXT heap.
 *
 * Returns a pointer to the element's payload, or NULL if no TPR request
 * element is present before the END marker.
 */
static inline const struct txt_heap_tpr_req_element *
txt_find_tpr_req_element(const struct txt_os_sinit_data *os_sinit)
{
    const struct txt_ext_data_element *elem;

    /* Extended data elements start right after the OS-SINIT structure. */
    for ( elem = (const struct txt_ext_data_element *)
                 ((const uint8_t *)os_sinit +
                  sizeof(struct txt_os_sinit_data));
          elem->type != TXT_HEAP_EXTDATA_TYPE_END;
          /* Each element's size covers its header, so it advances the walk. */
          elem = (const struct txt_ext_data_element *)
                 ((const uint8_t *)elem + elem->size) )
    {
        if ( elem->type == TXT_HEAP_EXTDATA_TYPE_TPR_REQ )
            return (const struct txt_heap_tpr_req_element *)elem->data;
    }

    return NULL;
}

static inline bool is_in_dma_prot(const struct txt_os_sinit_data *os_sinit,
uint64_t base, uint32_t size, bool check_high)
{
uint64_t lo_size, hi_base, hi_size;

/* Check for size overflow. */
if ( base + size < base )
txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);

if ( os_sinit->capabilities & (1u << TXT_SINIT_MLE_CAP_TPR_SUPPORT) )
{
const struct txt_heap_tpr_req_element *tpr_req;

tpr_req = txt_find_tpr_req_element(os_sinit);
lo_size = tpr_req->ranges[0].base + tpr_req->ranges[0].size;
if ( tpr_req->count >= 2 )
{
hi_base = tpr_req->ranges[1].base;
hi_size = tpr_req->ranges[1].size;
}
else
{
hi_base = 0;
hi_size = 0;
}
}
else
{
lo_size = os_sinit->vtd_pmr_lo_size;
hi_base = os_sinit->vtd_pmr_hi_base;
hi_size = os_sinit->vtd_pmr_hi_size;
}

/*
* txt_verify_pmr_ranges() makes sure the low range always starts at 0, so
* its size is also end address.
* txt_verify_dma_protection() makes sure the low range always starts at
* 0, so its size is also end address.
*/
if ( base + size <= os_sinit->vtd_pmr_lo_size )
if ( base + size <= lo_size )
return true;

if ( check_high && os_sinit->vtd_pmr_hi_size != 0 )
if ( check_high && hi_size != 0 )
{
if ( base >= os_sinit->vtd_pmr_hi_base &&
base + size <= os_sinit->vtd_pmr_hi_base +
os_sinit->vtd_pmr_hi_size )
if ( base >= hi_base && base + size <= hi_base + hi_size )
return true;
}

return false;
}

static inline void txt_verify_pmr_ranges(
static inline void txt_verify_dma_protection(
const struct txt_os_mle_data *os_mle,
const struct txt_os_sinit_data *os_sinit,
const struct slr_entry_intel_info *info,
uint32_t load_base_addr,
uint32_t tgt_base_addr,
uint32_t xen_size)
{
bool check_high_pmr = false;
bool check_high = false;

/* Verify the value of the low PMR base. It should always be 0. */
if ( os_sinit->vtd_pmr_lo_base != 0 )
txt_reset(SLAUNCH_ERROR_LO_PMR_BASE);

/*
* Low PMR size should not be 0 on current platforms. There is an ongoing
* transition to TPR-based DMA protection instead of PMR-based; this is not
* yet supported by the code.
*/
if ( os_sinit->vtd_pmr_lo_size == 0 )
txt_reset(SLAUNCH_ERROR_LO_PMR_SIZE);

/* Check if regions overlap. Treat regions with no hole between as error. */
if ( os_sinit->vtd_pmr_hi_size != 0 &&
os_sinit->vtd_pmr_hi_base <= os_sinit->vtd_pmr_lo_size )
txt_reset(SLAUNCH_ERROR_HI_PMR_BASE);
if ( os_sinit->capabilities & (1u << TXT_SINIT_MLE_CAP_TPR_SUPPORT) )
{
const struct txt_heap_tpr_req_element *tpr_req;

tpr_req = txt_find_tpr_req_element(os_sinit);
if ( tpr_req == NULL )
txt_reset(SLAUNCH_ERROR_TPR_NOT_FOUND);
if ( tpr_req->count < 1 )
txt_reset(SLAUNCH_ERROR_TPR_INVALID);
if ( tpr_req->count > 2 )
txt_reset(SLAUNCH_ERROR_TPR_UNSUPPORTED);

/* Lo range must not exceed 4G. */
if ( tpr_req->ranges[0].base + tpr_req->ranges[0].size >
0x100000000ULL )
txt_reset(SLAUNCH_ERROR_TPR_INVALID);

if ( tpr_req->count >= 2 )
{
/* Hi range must start at or above 4G. */
if ( tpr_req->ranges[1].base < 0x100000000ULL )
txt_reset(SLAUNCH_ERROR_TPR_INVALID);

/* Ranges must not overlap. */
if ( tpr_req->ranges[0].base + tpr_req->ranges[0].size >
tpr_req->ranges[1].base )
txt_reset(SLAUNCH_ERROR_TPR_INVALID);

/* All regions accessed by 32b code must be below 4G. */
if ( tpr_req->ranges[1].base + tpr_req->ranges[1].size <=
0x100000000ULL )
check_high = true;
}
}
else
{
/* Verify the value of the low PMR base. It should always be 0. */
if ( os_sinit->vtd_pmr_lo_base != 0 )
txt_reset(SLAUNCH_ERROR_LO_PMR_BASE);

/* Check for size overflow. */
if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <
os_sinit->vtd_pmr_hi_size )
txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);
/*
* Low PMR size should not be 0 on current platforms when PMR mode is
* in use.
*/
if ( os_sinit->vtd_pmr_lo_size == 0 )
txt_reset(SLAUNCH_ERROR_LO_PMR_SIZE);

/* All regions accessed by 32b code must be below 4G. */
if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <=
0x100000000ULL )
check_high_pmr = true;
/*
* Check if regions overlap. Treat regions with no hole between as
* error.
*/
if ( os_sinit->vtd_pmr_hi_size != 0 &&
os_sinit->vtd_pmr_hi_base <= os_sinit->vtd_pmr_lo_size )
txt_reset(SLAUNCH_ERROR_HI_PMR_BASE);

/* Check for size overflow. */
if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <
os_sinit->vtd_pmr_hi_size )
txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);

/* All regions accessed by 32b code must be below 4G. */
if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <=
0x100000000ULL )
check_high = true;
}

/*
* ACM checks that TXT heap and MLE memory is protected against DMA. We have
Expand All @@ -396,30 +501,30 @@ static inline void txt_verify_pmr_ranges(
* both pre- and post-relocation code is protected.
*/

/* Check if all of Xen before relocation is covered by PMR. */
if ( !is_in_pmr(os_sinit, load_base_addr, xen_size, check_high_pmr) )
/* Check if all of Xen before relocation is covered. */
if ( !is_in_dma_prot(os_sinit, load_base_addr, xen_size, check_high) )
txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);

/* Check if all of Xen after relocation is covered by PMR. */
/* Check if all of Xen after relocation is covered. */
if ( load_base_addr != tgt_base_addr &&
!is_in_pmr(os_sinit, tgt_base_addr, xen_size, check_high_pmr) )
!is_in_dma_prot(os_sinit, tgt_base_addr, xen_size, check_high) )
txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);

/*
* If present, check that MBI is covered by PMR. MBI starts with 'uint32_t
* If present, check that MBI is covered. MBI starts with 'uint32_t
* total_size'.
*/
if ( info->boot_params_base != 0 )
{
const multiboot2_fixed_t *mbi =
(const multiboot2_fixed_t *)(uintptr_t)info->boot_params_base;

if ( !is_in_pmr(os_sinit, info->boot_params_base, mbi->total_size,
check_high_pmr) )
if ( !is_in_dma_prot(os_sinit, info->boot_params_base, mbi->total_size,
check_high) )
txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
}

/* Check if TPM event log (if present) is covered by PMR. */
/* Check if TPM event log (if present) is covered by DMA protection. */
/*
* FIXME: currently commented out as GRUB allocates it in a hole between
* PMR and reserved RAM, due to 2MB resolution of PMR. There are no other
Expand All @@ -439,8 +544,8 @@ static inline void txt_verify_pmr_ranges(
*/
/*
if ( os_mle->evtlog_addr != 0 && os_mle->evtlog_size != 0 &&
!is_in_pmr(os_sinit, os_mle->evtlog_addr, os_mle->evtlog_size,
check_high_pmr) )
!is_in_dma_prot(os_sinit, os_mle->evtlog_addr, os_mle->evtlog_size,
check_high) )
txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
*/
}
Expand Down