@@ -70,4 +70,10 @@ ArmSetMemoryAttributes (
IN UINT64 Attributes
);
+VOID
+EFIAPI
+MapAllPageTablesReadOnly (
+ VOID
+ );
+
#endif
@@ -28,6 +28,8 @@
// We use this index definition to define an invalid block entry
#define TT_ATTR_INDX_INVALID ((UINT32)~0)
+STATIC BOOLEAN mReadOnlyPageTables;
+
STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
@@ -137,6 +139,9 @@ ReplaceLiveEntry (
IN UINT64 Address
)
{
+ if (*Entry == Value) {
+ return;
+ }
if (!ArmMmuEnabled ()) {
*Entry = Value;
} else {
@@ -181,7 +186,8 @@ GetBlockEntryListFromAddress (
IN UINT64 RegionStart,
OUT UINTN *TableLevel,
IN OUT UINT64 *BlockEntrySize,
- OUT UINT64 **LastBlockEntry
+ OUT UINT64 **LastBlockEntry,
+ OUT BOOLEAN *NewPageTablesAllocated
)
{
UINTN RootTableLevel;
@@ -292,6 +298,8 @@ GetBlockEntryListFromAddress (
return NULL;
}
+ *NewPageTablesAllocated = TRUE;
+
// Populate the newly created lower level table
SubTableBlockEntry = TranslationTable;
for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
@@ -316,10 +324,18 @@ GetBlockEntryListFromAddress (
return NULL;
}
+ *NewPageTablesAllocated = TRUE;
+
ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));
// Fill the new BlockEntry with the TranslationTable
- *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
+ if (!mReadOnlyPageTables) {
+ *BlockEntry = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
+ } else {
+ ReplaceLiveEntry (BlockEntry,
+ (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY,
+ RegionStart);
+ }
}
}
}
@@ -345,7 +361,8 @@ UpdateRegionMapping (
IN UINT64 RegionStart,
IN UINT64 RegionLength,
IN UINT64 Attributes,
- IN UINT64 BlockEntryMask
+ IN UINT64 BlockEntryMask,
+ OUT BOOLEAN *ReadOnlyRemapDone
)
{
UINT32 Type;
@@ -353,6 +370,7 @@ UpdateRegionMapping (
UINT64 *LastBlockEntry;
UINT64 BlockEntrySize;
UINTN TableLevel;
+ BOOLEAN NewPageTablesAllocated;
// Ensure the Length is aligned on 4KB boundary
if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
@@ -360,11 +378,13 @@ UpdateRegionMapping (
return EFI_INVALID_PARAMETER;
}
+ NewPageTablesAllocated = FALSE;
do {
// Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
// such as the the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
BlockEntrySize = RegionLength;
- BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
+ BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart,
+ &TableLevel, &BlockEntrySize, &LastBlockEntry, &NewPageTablesAllocated);
if (BlockEntry == NULL) {
// GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables
return EFI_OUT_OF_RESOURCES;
@@ -378,10 +398,16 @@ UpdateRegionMapping (
do {
// Fill the Block Entry with attribute and output block address
- *BlockEntry &= BlockEntryMask;
- *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;
+ if (!mReadOnlyPageTables) {
+ *BlockEntry &= BlockEntryMask;
+ *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;
- ArmUpdateTranslationTableEntry (BlockEntry, (VOID *)RegionStart);
+ ArmUpdateTranslationTableEntry (BlockEntry, (VOID *)RegionStart);
+ } else {
+ ReplaceLiveEntry (BlockEntry,
+ (*BlockEntry & BlockEntryMask) | (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type,
+ RegionStart);
+ }
// Go to the next BlockEntry
RegionStart += BlockEntrySize;
@@ -397,9 +423,79 @@ UpdateRegionMapping (
} while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
} while (RegionLength != 0);
+ // if we have switched to read-only page tables, find the newly allocated ones
+ // and update their permissions
+ if (mReadOnlyPageTables && NewPageTablesAllocated) {
+ MapAllPageTablesReadOnly ();
+ if (ReadOnlyRemapDone) {
+ *ReadOnlyRemapDone = TRUE;
+ }
+ }
+
return EFI_SUCCESS;
}
+STATIC
+BOOLEAN
+EFIAPI
+MapPageTableReadOnlyRecursive (
+ IN UINT64 *RootTable,
+ IN UINT64 *TableEntry,
+ IN UINTN NumEntries,
+ IN UINTN TableLevel
+ )
+{
+ EFI_STATUS Status;
+ BOOLEAN Done;
+
+ //
+ // The UpdateRegionMapping () call in this function may recurse into
+ // MapAllPageTablesReadOnly () if it allocates any page tables. When
+ // this happens, there is little point in proceeding here, so let's
+ // bail early in that case.
+ //
+ Done = FALSE;
+ Status = UpdateRegionMapping (RootTable, (UINT64)TableEntry, EFI_PAGE_SIZE,
+ TT_AP_RO_RO, ~TT_ADDRESS_MASK_BLOCK_ENTRY, &Done);
+ ASSERT_EFI_ERROR (Status);
+
+ if (TableLevel == 3) {
+ return Done;
+ }
+
+ // go over the table and recurse for each table type entry
+ while (!Done && NumEntries--) {
+ if ((*TableEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
+ Done = MapPageTableReadOnlyRecursive (RootTable,
+ (UINT64 *)(*TableEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE),
+ TT_ENTRY_COUNT, TableLevel + 1);
+ }
+ TableEntry++;
+ }
+ return Done;
+}
+
+VOID
+EFIAPI
+MapAllPageTablesReadOnly (
+ VOID
+ )
+{
+ UINTN T0SZ;
+ UINTN RootTableEntryCount;
+ UINTN RootLevel;
+ UINT64 *RootTable;
+
+ mReadOnlyPageTables = TRUE;
+
+ T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
+ GetRootTranslationTableInfo (T0SZ, &RootLevel, &RootTableEntryCount);
+ RootTable = ArmGetTTBR0BaseAddress ();
+
+ MapPageTableReadOnlyRecursive (RootTable, RootTable, RootTableEntryCount,
+ RootLevel);
+}
+
STATIC
EFI_STATUS
FillTranslationTable (
@@ -412,7 +508,8 @@ FillTranslationTable (
MemoryRegion->VirtualBase,
MemoryRegion->Length,
ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
- 0
+ 0,
+ NULL
);
}
@@ -494,7 +591,8 @@ ArmSetMemoryAttributes (
BaseAddress,
Length,
PageAttributes,
- PageAttributeMask);
+ PageAttributeMask,
+ NULL);
if (EFI_ERROR (Status)) {
return Status;
}
@@ -516,7 +614,8 @@ SetMemoryRegionAttribute (
RootTable = ArmGetTTBR0BaseAddress ();
- Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);
+ Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes,
+ BlockEntryMask, NULL);
if (EFI_ERROR (Status)) {
return Status;
}
@@ -844,3 +844,11 @@ ArmMmuBaseLibConstructor (
{
return RETURN_SUCCESS;
}
+
+VOID
+EFIAPI
+MapAllPageTablesReadOnly (
+ VOID
+ )
+{
+}
As a hardening measure, implement support for remapping all page tables
read-only at a certain point during the boot (end of DXE is the most
appropriate trigger). This should make it a lot more difficult to take
advantage of write exploits to defeat authentication checks, since the
attacker can no longer manipulate the page tables directly.

To allow the page tables to still be manipulated, make use of the
existing code to manipulate live entries: this drops into assembler
with interrupts off, and disables the MMU for a brief moment to avoid
causing TLB conflicts. Since page tables are writable with the MMU off,
we can reuse this code to still manipulate the page tables after we
updated the CPU mappings to be read-only.

Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 ArmPkg/Include/Library/ArmMmuLib.h               |   6 +
 ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c | 119 ++++++++++++++++++--
 ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibCore.c     |   8 ++
 3 files changed, 123 insertions(+), 10 deletions(-)

--
2.20.1

_______________________________________________
edk2-devel mailing list
edk2-devel@lists.01.org
https://lists.01.org/mailman/listinfo/edk2-devel