From nobody Mon Dec 23 00:12:47 2024 Delivered-To: importer@patchew.org Authentication-Results: mx.zohomail.com; spf=none (zoho.com: 198.145.21.10 is neither permitted nor denied by domain of lists.01.org) smtp.mailfrom=edk2-devel-bounces@lists.01.org Return-Path: Received: from ml01.01.org (ml01.01.org [198.145.21.10]) by mx.zohomail.com with SMTPS id 1521080880108309.8554255890765; Wed, 14 Mar 2018 19:28:00 -0700 (PDT) Received: from [127.0.0.1] (localhost [IPv6:::1]) by ml01.01.org (Postfix) with ESMTP id 29D0E22568636; Wed, 14 Mar 2018 19:21:33 -0700 (PDT) Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by ml01.01.org (Postfix) with ESMTPS id A5C5621E49035 for ; Wed, 14 Mar 2018 19:21:31 -0700 (PDT) Received: from orsmga001.jf.intel.com ([10.7.209.18]) by orsmga102.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 14 Mar 2018 19:27:54 -0700 Received: from jwang36-mobl2.ccr.corp.intel.com ([10.239.192.71]) by orsmga001.jf.intel.com with ESMTP; 14 Mar 2018 19:27:53 -0700 X-Original-To: edk2-devel@lists.01.org Received-SPF: none (zoho.com: 198.145.21.10 is neither permitted nor denied by domain of lists.01.org) client-ip=198.145.21.10; envelope-from=edk2-devel-bounces@lists.01.org; helo=ml01.01.org; Received-SPF: Pass (sender SPF authorized) identity=mailfrom; client-ip=134.134.136.24; helo=mga09.intel.com; envelope-from=jian.j.wang@intel.com; receiver=edk2-devel@lists.01.org X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.48,308,1517904000"; d="scan'208";a="39022334" From: Jian J Wang To: edk2-devel@lists.01.org Date: Thu, 15 Mar 2018 10:27:50 +0800 Message-Id: <20180315022750.15216-1-jian.j.wang@intel.com> X-Mailer: git-send-email 2.15.1.windows.2 Subject: [edk2] [PATCH] MdeModulePkg/Core: allow HeapGuard even before CpuArchProtocol installed X-BeenThere: 
edk2-devel@lists.01.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: EDK II Development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Cc: Jiewen Yao , Eric Dong , Star Zeng MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Errors-To: edk2-devel-bounces@lists.01.org Sender: "edk2-devel" X-ZohoMail: RSF_4 Z_629925259 SPT_0 Content-Type: text/plain; charset="utf-8" > v2 changes: > Fix a logic hole in bits operation on address on 64K boundary with > just 64-bit length (SetBits(), ClearBits(), GetBits()). Due to the fact that HeapGuard needs CpuArchProtocol to update page attributes, the feature is normally enabled after CpuArchProtocol is installed. Since there are some drivers loaded before CpuArchProtocol, they cannot make use of the HeapGuard feature to detect potential issues. This patch fixes the above situation by updating the DXE core to skip the NULL check against global gCpu in the IsMemoryTypeToGuard(), and adding a NULL check against gCpu in SetGuardPage() and UnsetGuardPage() to make sure that they can be called but do nothing. This will allow HeapGuard to record all guarded memory without setting the related Guard pages to not- present. Once the CpuArchProtocol is installed, a protocol notify will be called to complete the work of setting Guard pages to not-present. Please note that the above changes will cause a #PF in GCD code during cleanup of map entries, which is initiated by CpuDxe driver to update real mtrr and paging attributes back to GCD. During that time, CpuDxe doesn't allow GCD to update memory attributes and then any Guard page cannot be unset. As a result, this will prevent Guarded memory from being freed during memory map cleanup. The solution is to avoid allocating guarded memory as memory map entries in GCD code. It's done by setting global mOnGuarding to TRUE before memory allocation and setting it back to FALSE afterwards in GCD function CoreAllocateGcdMapEntry(). 
Cc: Star Zeng Cc: Eric Dong Cc: Jiewen Yao Contributed-under: TianoCore Contribution Agreement 1.1 Signed-off-by: Jian J Wang --- MdeModulePkg/Core/Dxe/Gcd/Gcd.c | 10 ++ MdeModulePkg/Core/Dxe/Mem/HeapGuard.c | 148 ++++++++++++++++++++++= ++-- MdeModulePkg/Core/Dxe/Mem/HeapGuard.h | 8 ++ MdeModulePkg/Core/Dxe/Misc/MemoryProtection.c | 5 + MdeModulePkg/Core/PiSmmCore/HeapGuard.c | 16 +-- 5 files changed, 174 insertions(+), 13 deletions(-) diff --git a/MdeModulePkg/Core/Dxe/Gcd/Gcd.c b/MdeModulePkg/Core/Dxe/Gcd/Gc= d.c index 8fbc3d282c..77f4adb4bc 100644 --- a/MdeModulePkg/Core/Dxe/Gcd/Gcd.c +++ b/MdeModulePkg/Core/Dxe/Gcd/Gcd.c @@ -16,6 +16,7 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER= EXPRESS OR IMPLIED. =20 #include "DxeMain.h" #include "Gcd.h" +#include "Mem/HeapGuard.h" =20 #define MINIMUM_INITIAL_MEMORY_SIZE 0x10000 =20 @@ -391,12 +392,21 @@ CoreAllocateGcdMapEntry ( IN OUT EFI_GCD_MAP_ENTRY **BottomEntry ) { + // + // Set to mOnGuarding to TRUE before memory allocation. This will make s= ure + // that the entry memory is not "guarded" by HeapGuard. Otherwise it mig= ht + // cause problem when it's freed (if HeapGuard is enabled). 
+ // + mOnGuarding =3D TRUE; *TopEntry =3D AllocateZeroPool (sizeof (EFI_GCD_MAP_ENTRY)); + mOnGuarding =3D FALSE; if (*TopEntry =3D=3D NULL) { return EFI_OUT_OF_RESOURCES; } =20 + mOnGuarding =3D TRUE; *BottomEntry =3D AllocateZeroPool (sizeof (EFI_GCD_MAP_ENTRY)); + mOnGuarding =3D FALSE; if (*BottomEntry =3D=3D NULL) { CoreFreePool (*TopEntry); return EFI_OUT_OF_RESOURCES; diff --git a/MdeModulePkg/Core/Dxe/Mem/HeapGuard.c b/MdeModulePkg/Core/Dxe/= Mem/HeapGuard.c index ac043b5d9b..fd6aeee8da 100644 --- a/MdeModulePkg/Core/Dxe/Mem/HeapGuard.c +++ b/MdeModulePkg/Core/Dxe/Mem/HeapGuard.c @@ -70,7 +70,7 @@ SetBits ( StartBit =3D (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address); EndBit =3D (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS; =20 - if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) { + if ((StartBit + BitNumber) >=3D GUARDED_HEAP_MAP_ENTRY_BITS) { Msbs =3D (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) % GUARDED_HEAP_MAP_ENTRY_BITS; Lsbs =3D (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS; @@ -123,7 +123,7 @@ ClearBits ( StartBit =3D (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address); EndBit =3D (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS; =20 - if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) { + if ((StartBit + BitNumber) >=3D GUARDED_HEAP_MAP_ENTRY_BITS) { Msbs =3D (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) % GUARDED_HEAP_MAP_ENTRY_BITS; Lsbs =3D (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS; @@ -188,10 +188,14 @@ GetBits ( Lsbs =3D 0; } =20 - Result =3D RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1= ); - if (Lsbs > 0) { - BitMap +=3D 1; - Result |=3D LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs); + if (StartBit =3D=3D 0 && BitNumber =3D=3D GUARDED_HEAP_MAP_ENTRY_BITS) { + Result =3D *BitMap; + } else { + Result =3D RShiftU64((*BitMap), StartBit) & (LShiftU64(1, Msbs) - 1= ); + if (Lsbs > 0) { + BitMap +=3D 1; + Result |=3D LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs); + } } =20 return Result; @@ 
-576,6 +580,10 @@ SetGuardPage ( IN EFI_PHYSICAL_ADDRESS BaseAddress ) { + if (gCpu =3D=3D NULL) { + return; + } + // // Set flag to make sure allocating memory without GUARD for page table // operation; otherwise infinite loops could be caused. @@ -606,6 +614,10 @@ UnsetGuardPage ( { UINT64 Attributes; =20 + if (gCpu =3D=3D NULL) { + return; + } + // // Once the Guard page is unset, it will be freed back to memory pool. NX // memory protection must be restored for this page if NX is enabled for= free @@ -652,7 +664,7 @@ IsMemoryTypeToGuard ( UINT64 ConfigBit; BOOLEAN InSmm; =20 - if (gCpu =3D=3D NULL || AllocateType =3D=3D AllocateAddress) { + if (AllocateType =3D=3D AllocateAddress) { return FALSE; } =20 @@ -1164,6 +1176,128 @@ CoreConvertPagesWithGuard ( return CoreConvertPages (Start, NumberOfPages, NewType); } =20 +/** + Set all Guard pages which cannot be set before CPU Arch Protocol install= ed. +**/ +VOID +SetAllGuardPages ( + VOID + ) +{ + UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH]; + UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH]; + UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH]; + UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH]; + UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH]; + UINT64 TableEntry; + UINT64 Address; + UINT64 GuardPage; + INTN Level; + UINTN Index; + BOOLEAN OnGuarding; + + if (mGuardedMemoryMap =3D=3D 0 || + mMapLevel =3D=3D 0 || + mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) { + return; + } + + CopyMem (Entries, mLevelMask, sizeof (Entries)); + CopyMem (Shifts, mLevelShift, sizeof (Shifts)); + + SetMem (Tables, sizeof(Tables), 0); + SetMem (Addresses, sizeof(Addresses), 0); + SetMem (Indices, sizeof(Indices), 0); + + Level =3D GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel; + Tables[Level] =3D mGuardedMemoryMap; + Address =3D 0; + OnGuarding =3D FALSE; + + DEBUG_CODE ( + DumpGuardedMemoryBitmap (); + ); + + while (TRUE) { + if (Indices[Level] > Entries[Level]) { + Tables[Level] =3D 0; + Level -=3D 1; + } else { + + TableEntry =3D ((UINT64 
*)(UINTN)(Tables[Level]))[Indices[Level]]; + Address =3D Addresses[Level]; + + if (TableEntry =3D=3D 0) { + + OnGuarding =3D FALSE; + + } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) { + + Level +=3D 1; + Tables[Level] =3D TableEntry; + Addresses[Level] =3D Address; + Indices[Level] =3D 0; + + continue; + + } else { + + Index =3D 0; + while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) { + if ((TableEntry & 1) =3D=3D 1) { + if (OnGuarding) { + GuardPage =3D 0; + } else { + GuardPage =3D Address - EFI_PAGE_SIZE; + } + OnGuarding =3D TRUE; + } else { + if (OnGuarding) { + GuardPage =3D Address; + } else { + GuardPage =3D 0; + } + OnGuarding =3D FALSE; + } + + if (GuardPage !=3D 0) { + SetGuardPage (GuardPage); + } + + if (TableEntry =3D=3D 0) { + break; + } + + TableEntry =3D RShiftU64 (TableEntry, 1); + Address +=3D EFI_PAGE_SIZE; + Index +=3D 1; + } + } + } + + if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) { + break; + } + + Indices[Level] +=3D 1; + Address =3D (Level =3D=3D 0) ? 0 : Addresses[Level - 1]; + Addresses[Level] =3D Address | LShiftU64(Indices[Level], Shifts[Level]= ); + + } +} + +/** + Notify function used to set all Guard pages before CPU Arch Protocol ins= talled. +**/ +VOID +HeapGuardCpuArchProtocolNotify ( + VOID + ) +{ + ASSERT (gCpu !=3D NULL); + SetAllGuardPages (); +} + /** Helper function to convert a UINT64 value in binary to a string. =20 diff --git a/MdeModulePkg/Core/Dxe/Mem/HeapGuard.h b/MdeModulePkg/Core/Dxe/= Mem/HeapGuard.h index 7208ab1437..8c34692439 100644 --- a/MdeModulePkg/Core/Dxe/Mem/HeapGuard.h +++ b/MdeModulePkg/Core/Dxe/Mem/HeapGuard.h @@ -399,6 +399,14 @@ IsHeapGuardEnabled ( VOID ); =20 +/** + Notify function used to set all Guard pages after CPU Arch Protocol inst= alled. 
+**/ +VOID +HeapGuardCpuArchProtocolNotify ( + VOID + ); + extern BOOLEAN mOnGuarding; =20 #endif diff --git a/MdeModulePkg/Core/Dxe/Misc/MemoryProtection.c b/MdeModulePkg/C= ore/Dxe/Misc/MemoryProtection.c index 407aece807..2f7e490af1 100644 --- a/MdeModulePkg/Core/Dxe/Misc/MemoryProtection.c +++ b/MdeModulePkg/Core/Dxe/Misc/MemoryProtection.c @@ -1001,6 +1001,11 @@ MemoryProtectionCpuArchProtocolNotify ( InitializeDxeNxMemoryProtectionPolicy (); } =20 + // + // Call notify function meant for Heap Guard. + // + HeapGuardCpuArchProtocolNotify (); + if (mImageProtectionPolicy =3D=3D 0) { return; } diff --git a/MdeModulePkg/Core/PiSmmCore/HeapGuard.c b/MdeModulePkg/Core/Pi= SmmCore/HeapGuard.c index 923af93de2..f9657f9baa 100644 --- a/MdeModulePkg/Core/PiSmmCore/HeapGuard.c +++ b/MdeModulePkg/Core/PiSmmCore/HeapGuard.c @@ -73,7 +73,7 @@ SetBits ( StartBit =3D (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address); EndBit =3D (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS; =20 - if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) { + if ((StartBit + BitNumber) >=3D GUARDED_HEAP_MAP_ENTRY_BITS) { Msbs =3D (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) % GUARDED_HEAP_MAP_ENTRY_BITS; Lsbs =3D (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS; @@ -126,7 +126,7 @@ ClearBits ( StartBit =3D (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address); EndBit =3D (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS; =20 - if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) { + if ((StartBit + BitNumber) >=3D GUARDED_HEAP_MAP_ENTRY_BITS) { Msbs =3D (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) % GUARDED_HEAP_MAP_ENTRY_BITS; Lsbs =3D (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS; @@ -191,10 +191,14 @@ GetBits ( Lsbs =3D 0; } =20 - Result =3D RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1= ); - if (Lsbs > 0) { - BitMap +=3D 1; - Result |=3D LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs); + if (StartBit =3D=3D 0 && BitNumber =3D=3D GUARDED_HEAP_MAP_ENTRY_BITS) { + 
Result =3D *BitMap; + } else { + Result =3D RShiftU64((*BitMap), StartBit) & (LShiftU64(1, Msbs) - 1= ); + if (Lsbs > 0) { + BitMap +=3D 1; + Result |=3D LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs); + } } =20 return Result; --=20 2.16.2.windows.1 _______________________________________________ edk2-devel mailing list edk2-devel@lists.01.org https://lists.01.org/mailman/listinfo/edk2-devel