On 7/23/19 2:48 AM, Laszlo Ersek wrote:
> (+ Brijesh, and one comment below)
>
> On 07/23/19 04:05, Wu, Hao A wrote:
>>> -----Original Message-----
>>> From: devel@edk2.groups.io [mailto:devel@edk2.groups.io] On Behalf Of Ni,
>>> Ray
>>> Sent: Monday, July 22, 2019 4:16 PM
>>> To: devel@edk2.groups.io
>>> Cc: Dong, Eric; Laszlo Ersek
>>> Subject: [edk2-devel] [PATCH 4/4] MdeModulePkg/DxeIpl: Create 5-level
>>> page table for long mode
>>>
>>> REF: https://bugzilla.tianocore.org/show_bug.cgi?id=2008
>>>
>>> DxeIpl is responsible to create page table for DXE phase running
>>> either in long mode or in 32bit mode with certain protection
>>> mechanism enabled (refer to ToBuildPageTable()).
>>>
>>> The patch updates DxeIpl to create 5-level page table for DXE phase
>>> running in long mode when PcdUse5LevelPageTable is TRUE and CPU
>>> supports 5-level page table.
>>>
>>> Signed-off-by: Ray Ni <ray...@intel.com>
>>> Cc: Eric Dong <eric.d...@intel.com>
>>> Cc: Laszlo Ersek <ler...@redhat.com>
>>> ---
>>> MdeModulePkg/Core/DxeIplPeim/DxeIpl.inf | 1 +
>>> .../Core/DxeIplPeim/X64/VirtualMemory.c | 227 ++++++++++++------
>>> 2 files changed, 151 insertions(+), 77 deletions(-)
>>>
>>> diff --git a/MdeModulePkg/Core/DxeIplPeim/DxeIpl.inf
>>> b/MdeModulePkg/Core/DxeIplPeim/DxeIpl.inf
>>> index abc3217b01..98bc17fc9d 100644
>>> --- a/MdeModulePkg/Core/DxeIplPeim/DxeIpl.inf
>>> +++ b/MdeModulePkg/Core/DxeIplPeim/DxeIpl.inf
>>> @@ -110,6 +110,7 @@ [Pcd.IA32,Pcd.X64]
>>>
>>> gEfiMdeModulePkgTokenSpaceGuid.PcdNullPointerDetectionPropertyMask
>>> ## CONSUMES
>>> gEfiMdeModulePkgTokenSpaceGuid.PcdHeapGuardPropertyMask
>>> ## CONSUMES
>>> gEfiMdeModulePkgTokenSpaceGuid.PcdCpuStackGuard ##
>>> CONSUMES
>>> + gEfiMdeModulePkgTokenSpaceGuid.PcdUse5LevelPageTable ##
>>> SOMETIMES_CONSUMES
>>>
>>> [Pcd.IA32,Pcd.X64,Pcd.ARM,Pcd.AARCH64]
>>> gEfiMdeModulePkgTokenSpaceGuid.PcdSetNxForStack ##
>>> SOMETIMES_CONSUMES
>>> diff --git a/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
>>> b/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
>>> index edc38e4525..a5bcdcc734 100644
>>> --- a/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
>>> +++ b/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
>>> @@ -15,7 +15,7 @@
>>> 2) IA-32 Intel(R) Architecture Software Developer's Manual Volume
>>> 2:Instruction Set Reference, Intel
>>> 3) IA-32 Intel(R) Architecture Software Developer's Manual Volume
>>> 3:System Programmer's Guide, Intel
>>>
>>> -Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
>>> +Copyright (c) 2006 - 2019, Intel Corporation. All rights reserved.<BR>
>>> Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
>>>
>>> SPDX-License-Identifier: BSD-2-Clause-Patent
>>> @@ -626,14 +626,19 @@ CreateIdentityMappingPageTables (
>>> )
>>> {
>>> UINT32 RegEax;
>>> + UINT32 RegEbx;
>>> + UINT32 RegEcx;
>>> UINT32 RegEdx;
>>> UINT8 PhysicalAddressBits;
>>> EFI_PHYSICAL_ADDRESS PageAddress;
>>> + UINTN IndexOfPml5Entries;
>>> UINTN IndexOfPml4Entries;
>>> UINTN IndexOfPdpEntries;
>>> UINTN
>>> IndexOfPageDirectoryEntries;
>>> + UINT32 NumberOfPml5EntriesNeeded;
>>> UINT32 NumberOfPml4EntriesNeeded;
>>> UINT32 NumberOfPdpEntriesNeeded;
>>> + PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel5Entry;
>>> PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;
>>> PAGE_MAP_AND_DIRECTORY_POINTER *PageMap;
>>> PAGE_MAP_AND_DIRECTORY_POINTER
>>> *PageDirectoryPointerEntry;
>>> @@ -641,9 +646,11 @@ CreateIdentityMappingPageTables (
>>> UINTN TotalPagesNum;
>>> UINTN BigPageAddress;
>>> VOID *Hob;
>>> + BOOLEAN Page5LevelSupport;
>>> BOOLEAN Page1GSupport;
>>> PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;
>>> UINT64 AddressEncMask;
>>> + IA32_CR4 Cr4;
>>>
>>> //
>>> // Make sure AddressEncMask is contained to smallest supported address
>>> field
>>> //
>>> @@ -677,33 +684,66 @@ CreateIdentityMappingPageTables (
>>> }
>>> }
>>>
>>> + Page5LevelSupport = FALSE;
>>> + if (PcdGetBool (PcdUse5LevelPageTable)) {
>>> + AsmCpuidEx (0x7, 0, &RegEax, &RegEbx, &RegEcx, &RegEdx);
>>> + DEBUG ((DEBUG_INFO, "Cpuid(7/0): %08x/%08x/%08x/%08x\n", RegEax,
>>> RegEbx, RegEcx, RegEdx));
>>> + if ((RegEcx & BIT16) != 0) {
>>> + Page5LevelSupport = TRUE;
>>> + }
>>> + }
>>> +
>>> + DEBUG ((DEBUG_INFO, "AddressBits/5LevelPaging/1GPage
>>> = %d/%d/%d\n", PhysicalAddressBits, Page5LevelSupport, Page1GSupport));
>>> +
>>> //
>>> - // IA-32e paging translates 48-bit linear addresses to 52-bit physical
>>> addresses.
>>> + // IA-32e paging translates 48-bit linear addresses to 52-bit physical
>>> addresses
>>> + // when 5-Level Paging is disabled,
>>> + // due to either unsupported by HW, or disabled by PCD.
>>> //
>>> ASSERT (PhysicalAddressBits <= 52);
>>> - if (PhysicalAddressBits > 48) {
>>> + if (!Page5LevelSupport && PhysicalAddressBits > 48) {
>>> PhysicalAddressBits = 48;
>>> }
>>>
>>> //
>>> // Calculate the table entries needed.
>>> //
>>> - if (PhysicalAddressBits <= 39 ) {
>>> - NumberOfPml4EntriesNeeded = 1;
>>> - NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1,
>>> (PhysicalAddressBits - 30));
>>> - } else {
>>> - NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1,
>>> (PhysicalAddressBits - 39));
>>> - NumberOfPdpEntriesNeeded = 512;
>>> + NumberOfPml5EntriesNeeded = 1;
>>> + if (PhysicalAddressBits > 48) {
>>> + NumberOfPml5EntriesNeeded = (UINT32) LShiftU64 (1,
>>> PhysicalAddressBits - 48);
>>> + PhysicalAddressBits = 48;
>>> }
>>>
>>> + NumberOfPml4EntriesNeeded = 1;
>>> + if (PhysicalAddressBits > 39) {
>>> + NumberOfPml4EntriesNeeded = (UINT32) LShiftU64 (1,
>>> PhysicalAddressBits - 39);
>>> + PhysicalAddressBits = 39;
>>> + }
>>> +
>>> + NumberOfPdpEntriesNeeded = 1;
>>> + ASSERT (PhysicalAddressBits > 30);
>>> + NumberOfPdpEntriesNeeded = (UINT32) LShiftU64 (1, PhysicalAddressBits
>>> - 30);
>>> +
>>> //
>>> // Pre-allocate big pages to avoid later allocations.
>>> // >>> if (!Page1GSupport) { >>> - TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * >>> NumberOfPml4EntriesNeeded + 1; >>> + TotalPagesNum = ((NumberOfPdpEntriesNeeded + 1) * >>> NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1; >>> } else { >>> - TotalPagesNum = NumberOfPml4EntriesNeeded + 1; >>> + TotalPagesNum = (NumberOfPml4EntriesNeeded + 1) * >>> NumberOfPml5EntriesNeeded + 1; >>> + } >>> + >>> + // >>> + // Substract the one page occupied by PML5 entries if 5-Level Paging is >>> disabled. >>> + // >>> + if (!Page5LevelSupport) { >>> + TotalPagesNum--; >>> } >>> + >>> + DEBUG ((DEBUG_INFO, "Pml5/Pml4/Pdp/TotalPage = %d/%d/%d/%d\n", >>> + NumberOfPml5EntriesNeeded, NumberOfPml4EntriesNeeded, >>> + NumberOfPdpEntriesNeeded, TotalPagesNum)); >>> + >>> BigPageAddress = (UINTN) AllocatePageTableMemory (TotalPagesNum); >>> ASSERT (BigPageAddress != 0); >>> >>> @@ -711,92 +751,125 @@ CreateIdentityMappingPageTables ( >>> // By architecture only one PageMapLevel4 exists - so lets allocate >>> storage >>> for it. >>> // >>> PageMap = (VOID *) BigPageAddress; >>> - BigPageAddress += SIZE_4KB; >>> - >>> - PageMapLevel4Entry = PageMap; >>> - PageAddress = 0; >>> - for (IndexOfPml4Entries = 0; IndexOfPml4Entries < >>> NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, >>> PageMapLevel4Entry++) { >>> + if (Page5LevelSupport) { >>> // >>> - // Each PML4 entry points to a page of Page Directory Pointer entires. >>> - // So lets allocate space for them and fill them in in the >>> IndexOfPdpEntries >>> loop. >>> + // By architecture only one PageMapLevel5 exists - so lets allocate >>> storage >>> for it. >>> // >>> - PageDirectoryPointerEntry = (VOID *) BigPageAddress; >>> - BigPageAddress += SIZE_4KB; >>> + PageMapLevel5Entry = PageMap; >>> + BigPageAddress += SIZE_4KB; >>> + } >>> + PageAddress = 0; >>> >>> + for ( IndexOfPml5Entries = 0 >>> + ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded >>> + ; IndexOfPml5Entries++, PageMapLevel5Entry++) { >>> // >>> - // Make a PML4 Entry >>> + // Each PML5 entry points to a page of PML4 entires. >>> + // So lets allocate space for them and fill them in in the >>> IndexOfPml4Entries loop. >>> + // When 5-Level Paging is disabled, below allocation happens only once. >>> // >>> - PageMapLevel4Entry->Uint64 = >>> (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask; >>> - PageMapLevel4Entry->Bits.ReadWrite = 1; >>> - PageMapLevel4Entry->Bits.Present = 1; >>> + PageMapLevel4Entry = (VOID *) BigPageAddress; >>> + BigPageAddress += SIZE_4KB; >>> >>> - if (Page1GSupport) { >>> - PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry; >>> + if (Page5LevelSupport) { >>> + // >>> + // Make a PML5 Entry >>> + // >>> + PageMapLevel5Entry->Uint64 = (UINT64) (UINTN) PageMapLevel4Entry; >> >> >> Hello, >> >> Do we need to bitwise-or with 'AddressEncMask' here? > > Indeed it crossed my mind how this extension intersects with SEV. > > I didn't ask the question myself because I thought 5-level paging didn't > apply to the AMD processors that supported SEV. But maybe we should > figure that out now regardless. >
We should apply the PAGE_ENC mask while creating the 5-level page table entries (similar to what we do today). The currently available processors which support SEV do not support 5-level page tables, but I expect things will change in future generations.

-Brijesh

> Thanks
> Laszlo
>
>> Best Regards,
>> Hao Wu
>>
>>
>>> + PageMapLevel5Entry->Bits.ReadWrite = 1;
>>> + PageMapLevel5Entry->Bits.Present = 1;
>>> + }
>>>
>>> - for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries <
>>> 512;
>>> IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress +=
>>> SIZE_1GB) {
>>> - if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase,
>>> StackSize)) {
>>> - Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry,
>>> StackBase, StackSize);
>>> - } else {
>>> - //
>>> - // Fill in the Page Directory entries
>>> - //
>>> - PageDirectory1GEntry->Uint64 = (UINT64)PageAddress |
>>> AddressEncMask;
>>> - PageDirectory1GEntry->Bits.ReadWrite = 1;
>>> - PageDirectory1GEntry->Bits.Present = 1;
>>> - PageDirectory1GEntry->Bits.MustBe1 = 1;
>>> - }
>>> - }
>>> - } else {
>>> - for (IndexOfPdpEntries = 0; IndexOfPdpEntries <
>>> NumberOfPdpEntriesNeeded; IndexOfPdpEntries++,
>>> PageDirectoryPointerEntry++) {
>>> - //
>>> - // Each Directory Pointer entries points to a page of Page
>>> Directory
>>> entires.
>>> - // So allocate space for them and fill them in in the
>>> IndexOfPageDirectoryEntries loop.
>>> - //
>>> - PageDirectoryEntry = (VOID *) BigPageAddress;
>>> - BigPageAddress += SIZE_4KB;
>>> + for ( IndexOfPml4Entries = 0
>>> + ; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ?
>>> NumberOfPml4EntriesNeeded : 512)
>>> + ; IndexOfPml4Entries++, PageMapLevel4Entry++) {
>>> + //
>>> + // Each PML4 entry points to a page of Page Directory Pointer
>>> entires.
>>> + // So lets allocate space for them and fill them in in the
>>> IndexOfPdpEntries loop.
>>> + //
>>> + PageDirectoryPointerEntry = (VOID *) BigPageAddress;
>>> + BigPageAddress += SIZE_4KB;
>>>
>>> - //
>>> - // Fill in a Page Directory Pointer Entries
>>> - //
>>> - PageDirectoryPointerEntry->Uint64 =
>>> (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
>>> - PageDirectoryPointerEntry->Bits.ReadWrite = 1;
>>> - PageDirectoryPointerEntry->Bits.Present = 1;
>>> + //
>>> + // Make a PML4 Entry
>>> + //
>>> + PageMapLevel4Entry->Uint64 =
>>> (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
>>> + PageMapLevel4Entry->Bits.ReadWrite = 1;
>>> + PageMapLevel4Entry->Bits.Present = 1;
>>>
>>> - for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries <
>>> 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress +=
>>> SIZE_2MB) {
>>> - if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase,
>>> StackSize)) {
>>> - //
>>> - // Need to split this 2M page that covers NULL or stack range.
>>> - // >>> - Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, >>> StackBase, StackSize); >>> + if (Page1GSupport) { >>> + PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry; >>> + >>> + for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < >>> 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress >>> += SIZE_1GB) { >>> + if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, >>> StackSize)) { >>> + Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, >>> StackBase, StackSize); >>> } else { >>> // >>> // Fill in the Page Directory entries >>> // >>> - PageDirectoryEntry->Uint64 = (UINT64)PageAddress | >>> AddressEncMask; >>> - PageDirectoryEntry->Bits.ReadWrite = 1; >>> - PageDirectoryEntry->Bits.Present = 1; >>> - PageDirectoryEntry->Bits.MustBe1 = 1; >>> + PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | >>> AddressEncMask; >>> + PageDirectory1GEntry->Bits.ReadWrite = 1; >>> + PageDirectory1GEntry->Bits.Present = 1; >>> + PageDirectory1GEntry->Bits.MustBe1 = 1; >>> } >>> } >>> - } >>> + } else { >>> + for ( IndexOfPdpEntries = 0 >>> + ; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? >>> NumberOfPdpEntriesNeeded : 512) >>> + ; IndexOfPdpEntries++, PageDirectoryPointerEntry++) { >>> + // >>> + // Each Directory Pointer entries points to a page of Page >>> Directory >>> entires. >>> + // So allocate space for them and fill them in in the >>> IndexOfPageDirectoryEntries loop. >>> + // >>> + PageDirectoryEntry = (VOID *) BigPageAddress; >>> + BigPageAddress += SIZE_4KB; >>> >>> - for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, >>> PageDirectoryPointerEntry++) { >>> - ZeroMem ( >>> - PageDirectoryPointerEntry, >>> - sizeof(PAGE_MAP_AND_DIRECTORY_POINTER) >>> - ); >>> + // >>> + // Fill in a Page Directory Pointer Entries >>> + // >>> + PageDirectoryPointerEntry->Uint64 = >>> (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask; >>> + PageDirectoryPointerEntry->Bits.ReadWrite = 1; >>> + PageDirectoryPointerEntry->Bits.Present = 1; >>> + >>> + for (IndexOfPageDirectoryEntries = 0; >>> IndexOfPageDirectoryEntries < >>> 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += >>> SIZE_2MB) { >>> + if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, >>> StackSize)) { >>> + // >>> + // Need to split this 2M page that covers NULL or stack >>> range. >>> + // >>> + Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, >>> StackBase, StackSize); >>> + } else { >>> + // >>> + // Fill in the Page Directory entries >>> + // >>> + PageDirectoryEntry->Uint64 = (UINT64)PageAddress | >>> AddressEncMask; >>> + PageDirectoryEntry->Bits.ReadWrite = 1; >>> + PageDirectoryEntry->Bits.Present = 1; >>> + PageDirectoryEntry->Bits.MustBe1 = 1; >>> + } >>> + } >>> + } >>> + >>> + // >>> + // Fill with null entry for unused PDPTE >>> + // >>> + ZeroMem (PageDirectoryPointerEntry, (512 - IndexOfPdpEntries) * >>> sizeof(PAGE_MAP_AND_DIRECTORY_POINTER)); >>> } >>> } >>> + >>> + // >>> + // For the PML4 entries we are not using fill in a null entry. >>> + // >>> + ZeroMem (PageMapLevel4Entry, (512 - IndexOfPml4Entries) * sizeof >>> (PAGE_MAP_AND_DIRECTORY_POINTER)); >>> } >>> >>> - // >>> - // For the PML4 entries we are not using fill in a null entry. 
>>> - //
>>> - for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++,
>>> PageMapLevel4Entry++) {
>>> - ZeroMem (
>>> - PageMapLevel4Entry,
>>> - sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
>>> - );
>>> + if (Page5LevelSupport) {
>>> + Cr4.UintN = AsmReadCr4 ();
>>> + Cr4.Bits.LA57 = 1;
>>> + AsmWriteCr4 (Cr4.UintN);
>>> + //
>>> + // For the PML5 entries we are not using fill in a null entry.
>>> + //
>>> + ZeroMem (PageMapLevel5Entry, (512 - IndexOfPml5Entries) * sizeof
>>> (PAGE_MAP_AND_DIRECTORY_POINTER));
>>> }
>>>
>>> //
>>> --
>>> 2.21.0.windows.1
>>>
>>>
>>>
>>
>
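The sizing hunks in the quoted patch boil down to counting how many PML5, PML4 and PDP entries the physical address width implies, then budgeting one 4 KiB page per table. Below is a minimal, self-contained sketch of that arithmetic in plain C; standard integer types and shifts stand in for the EDK2 UINT32/UINTN types and LShiftU64(), and the function name is illustrative, so treat it as a model of the calculation rather than the firmware code itself.

  /*
   * Sketch of the page-count math from CreateIdentityMappingPageTables(),
   * using plain C99 types instead of the EDK2 ones. Illustration only.
   */
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  static uint64_t
  TotalPageTablePages (uint8_t PhysicalAddressBits,
                       bool    Page5LevelSupport,
                       bool    Page1GSupport)
  {
    uint32_t Pml5Entries;
    uint32_t Pml4Entries;
    uint32_t PdpEntries;
    uint64_t TotalPagesNum;

    /* Without 5-level paging, linear addresses are limited to 48 bits. */
    if (!Page5LevelSupport && PhysicalAddressBits > 48) {
      PhysicalAddressBits = 48;
    }

    Pml5Entries = 1;
    if (PhysicalAddressBits > 48) {
      Pml5Entries = 1u << (PhysicalAddressBits - 48);
      PhysicalAddressBits = 48;
    }

    Pml4Entries = 1;
    if (PhysicalAddressBits > 39) {
      Pml4Entries = 1u << (PhysicalAddressBits - 39);
      PhysicalAddressBits = 39;
    }

    PdpEntries = 1u << (PhysicalAddressBits - 30);

    if (!Page1GSupport) {
      TotalPagesNum = ((uint64_t)(PdpEntries + 1) * Pml4Entries + 1) * Pml5Entries + 1;
    } else {
      TotalPagesNum = (uint64_t)(Pml4Entries + 1) * Pml5Entries + 1;
    }

    /* The page reserved for PML5 entries is not needed when 5-level paging is off. */
    if (!Page5LevelSupport) {
      TotalPagesNum--;
    }

    return TotalPagesNum;
  }

  int
  main (void)
  {
    /* Prints 262657 (48-bit, 4-level, 2MB pages), 513 (same but 1GB pages),
       and 8209 (52-bit, 5-level, 1GB pages). */
    printf ("%llu\n", (unsigned long long)TotalPageTablePages (48, false, false));
    printf ("%llu\n", (unsigned long long)TotalPageTablePages (48, false, true));
    printf ("%llu\n", (unsigned long long)TotalPageTablePages (52, true,  true));
    return 0;
  }

For a 48-bit address space with 4-level paging and no 1 GiB pages this works out to 262,657 four-KiB table pages, roughly 1 GiB of page-table storage, which is why the much cheaper Page1GSupport path (513 pages) exists.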
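On the AddressEncMask point discussed in the thread, the follow-up amounts to OR-ing the encryption mask into the new PML5 entry the same way the existing PML4 and PDPTE writes already do (in this file the mask is derived from PcdPteMemoryEncryptionAddressOrMask). A hedged sketch follows: the plain-C bitfield union stands in for EDK2's PAGE_MAP_AND_DIRECTORY_POINTER, and the helper name and example C-bit position are made up for illustration.

  /*
   * Sketch only: a PML5 entry picking up the SEV encryption mask, mirroring
   * the existing PML4/PDPTE pattern. The union models a 4-level/5-level
   * table-pointer entry (table address in bits 51:12); names are illustrative.
   */
  #include <stdint.h>
  #include <stdio.h>

  typedef union {
    struct {
      uint64_t Present              : 1;
      uint64_t ReadWrite            : 1;
      uint64_t UserSupervisor       : 1;
      uint64_t WriteThrough         : 1;
      uint64_t CacheDisabled        : 1;
      uint64_t Accessed             : 1;
      uint64_t Reserved             : 1;
      uint64_t MustBeZero           : 2;
      uint64_t Available            : 3;
      uint64_t PageTableBaseAddress : 40;
      uint64_t AvailableHigh        : 11;
      uint64_t Nx                   : 1;
    } Bits;
    uint64_t Uint64;
  } MAP_ENTRY;

  /* Point a PML5 entry at a PML4 page, OR-ing in the encryption mask
     (zero when memory encryption is not active). */
  static void
  MakePml5Entry (MAP_ENTRY *Pml5Entry, uint64_t Pml4PagePhysAddr, uint64_t AddressEncMask)
  {
    Pml5Entry->Uint64         = Pml4PagePhysAddr | AddressEncMask;
    Pml5Entry->Bits.ReadWrite = 1;
    Pml5Entry->Bits.Present   = 1;
  }

  int
  main (void)
  {
    MAP_ENTRY Pml5    = { .Uint64 = 0 };
    uint64_t  EncMask = 1ull << 47;   /* example C-bit position only; the real
                                         mask comes from the platform PCD */

    MakePml5Entry (&Pml5, 0x12345000ull, EncMask);
    printf ("PML5 entry = 0x%016llx\n", (unsigned long long)Pml5.Uint64);
    return 0;
  }

Carrying the mask at every paging level keeps the entry construction uniform and costs nothing where the mask is zero; whether an SEV-capable processor will ever combine the C-bit with 5-level paging is the open question raised above.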