From a3c44dd9db8240c8ad986f8856bf7cf41891e150 Mon Sep 17 00:00:00 2001
From: Dhaval Sharma
Date: Wed, 26 Oct 2022 15:29:51 +0530
Subject: [PATCH 1/5] OvmfPkg: RiscVVirt: Add switch to enable CMO support

Add a build-time switch to select CMO support.
Requires GCC Binutils version 2.39 onwards and CPU support.

Cc: Sunil V L
Signed-off-by: Dhaval
---
 OvmfPkg/Platforms/RiscVVirt/RiscVVirt.dsc | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/OvmfPkg/Platforms/RiscVVirt/RiscVVirt.dsc b/OvmfPkg/Platforms/RiscVVirt/RiscVVirt.dsc
index 33c945f57624..b90317fdd3b7 100644
--- a/OvmfPkg/Platforms/RiscVVirt/RiscVVirt.dsc
+++ b/OvmfPkg/Platforms/RiscVVirt/RiscVVirt.dsc
@@ -44,6 +44,13 @@
   DEFINE NETWORK_ISCSI_ENABLE = FALSE
   DEFINE NETWORK_ALLOW_HTTP_CONNECTIONS = TRUE

+  #
+  # CMO support for RISC-V. It depends on two factors: support in the compiler
+  # (GCC with Binutils 2.39 or later is required) and support in the CPU. This
+  # could become a runtime detection later, using the FDT or a feature CSR.
+  #
+  DEFINE RV_CMO_FEATURE_AVAILABLE = FALSE
+
 [BuildOptions]
   GCC:RELEASE_*_*_CC_FLAGS = -DMDEPKG_NDEBUG
 !ifdef $(SOURCE_DEBUG_ENABLE)
@@ -80,7 +87,11 @@
   CpuLib|MdePkg/Library/BaseCpuLib/BaseCpuLib.inf
   PerformanceLib|MdePkg/Library/BasePerformanceLibNull/BasePerformanceLibNull.inf
   PeCoffLib|MdePkg/Library/BasePeCoffLib/BasePeCoffLib.inf
+!if $(RV_CMO_FEATURE_AVAILABLE) == FALSE
+  CacheMaintenanceLib|MdePkg/Library/BaseCacheMaintenanceLibNull/BaseCacheMaintenanceLibNull.inf
+!else
   CacheMaintenanceLib|MdePkg/Library/BaseCacheMaintenanceLib/BaseCacheMaintenanceLib.inf
+!endif
   UefiDecompressLib|MdePkg/Library/BaseUefiDecompressLib/BaseUefiDecompressLib.inf
   UefiHiiServicesLib|MdeModulePkg/Library/UefiHiiServicesLib/UefiHiiServicesLib.inf
   HiiLib|MdeModulePkg/Library/UefiHiiLib/UefiHiiLib.inf

From 9532d101f57de535c8df2ebc44d0126c81fd73de Mon Sep 17 00:00:00 2001
From: Dhaval Sharma
Date: Wed, 26 Oct 2022 15:33:45 +0530
Subject: [PATCH 2/5] MdePkg/BaseCacheMaintenanceLib: Add CMO support

Implement CMO according to the RISC-V Zicbom extension specification.

Cc: Sunil V L
Signed-off-by: Dhaval
---
 .../BaseCacheMaintenanceLib/RiscVCache.c | 276 +++++++++++-------
 1 file changed, 166 insertions(+), 110 deletions(-)

diff --git a/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c b/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
index 67a3387ff3c6..81bf77d875d4 100644
--- a/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
+++ b/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
@@ -2,6 +2,7 @@
   RISC-V specific functionality for cache.

   Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.
+ Copyright (c) 2022, Rivos Inc. All rights reserved.
  SPDX-License-Identifier: BSD-2-Clause-Patent
 **/

@@ -11,51 +12,118 @@
 #include
 /**
-  RISC-V invalidate instruction cache.
-
+  Use runtime discovery mechanism in future when available
+  through https://lists.riscv.org/g/tech-privileged/topic/83853282
 **/
-VOID
-EFIAPI
-RiscVInvalidateInstCacheAsm (
-  VOID
-  );
+#define RV64_CACHE_BLOCK_SIZE 64
+
+#define __stringify_1(x...) #x
+#define __stringify(x...) __stringify_1(x)
+
+//asm volatile("nop")
+
+#define CMO_OP(_op, _start) \
+asm volatile("cbo." __stringify(_op) " (%0)" :: "r" (Start))
+
+typedef enum{
+  cln,
+  flsh,
+  invd,
+}CACHE_OP;

 /**
-  RISC-V invalidate data cache.
+  Performs the requested operation on cache lines in the cache coherency domain
+  of the calling CPU. If Address is not aligned on a cache line boundary,
+  then the entire cache line containing Address is operated on. If Address + Length
+  is not aligned on a cache line boundary, then the entire cache line
+  containing Address + Length -1 is operated on.
+
+  If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().
+
+  @param Address The base address of the cache lines to
+  operate on. If the CPU is in a physical addressing mode, then
+  Address is a physical address. If the CPU is in a virtual
+  addressing mode, then Address is a virtual address.
+
+  @param Length The number of bytes to operate on.
+
+  @return Address.
 **/
-VOID
+
+VOID *
 EFIAPI
-RiscVInvalidateDataCacheAsm (
-  VOID
-  );
+CacheOpCacheRange (
+  IN VOID *Address,
+  IN UINTN Length,
+  IN CACHE_OP op
+  )
+{
+  UINTN CacheLineSize;
+  UINTN Start;
+  UINTN End;
+
+  if (Length == 0) {
+    return Address;
+  }
+
+  ASSERT ((Length - 1) <= (MAX_ADDRESS - (UINTN)Address));
+
+  //
+  // Use the fixed RV64 cache block size until runtime discovery is available
+  //
+  CacheLineSize = RV64_CACHE_BLOCK_SIZE;
+
+  Start = (UINTN)Address;
+  //
+  // Calculate the cache line alignment
+  //
+  End = (Start + Length + (CacheLineSize - 1)) & ~(CacheLineSize - 1);
+  Start &= ~((UINTN)CacheLineSize - 1);
+
+  do {
+    switch (op) {
+      case invd:
+        CMO_OP(inval, Start);
+        break;
+      case flsh:
+        CMO_OP(flush, Start);
+        break;
+      case cln:
+        CMO_OP(clean, Start);
+        break;
+      default:
+        DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported operation\n", __FUNCTION__));
+        break;
+    }
+
+    Start = Start + CacheLineSize;
+  } while (Start != End);

-/**
-  Invalidates the entire instruction cache in cache coherency domain of the
-  calling CPU.
+  return Address;
+}
+/**
+  RV does not support whole cache invalidate functionality.
+  There may be a platform-level implementation which is
+  outside the scope of this implementation.
 **/
+
 VOID
 EFIAPI
 InvalidateInstructionCache (
   VOID
   )
 {
-  RiscVInvalidateInstCacheAsm ();
+  DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __FUNCTION__));
 }
+
 /**
-  Invalidates a range of instruction cache lines in the cache coherency domain
-  of the calling CPU.
-
-  Invalidates the instruction cache lines specified by Address and Length. If
-  Address is not aligned on a cache line boundary, then entire instruction
-  cache line containing Address is invalidated. If Address + Length is not
-  aligned on a cache line boundary, then the entire instruction cache line
-  containing Address + Length -1 is invalidated. This function may choose to
-  invalidate the entire instruction cache if that is more efficient than
-  invalidating the specified range. If Length is 0, then no instruction cache
-  lines are invalidated. Address is returned.
+  An invalidate operation makes data from store operations performed by a
+  set of non-coherent agents visible to the set of coherent agents at a
+  point common to both sets by deallocating all copies of a cache block
+  from the set of coherent caches up to that point.

   If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().

@@ -76,20 +144,71 @@ InvalidateInstructionCacheRange (
   IN UINTN Length
   )
 {
-  DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __FUNCTION__));
+  //RV does not support $I specific operation.
+  CacheOpCacheRange(Address, Length, invd);
   return Address;
 }

 /**
-  Writes back and invalidates the entire data cache in cache coherency domain
-  of the calling CPU.
+  RV does not support whole cache invalidate functionality.
+  There may be a platform-level implementation which is
+  outside the scope of this implementation.
+**/
+
+VOID
+EFIAPI
+InvalidateDataCache (
+  VOID
+  )
+{
+  DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __FUNCTION__));
+}
+
+/**
+  Invalidates a range of data cache lines in the cache coherency domain of the
+  calling CPU.
+
+  Invalidates the data cache lines specified by Address and Length. If Address
+  is not aligned on a cache line boundary, then entire data cache line
+  containing Address is invalidated. If Address + Length is not aligned on a
+  cache line boundary, then the entire data cache line containing Address +
+  Length -1 is invalidated. This function must never invalidate any cache lines
+  outside the specified range. If Length is 0, then no data cache lines are
+  invalidated. Address is returned. This function must be used with care
+  because dirty cache lines are not written back to system memory. It is
+  typically used for cache diagnostics. If the CPU does not support
+  invalidation of a data cache range, then a write back and invalidate
+  operation should be performed on the data cache range.
+
+  If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().
+
+  @param Address The base address of the data cache lines to invalidate. If
+                 the CPU is in a physical addressing mode, then Address is a
+                 physical address. If the CPU is in a virtual addressing mode,
+                 then Address is a virtual address.
+  @param Length The number of bytes to invalidate from the data cache.
+
+  @return Address.

-  Writes back and invalidates the entire data cache in cache coherency domain
-  of the calling CPU. This function guarantees that all dirty cache lines are
-  written back to system memory, and also invalidates all the data cache lines
-  in the cache coherency domain of the calling CPU.
+**/
+VOID *
+EFIAPI
+InvalidateDataCacheRange (
+  IN VOID  *Address,
+  IN UINTN Length
+  )
+{
+  //RV does not support $D specific operation.
+  CacheOpCacheRange(Address, Length, invd);
+  return Address;
+}
+/**
+  RV does not support whole cache flush functionality.
+  There may be a platform-level implementation which is
+  outside the scope of this implementation.
 **/
+
 VOID
 EFIAPI
 WriteBackInvalidateDataCache (
@@ -132,20 +251,17 @@ WriteBackInvalidateDataCacheRange (
   IN UINTN Length
   )
 {
-  DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __FUNCTION__));
+  //Note: For this to work
+  CacheOpCacheRange(Address, Length, flsh);
   return Address;
 }

 /**
-  Writes back the entire data cache in cache coherency domain of the calling
-  CPU.
-
-  Writes back the entire data cache in cache coherency domain of the calling
-  CPU. This function guarantees that all dirty cache lines are written back to
-  system memory.
This function may also invalidate all the data cache lines in
-  the cache coherency domain of the calling CPU.
-
+  RV does not support whole cache write-back functionality.
+  There may be a platform-level implementation which is
+  outside the scope of this implementation.
 **/
+
 VOID
 EFIAPI
 WriteBackDataCache (
@@ -159,15 +275,12 @@ WriteBackDataCache (
   Writes back a range of data cache lines in the cache coherency domain of the
   calling CPU.

-  Writes back the data cache lines specified by Address and Length. If Address
-  is not aligned on a cache line boundary, then entire data cache line
-  containing Address is written back. If Address + Length is not aligned on a
-  cache line boundary, then the entire data cache line containing Address +
-  Length -1 is written back. This function may choose to write back the entire
-  data cache if that is more efficient than writing back the specified range.
-  If Length is 0, then no data cache lines are written back. This function may
-  also invalidate all the data cache lines in the specified range of the cache
-  coherency domain of the calling CPU. Address is returned.
+  Called the clean operation in the RV spec, it makes data from store operations
+  performed by the set of coherent agents visible to a set
+  of non-coherent agents at a point common to both sets by performing a write
+  transfer of a copy of a cache block to that point, provided a coherent
+  agent performed a store operation that modified the data in the cache block
+  since the previous invalidate, clean, or flush operation on the cache block.

   If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().

@@ -187,64 +300,7 @@ WriteBackDataCacheRange (
   IN UINTN Length
   )
 {
-  DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __FUNCTION__));
+  CacheOpCacheRange(Address, Length, cln);
   return Address;
 }

-/**
-  Invalidates the entire data cache in cache coherency domain of the calling
-  CPU.
-
-  Invalidates the entire data cache in cache coherency domain of the calling
-  CPU. This function must be used with care because dirty cache lines are not
-  written back to system memory. It is typically used for cache diagnostics. If
-  the CPU does not support invalidation of the entire data cache, then a write
-  back and invalidate operation should be performed on the entire data cache.
-
-**/
-VOID
-EFIAPI
-InvalidateDataCache (
-  VOID
-  )
-{
-  RiscVInvalidateDataCacheAsm ();
-}
-
-/**
-  Invalidates a range of data cache lines in the cache coherency domain of the
-  calling CPU.
-
-  Invalidates the data cache lines specified by Address and Length. If Address
-  is not aligned on a cache line boundary, then entire data cache line
-  containing Address is invalidated. If Address + Length is not aligned on a
-  cache line boundary, then the entire data cache line containing Address +
-  Length -1 is invalidated. This function must never invalidate any cache lines
-  outside the specified range. If Length is 0, then no data cache lines are
-  invalidated. Address is returned. This function must be used with care
-  because dirty cache lines are not written back to system memory. It is
-  typically used for cache diagnostics. If the CPU does not support
-  invalidation of a data cache range, then a write back and invalidate
-  operation should be performed on the data cache range.
-
-  If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().
-
-  @param Address The base address of the data cache lines to invalidate. If
-                 the CPU is in a physical addressing mode, then Address is a
-                 physical address.
If the CPU is in a virtual addressing mode,
-                 then Address is a virtual address.
-  @param Length The number of bytes to invalidate from the data cache.
-
-  @return Address.
-
-**/
-VOID *
-EFIAPI
-InvalidateDataCacheRange (
-  IN VOID  *Address,
-  IN UINTN Length
-  )
-{
-  DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __FUNCTION__));
-  return Address;
-}

From f6cfd5ec695f3a45943a3338ac7c57534ceb45de Mon Sep 17 00:00:00 2001
From: Dhaval Sharma
Date: Wed, 26 Oct 2022 21:58:58 +0530
Subject: [PATCH 3/5] MdePkg/BaseCacheMaintenanceLib: Fix Patch Check Issues

Fix minor patch check issues.

Cc: Sunil V L
Signed-off-by: Dhaval
---
 MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c b/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
index 81bf77d875d4..d6ec0c1200f9 100644
--- a/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
+++ b/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
@@ -20,9 +20,7 @@
 #define __stringify_1(x...) #x
 #define __stringify(x...) __stringify_1(x)

-//asm volatile("nop")
-
-#define CMO_OP(_op, _start) \
+#define CMO_OP(_op, _start)\
 asm volatile("cbo." __stringify(_op) " (%0)" :: "r" (Start))

 typedef enum{
   cln,
   flsh,
   invd,
 }CACHE_OP;
@@ -251,7 +249,6 @@ WriteBackInvalidateDataCacheRange (
   IN UINTN Length
   )
 {
-  //Note: For this to work
   CacheOpCacheRange(Address, Length, flsh);
   return Address;
 }

From c7c7cb1c92a809331661975102a4f2a713733915 Mon Sep 17 00:00:00 2001
From: Dhaval Sharma
Date: Thu, 10 Nov 2022 23:30:20 +0530
Subject: [PATCH 4/5] MdePkg/BaseCacheMaintenanceLib: Add CMO support

Implement CMO according to the RISC-V Zicbom extension specification.

Cc: Sunil V L
Signed-off-by: Dhaval
---
 .../BaseCacheMaintenanceLib/RiscVCache.c    | 32 +++++---
 MdePkg/Library/BaseLib/BaseLib.inf          |  3 +-
 MdePkg/Library/BaseLib/RiscV64/CpuCache.c   | 74 +++++++++++++++++++
 .../Library/BaseLib/RiscV64/RiscVCpuCache.S | 24 ++++++
 4 files changed, 123 insertions(+), 10 deletions(-)
 create mode 100644 MdePkg/Library/BaseLib/RiscV64/CpuCache.c
 create mode 100644 MdePkg/Library/BaseLib/RiscV64/RiscVCpuCache.S

diff --git a/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c b/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
index d6ec0c1200f9..2cbd6c12f74d 100644
--- a/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
+++ b/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
@@ -17,18 +17,32 @@
 **/
 #define RV64_CACHE_BLOCK_SIZE 64

-#define __stringify_1(x...) #x
-#define __stringify(x...) __stringify_1(x)
-
-#define CMO_OP(_op, _start)\
-asm volatile("cbo." __stringify(_op) " (%0)" :: "r" (Start))
-
 typedef enum{
   cln,
   flsh,
   invd,
 }CACHE_OP;

+/* Ideally we should do this through BaseLib.h by adding
+   Asm*CacheLine functions. This can be done after the initial
+   RV refactoring is complete. For now, call the functions directly.
+*/
+VOID
+EFIAPI RiscVCpuCacheFlush (
+  UINTN
+  );
+
+VOID
+EFIAPI RiscVCpuCacheClean (
+  UINTN
+  );
+
+VOID
+EFIAPI RiscVCpuCacheInval (
+  UINTN
+  );
+
+
 /**
   Performs the requested operation on cache lines in the cache coherency domain
   of the calling CPU.
If Address is not aligned on a cache line boundary,
@@ -82,13 +96,13 @@ CacheOpCacheRange (
   do {
     switch (op) {
       case invd:
-        CMO_OP(inval, Start);
+        RiscVCpuCacheInval(Start);
         break;
       case flsh:
-        CMO_OP(flush, Start);
+        RiscVCpuCacheFlush(Start);
         break;
       case cln:
-        CMO_OP(clean, Start);
+        RiscVCpuCacheClean(Start);
         break;
       default:
         DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported operation\n", __FUNCTION__));
         break;
diff --git a/MdePkg/Library/BaseLib/BaseLib.inf b/MdePkg/Library/BaseLib/BaseLib.inf
index babbee1ca08b..a22f0345f439 100644
--- a/MdePkg/Library/BaseLib/BaseLib.inf
+++ b/MdePkg/Library/BaseLib/BaseLib.inf
@@ -395,12 +395,13 @@
   RiscV64/DisableInterrupts.c
   RiscV64/EnableInterrupts.c
   RiscV64/CpuPause.c
+  RiscV64/CpuCache.c
   RiscV64/MemoryFence.S | GCC
   RiscV64/RiscVSetJumpLongJump.S | GCC
   RiscV64/RiscVCpuBreakpoint.S | GCC
   RiscV64/RiscVCpuPause.S | GCC
   RiscV64/RiscVInterrupt.S | GCC
-  RiscV64/FlushCache.S | GCC
+  RiscV64/RiscVCpuCache.S | GCC
   RiscV64/CpuScratch.S | GCC
   RiscV64/ReadTimer.S | GCC

diff --git a/MdePkg/Library/BaseLib/RiscV64/CpuCache.c b/MdePkg/Library/BaseLib/RiscV64/CpuCache.c
new file mode 100644
index 000000000000..8f1be619a2a5
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/CpuCache.c
@@ -0,0 +1,74 @@
+/** @file
+  CPU pause for RISC-V
+
+  Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.
+ Copyright (c) 2022, Rivos Inc. All rights reserved.
+
+
+  SPDX-License-Identifier: BSD-2-Clause-Patent
+**/
+
+#include "BaseLibInternals.h"
+
+extern VOID
+RiscVCpuCacheFlush (
+  UINTN Address
+  );
+
+/**
+  Requests CPU to pause for a short period of time.
+
+  Requests CPU to pause for a short period of time. Typically used in MP
+  systems to prevent memory starvation while waiting for a spin lock.
+
+**/
+VOID
+EFIAPI
+CpuCacheFlush (
+  UINTN Address
+  )
+{
+  RiscVCpuCacheFlush (Address);
+}
+
+extern VOID
+RiscVCpuCacheClean (
+  UINTN Address
+  );
+
+/**
+  Requests CPU to pause for a short period of time.
+
+  Requests CPU to pause for a short period of time. Typically used in MP
+  systems to prevent memory starvation while waiting for a spin lock.
+
+**/
+VOID
+EFIAPI
+CpuCacheClean (
+  UINTN Address
+  )
+{
+  RiscVCpuCacheClean (Address);
+}
+
+extern VOID
+RiscVCpuCacheInval (
+  UINTN Address
+  );
+
+/**
+  Requests CPU to pause for a short period of time.
+
+  Requests CPU to pause for a short period of time. Typically used in MP
+  systems to prevent memory starvation while waiting for a spin lock.
+
+**/
+VOID
+EFIAPI
+CpuCacheInvd (
+  UINTN Address
+  )
+{
+  RiscVCpuCacheInval (Address);
+}
diff --git a/MdePkg/Library/BaseLib/RiscV64/RiscVCpuCache.S b/MdePkg/Library/BaseLib/RiscV64/RiscVCpuCache.S
new file mode 100644
index 000000000000..661b371e52a8
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/RiscVCpuCache.S
@@ -0,0 +1,24 @@
+//------------------------------------------------------------------------------
+//
+// Cache maintenance operations for RISC-V
+//
+// Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.
+// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// +//------------------------------------------------------------------------------ + +ASM_GLOBAL ASM_PFX(RiscVCpuCacheFlush) +ASM_PFX(RiscVCpuCacheFlush): + cbo.flush (a0) + ret + +ASM_GLOBAL ASM_PFX(RiscVCpuCacheClean) +ASM_PFX(RiscVCpuCacheClean): + cbo.clean (a0) + ret + +ASM_GLOBAL ASM_PFX(RiscVCpuCacheInval) +ASM_PFX(RiscVCpuCacheInval): + cbo.inval (a0) + ret From 123bab4ec86222a1550e3f1a7e3644e79aa0c97d Mon Sep 17 00:00:00 2001 From: Dhaval Sharma Date: Thu, 10 Nov 2022 23:41:26 +0530 Subject: [PATCH 5/5] MdePkg: Fix comments for functions Fix comments for CMO functions Cc: Sunil V L Signed-off-by: Dhaval --- MdePkg/Library/BaseLib/RiscV64/CpuCache.c | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/MdePkg/Library/BaseLib/RiscV64/CpuCache.c b/MdePkg/Library/BaseLib/RiscV64/CpuCache.c index 8f1be619a2a5..3c4e81440d06 100644 --- a/MdePkg/Library/BaseLib/RiscV64/CpuCache.c +++ b/MdePkg/Library/BaseLib/RiscV64/CpuCache.c @@ -1,5 +1,5 @@ /** @file - CPU pause for RISC-V + CPU Cache Operations for RISC-V Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.
Copyright (c) 2022, Rivos Inc. All rights reserved.
@@ -16,11 +16,7 @@ RiscVCpuCacheFlush ( ); /** - Requests CPU to pause for a short period of time. - - Requests CPU to pause for a short period of time. Typically used in MP - systems to prevent memory starvation while waiting for a spin lock. - + Flush CPU Cacheline as per RV Zicbo spec **/ VOID EFIAPI @@ -37,11 +33,7 @@ RiscVCpuCacheClean ( ); /** - Requests CPU to pause for a short period of time. - - Requests CPU to pause for a short period of time. Typically used in MP - systems to prevent memory starvation while waiting for a spin lock. - + Clean CPU Cacheline as per RV Zicbo spec **/ VOID EFIAPI @@ -58,11 +50,7 @@ RiscVCpuCacheInvd ( ); /** - Requests CPU to pause for a short period of time. - - Requests CPU to pause for a short period of time. Typically used in MP - systems to prevent memory starvation while waiting for a spin lock. - + Invd CPU Cacheline as per RV Zicbo spec **/ VOID EFIAPI