diff --git a/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c b/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
index 67a3387ff3c6..2cbd6c12f74d 100644
--- a/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
+++ b/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
@@ -2,6 +2,7 @@
RISC-V specific functionality for cache.
Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.
+ Copyright (c) 2022, Rivos Inc. All rights reserved.
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
@@ -11,51 +12,130 @@
#include
/**
- RISC-V invalidate instruction cache.
-
+ Use a runtime discovery mechanism in the future when available
+ through https://lists.riscv.org/g/tech-privileged/topic/83853282
**/
+#define RV64_CACHE_BLOCK_SIZE 64
+
+typedef enum{
+ cln,
+ flsh,
+ invd,
+}CACHE_OP;
+
+/* Ideally we should do this through BaseLib.h by adding
+ Asm*CacheLine functions. This can be done after Initial
+ RV refactoring is complete. For now call functions directly
+*/
VOID
-EFIAPI
-RiscVInvalidateInstCacheAsm (
- VOID
+EFIAPI RiscVCpuCacheFlush (
+ UINTN
);
-/**
- RISC-V invalidate data cache.
+VOID
+EFIAPI RiscVCpuCacheClean (
+ UINTN
+ );
-**/
VOID
-EFIAPI
-RiscVInvalidateDataCacheAsm (
- VOID
+EFIAPI RiscVCpuCacheInval (
+ UINTN
);
+
/**
- Invalidates the entire instruction cache in cache coherency domain of the
- calling CPU.
+ Performs the required operation on the cache lines in the cache coherency
+ domain of the calling CPU. If Address is not aligned on a cache line
+ boundary, then the entire cache line containing Address is operated on.
+ If Address + Length is not aligned on a cache line boundary, then the
+ entire cache line containing Address + Length - 1 is operated on.
+
+ If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().
+
+ @param Address The base address of the cache lines to
+ invalidate. If the CPU is in a physical addressing mode, then
+ Address is a physical address. If the CPU is in a virtual
+ addressing mode, then Address is a virtual address.
+
+ @param Length The number of bytes to invalidate from the instruction cache.
+
+ @return Address.
+
+**/
+
+VOID *
+EFIAPI
+CacheOpCacheRange (
+  IN VOID      *Address,
+  IN UINTN     Length,
+  IN CACHE_OP  op
+  )
+{
+  UINTN  CacheLineSize;
+  UINTN  Start;
+  UINTN  End;
+
+  if (Length == 0) {
+    return Address;
+  }
+
+  ASSERT ((Length - 1) <= (MAX_ADDRESS - (UINTN)Address));
+
+  //
+  // Use the fixed RV64 cache block size until a runtime discovery
+  // mechanism (FDT or a feature CSR) is available.
+  //
+  CacheLineSize = RV64_CACHE_BLOCK_SIZE;
+
+  Start = (UINTN)Address;
+  //
+  // Round End up and Start down to cache line boundaries so that every
+  // cache line touched by [Address, Address + Length) is covered.
+  //
+  End   = (Start + Length + (CacheLineSize - 1)) & ~(CacheLineSize - 1);
+  Start &= ~((UINTN)CacheLineSize - 1);
+
+  do {
+    switch (op) {
+      case invd:
+        RiscVCpuCacheInval (Start);
+        break;
+      case flsh:
+        RiscVCpuCacheFlush (Start);
+        break;
+      case cln:
+        RiscVCpuCacheClean (Start);
+        break;
+      default:
+        DEBUG ((DEBUG_ERROR, "%a: RISC-V unsupported operation\n", __FUNCTION__));
+        break;
+    }
+
+    Start = Start + CacheLineSize;
+  } while (Start != End);
+
+  return Address;
+}
+/**
+ RV does not support whole-cache invalidate functionality.
+ There may be a platform-level implementation, which is
+ outside the scope of this implementation.
**/
+
VOID
EFIAPI
InvalidateInstructionCache (
VOID
)
{
- RiscVInvalidateInstCacheAsm ();
+ DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __FUNCTION__));
}
+
/**
- Invalidates a range of instruction cache lines in the cache coherency domain
- of the calling CPU.
-
- Invalidates the instruction cache lines specified by Address and Length. If
- Address is not aligned on a cache line boundary, then entire instruction
- cache line containing Address is invalidated. If Address + Length is not
- aligned on a cache line boundary, then the entire instruction cache line
- containing Address + Length -1 is invalidated. This function may choose to
- invalidate the entire instruction cache if that is more efficient than
- invalidating the specified range. If Length is 0, then no instruction cache
- lines are invalidated. Address is returned.
+ An invalidate operation makes data from store operations performed by a
+ set of non-coherent agents visible to the set of coherent agents at a
+ point common to both sets by deallocating all copies of a cache block
+ from the set of coherent caches up to that point.
If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().
@@ -76,20 +156,71 @@ InvalidateInstructionCacheRange (
IN UINTN Length
)
{
- DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __FUNCTION__));
+  // RISC-V has no instruction-cache-specific CMO; use the generic invalidate.
+ CacheOpCacheRange(Address, Length, invd);
return Address;
}
/**
- Writes back and invalidates the entire data cache in cache coherency domain
- of the calling CPU.
+ RV does not support whole-cache invalidate functionality.
+ There may be a platform-level implementation, which is
+ outside the scope of this implementation.
+**/
+
+VOID
+EFIAPI
+InvalidateDataCache (
+ VOID
+ )
+{
+ DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __FUNCTION__));
+}
- Writes back and invalidates the entire data cache in cache coherency domain
- of the calling CPU. This function guarantees that all dirty cache lines are
- written back to system memory, and also invalidates all the data cache lines
- in the cache coherency domain of the calling CPU.
+/**
+ Invalidates a range of data cache lines in the cache coherency domain of the
+ calling CPU.
+ Invalidates the data cache lines specified by Address and Length. If Address
+ is not aligned on a cache line boundary, then entire data cache line
+ containing Address is invalidated. If Address + Length is not aligned on a
+ cache line boundary, then the entire data cache line containing Address +
+ Length -1 is invalidated. This function must never invalidate any cache lines
+ outside the specified range. If Length is 0, then no data cache lines are
+ invalidated. Address is returned. This function must be used with care
+ because dirty cache lines are not written back to system memory. It is
+ typically used for cache diagnostics. If the CPU does not support
+ invalidation of a data cache range, then a write back and invalidate
+ operation should be performed on the data cache range.
+
+ If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().
+
+ @param Address The base address of the data cache lines to invalidate. If
+ the CPU is in a physical addressing mode, then Address is a
+ physical address. If the CPU is in a virtual addressing mode,
+ then Address is a virtual address.
+ @param Length The number of bytes to invalidate from the data cache.
+
+ @return Address.
+
+**/
+VOID *
+EFIAPI
+InvalidateDataCacheRange (
+ IN VOID *Address,
+ IN UINTN Length
+ )
+{
+  // RISC-V has no data-cache-specific CMO; use the generic invalidate.
+ CacheOpCacheRange(Address, Length, invd);
+ return Address;
+}
+
+/**
+ RV does not support whole-cache invalidate functionality.
+ There may be a platform-level implementation, which is
+ outside the scope of this implementation.
**/
+
VOID
EFIAPI
WriteBackInvalidateDataCache (
@@ -132,20 +263,16 @@ WriteBackInvalidateDataCacheRange (
IN UINTN Length
)
{
- DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __FUNCTION__));
+ CacheOpCacheRange(Address, Length, flsh);
return Address;
}
/**
- Writes back the entire data cache in cache coherency domain of the calling
- CPU.
-
- Writes back the entire data cache in cache coherency domain of the calling
- CPU. This function guarantees that all dirty cache lines are written back to
- system memory. This function may also invalidate all the data cache lines in
- the cache coherency domain of the calling CPU.
-
+ RV does not support whole-cache writeback functionality.
+ There may be a platform-level implementation, which is
+ outside the scope of this implementation.
**/
+
VOID
EFIAPI
WriteBackDataCache (
@@ -159,15 +286,12 @@ WriteBackDataCache (
Writes back a range of data cache lines in the cache coherency domain of the
calling CPU.
- Writes back the data cache lines specified by Address and Length. If Address
- is not aligned on a cache line boundary, then entire data cache line
- containing Address is written back. If Address + Length is not aligned on a
- cache line boundary, then the entire data cache line containing Address +
- Length -1 is written back. This function may choose to write back the entire
- data cache if that is more efficient than writing back the specified range.
- If Length is 0, then no data cache lines are written back. This function may
- also invalidate all the data cache lines in the specified range of the cache
- coherency domain of the calling CPU. Address is returned.
+ Called the clean operation in the RV spec, it makes data from store
+ operations performed by the set of coherent agents visible to a set
+ of non-coherent agents at a point common to both sets by performing a write
+ transfer of a copy of a cache block to that point, provided a coherent
+ agent performed a store operation that modified the data in the cache block
+ since the previous invalidate, clean, or flush operation on the cache block.
If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().
@@ -187,64 +311,7 @@ WriteBackDataCacheRange (
IN UINTN Length
)
{
- DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __FUNCTION__));
+ CacheOpCacheRange(Address, Length, cln);
return Address;
}
-/**
- Invalidates the entire data cache in cache coherency domain of the calling
- CPU.
-
- Invalidates the entire data cache in cache coherency domain of the calling
- CPU. This function must be used with care because dirty cache lines are not
- written back to system memory. It is typically used for cache diagnostics. If
- the CPU does not support invalidation of the entire data cache, then a write
- back and invalidate operation should be performed on the entire data cache.
-
-**/
-VOID
-EFIAPI
-InvalidateDataCache (
- VOID
- )
-{
- RiscVInvalidateDataCacheAsm ();
-}
-
-/**
- Invalidates a range of data cache lines in the cache coherency domain of the
- calling CPU.
-
- Invalidates the data cache lines specified by Address and Length. If Address
- is not aligned on a cache line boundary, then entire data cache line
- containing Address is invalidated. If Address + Length is not aligned on a
- cache line boundary, then the entire data cache line containing Address +
- Length -1 is invalidated. This function must never invalidate any cache lines
- outside the specified range. If Length is 0, then no data cache lines are
- invalidated. Address is returned. This function must be used with care
- because dirty cache lines are not written back to system memory. It is
- typically used for cache diagnostics. If the CPU does not support
- invalidation of a data cache range, then a write back and invalidate
- operation should be performed on the data cache range.
-
- If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().
-
- @param Address The base address of the data cache lines to invalidate. If
- the CPU is in a physical addressing mode, then Address is a
- physical address. If the CPU is in a virtual addressing mode,
- then Address is a virtual address.
- @param Length The number of bytes to invalidate from the data cache.
-
- @return Address.
-
-**/
-VOID *
-EFIAPI
-InvalidateDataCacheRange (
- IN VOID *Address,
- IN UINTN Length
- )
-{
- DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __FUNCTION__));
- return Address;
-}
diff --git a/MdePkg/Library/BaseLib/BaseLib.inf b/MdePkg/Library/BaseLib/BaseLib.inf
index babbee1ca08b..a22f0345f439 100644
--- a/MdePkg/Library/BaseLib/BaseLib.inf
+++ b/MdePkg/Library/BaseLib/BaseLib.inf
@@ -395,12 +395,13 @@
RiscV64/DisableInterrupts.c
RiscV64/EnableInterrupts.c
RiscV64/CpuPause.c
+ RiscV64/CpuCache.c
RiscV64/MemoryFence.S | GCC
RiscV64/RiscVSetJumpLongJump.S | GCC
RiscV64/RiscVCpuBreakpoint.S | GCC
RiscV64/RiscVCpuPause.S | GCC
RiscV64/RiscVInterrupt.S | GCC
- RiscV64/FlushCache.S | GCC
+ RiscV64/RiscVCpuCache.S | GCC
RiscV64/CpuScratch.S | GCC
RiscV64/ReadTimer.S | GCC
diff --git a/MdePkg/Library/BaseLib/RiscV64/CpuCache.c b/MdePkg/Library/BaseLib/RiscV64/CpuCache.c
new file mode 100644
index 000000000000..3c4e81440d06
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/CpuCache.c
@@ -0,0 +1,62 @@
+/** @file
+ CPU Cache Operations for RISC-V
+
+ Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.
+ Copyright (c) 2022, Rivos Inc. All rights reserved.
+
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+**/
+
+#include "BaseLibInternals.h"
+
+extern VOID
+RiscVCpuCacheFlush (
+  UINTN  Address
+  );
+
+/**
+  Flush (writeback and invalidate) the cache line containing Address,
+  per the RISC-V Zicbom extension (cbo.flush).
+**/
+VOID
+EFIAPI
+CpuCacheFlush (
+  UINTN  Address
+  )
+{
+  RiscVCpuCacheFlush (Address);
+}
+
+extern VOID
+RiscVCpuCacheClean (
+  UINTN  Address
+  );
+
+/**
+  Clean (writeback) the cache line containing Address,
+  per the RISC-V Zicbom extension (cbo.clean).
+**/
+VOID
+EFIAPI
+CpuCacheClean (
+  UINTN  Address
+  )
+{
+  RiscVCpuCacheClean (Address);
+}
+
+extern VOID
+RiscVCpuCacheInval (
+  UINTN  Address
+  );
+
+/**
+  Invalidate the cache line containing Address,
+  per the RISC-V Zicbom extension (cbo.inval).
+**/
+VOID
+EFIAPI
+CpuCacheInvd (
+  UINTN  Address
+  )
+{
+  RiscVCpuCacheInval (Address);
+}
diff --git a/MdePkg/Library/BaseLib/RiscV64/RiscVCpuCache.S b/MdePkg/Library/BaseLib/RiscV64/RiscVCpuCache.S
new file mode 100644
index 000000000000..661b371e52a8
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/RiscVCpuCache.S
@@ -0,0 +1,24 @@
+//------------------------------------------------------------------------------
+//
+// RISC-V CPU cache operations (Zicbom cbo.* instructions)
+//
+// Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+//
+//------------------------------------------------------------------------------
+
+ASM_GLOBAL ASM_PFX(RiscVCpuCacheFlush)
+ASM_PFX(RiscVCpuCacheFlush):
+ cbo.flush (a0)
+ ret
+
+ASM_GLOBAL ASM_PFX(RiscVCpuCacheClean)
+ASM_PFX(RiscVCpuCacheClean):
+ cbo.clean (a0)
+ ret
+
+ASM_GLOBAL ASM_PFX(RiscVCpuCacheInval)
+ASM_PFX(RiscVCpuCacheInval):
+ cbo.inval (a0)
+ ret
diff --git a/OvmfPkg/Platforms/RiscVVirt/RiscVVirt.dsc b/OvmfPkg/Platforms/RiscVVirt/RiscVVirt.dsc
index 33c945f57624..b90317fdd3b7 100644
--- a/OvmfPkg/Platforms/RiscVVirt/RiscVVirt.dsc
+++ b/OvmfPkg/Platforms/RiscVVirt/RiscVVirt.dsc
@@ -44,6 +44,13 @@
DEFINE NETWORK_ISCSI_ENABLE = FALSE
DEFINE NETWORK_ALLOW_HTTP_CONNECTIONS = TRUE
+ #
+ # CMO support for RISC-V depends on toolchain support: for GCC,
+ # Binutils 2.39 or later is required. This could become runtime
+ # detection later using the FDT or a feature CSR.
+ #
+ DEFINE RV_CMO_FEATURE_AVAILABLE = FALSE
+
[BuildOptions]
GCC:RELEASE_*_*_CC_FLAGS = -DMDEPKG_NDEBUG
!ifdef $(SOURCE_DEBUG_ENABLE)
@@ -80,7 +87,11 @@
CpuLib|MdePkg/Library/BaseCpuLib/BaseCpuLib.inf
PerformanceLib|MdePkg/Library/BasePerformanceLibNull/BasePerformanceLibNull.inf
PeCoffLib|MdePkg/Library/BasePeCoffLib/BasePeCoffLib.inf
+!if $(RV_CMO_FEATURE_AVAILABLE) == FALSE
+ CacheMaintenanceLib|MdePkg/Library/BaseCacheMaintenanceLibNull/BaseCacheMaintenanceLibNull.inf
+!else
CacheMaintenanceLib|MdePkg/Library/BaseCacheMaintenanceLib/BaseCacheMaintenanceLib.inf
+!endif
UefiDecompressLib|MdePkg/Library/BaseUefiDecompressLib/BaseUefiDecompressLib.inf
UefiHiiServicesLib|MdeModulePkg/Library/UefiHiiServicesLib/UefiHiiServicesLib.inf
HiiLib|MdeModulePkg/Library/UefiHiiLib/UefiHiiLib.inf