From: AbnerChang <[email protected]>

 Add the MdePkg library instances for RISC-V (RISCV64): BaseCacheMaintenanceLib,
 BaseCpuLib, BaseIoLibIntrinsic, BaseLib, BasePeCoffLib, and
 BaseSynchronizationLib.
  
 Contributed-under: TianoCore Contribution Agreement 1.0
 
 Signed-off-by: Abner Chang <[email protected]>

---
 .../BaseCacheMaintenanceLib.inf                    |   4 +
 .../Library/BaseCacheMaintenanceLib/RiscVCache.c   | 228 +++++++++++
 MdePkg/Library/BaseCpuLib/BaseCpuLib.inf           |   4 +
 MdePkg/Library/BaseCpuLib/RiscV/Cpu.s              |  25 ++
 .../BaseIoLibIntrinsic/BaseIoLibIntrinsic.inf      |   8 +-
 MdePkg/Library/BaseIoLibIntrinsic/IoLibRiscV.c     | 418 +++++++++++++++++++++
 MdePkg/Library/BaseLib/BaseLib.inf                 |  14 +
 MdePkg/Library/BaseLib/RiscV64/CpuBreakpoint.c     |  33 ++
 MdePkg/Library/BaseLib/RiscV64/CpuPause.c          |  35 ++
 MdePkg/Library/BaseLib/RiscV64/DisableInterrupts.c |  30 ++
 MdePkg/Library/BaseLib/RiscV64/EnableInterrupts.c  |  31 ++
 MdePkg/Library/BaseLib/RiscV64/GetInterruptState.c |  42 +++
 .../Library/BaseLib/RiscV64/InternalSwitchStack.c  |  61 +++
 MdePkg/Library/BaseLib/RiscV64/LongJump.c          |  38 ++
 .../Library/BaseLib/RiscV64/RiscVCpuBreakpoint.S   |  20 +
 MdePkg/Library/BaseLib/RiscV64/RiscVCpuPause.S     |  20 +
 MdePkg/Library/BaseLib/RiscV64/RiscVInterrupt.S    |  33 ++
 .../Library/BaseLib/RiscV64/RiscVSetJumpLongJump.S |  63 ++++
 MdePkg/Library/BaseLib/RiscV64/Unaligned.c         | 270 +++++++++++++
 MdePkg/Library/BasePeCoffLib/BasePeCoff.c          |   3 +-
 MdePkg/Library/BasePeCoffLib/BasePeCoffLib.inf     |   5 +
 MdePkg/Library/BasePeCoffLib/BasePeCoffLib.uni     |   2 +
 .../Library/BasePeCoffLib/BasePeCoffLibInternals.h |   1 +
 .../Library/BasePeCoffLib/RiscV/PeCoffLoaderEx.c   | 161 ++++++++
 .../BaseSynchronizationLib.inf                     |   5 +
 .../RiscV64/Synchronization.c                      | 147 ++++++++
 26 files changed, 1698 insertions(+), 3 deletions(-)
 create mode 100644 MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
 create mode 100644 MdePkg/Library/BaseCpuLib/RiscV/Cpu.s
 create mode 100644 MdePkg/Library/BaseIoLibIntrinsic/IoLibRiscV.c
 create mode 100644 MdePkg/Library/BaseLib/RiscV64/CpuBreakpoint.c
 create mode 100644 MdePkg/Library/BaseLib/RiscV64/CpuPause.c
 create mode 100644 MdePkg/Library/BaseLib/RiscV64/DisableInterrupts.c
 create mode 100644 MdePkg/Library/BaseLib/RiscV64/EnableInterrupts.c
 create mode 100644 MdePkg/Library/BaseLib/RiscV64/GetInterruptState.c
 create mode 100644 MdePkg/Library/BaseLib/RiscV64/InternalSwitchStack.c
 create mode 100644 MdePkg/Library/BaseLib/RiscV64/LongJump.c
 create mode 100644 MdePkg/Library/BaseLib/RiscV64/RiscVCpuBreakpoint.S
 create mode 100644 MdePkg/Library/BaseLib/RiscV64/RiscVCpuPause.S
 create mode 100644 MdePkg/Library/BaseLib/RiscV64/RiscVInterrupt.S
 create mode 100644 MdePkg/Library/BaseLib/RiscV64/RiscVSetJumpLongJump.S
 create mode 100644 MdePkg/Library/BaseLib/RiscV64/Unaligned.c
 create mode 100644 MdePkg/Library/BasePeCoffLib/RiscV/PeCoffLoaderEx.c
 create mode 100644 MdePkg/Library/BaseSynchronizationLib/RiscV64/Synchronization.c

diff --git a/MdePkg/Library/BaseCacheMaintenanceLib/BaseCacheMaintenanceLib.inf 
b/MdePkg/Library/BaseCacheMaintenanceLib/BaseCacheMaintenanceLib.inf
index d659161..dcb6043 100644
--- a/MdePkg/Library/BaseCacheMaintenanceLib/BaseCacheMaintenanceLib.inf
+++ b/MdePkg/Library/BaseCacheMaintenanceLib/BaseCacheMaintenanceLib.inf
@@ -6,6 +6,7 @@
 #
 #  Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR>
 #  Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+#  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
 #
 #  This program and the accompanying materials
 #  are licensed and made available under the terms and conditions of the BSD 
License
@@ -49,6 +50,9 @@
 [Sources.AARCH64]
   ArmCache.c
 
+[Sources.RISCV64]
+  RiscVCache.c
+
 [Packages]
   MdePkg/MdePkg.dec
 
diff --git a/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c 
b/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
new file mode 100644
index 0000000..27b4c35
--- /dev/null
+++ b/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
@@ -0,0 +1,228 @@
+/** @file
+  RISC-V specific functionality for cache.
+
+  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+
+  This program and the accompanying materials
+  are licensed and made available under the terms and conditions of the BSD 
License
+  which accompanies this distribution. The full text of the license may be 
found at
+  http://opensource.org/licenses/bsd-license.php
+
+  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+**/
+
+#include <Base.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+
+/**
+  Invalidates the entire instruction cache in cache coherency domain of the
+  calling CPU.
+
+**/
+VOID
+EFIAPI
+InvalidateInstructionCache (
+  VOID
+  )
+{
+}
+
+/**
+  Invalidates a range of instruction cache lines in the cache coherency domain
+  of the calling CPU.
+
+  Invalidates the instruction cache lines specified by Address and Length. If
+  Address is not aligned on a cache line boundary, then entire instruction
+  cache line containing Address is invalidated. If Address + Length is not
+  aligned on a cache line boundary, then the entire instruction cache line
+  containing Address + Length -1 is invalidated. This function may choose to
+  invalidate the entire instruction cache if that is more efficient than
+  invalidating the specified range. If Length is 0, then no instruction cache
+  lines are invalidated. Address is returned.
+
+  If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().
+
+  @param  Address The base address of the instruction cache lines to
+                  invalidate. If the CPU is in a physical addressing mode, then
+                  Address is a physical address. If the CPU is in a virtual
+                  addressing mode, then Address is a virtual address.
+
+  @param  Length  The number of bytes to invalidate from the instruction cache.
+
+  @return Address.
+
+**/
+VOID *
+EFIAPI
+InvalidateInstructionCacheRange (
+  IN      VOID                      *Address,
+  IN      UINTN                     Length
+  )
+{
+  return Address;
+}
+
+/**
+  Writes back and invalidates the entire data cache in cache coherency domain
+  of the calling CPU.
+
+  Writes back and invalidates the entire data cache in cache coherency domain
+  of the calling CPU. This function guarantees that all dirty cache lines are
+  written back to system memory, and also invalidates all the data cache lines
+  in the cache coherency domain of the calling CPU.
+
+**/
+VOID
+EFIAPI
+WriteBackInvalidateDataCache (
+  VOID
+  )
+{
+}
+
+/**
+  Writes back and invalidates a range of data cache lines in the cache
+  coherency domain of the calling CPU.
+
+  Writes back and invalidates the data cache lines specified by Address and
+  Length. If Address is not aligned on a cache line boundary, then entire data
+  cache line containing Address is written back and invalidated. If Address +
+  Length is not aligned on a cache line boundary, then the entire data cache
+  line containing Address + Length -1 is written back and invalidated. This
+  function may choose to write back and invalidate the entire data cache if
+  that is more efficient than writing back and invalidating the specified
+  range. If Length is 0, then no data cache lines are written back and
+  invalidated. Address is returned.
+
+  If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().
+
+  @param  Address The base address of the data cache lines to write back and
+                  invalidate. If the CPU is in a physical addressing mode, then
+                  Address is a physical address. If the CPU is in a virtual
+                  addressing mode, then Address is a virtual address.
+  @param  Length  The number of bytes to write back and invalidate from the
+                  data cache.
+
+  @return Address of cache invalidation.
+
+**/
+VOID *
+EFIAPI
+WriteBackInvalidateDataCacheRange (
+  IN      VOID                      *Address,
+  IN      UINTN                     Length
+  )
+{
+  return Address;
+}
+
+/**
+  Writes back the entire data cache in cache coherency domain of the calling
+  CPU.
+
+  Writes back the entire data cache in cache coherency domain of the calling
+  CPU. This function guarantees that all dirty cache lines are written back to
+  system memory. This function may also invalidate all the data cache lines in
+  the cache coherency domain of the calling CPU.
+
+**/
+VOID
+EFIAPI
+WriteBackDataCache (
+  VOID
+  )
+{
+}
+
+/**
+  Writes back a range of data cache lines in the cache coherency domain of the
+  calling CPU.
+
+  Writes back the data cache lines specified by Address and Length. If Address
+  is not aligned on a cache line boundary, then entire data cache line
+  containing Address is written back. If Address + Length is not aligned on a
+  cache line boundary, then the entire data cache line containing Address +
+  Length -1 is written back. This function may choose to write back the entire
+  data cache if that is more efficient than writing back the specified range.
+  If Length is 0, then no data cache lines are written back. This function may
+  also invalidate all the data cache lines in the specified range of the cache
+  coherency domain of the calling CPU. Address is returned.
+
+  If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().
+
+  @param  Address The base address of the data cache lines to write back. If
+                  the CPU is in a physical addressing mode, then Address is a
+                  physical address. If the CPU is in a virtual addressing
+                  mode, then Address is a virtual address.
+  @param  Length  The number of bytes to write back from the data cache.
+
+  @return Address of cache written in main memory.
+
+**/
+VOID *
+EFIAPI
+WriteBackDataCacheRange (
+  IN      VOID                      *Address,
+  IN      UINTN                     Length
+  )
+{
+  return Address;
+}
+
+/**
+  Invalidates the entire data cache in cache coherency domain of the calling
+  CPU.
+
+  Invalidates the entire data cache in cache coherency domain of the calling
+  CPU. This function must be used with care because dirty cache lines are not
+  written back to system memory. It is typically used for cache diagnostics. If
+  the CPU does not support invalidation of the entire data cache, then a write
+  back and invalidate operation should be performed on the entire data cache.
+
+**/
+VOID
+EFIAPI
+InvalidateDataCache (
+  VOID
+  )
+{
+}
+
+/**
+  Invalidates a range of data cache lines in the cache coherency domain of the
+  calling CPU.
+
+  Invalidates the data cache lines specified by Address and Length. If Address
+  is not aligned on a cache line boundary, then entire data cache line
+  containing Address is invalidated. If Address + Length is not aligned on a
+  cache line boundary, then the entire data cache line containing Address +
+  Length -1 is invalidated. This function must never invalidate any cache lines
+  outside the specified range. If Length is 0, then no data cache lines are
+  invalidated. Address is returned. This function must be used with care
+  because dirty cache lines are not written back to system memory. It is
+  typically used for cache diagnostics. If the CPU does not support
+  invalidation of a data cache range, then a write back and invalidate
+  operation should be performed on the data cache range.
+
+  If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().
+
+  @param  Address The base address of the data cache lines to invalidate. If
+                  the CPU is in a physical addressing mode, then Address is a
+                  physical address. If the CPU is in a virtual addressing mode,
+                  then Address is a virtual address.
+  @param  Length  The number of bytes to invalidate from the data cache.
+
+  @return Address.
+
+**/
+VOID *
+EFIAPI
+InvalidateDataCacheRange (
+  IN      VOID                      *Address,
+  IN      UINTN                     Length
+  )
+{
+  return Address;
+}
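
Note: every routine in RiscVCache.c above is an intentionally empty stub, so this
instance only satisfies the CacheMaintenanceLib interface. For context, a typical
caller (for example, a loader that copies code into memory before executing it)
drives these APIs as in the sketch below; this is illustrative usage only, not
part of the patch, and the function and parameter names are made up.

  #include <Library/BaseMemoryLib.h>
  #include <Library/CacheMaintenanceLib.h>

  VOID
  LoadAndFlushCode (
    OUT VOID        *Destination,   // hypothetical destination buffer
    IN  CONST VOID  *Source,        // hypothetical source image
    IN  UINTN       Length
    )
  {
    CopyMem (Destination, Source, Length);
    //
    // Push the newly written code out of the data cache, then drop any
    // stale instruction-cache lines before jumping to it.
    //
    WriteBackDataCacheRange (Destination, Length);
    InvalidateInstructionCacheRange (Destination, Length);
  }
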
diff --git a/MdePkg/Library/BaseCpuLib/BaseCpuLib.inf 
b/MdePkg/Library/BaseCpuLib/BaseCpuLib.inf
index 4e0f0a1..f2f7419 100644
--- a/MdePkg/Library/BaseCpuLib/BaseCpuLib.inf
+++ b/MdePkg/Library/BaseCpuLib/BaseCpuLib.inf
@@ -7,6 +7,7 @@
 #  Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
 #  Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
 #  Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+#  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
 #
 #  This program and the accompanying materials
 #  are licensed and made available under the terms and conditions of the BSD 
License
@@ -66,6 +67,9 @@
   AArch64/CpuFlushTlb.S | GCC
   AArch64/CpuSleep.S    | GCC
 
+[Sources.RISCV64]
+  RiscV/Cpu.s
+
 [Packages]
   MdePkg/MdePkg.dec
 
diff --git a/MdePkg/Library/BaseCpuLib/RiscV/Cpu.s 
b/MdePkg/Library/BaseCpuLib/RiscV/Cpu.s
new file mode 100644
index 0000000..91c1d74
--- /dev/null
+++ b/MdePkg/Library/BaseCpuLib/RiscV/Cpu.s
@@ -0,0 +1,25 @@
+//------------------------------------------------------------------------------
+//
+// CpuSleep for RISC-V
+//
+// Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+// This program and the accompanying materials
+// are licensed and made available under the terms and conditions of the BSD 
License
+// which accompanies this distribution.  The full text of the license may be 
found at
+// http://opensource.org/licenses/bsd-license.php.
+//
+// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR 
IMPLIED.
+//
+//------------------------------------------------------------------------------
+.data
+.align 3
+.section .text
+
+.global ASM_PFX(_CpuSleep)
+                           
+ASM_PFX(_CpuSleep):
+    wfi
+    ret
+
+
diff --git a/MdePkg/Library/BaseIoLibIntrinsic/BaseIoLibIntrinsic.inf 
b/MdePkg/Library/BaseIoLibIntrinsic/BaseIoLibIntrinsic.inf
index 29b9e8b..700ce38 100644
--- a/MdePkg/Library/BaseIoLibIntrinsic/BaseIoLibIntrinsic.inf
+++ b/MdePkg/Library/BaseIoLibIntrinsic/BaseIoLibIntrinsic.inf
@@ -2,12 +2,13 @@
 #  Instance of I/O Library using compiler intrinsics.
 #
 #  I/O Library that uses compiler intrinsics to perform IN and OUT instructions
-#  for IA-32 and x64.  On IPF, I/O port requests are translated into MMIO 
requests.
+#  for IA-32, x64 and RISC-V.  On IPF, I/O port requests are translated into 
MMIO requests.
 #  MMIO requests are forwarded directly to memory.  For EBC, I/O port requests
 #  ASSERT().
 #
 #  Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR>
 #  Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+#  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
 #
 #  This program and the accompanying materials
 #  are licensed and made available under the terms and conditions of the BSD 
License
@@ -30,7 +31,7 @@
 
 
 #
-#  VALID_ARCHITECTURES           = IA32 X64 EBC IPF ARM AARCH64
+#  VALID_ARCHITECTURES           = IA32 X64 EBC IPF ARM AARCH64 RISCV64
 #
 
 [Sources]
@@ -63,6 +64,9 @@
 [Sources.AARCH64]
   IoLibArm.c
 
+[Sources.RISCV64]
+  IoLibRiscV.c
+
 [Packages]
   MdePkg/MdePkg.dec
 
diff --git a/MdePkg/Library/BaseIoLibIntrinsic/IoLibRiscV.c 
b/MdePkg/Library/BaseIoLibIntrinsic/IoLibRiscV.c
new file mode 100644
index 0000000..cb110fe
--- /dev/null
+++ b/MdePkg/Library/BaseIoLibIntrinsic/IoLibRiscV.c
@@ -0,0 +1,418 @@
+/** @file
+  Common I/O Library routines for RISC-V
+
+  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+
+  This program and the accompanying materials
+  are licensed and made available under the terms and conditions of the BSD 
License
+  which accompanies this distribution. The full text of the license may be 
found at
+  http://opensource.org/licenses/bsd-license.php
+
+  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+**/
+
+#include "BaseIoLibIntrinsicInternal.h"
+
+/**
+  Reads an 8-bit MMIO register.
+
+  Reads the 8-bit MMIO register specified by Address. The 8-bit read value is
+  returned. This function must guarantee that all MMIO read and write
+  operations are serialized.
+
+  If 8-bit MMIO register operations are not supported, then ASSERT().
+
+  @param  Address The MMIO register to read.
+
+  @return The value read.
+
+**/
+UINT8
+EFIAPI
+MmioRead8 (
+  IN      UINTN                     Address
+  )
+{
+  return *(volatile UINT8*)Address;
+}
+
+/**
+  Writes an 8-bit MMIO register.
+
+  Writes the 8-bit MMIO register specified by Address with the value specified
+  by Value and returns Value. This function must guarantee that all MMIO read
+  and write operations are serialized.
+
+  If 8-bit MMIO register operations are not supported, then ASSERT().
+
+  @param  Address The MMIO register to write.
+  @param  Value   The value to write to the MMIO register.
+  
+  @return Value.
+
+**/
+UINT8
+EFIAPI
+MmioWrite8 (
+  IN      UINTN                     Address,
+  IN      UINT8                     Value
+  )
+{
+  *(volatile UINT8 *)Address = Value; 
+  return Value;
+}
+
+/**
+  Reads a 16-bit MMIO register.
+
+  Reads the 16-bit MMIO register specified by Address. The 16-bit read value is
+  returned. This function must guarantee that all MMIO read and write
+  operations are serialized.
+
+  If 16-bit MMIO register operations are not supported, then ASSERT().
+  If Address is not aligned on a 16-bit boundary, then ASSERT().
+
+  @param  Address The MMIO register to read.
+
+  @return The value read.
+
+**/
+UINT16
+EFIAPI
+MmioRead16 (
+  IN      UINTN                     Address
+  )
+{
+  return *(volatile UINT16 *)Address;
+}
+
+/**
+  Writes a 16-bit MMIO register.
+
+  Writes the 16-bit MMIO register specified by Address with the value specified
+  by Value and returns Value. This function must guarantee that all MMIO read
+  and write operations are serialized.
+
+  If 16-bit MMIO register operations are not supported, then ASSERT().
+  If Address is not aligned on a 16-bit boundary, then ASSERT().
+
+  @param  Address The MMIO register to write.
+  @param  Value   The value to write to the MMIO register.
+  
+  @return Value.
+
+**/
+UINT16
+EFIAPI
+MmioWrite16 (
+  IN      UINTN                     Address,
+  IN      UINT16                    Value
+  )
+{
+  *(volatile UINT16 *)Address = Value; 
+  return Value;
+}
+
+/**
+  Reads a 32-bit MMIO register.
+
+  Reads the 32-bit MMIO register specified by Address. The 32-bit read value is
+  returned. This function must guarantee that all MMIO read and write
+  operations are serialized.
+
+  If 32-bit MMIO register operations are not supported, then ASSERT().
+  If Address is not aligned on a 32-bit boundary, then ASSERT().
+
+  @param  Address The MMIO register to read.
+
+  @return The value read.
+
+**/
+UINT32
+EFIAPI
+MmioRead32 (
+  IN      UINTN                     Address
+  )
+{
+  return *(volatile UINT32 *)Address;
+}
+
+/**
+  Writes a 32-bit MMIO register.
+
+  Writes the 32-bit MMIO register specified by Address with the value specified
+  by Value and returns Value. This function must guarantee that all MMIO read
+  and write operations are serialized.
+
+  If 32-bit MMIO register operations are not supported, then ASSERT().
+  If Address is not aligned on a 32-bit boundary, then ASSERT().
+
+  @param  Address The MMIO register to write.
+  @param  Value   The value to write to the MMIO register.
+  
+  @return Value.
+
+**/
+UINT32
+EFIAPI
+MmioWrite32 (
+  IN      UINTN                     Address,
+  IN      UINT32                    Value
+  )
+{
+  *(volatile UINT32 *)Address = Value; 
+  return Value;
+}
+
+/**
+  Reads a 64-bit MMIO register.
+
+  Reads the 64-bit MMIO register specified by Address. The 64-bit read value is
+  returned. This function must guarantee that all MMIO read and write
+  operations are serialized.
+
+  If 64-bit MMIO register operations are not supported, then ASSERT().
+  If Address is not aligned on a 64-bit boundary, then ASSERT().
+
+  @param  Address The MMIO register to read.
+
+  @return The value read.
+
+**/
+UINT64
+EFIAPI
+MmioRead64 (
+  IN      UINTN                     Address
+  )
+{
+  return *(volatile UINT64 *)Address;
+}
+
+/**
+  Writes a 64-bit MMIO register.
+
+  Writes the 64-bit MMIO register specified by Address with the value specified
+  by Value and returns Value. This function must guarantee that all MMIO read
+  and write operations are serialized.
+
+  If 64-bit MMIO register operations are not supported, then ASSERT().
+  If Address is not aligned on a 64-bit boundary, then ASSERT().
+
+  @param  Address The MMIO register to write.
+  @param  Value   The value to write to the MMIO register.
+
+  @return Value.
+
+**/
+UINT64
+EFIAPI
+MmioWrite64 (
+  IN      UINTN                     Address,
+  IN      UINT64                    Value
+  )
+{
+  *(volatile UINT64 *)Address = Value; 
+  return Value;
+}
+
+/**
+  Reads an 8-bit I/O port.
+
+  Reads the 8-bit I/O port specified by Port. The 8-bit read value is returned.
+  This function must guarantee that all I/O read and write operations are
+  serialized.
+
+  If 8-bit I/O port operations are not supported, then ASSERT().
+
+  @param  Port  The I/O port to read.
+
+  @return The value read.
+
+**/
+UINT8
+EFIAPI
+IoRead8 (
+  IN      UINTN                     Port
+  )
+{
+  return *(volatile UINT8*)Port;
+}
+
+/**
+  Writes an 8-bit I/O port.
+
+  Writes the 8-bit I/O port specified by Port with the value specified by Value
+  and returns Value. This function must guarantee that all I/O read and write
+  operations are serialized.
+
+  If 8-bit I/O port operations are not supported, then ASSERT().
+
+  @param  Port  The I/O port to write.
+  @param  Value The value to write to the I/O port.
+
+  @return The value written to the I/O port.
+
+**/
+
+UINT8
+EFIAPI
+IoWrite8 (
+  IN      UINTN                     Port,
+  IN      UINT8                     Value
+  )
+{
+  
+  *(volatile UINT8 *)Port = Value; 
+  return Value;
+}
+
+/**
+  Reads a 16-bit I/O port.
+
+  Reads the 16-bit I/O port specified by Port. The 16-bit read value is 
returned.
+  This function must guarantee that all I/O read and write operations are
+  serialized.
+
+  If 16-bit I/O port operations are not supported, then ASSERT().
+
+  @param  Port  The I/O port to read.
+
+  @return The value read.
+
+**/
+UINT16
+EFIAPI
+IoRead16 (
+  IN      UINTN                     Port
+  )
+{
+  return *(volatile UINT16*)Port;
+}
+
+/**
+  Writes a 16-bit I/O port.
+
+  Writes the 16-bit I/O port specified by Port with the value specified by 
Value
+  and returns Value. This function must guarantee that all I/O read and write
+  operations are serialized.
+
+  If 16-bit I/O port operations are not supported, then ASSERT().
+
+  @param  Port  The I/O port to write.
+  @param  Value The value to write to the I/O port.
+
+  @return The value written to the I/O port.
+
+**/
+UINT16
+EFIAPI
+IoWrite16 (
+  IN      UINTN                     Port,
+  IN      UINT16                    Value
+  )
+{
+  *(volatile UINT16*)Port = Value;
+  return Value;
+}
+
+/**
+  Reads a 32-bit I/O port.
+
+  Reads the 32-bit I/O port specified by Port. The 32-bit read value is 
returned.
+  This function must guarantee that all I/O read and write operations are
+  serialized.
+
+  If 32-bit I/O port operations are not supported, then ASSERT().
+
+  @param  Port  The I/O port to read.
+
+  @return The value read.
+
+**/
+UINT32
+EFIAPI
+IoRead32 (
+  IN      UINTN                     Port
+  )
+{
+  return *(volatile UINT32*)Port;
+}
+
+/**
+  Writes a 32-bit I/O port.
+
+  Writes the 32-bit I/O port specified by Port with the value specified by 
Value
+  and returns Value. This function must guarantee that all I/O read and write
+  operations are serialized.
+
+  If 32-bit I/O port operations are not supported, then ASSERT().
+
+  @param  Port  The I/O port to write.
+  @param  Value The value to write to the I/O port.
+
+  @return The value written to the I/O port.
+
+**/
+UINT32
+EFIAPI
+IoWrite32 (
+  IN      UINTN                     Port,
+  IN      UINT32                    Value
+  )
+{
+  *(volatile UINT32*)Port = Value;
+  return Value;
+}
+
+/**
+  Reads a 64-bit I/O port.
+
+  Reads the 64-bit I/O port specified by Port. The 64-bit read value is 
returned.
+  This function must guarantee that all I/O read and write operations are
+  serialized.
+
+  If 64-bit I/O port operations are not supported, then ASSERT().
+  If Port is not aligned on a 64-bit boundary, then ASSERT().
+
+  @param  Port  The I/O port to read.
+
+  @return The value read.
+
+**/
+UINT64
+EFIAPI
+IoRead64 (
+  IN      UINTN                     Port
+  )
+{
+  return *(volatile UINT64*)Port;
+}
+
+/**
+  Writes a 64-bit I/O port.
+
+  Writes the 64-bit I/O port specified by Port with the value specified by 
Value
+  and returns Value. This function must guarantee that all I/O read and write
+  operations are serialized.
+
+  If 64-bit I/O port operations are not supported, then ASSERT().
+  If Port is not aligned on a 64-bit boundary, then ASSERT().
+
+  @param  Port  The I/O port to write.
+  @param  Value The value to write to the I/O port.
+
+  @return The value written to the I/O port.
+
+**/
+UINT64
+EFIAPI
+IoWrite64 (
+  IN      UINTN                     Port,
+  IN      UINT64                    Value
+  )
+{
+  *(volatile UINT64*)Port = Value;
+  return Value;
+}
+
+
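
Note: since RISC-V has no separate I/O port space, both the Io* and Mmio* families
in IoLibRiscV.c reduce to volatile memory accesses. A read-modify-write of a
memory-mapped register therefore looks like the sketch below; the register address
and bit mask are made-up values used purely for illustration.

  #include <Library/IoLib.h>

  #define EXAMPLE_CTRL_REG    0x10001000  // hypothetical device register
  #define EXAMPLE_ENABLE_BIT  BIT0        // hypothetical enable bit

  VOID
  ExampleEnableDevice (
    VOID
    )
  {
    UINT32  Value;

    Value  = MmioRead32 (EXAMPLE_CTRL_REG);
    Value |= EXAMPLE_ENABLE_BIT;
    MmioWrite32 (EXAMPLE_CTRL_REG, Value);
  }
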
diff --git a/MdePkg/Library/BaseLib/BaseLib.inf 
b/MdePkg/Library/BaseLib/BaseLib.inf
index e83a569..a605c61 100644
--- a/MdePkg/Library/BaseLib/BaseLib.inf
+++ b/MdePkg/Library/BaseLib/BaseLib.inf
@@ -503,6 +503,20 @@
   AArch64/SetJumpLongJump.S         | GCC
   AArch64/CpuBreakpoint.S           | GCC
 
+[Sources.RISCV64]
+  Math64.c
+  RiscV64/Unaligned.c
+  RiscV64/InternalSwitchStack.c
+  RiscV64/CpuBreakpoint.c
+  RiscV64/GetInterruptState.c
+  RiscV64/DisableInterrupts.c
+  RiscV64/EnableInterrupts.c
+  RiscV64/CpuPause.c
+  RiscV64/RiscVSetJumpLongJump.S    | GCC
+  RiscV64/RiscVCpuBreakpoint.S      | GCC
+  RiscV64/RiscVCpuPause.S           | GCC
+  RiscV64/RiscVInterrupt.S          | GCC
+
 [Packages]
   MdePkg/MdePkg.dec
 
diff --git a/MdePkg/Library/BaseLib/RiscV64/CpuBreakpoint.c 
b/MdePkg/Library/BaseLib/RiscV64/CpuBreakpoint.c
new file mode 100644
index 0000000..68cf521
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/CpuBreakpoint.c
@@ -0,0 +1,33 @@
+/** @file
+  CPU breakpoint for RISC-V
+
+  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+
+  This program and the accompanying materials
+  are licensed and made available under the terms and conditions of the BSD 
License
+  which accompanies this distribution. The full text of the license may be 
found at
+  http://opensource.org/licenses/bsd-license.php
+
+  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+**/
+
+#include "BaseLibInternals.h"
+
+extern VOID RiscVCpuBreakpoint (VOID);
+
+/**
+  Generates a breakpoint on the CPU.
+
+  Generates a breakpoint on the CPU. The breakpoint must be implemented such
+  that code can resume normal execution after the breakpoint.
+
+**/
+VOID
+EFIAPI
+CpuBreakpoint (
+  VOID
+  )
+{
+  RiscVCpuBreakpoint ();
+}
diff --git a/MdePkg/Library/BaseLib/RiscV64/CpuPause.c 
b/MdePkg/Library/BaseLib/RiscV64/CpuPause.c
new file mode 100644
index 0000000..6fc1833
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/CpuPause.c
@@ -0,0 +1,35 @@
+/** @file
+  CPU pause for RISC-V
+
+  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+
+  This program and the accompanying materials
+  are licensed and made available under the terms and conditions of the BSD 
License
+  which accompanies this distribution. The full text of the license may be 
found at
+  http://opensource.org/licenses/bsd-license.php
+
+  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+**/
+
+#include "BaseLibInternals.h"
+
+extern VOID RiscVCpuPause (VOID);
+
+
+/**
+  Requests CPU to pause for a short period of time.
+
+  Requests CPU to pause for a short period of time. Typically used in MP
+  systems to prevent memory starvation while waiting for a spin lock.
+
+**/
+VOID
+EFIAPI
+CpuPause (
+  VOID
+  )
+{
+  RiscVCpuPause ();
+}
+
diff --git a/MdePkg/Library/BaseLib/RiscV64/DisableInterrupts.c 
b/MdePkg/Library/BaseLib/RiscV64/DisableInterrupts.c
new file mode 100644
index 0000000..8bd0cb7
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/DisableInterrupts.c
@@ -0,0 +1,30 @@
+/** @file
+  CPU disable interrupt function for RISC-V
+
+  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+
+  This program and the accompanying materials
+  are licensed and made available under the terms and conditions of the BSD 
License
+  which accompanies this distribution. The full text of the license may be 
found at
+  http://opensource.org/licenses/bsd-license.php
+
+  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+**/
+#include "BaseLibInternals.h"
+
+extern VOID RiscVDisableInterrupts (VOID);
+
+/**
+  Disables CPU interrupts.
+
+**/
+VOID
+EFIAPI
+DisableInterrupts (
+  VOID
+  )
+{
+  RiscVDisableInterrupts ();
+}
+
diff --git a/MdePkg/Library/BaseLib/RiscV64/EnableInterrupts.c 
b/MdePkg/Library/BaseLib/RiscV64/EnableInterrupts.c
new file mode 100644
index 0000000..345f544
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/EnableInterrupts.c
@@ -0,0 +1,31 @@
+/** @file
+  CPU enable interrupt function for RISC-V
+
+  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+
+  This program and the accompanying materials
+  are licensed and made available under the terms and conditions of the BSD 
License
+  which accompanies this distribution. The full text of the license may be 
found at
+  http://opensource.org/licenses/bsd-license.php
+
+  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+**/
+
+#include "BaseLibInternals.h"
+
+extern VOID RiscVEnableInterrupt (VOID);
+
+/**
+  Enables CPU interrupts.
+
+**/
+VOID
+EFIAPI
+EnableInterrupts (
+  VOID
+  )
+{
+  RiscVEnableInterrupt ();
+}
+
diff --git a/MdePkg/Library/BaseLib/RiscV64/GetInterruptState.c 
b/MdePkg/Library/BaseLib/RiscV64/GetInterruptState.c
new file mode 100644
index 0000000..9183cfc
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/GetInterruptState.c
@@ -0,0 +1,42 @@
+/** @file
+  CPU get interrupt state function for RISC-V
+
+  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+
+  This program and the accompanying materials
+  are licensed and made available under the terms and conditions of the BSD 
License
+  which accompanies this distribution. The full text of the license may be 
found at
+  http://opensource.org/licenses/bsd-license.php
+
+  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+**/
+
+#include "BaseLibInternals.h"
+
+extern UINT32 RiscVGetInterrupts (VOID);
+
+
+/**
+  Retrieves the current CPU interrupt state.
+
+  Returns TRUE if interrupts are currently enabled. Otherwise
+  returns FALSE.
+
+  @retval TRUE  CPU interrupts are enabled.
+  @retval FALSE CPU interrupts are disabled.
+
+**/
+BOOLEAN
+EFIAPI
+GetInterruptState (
+  VOID
+  )
+{
+  UINT32 RetValue;
+
+  RetValue = RiscVGetInterrupts ();
+  return (RetValue == 0)? FALSE: TRUE;
+}
+
+
diff --git a/MdePkg/Library/BaseLib/RiscV64/InternalSwitchStack.c 
b/MdePkg/Library/BaseLib/RiscV64/InternalSwitchStack.c
new file mode 100644
index 0000000..ac7a765
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/InternalSwitchStack.c
@@ -0,0 +1,61 @@
+/** @file
+  Switch stack function for RISC-V
+
+  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+
+  This program and the accompanying materials
+  are licensed and made available under the terms and conditions of the BSD 
License
+  which accompanies this distribution. The full text of the license may be 
found at
+  http://opensource.org/licenses/bsd-license.php
+
+  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+**/
+
+#include "BaseLibInternals.h"
+
+/**
+  Transfers control to a function starting with a new stack.
+
+  Transfers control to the function specified by EntryPoint using the
+  new stack specified by NewStack and passing in the parameters specified
+  by Context1 and Context2.  Context1 and Context2 are optional and may
+  be NULL.  The function EntryPoint must never return.
+  Marker will be ignored on IA-32, x64, and EBC.
+  IPF CPUs expect one additional parameter of type VOID * that specifies
+  the new backing store pointer.
+
+  If EntryPoint is NULL, then ASSERT().
+  If NewStack is NULL, then ASSERT().
+
+  @param  EntryPoint  A pointer to function to call with the new stack.
+  @param  Context1    A pointer to the context to pass into the EntryPoint
+                      function.
+  @param  Context2    A pointer to the context to pass into the EntryPoint
+                      function.
+  @param  NewStack    A pointer to the new stack to use for the EntryPoint
+                      function.
+  @param  Marker      VA_LIST marker for the variable argument list.
+
+**/
+VOID
+EFIAPI
+InternalSwitchStack (
+  IN      SWITCH_STACK_ENTRY_POINT  EntryPoint,
+  IN      VOID                      *Context1,   OPTIONAL
+  IN      VOID                      *Context2,   OPTIONAL
+  IN      VOID                      *NewStack,
+  IN      VA_LIST                   Marker
+  )
+{
+  BASE_LIBRARY_JUMP_BUFFER  JumpBuffer;
+
+  DEBUG ((EFI_D_INFO, "RISC-V InternalSwitchStack Entry:%x Context1:%x Context2:%x NewStack:%x\n", \
+          EntryPoint, Context1, Context2, NewStack));
+  JumpBuffer.RA = (UINTN)EntryPoint;
+  JumpBuffer.SP = (UINTN)NewStack - sizeof (VOID *);
+  JumpBuffer.S0 = (UINT64)(UINTN)Context1;
+  JumpBuffer.S1 = (UINT64)(UINTN)Context2;
+  LongJump (&JumpBuffer, (UINTN)-1);
+  ASSERT(FALSE);
+}
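
Note: with this implementation, the public SwitchStack() API ends up loading
EntryPoint, Context1, Context2, and NewStack into RA/S0/S1/SP through the jump
buffer and long-jumping there. Callers see the usual BaseLib contract, roughly as
sketched below; the entry point, stack variable, and 16 KB stack size are
illustrative only.

  #include <Library/BaseLib.h>

  STATIC UINT8  mNewStack[SIZE_16KB];   // illustrative stack region

  VOID
  EFIAPI
  MyEntryPoint (
    IN VOID  *Context1,  OPTIONAL
    IN VOID  *Context2   OPTIONAL
    )
  {
    //
    // Runs on mNewStack with Context1/Context2 recovered from S0/S1.
    // Per the SwitchStack contract, this function must never return.
    //
    for (;;);
  }

  VOID
  ExampleSwitchStack (
    VOID
    )
  {
    //
    // NewStack must point to the top (highest address) of the stack region.
    //
    SwitchStack (MyEntryPoint, NULL, NULL, &mNewStack[sizeof (mNewStack)]);
  }
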
diff --git a/MdePkg/Library/BaseLib/RiscV64/LongJump.c 
b/MdePkg/Library/BaseLib/RiscV64/LongJump.c
new file mode 100644
index 0000000..efab00d
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/LongJump.c
@@ -0,0 +1,38 @@
+/** @file
+  Long jump implementation of RISC-V
+
+  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+
+  This program and the accompanying materials
+  are licensed and made available under the terms and conditions of the BSD 
License
+  which accompanies this distribution. The full text of the license may be 
found at
+  http://opensource.org/licenses/bsd-license.php
+
+  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+**/
+
+#include "BaseLibInternals.h"
+
+
+/**
+  Restores the CPU context that was saved with SetJump().
+
+  Restores the CPU context from the buffer specified by JumpBuffer.
+  This function never returns to the caller.
+  Instead it resumes execution based on the state of JumpBuffer.
+
+  @param  JumpBuffer    A pointer to CPU context buffer.
+  @param  Value         The value to return when the SetJump() context is 
restored.
+
+**/
+VOID
+EFIAPI
+InternalLongJump (
+  IN      BASE_LIBRARY_JUMP_BUFFER  *JumpBuffer,
+  IN      UINTN                     Value
+  )
+{
+    ASSERT (FALSE);
+}
+
diff --git a/MdePkg/Library/BaseLib/RiscV64/RiscVCpuBreakpoint.S 
b/MdePkg/Library/BaseLib/RiscV64/RiscVCpuBreakpoint.S
new file mode 100644
index 0000000..6d8de1f
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/RiscVCpuBreakpoint.S
@@ -0,0 +1,20 @@
+//------------------------------------------------------------------------------
+//
+// CpuBreakpoint for RISC-V
+//
+// Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+//
+// This program and the accompanying materials
+// are licensed and made available under the terms and conditions of the BSD 
License
+// which accompanies this distribution.  The full text of the license may be 
found at
+// http://opensource.org/licenses/bsd-license.php.
+//
+// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR 
IMPLIED.
+//
+//------------------------------------------------------------------------------
+
+ASM_GLOBAL ASM_PFX(RiscVCpuBreakpoint)
+ASM_PFX(RiscVCpuBreakpoint):
+  ebreak
+  ret
diff --git a/MdePkg/Library/BaseLib/RiscV64/RiscVCpuPause.S 
b/MdePkg/Library/BaseLib/RiscV64/RiscVCpuPause.S
new file mode 100644
index 0000000..b924b7a
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/RiscVCpuPause.S
@@ -0,0 +1,20 @@
+//------------------------------------------------------------------------------
+//
+// CpuPause for RISC-V
+//
+// Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+//
+// This program and the accompanying materials
+// are licensed and made available under the terms and conditions of the BSD 
License
+// which accompanies this distribution.  The full text of the license may be 
found at
+// http://opensource.org/licenses/bsd-license.php.
+//
+// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR 
IMPLIED.
+//
+//------------------------------------------------------------------------------
+
+ASM_GLOBAL ASM_PFX(RiscVCpuPause)
+ASM_PFX(RiscVCpuPause):
+  nop
+  ret
diff --git a/MdePkg/Library/BaseLib/RiscV64/RiscVInterrupt.S 
b/MdePkg/Library/BaseLib/RiscV64/RiscVInterrupt.S
new file mode 100644
index 0000000..8f39667
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/RiscVInterrupt.S
@@ -0,0 +1,33 @@
+//------------------------------------------------------------------------------
+//
+// Cpu interrupt enable/disable for RISC-V
+//
+// Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+//
+// This program and the accompanying materials
+// are licensed and made available under the terms and conditions of the BSD 
License
+// which accompanies this distribution.  The full text of the license may be 
found at
+// http://opensource.org/licenses/bsd-license.php.
+//
+// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR 
IMPLIED.
+//
+//------------------------------------------------------------------------------
+
+ASM_GLOBAL ASM_PFX(RiscVDisableInterrupts)
+ASM_GLOBAL ASM_PFX(RiscVEnableInterrupt)
+ASM_GLOBAL ASM_PFX(RiscVGetInterrupts)
+
+ASM_PFX(RiscVDisableInterrupts):
+  li    a1, 0xee
+  csrrc a0, 0x304, a1
+  ret
+
+ASM_PFX(RiscVEnableInterrupt):
+  li    a1, 0xee
+  csrrw a0, 0x304, a1
+  ret
+
+ASM_PFX(RiscVGetInterrupts):
+  csrr  a0, 0x304
+  ret
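
Note: CSR 0x304 is the machine interrupt-enable register (mie). These routines
back the BaseLib interrupt primitives, which callers usually combine into a
critical section as sketched below; the function name and the protected work are
illustrative only.

  #include <Library/BaseLib.h>

  VOID
  ExampleCriticalSection (
    VOID
    )
  {
    BOOLEAN  InterruptsEnabled;

    InterruptsEnabled = GetInterruptState ();
    DisableInterrupts ();

    //
    // ... touch data shared with an interrupt handler ...
    //

    if (InterruptsEnabled) {
      EnableInterrupts ();
    }
  }
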
diff --git a/MdePkg/Library/BaseLib/RiscV64/RiscVSetJumpLongJump.S 
b/MdePkg/Library/BaseLib/RiscV64/RiscVSetJumpLongJump.S
new file mode 100644
index 0000000..8625280
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/RiscVSetJumpLongJump.S
@@ -0,0 +1,63 @@
+//------------------------------------------------------------------------------
+//
+// Set/Long jump for RISC-V
+//
+// Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+//
+// This program and the accompanying materials
+// are licensed and made available under the terms and conditions of the BSD 
License
+// which accompanies this distribution.  The full text of the license may be 
found at
+// http://opensource.org/licenses/bsd-license.php.
+//
+// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR 
IMPLIED.
+//
+//------------------------------------------------------------------------------
+# define REG_S  sd 
+# define REG_L  ld 
+# define SZREG  8 
+.align 3
+    .globl  SetJump
+
+SetJump: 
+    REG_S ra,  0*SZREG(a0) 
+    REG_S s0,  1*SZREG(a0) 
+    REG_S s1,  2*SZREG(a0) 
+    REG_S s2,  3*SZREG(a0) 
+    REG_S s3,  4*SZREG(a0) 
+    REG_S s4,  5*SZREG(a0) 
+    REG_S s5,  6*SZREG(a0) 
+    REG_S s6,  7*SZREG(a0) 
+    REG_S s7,  8*SZREG(a0) 
+    REG_S s8,  9*SZREG(a0) 
+    REG_S s9, 10*SZREG(a0) 
+    REG_S s10,11*SZREG(a0) 
+    REG_S s11,12*SZREG(a0) 
+    REG_S sp, 13*SZREG(a0) 
+    li    a0, 0
+    ret
+ 
+    .globl  InternalLongJump 
+InternalLongJump: 
+    REG_L ra,  0*SZREG(a0) 
+    REG_L s0,  1*SZREG(a0) 
+    REG_L s1,  2*SZREG(a0) 
+    REG_L s2,  3*SZREG(a0) 
+    REG_L s3,  4*SZREG(a0) 
+    REG_L s4,  5*SZREG(a0) 
+    REG_L s5,  6*SZREG(a0) 
+    REG_L s6,  7*SZREG(a0) 
+    REG_L s7,  8*SZREG(a0) 
+    REG_L s8,  9*SZREG(a0) 
+    REG_L s9, 10*SZREG(a0) 
+    REG_L s10,11*SZREG(a0) 
+    REG_L s11,12*SZREG(a0) 
+    REG_L sp, 13*SZREG(a0) 
+
+    add a0, s0, 0
+    add a1, s1, 0
+    add a2, s2, 0
+    add a3, s3, 0
+    ret
+    
+
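
Note: for reference, the BaseLib contract these routines implement is the usual
SetJump()/LongJump() pairing: SetJump() returns 0 on the direct call and the
non-zero Value after a LongJump() through the same buffer. A minimal usage sketch
(not part of the patch; the function name is made up):

  #include <Library/BaseLib.h>
  #include <Library/DebugLib.h>

  VOID
  ExampleSetJumpLongJump (
    VOID
    )
  {
    BASE_LIBRARY_JUMP_BUFFER  JumpBuffer;
    UINTN                     Status;

    Status = SetJump (&JumpBuffer);
    if (Status == 0) {
      //
      // Direct return from SetJump(); the saved context is valid.
      //
      LongJump (&JumpBuffer, 1);    // never returns; Value must be non-zero
    } else {
      //
      // Resumed here via LongJump(); Status is the Value passed to it.
      //
      DEBUG ((EFI_D_INFO, "Resumed via LongJump, Value = %d\n", (INT32)Status));
    }
  }
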
diff --git a/MdePkg/Library/BaseLib/RiscV64/Unaligned.c 
b/MdePkg/Library/BaseLib/RiscV64/Unaligned.c
new file mode 100644
index 0000000..3d17cc8
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/Unaligned.c
@@ -0,0 +1,270 @@
+/** @file
+  RISC-V specific functionality for (un)aligned memory read/write.
+
+  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+
+  This program and the accompanying materials
+  are licensed and made available under the terms and conditions of the BSD 
License
+  which accompanies this distribution. The full text of the license may be 
found at
+  http://opensource.org/licenses/bsd-license.php
+
+  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+**/
+
+#include "BaseLibInternals.h"
+
+/**
+  Reads a 16-bit value from memory that may be unaligned.
+
+  This function returns the 16-bit value pointed to by Buffer. The function
+  guarantees that the read operation does not produce an alignment fault.
+
+  If the Buffer is NULL, then ASSERT().
+
+  @param  Buffer  A pointer to a 16-bit value that may be unaligned.
+
+  @return The 16-bit value read from Buffer.
+
+**/
+UINT16
+EFIAPI
+ReadUnaligned16 (
+  IN CONST UINT16              *Buffer
+  )
+{
+  UINT16 Value;
+  INT8 Count;
+
+  ASSERT (Buffer != NULL);
+
+  for (Count = sizeof (UINT16) - 1, Value = 0; Count >= 0 ; Count --) {
+    Value = Value << 8;
+    Value |= *((UINT8*)Buffer + Count);
+  }
+  return Value;
+}
+
+/**
+  Writes a 16-bit value to memory that may be unaligned.
+
+  This function writes the 16-bit value specified by Value to Buffer. Value is
+  returned. The function guarantees that the write operation does not produce
+  an alignment fault.
+
+  If the Buffer is NULL, then ASSERT().
+
+  @param  Buffer  A pointer to a 16-bit value that may be unaligned.
+  @param  Value   16-bit value to write to Buffer.
+
+  @return The 16-bit value to write to Buffer.
+
+**/
+UINT16
+EFIAPI
+WriteUnaligned16 (
+  OUT UINT16                    *Buffer,
+  IN  UINT16                    Value
+  )
+{
+  INT8 Count;
+  UINT16 ValueTemp;
+
+  ASSERT (Buffer != NULL);
+
+  for (Count = 0, ValueTemp = Value; Count < sizeof (UINT16) ; Count ++) {
+    *((UINT8*)Buffer + Count) = (UINT8)(ValueTemp & 0xff);
+    ValueTemp = ValueTemp >> 8;
+  }
+  return Value;
+}
+
+/**
+  Reads a 24-bit value from memory that may be unaligned.
+
+  This function returns the 24-bit value pointed to by Buffer. The function
+  guarantees that the read operation does not produce an alignment fault.
+
+  If the Buffer is NULL, then ASSERT().
+
+  @param  Buffer  A pointer to a 24-bit value that may be unaligned.
+
+  @return The 24-bit value read from Buffer.
+
+**/
+UINT32
+EFIAPI
+ReadUnaligned24 (
+  IN CONST UINT32              *Buffer
+  )
+{
+  UINT32 Value;
+  INT8 Count;
+
+  ASSERT (Buffer != NULL);
+  for (Count = 2, Value = 0; Count >= 0 ; Count --) {
+    Value = Value << 8;
+    Value |= *((UINT8*)Buffer + Count);
+  }
+  return Value;
+}
+
+/**
+  Writes a 24-bit value to memory that may be unaligned.
+
+  This function writes the 24-bit value specified by Value to Buffer. Value is
+  returned. The function guarantees that the write operation does not produce
+  an alignment fault.
+
+  If the Buffer is NULL, then ASSERT().
+
+  @param  Buffer  A pointer to a 24-bit value that may be unaligned.
+  @param  Value   24-bit value to write to Buffer.
+
+  @return The 24-bit value to write to Buffer.
+
+**/
+UINT32
+EFIAPI
+WriteUnaligned24 (
+  OUT UINT32                    *Buffer,
+  IN  UINT32                    Value
+  )
+{
+  INT8 Count;
+  UINT32 ValueTemp;
+
+  ASSERT (Buffer != NULL);
+  for (Count = 0, ValueTemp = Value; Count < 3 ; Count ++) {
+    *((UINT8*)Buffer + Count) = (UINT8)(ValueTemp & 0xff);
+    ValueTemp = ValueTemp >> 8;
+  }
+  return Value;
+}
+
+/**
+  Reads a 32-bit value from memory that may be unaligned.
+
+  This function returns the 32-bit value pointed to by Buffer. The function
+  guarantees that the read operation does not produce an alignment fault.
+
+  If the Buffer is NULL, then ASSERT().
+
+  @param  Buffer  A pointer to a 32-bit value that may be unaligned.
+
+  @return The 32-bit value read from Buffer.
+
+**/
+UINT32
+EFIAPI
+ReadUnaligned32 (
+  IN CONST UINT32              *Buffer
+  )
+{
+  UINT32 Value;
+  INT8 Count;
+
+  ASSERT (Buffer != NULL);
+
+  for (Count = sizeof (UINT32) - 1, Value = 0; Count >= 0 ; Count --) {
+    Value = Value << 8;
+    Value |= *((UINT8*)Buffer + Count);
+  }
+  return Value;
+}
+
+/**
+  Writes a 32-bit value to memory that may be unaligned.
+
+  This function writes the 32-bit value specified by Value to Buffer. Value is
+  returned. The function guarantees that the write operation does not produce
+  an alignment fault.
+
+  If the Buffer is NULL, then ASSERT().
+
+  @param  Buffer  A pointer to a 32-bit value that may be unaligned.
+  @param  Value   The 32-bit value to write to Buffer.
+
+  @return The 32-bit value to write to Buffer.
+
+**/
+UINT32
+EFIAPI
+WriteUnaligned32 (
+  OUT UINT32                    *Buffer,
+  IN  UINT32                    Value
+  )
+{
+  INT8 Count;
+  UINT32 ValueTemp;
+
+  ASSERT (Buffer != NULL);
+  for (Count = 0, ValueTemp = Value; Count < sizeof (UINT32) ; Count ++) {
+    *((UINT8*)Buffer + Count) = (UINT8)(ValueTemp & 0xff);
+    ValueTemp = ValueTemp >> 8;
+  }
+  return Value;
+}
+
+/**
+  Reads a 64-bit value from memory that may be unaligned.
+
+  This function returns the 64-bit value pointed to by Buffer. The function
+  guarantees that the read operation does not produce an alignment fault.
+
+  If the Buffer is NULL, then ASSERT().
+
+  @param  Buffer  A pointer to a 64-bit value that may be unaligned.
+
+  @return The 64-bit value read from Buffer.
+
+**/
+UINT64
+EFIAPI
+ReadUnaligned64 (
+  IN CONST UINT64              *Buffer
+  )
+{
+  UINT64 Value;
+  INT8 Count;
+
+  ASSERT (Buffer != NULL);
+  for (Count = sizeof (UINT64) - 1, Value = 0; Count >= 0 ; Count --) {
+    Value = Value << 8;
+    Value |= *((UINT8*)Buffer + Count);
+  }
+  return Value;
+}
+
+/**
+  Writes a 64-bit value to memory that may be unaligned.
+
+  This function writes the 64-bit value specified by Value to Buffer. Value is
+  returned. The function guarantees that the write operation does not produce
+  an alignment fault.
+
+  If the Buffer is NULL, then ASSERT().
+
+  @param  Buffer  A pointer to a 64-bit value that may be unaligned.
+  @param  Value   The 64-bit value to write to Buffer.
+
+  @return The 64-bit value to write to Buffer.
+
+**/
+UINT64
+EFIAPI
+WriteUnaligned64 (
+  OUT UINT64                    *Buffer,
+  IN  UINT64                    Value
+  )
+{
+  INT8 Count;
+  UINT64 ValueTemp;
+
+  ASSERT (Buffer != NULL);
+  for (Count = 0, ValueTemp = Value; Count < sizeof (UINT64) ; Count ++) {
+    *((UINT8*)Buffer + Count) = (UINT8)(ValueTemp & 0xff);
+    ValueTemp = ValueTemp >> 8;
+  }
+  return Value;
+}
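
Note: each routine in Unaligned.c moves the value one byte at a time, so it never
issues a load or store wider than 8 bits and cannot take an alignment fault.
Typical use is pulling multi-byte fields out of a packed byte stream, as in this
sketch; the buffer layout and field offset are made up for illustration.

  #include <Library/BaseLib.h>

  UINT32
  ExampleParseLength (
    IN CONST UINT8  *Packet      // hypothetical packed structure
    )
  {
    //
    // A 32-bit field at offset 3 is not naturally aligned; ReadUnaligned32
    // assembles it from individual bytes instead of one 32-bit load.
    //
    return ReadUnaligned32 ((CONST UINT32 *)(Packet + 3));
  }
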
diff --git a/MdePkg/Library/BasePeCoffLib/BasePeCoff.c 
b/MdePkg/Library/BasePeCoffLib/BasePeCoff.c
index 33cad23..d66d209 100644
--- a/MdePkg/Library/BasePeCoffLib/BasePeCoff.c
+++ b/MdePkg/Library/BasePeCoffLib/BasePeCoff.c
@@ -1,6 +1,6 @@
 /** @file
   Base PE/COFF loader supports loading any PE32/PE32+ or TE image, but
-  only supports relocating IA32, x64, IPF, and EBC images.
+  only supports relocating IA32, x64, IPF, ARM, RISC-V and EBC images.
 
   Caution: This file requires additional review when modified.
   This library will have external input - PE/COFF image.
@@ -17,6 +17,7 @@
 
   Copyright (c) 2006 - 2014, Intel Corporation. All rights reserved.<BR>
   Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
   This program and the accompanying materials
   are licensed and made available under the terms and conditions of the BSD 
License
   which accompanies this distribution.  The full text of the license may be 
found at
diff --git a/MdePkg/Library/BasePeCoffLib/BasePeCoffLib.inf 
b/MdePkg/Library/BasePeCoffLib/BasePeCoffLib.inf
index ff0580f..8f839e4 100644
--- a/MdePkg/Library/BasePeCoffLib/BasePeCoffLib.inf
+++ b/MdePkg/Library/BasePeCoffLib/BasePeCoffLib.inf
@@ -3,6 +3,7 @@
 #  The IPF version library supports loading IPF and EBC PE/COFF image.
 #  The IA32 version library support loading IA32, X64 and EBC PE/COFF images.
 #  The X64 version library support loading IA32, X64 and EBC PE/COFF images.
+#  The RISC-V version library support loading RISC-V images.
 #
 #  Caution: This module requires additional review when modified.
 #  This library will have external input - PE/COFF image.
@@ -11,6 +12,7 @@
 #
 #  Copyright (c) 2006 - 2014, Intel Corporation. All rights reserved.<BR>
 #  Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+#  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
 #
 #  This program and the accompanying materials
 #  are licensed and made available under the terms and conditions of the BSD 
License
@@ -49,6 +51,9 @@
 [Sources.ARM]
   Arm/PeCoffLoaderEx.c
 
+[Sources.RISCV64]
+  RiscV/PeCoffLoaderEx.c
+  
 [Packages]
   MdePkg/MdePkg.dec
 
diff --git a/MdePkg/Library/BasePeCoffLib/BasePeCoffLib.uni 
b/MdePkg/Library/BasePeCoffLib/BasePeCoffLib.uni
index 56b6f1b..fe8ab15 100644
--- a/MdePkg/Library/BasePeCoffLib/BasePeCoffLib.uni
+++ b/MdePkg/Library/BasePeCoffLib/BasePeCoffLib.uni
@@ -4,6 +4,7 @@
 // The IPF version library supports loading IPF and EBC PE/COFF image.
 // The IA32 version library support loading IA32, X64 and EBC PE/COFF images.
 // The X64 version library support loading IA32, X64 and EBC PE/COFF images.
+// The RISC-V version library support loading RISC-V32 and RISC-V64 PE/COFF 
images.
 // 
 // Caution: This module requires additional review when modified.
 // This library will have external input - PE/COFF image.
@@ -12,6 +13,7 @@
 //
 // Copyright (c) 2006 - 2014, Intel Corporation. All rights reserved.<BR>
 // Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+// Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
 //
 // This program and the accompanying materials
 // are licensed and made available under the terms and conditions of the BSD 
License
diff --git a/MdePkg/Library/BasePeCoffLib/BasePeCoffLibInternals.h 
b/MdePkg/Library/BasePeCoffLib/BasePeCoffLibInternals.h
index 0851acc..88129da 100644
--- a/MdePkg/Library/BasePeCoffLib/BasePeCoffLibInternals.h
+++ b/MdePkg/Library/BasePeCoffLib/BasePeCoffLibInternals.h
@@ -2,6 +2,7 @@
   Declaration of internal functions in PE/COFF Lib.
 
   Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.<BR>
+  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
   This program and the accompanying materials
   are licensed and made available under the terms and conditions of the BSD 
License
   which accompanies this distribution.  The full text of the license may be 
found at
diff --git a/MdePkg/Library/BasePeCoffLib/RiscV/PeCoffLoaderEx.c 
b/MdePkg/Library/BasePeCoffLib/RiscV/PeCoffLoaderEx.c
new file mode 100644
index 0000000..3bdfbea
--- /dev/null
+++ b/MdePkg/Library/BasePeCoffLib/RiscV/PeCoffLoaderEx.c
@@ -0,0 +1,161 @@
+/** @file
+  PE/Coff loader for RISC-V PE image
+
+  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights 
reserved.<BR>
+
+  This program and the accompanying materials
+  are licensed and made available under the terms and conditions of the BSD 
License
+  which accompanies this distribution. The full text of the license may be 
found at
+  http://opensource.org/licenses/bsd-license.php
+
+  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+**/
+#include "BasePeCoffLibInternals.h"
+#include <Library/BaseLib.h>
+
+//
+// RISC-V definition.
+//
+#define RV_X(x, s, n) (((x) >> (s)) & ((1<<(n))-1))
+#define RISCV_IMM_BITS 12
+#define RISCV_IMM_REACH (1LL<<RISCV_IMM_BITS)
+#define RISCV_CONST_HIGH_PART(VALUE) \
+  (((VALUE) + (RISCV_IMM_REACH/2)) & ~(RISCV_IMM_REACH-1))
+
+/**
+  Performs a RISC-V specific relocation fixup and is a no-op on
+  other instruction sets.
+  RISC-V splits a 32-bit fixup into a 20-bit and a 12-bit part, using two
+  relocation types. The high 20-bit fixup cannot be finalized until the low
+  12-bit value is known (because of the carry), so the location of the high
+  20-bit fixup is logged in FixupData and patched when the corresponding
+  LOW12 relocation is processed.
+
+  @param  Reloc       The pointer to the relocation record.
+  @param  Fixup       The pointer to the address to fix up.
+  @param  FixupData   The pointer to a buffer to log the fixups.
+  @param  Adjust      The offset to adjust the fixup.
+
+  @return Status code.
+
+**/
+RETURN_STATUS
+PeCoffLoaderRelocateImageEx (
+  IN UINT16      *Reloc,
+  IN OUT CHAR8   *Fixup,
+  IN OUT CHAR8   **FixupData,
+  IN UINT64      Adjust
+  )
+{
+  UINT32 Value;
+  UINT32 Value2;
+  UINT32 *RiscVHi20Fixup;
+
+  switch ((*Reloc) >> 12) {
+  case EFI_IMAGE_REL_BASED_RISCV_HI20:
+      //*(UINT32 *)ScratchBuffer = (UINT32)(UINTN)Fixup;
+      *(UINT64 *)(*FixupData) = (UINT64)(UINTN)Fixup;
+      //*FixupData = *FixupData + sizeof(UINT64);
+      break;
+
+  case EFI_IMAGE_REL_BASED_RISCV_LOW12I:
+      //RiscVHi20Fixup = (UINT32 *)*ScratchBuffer;
+      RiscVHi20Fixup =  (UINT32 *)(*(UINT64 *)(*FixupData));
+      if (RiscVHi20Fixup != NULL) {
+        //*(UINT64 *)(*FixupData) = (UINT64)(UINTN)Fixup;
+        //*FixupData = *FixupData + sizeof(UINT64);
+
+        Value = (UINT32)(RV_X(*RiscVHi20Fixup, 12, 20) << 12); 
+        Value2 = (UINT32)(RV_X(*(UINT32 *)Fixup, 20, 12));
+        if (Value2 & (RISCV_IMM_REACH/2)) {
+          Value2 |= ~(RISCV_IMM_REACH-1);
+        }
+        Value += Value2;
+        Value += (UINT32)Adjust;
+        Value2 = RISCV_CONST_HIGH_PART (Value);
+        *(UINT32 *)RiscVHi20Fixup = (RV_X (Value2, 12, 20) << 12) | \
+                                           (RV_X (*(UINT32 *)RiscVHi20Fixup, 
0, 12));
+        *(UINT32 *)Fixup = (RV_X (Value, 0, 12) << 20) | \
+                           (RV_X (*(UINT32 *)Fixup, 0, 20));
+      }
+      //*ScratchBuffer = (UINT64)NULL;
+      break;
+
+  case EFI_IMAGE_REL_BASED_RISCV_LOW12S:
+      //RiscVHi20Fixup = (UINT32 *)*ScratchBuffer;
+      RiscVHi20Fixup =  (UINT32 *)(*(UINT64 *)(*FixupData));
+      if (RiscVHi20Fixup != NULL) {
+        //*(UINT64 *)(*FixupData) = (UINT64)(UINTN)Fixup;
+        //*FixupData = *FixupData + sizeof(UINT64);
+
+        Value = (UINT32)(RV_X(*RiscVHi20Fixup, 12, 20) << 12); 
+        Value2 = (UINT32)(RV_X(*(UINT32 *)Fixup, 7, 5) | (RV_X(*(UINT32 *)Fixup, 25, 7) << 5));
+        if (Value2 & (RISCV_IMM_REACH/2)) {
+          Value2 |= ~(RISCV_IMM_REACH-1);
+        }
+        Value += Value2;
+        Value += (UINT32)Adjust;
+        Value2 = RISCV_CONST_HIGH_PART (Value);
+        *(UINT32 *)RiscVHi20Fixup = (RV_X (Value2, 12, 20) << 12) | \
+                                  (RV_X (*(UINT32 *)RiscVHi20Fixup, 0, 12));
+        Value2 = *(UINT32 *)Fixup & 0x01fff07f;
+        Value &= RISCV_IMM_REACH - 1;
+        *(UINT32 *)Fixup = Value2 | (UINT32)(((RV_X(Value, 0, 5) << 7) | (RV_X(Value, 5, 7) << 25)));
+      }
+      //*ScratchBuffer = (UINT64)NULL;
+      break;
+
+  default:
+      return RETURN_UNSUPPORTED;
+
+  }
+  return RETURN_SUCCESS;
+}
+
+/**
+  Returns TRUE if the machine type of the PE/COFF image is supported. Supported
+  does not mean the image can be executed; it means the PE/COFF loader supports
+  loading and relocating images of this machine type. It is up to the caller to
+  support the entry point.
+
+  @param  Machine   Machine type from the PE Header.
+
+  @return TRUE if this PE/COFF loader can load the image
+
+**/
+BOOLEAN
+PeCoffLoaderImageFormatSupported (
+  IN  UINT16  Machine
+  )
+{
+  if ((Machine == IMAGE_FILE_MACHINE_RISCV32) || (Machine == IMAGE_FILE_MACHINE_RISCV64)) {
+    return TRUE;
+  }
+
+  return FALSE;
+}
+
+/**
+  Performs a RISC-V specific re-relocation fixup and is a no-op on other
+  instruction sets. This is used to re-relocate the image into the EFI virtual
+  address space for runtime calls. This is currently not supported for RISC-V.
+
+  @param  Reloc       The pointer to the relocation record.
+  @param  Fixup       The pointer to the address to fix up.
+  @param  FixupData   The pointer to a buffer to log the fixups.
+  @param  Adjust      The offset to adjust the fixup.
+
+  @return Status code.
+
+**/
+RETURN_STATUS
+PeHotRelocateImageEx (
+  IN UINT16      *Reloc,
+  IN OUT CHAR8   *Fixup,
+  IN OUT CHAR8   **FixupData,
+  IN UINT64      Adjust
+  )
+{
+  return RETURN_UNSUPPORTED;
+}
+
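As background for the HI20/LOW12 handling above: RISC-V materializes a 32-bit constant as an upper 20-bit part (LUI) plus a sign-extended 12-bit immediate, so the upper part has to absorb the carry from the lower part. The standalone sketch below (plain C, illustrative only and not part of this patch) shows why RISCV_CONST_HIGH_PART rounds by RISCV_IMM_REACH/2 before masking:

  /* Illustrative only: how a 32-bit offset splits into a HI20/LOW12 pair. */
  #include <stdint.h>
  #include <stdio.h>

  #define RISCV_IMM_BITS  12
  #define RISCV_IMM_REACH (1LL << RISCV_IMM_BITS)
  #define RISCV_CONST_HIGH_PART(VALUE) \
    (((VALUE) + (RISCV_IMM_REACH / 2)) & ~(RISCV_IMM_REACH - 1))

  int main (void)
  {
    uint32_t Value = 0x12345FFCU;                             /* fixed-up offset      */
    uint32_t Hi20  = (uint32_t)RISCV_CONST_HIGH_PART (Value); /* placed by ..._HI20   */
    int32_t  Lo12  = (int32_t)(Value - Hi20);                 /* placed by ..._LOW12I */

    /* The low part is sign-extended by the consuming instruction (here -4),
       so Hi20 plus Lo12 reproduces Value, including the carry out of bit 11. */
    printf ("hi20=0x%08x lo12=%d sum=0x%08x\n",
            (unsigned)Hi20, (int)Lo12, (unsigned)(Hi20 + (uint32_t)Lo12));
    return 0;
  }

With Value = 0x12345FFC this prints hi20=0x12346000 lo12=-4 sum=0x12345ffc, which is the same carry handling performed in the LOW12I and LOW12S cases above.
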
diff --git a/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf b/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf
index bcd7935..e7cd8bc 100755
--- a/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf
+++ b/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf
@@ -3,6 +3,7 @@
 #
 #  Copyright (c) 2007 - 2016, Intel Corporation. All rights reserved.<BR>
 #  Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+#  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
 #
 #  This program and the accompanying materials
 #  are licensed and made available under the terms and conditions of the BSD License
@@ -97,6 +98,10 @@
   Synchronization.c
   AArch64/Synchronization.S
 
+[Sources.RISCV64]
+  Synchronization.c
+  RiscV64/Synchronization.c  | GCC
+
 [Packages]
   MdePkg/MdePkg.dec
 
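A note on the "| GCC" qualifier used above: in an EDK II INF a trailing toolchain-family tag restricts a source file to that family, so RiscV64/Synchronization.c is built only with GCC-family toolchains while the portable Synchronization.c is always built. Illustrative layout (same paths as in this patch, comments added for explanation only):

  [Sources.RISCV64]
    # Generic C implementation shared with other architectures.
    Synchronization.c
    # RISC-V specific implementation, restricted to GCC-family toolchains.
    RiscV64/Synchronization.c  | GCC
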
diff --git a/MdePkg/Library/BaseSynchronizationLib/RiscV64/Synchronization.c b/MdePkg/Library/BaseSynchronizationLib/RiscV64/Synchronization.c
new file mode 100644
index 0000000..a93827d
--- /dev/null
+++ b/MdePkg/Library/BaseSynchronizationLib/RiscV64/Synchronization.c
@@ -0,0 +1,147 @@
+/** @file
+  Implementation of synchronization functions on RISC-V
+
+  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
+
+  This program and the accompanying materials
+  are licensed and made available under the terms and conditions of the BSD License
+  which accompanies this distribution. The full text of the license may be found at
+  http://opensource.org/licenses/bsd-license.php
+
+  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+**/
+
+#include "BaseSynchronizationLibInternals.h"
+
+/**
+  Performs an atomic compare exchange operation on a 16-bit
+  unsigned integer.
+
+  Performs an atomic compare exchange operation on the 16-bit
+  unsigned integer specified by Value.  If Value is equal to
+  CompareValue, then Value is set to ExchangeValue and
+  CompareValue is returned.  If Value is not equal to
+  CompareValue, then Value is returned. The compare exchange
+  operation must be performed using MP safe mechanisms.
+
+  @param  Value         A pointer to the 16-bit value for the
+                        compare exchange operation.
+  @param  CompareValue  16-bit value used in compare operation.
+  @param  ExchangeValue 16-bit value used in exchange operation.
+
+  @return The original *Value before exchange.
+
+**/
+UINT16
+EFIAPI
+InternalSyncCompareExchange16 (
+  IN      volatile UINT16           *Value,
+  IN      UINT16                    CompareValue,
+  IN      UINT16                    ExchangeValue
+  )
+{
+  return *Value != CompareValue ? *Value :
+           ((*Value = ExchangeValue), CompareValue);
+}
+
+/**
+  Performs an atomic compare exchange operation on a 32-bit
+  unsigned integer.
+
+  Performs an atomic compare exchange operation on the 32-bit
+  unsigned integer specified by Value.  If Value is equal to
+  CompareValue, then Value is set to ExchangeValue and
+  CompareValue is returned.  If Value is not equal to
+  CompareValue, then Value is returned. The compare exchange
+  operation must be performed using MP safe mechanisms.
+
+  @param  Value         A pointer to the 32-bit value for the
+                        compare exchange operation.
+  @param  CompareValue  32-bit value used in compare operation.
+  @param  ExchangeValue 32-bit value used in exchange operation.
+
+  @return The original *Value before exchange.
+
+**/
+UINT32
+EFIAPI
+InternalSyncCompareExchange32 (
+  IN      volatile UINT32           *Value,
+  IN      UINT32                    CompareValue,
+  IN      UINT32                    ExchangeValue
+  )
+{
+  return *Value != CompareValue ? *Value :
+           ((*Value = ExchangeValue), CompareValue);
+}
+
+/**
+  Performs an atomic compare exchange operation on a 64-bit unsigned integer.
+
+  Performs an atomic compare exchange operation on the 64-bit unsigned integer specified
+  by Value.  If Value is equal to CompareValue, then Value is set to ExchangeValue and
+  CompareValue is returned.  If Value is not equal to CompareValue, then Value is returned.
+  The compare exchange operation must be performed using MP safe mechanisms.
+
+  @param  Value         A pointer to the 64-bit value for the compare exchange
+                        operation.
+  @param  CompareValue  64-bit value used in compare operation.
+  @param  ExchangeValue 64-bit value used in exchange operation.
+
+  @return The original *Value before exchange.
+
+**/
+UINT64
+EFIAPI
+InternalSyncCompareExchange64 (
+  IN      volatile UINT64           *Value,
+  IN      UINT64                    CompareValue,
+  IN      UINT64                    ExchangeValue
+  )
+{
+  return *Value != CompareValue ? *Value :
+           ((*Value = ExchangeValue), CompareValue);
+}
+
+/**
+  Performs an atomic increment of a 32-bit unsigned integer.
+
+  Performs an atomic increment of the 32-bit unsigned integer specified by
+  Value and returns the incremented value. The increment operation must be
+  performed using MP safe mechanisms. The state of the return value is not
+  guaranteed to be MP safe.
+
+  @param  Value A pointer to the 32-bit value to increment.
+
+  @return The incremented value.
+
+**/
+UINT32
+EFIAPI
+InternalSyncIncrement (
+  IN      volatile UINT32           *Value
+  )
+{
+  return ++*Value;
+}
+
+/**
+  Performs an atomic decrement of a 32-bit unsigned integer.
+
+  Performs an atomic decrement of the 32-bit unsigned integer specified by
+  Value and returns the decremented value. The decrement operation must be
+  performed using MP safe mechanisms. The state of the return value is not
+  guaranteed to be MP safe.
+
+  @param  Value A pointer to the 32-bit value to decrement.
+
+  @return The decremented value.
+
+**/
+UINT32
+EFIAPI
+InternalSyncDecrement (
+  IN      volatile UINT32           *Value
+  )
+{
+  return --*Value;
+}
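
The portable C bodies above are only MP safe when the compiler lowers them to atomic sequences, which plain load/store expressions do not guarantee. One possible GCC-only alternative is sketched below, under the assumption that the GCC __atomic builtins and the RISC-V A extension are available; the function names are illustrative and not part of this patch, and the usual Base.h definitions (UINT32, EFIAPI, IN, FALSE) are assumed:

  UINT32
  EFIAPI
  InternalSyncCompareExchange32Atomic (
    IN volatile UINT32  *Value,
    IN UINT32           CompareValue,
    IN UINT32           ExchangeValue
    )
  {
    UINT32 Expected;

    Expected = CompareValue;
    //
    // Lowers to an LR.W/SC.W loop (or AMO) on RV64 with the A extension.
    // On failure Expected is updated to the value that was observed, so
    // returning it always yields the original *Value.
    //
    __atomic_compare_exchange_n (
      (UINT32 *)Value, &Expected, ExchangeValue,
      FALSE, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST
      );
    return Expected;
  }

  UINT32
  EFIAPI
  InternalSyncIncrementAtomic (
    IN volatile UINT32  *Value
    )
  {
    //
    // Lowers to AMOADD.W; returns the incremented value.
    //
    return __atomic_add_fetch ((UINT32 *)Value, 1, __ATOMIC_SEQ_CST);
  }

InternalSyncDecrement could be handled the same way with __atomic_sub_fetch.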
-- 
1.9.5.msysgit.0
