On 22.05.24 17:34, Jiaxun Yang wrote:
Commit for directories, Kconfig, Makefile and headers

Some of them are copied from Linux, some of them are derived
from other architectures, the rest are written on my own.

Signed-off-by: Jiaxun Yang <jiaxun.y...@flygoat.com>
---
  arch/Kconfig                                       |   15 +
  arch/loongarch/Kconfig                             |   40 +
  arch/loongarch/Makefile                            |   19 +
  arch/loongarch/config.mk                           |   23 +
  arch/loongarch/cpu/Makefile                        |    5 +
  arch/loongarch/cpu/u-boot.lds                      |   85 ++
  arch/loongarch/dts/Makefile                        |   13 +
  arch/loongarch/include/asm/acpi_table.h            |    8 +
  arch/loongarch/include/asm/addrspace.h             |   87 ++
  .../include/asm/arch-generic/entry-init.h          |   15 +
  arch/loongarch/include/asm/asm.h                   |  186 +++
  arch/loongarch/include/asm/atomic.h                |   12 +
  arch/loongarch/include/asm/barrier.h               |  138 ++
  arch/loongarch/include/asm/bitops.h                |  156 +++
  arch/loongarch/include/asm/byteorder.h             |   22 +
  arch/loongarch/include/asm/cache.h                 |   24 +
  arch/loongarch/include/asm/config.h                |   11 +
  arch/loongarch/include/asm/cpu.h                   |  123 ++
  arch/loongarch/include/asm/dma-mapping.h           |   27 +
  arch/loongarch/include/asm/global_data.h           |   43 +
  arch/loongarch/include/asm/gpio.h                  |   11 +
  arch/loongarch/include/asm/io.h                    |  399 ++++++
  arch/loongarch/include/asm/linkage.h               |   11 +
  arch/loongarch/include/asm/loongarch.h             | 1468 ++++++++++++++++++++
  arch/loongarch/include/asm/posix_types.h           |   87 ++
  arch/loongarch/include/asm/processor.h             |   11 +
  arch/loongarch/include/asm/ptrace.h                |   33 +
  arch/loongarch/include/asm/regdef.h                |   42 +
  arch/loongarch/include/asm/sections.h              |    8 +
  arch/loongarch/include/asm/setjmp.h                |   25 +
  arch/loongarch/include/asm/spl.h                   |   11 +
  arch/loongarch/include/asm/string.h                |   11 +
  arch/loongarch/include/asm/system.h                |   74 +
  arch/loongarch/include/asm/types.h                 |   37 +
  arch/loongarch/include/asm/u-boot-loongarch.h      |   23 +
  arch/loongarch/include/asm/u-boot.h                |   30 +
  arch/loongarch/include/asm/unaligned.h             |   11 +
  arch/loongarch/lib/Makefile                        |    5 +
  38 files changed, 3349 insertions(+)

diff --git a/arch/Kconfig b/arch/Kconfig
index abd406d48841..236b0e637385 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -70,6 +70,20 @@ config ARM
        select SUPPORT_ACPI
        select SUPPORT_OF_CONTROL

+config LOONGARCH
+       bool "LoongArch architecture"
+       select CREATE_ARCH_SYMLINK
+       select SUPPORT_OF_CONTROL
+       select OF_CONTROL
+       select DM
+       select DM_EVENT
+       imply DM_SERIAL
+       imply BLK
+       imply CLK
+       imply MTD
+       imply TIMER
+       imply CMD_DM
+
  config M68K
        bool "M68000 architecture"
        select HAVE_PRIVATE_LIBGCC
@@ -496,6 +510,7 @@ config SYS_NONCACHED_MEMORY

  source "arch/arc/Kconfig"
  source "arch/arm/Kconfig"
+source "arch/loongarch/Kconfig"
  source "arch/m68k/Kconfig"
  source "arch/microblaze/Kconfig"
  source "arch/mips/Kconfig"
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
new file mode 100644
index 000000000000..4e8e9d4ee88b
--- /dev/null
+++ b/arch/loongarch/Kconfig
@@ -0,0 +1,40 @@
+menu "LoongArch architecture"
+       depends on LOONGARCH
+
+config SYS_ARCH
+       default "loongarch"
+
+choice
+       prompt "Target select"
+
+endchoice
+
+# board-specific options below
+
+# platform-specific options below
+
+# architecture-specific options below
+choice
+       prompt "Base ISA"
+
+config ARCH_LA64

While having short symbols is generally good, I would prefer something
self-explanatory: ARCH_LOONGARCH64.

Best regards

Heinrich

+       bool "LoongArch64"
+       select 64BIT
+       select PHYS_64BIT
+       help
+         Choose this option to target the LoongArch64 base ISA.
+
+endchoice
+
+config DMA_ADDR_T_64BIT
+       bool
+       default y if 64BIT
+
+config STACK_SIZE_SHIFT
+       int
+       default 14
+
+config OF_BOARD_FIXUP
+       default y if OF_SEPARATE
+
+endmenu
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
new file mode 100644
index 000000000000..288c695a634d
--- /dev/null
+++ b/arch/loongarch/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2024 Jiaxun yang <jiaxun.y...@flygoat.com>
+#
+
+ARCH_FLAGS = -march=loongarch64 -mabi=lp64s -msoft-float
+
+ifeq ($(CONFIG_$(SPL_)FRAMEPOINTER),y)
+       ARCH_FLAGS += -fno-omit-frame-pointer
+endif
+
+PLATFORM_CPPFLAGS += $(ARCH_FLAGS)
+
+head-y := arch/loongarch/cpu/start.o
+
+libs-y += arch/loongarch/cpu/
+libs-y += arch/loongarch/cpu/$(CPU)/
+libs-y += arch/loongarch/lib/
+
diff --git a/arch/loongarch/config.mk b/arch/loongarch/config.mk
new file mode 100644
index 000000000000..7c247400e361
--- /dev/null
+++ b/arch/loongarch/config.mk
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2024 Jiaxun yang <jiaxun.y...@flygoat.com>
+#
+
+32bit-bfd              = elf32-loongarch
+64bit-bfd              = elf64-loongarch
+32bit-emul             = elf32loongarch
+64bit-emul             = elf64loongarch
+
+ifdef CONFIG_32BIT
+KBUILD_LDFLAGS         += -m $(32bit-emul)
+PLATFORM_ELFFLAGS      += -B loongarch -O $(32bit-bfd)
+endif
+
+ifdef CONFIG_64BIT
+KBUILD_LDFLAGS         += -m $(64bit-emul)
+PLATFORM_ELFFLAGS      += -B loongarch -O $(64bit-bfd)
+endif
+
+PLATFORM_CPPFLAGS      += -fpic
+PLATFORM_RELFLAGS      += -fno-common -ffunction-sections -fdata-sections
+LDFLAGS_u-boot         += --gc-sections -static -pie
diff --git a/arch/loongarch/cpu/Makefile b/arch/loongarch/cpu/Makefile
new file mode 100644
index 000000000000..3dbed94cc624
--- /dev/null
+++ b/arch/loongarch/cpu/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2024 Jiaxun yang <jiaxun.y...@flygoat.com>
+#
+
diff --git a/arch/loongarch/cpu/u-boot.lds b/arch/loongarch/cpu/u-boot.lds
new file mode 100644
index 000000000000..2f0201c0c817
--- /dev/null
+++ b/arch/loongarch/cpu/u-boot.lds
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+OUTPUT_ARCH(loongarch)
+ENTRY(_start)
+
+SECTIONS
+{
+       . = ALIGN(4);
+       .text : {
+               arch/loongarch/cpu/start.o      (.text)
+       }
+
+       /* This needs to come before *(.text*) */
+       .efi_runtime : {
+               __efi_runtime_start = .;
+               *(.text.efi_runtime*)
+               *(.rodata.efi_runtime*)
+               *(.data.efi_runtime*)
+               __efi_runtime_stop = .;
+       }
+
+       .text_rest : {
+               *(.text*)
+       }
+
+       . = ALIGN(4);
+       .rodata : { *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*))) }
+
+       . = ALIGN(4);
+       .data : {
+               *(.data*)
+       }
+       . = ALIGN(4);
+
+       .got : {
+               __got_start = .;
+               *(.got.plt) *(.got)
+               __got_end = .;
+       }
+
+       . = ALIGN(4);
+
+       __u_boot_list : {
+               KEEP(*(SORT(__u_boot_list*)));
+       }
+
+       . = ALIGN(8);
+
+       .efi_runtime_rel : {
+               __efi_runtime_rel_start = .;
+               *(.rel*.efi_runtime)
+               *(.rel*.efi_runtime.*)
+               __efi_runtime_rel_stop = .;
+       }
+
+       /DISCARD/ : { *(.rela.plt*) }
+       .rela.dyn : {
+               __rel_dyn_start = .;
+               *(.rela*)
+               __rel_dyn_end = .;
+       }
+
+       . = ALIGN(8);
+
+       .dynsym : {
+               __dyn_sym_start = .;
+               *(.dynsym)
+               __dyn_sym_end = .;
+       }
+
+       . = ALIGN(8);
+
+       _end = .;
+       __init_end = .;
+
+       .bss : {
+               __bss_start = .;
+               *(.bss*)
+               . = ALIGN(8);
+               __bss_end = .;
+       }
+}
diff --git a/arch/loongarch/dts/Makefile b/arch/loongarch/dts/Makefile
new file mode 100644
index 000000000000..a71db58d48a9
--- /dev/null
+++ b/arch/loongarch/dts/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0+
+
+include $(srctree)/scripts/Makefile.dts
+
+targets += $(dtb-y)
+
+DTC_FLAGS += -R 4 -p 0x1000
+
+PHONY += dtbs
+dtbs: $(addprefix $(obj)/, $(dtb-y))
+       @:
+
+clean-files := *.dtb
diff --git a/arch/loongarch/include/asm/acpi_table.h 
b/arch/loongarch/include/asm/acpi_table.h
new file mode 100644
index 000000000000..db2f644f07af
--- /dev/null
+++ b/arch/loongarch/include/asm/acpi_table.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __ASM_ACPI_TABLE_H__
+#define __ASM_ACPI_TABLE_H__
+
+/* Dummy header */
+
+#endif /* __ASM_ACPI_TABLE_H__ */
diff --git a/arch/loongarch/include/asm/addrspace.h 
b/arch/loongarch/include/asm/addrspace.h
new file mode 100644
index 000000000000..b61be44587e6
--- /dev/null
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1996, 99 Ralf Baechle
+ * Copyright (C) 2000, 2002  Maciej W. Rozycki
+ * Copyright (C) 1990, 1999 by Silicon Graphics, Inc.
+ */
+#ifndef _ASM_ADDRSPACE_H
+#define _ASM_ADDRSPACE_H
+
+#include <linux/const.h>
+#include <linux/sizes.h>
+
+#include <asm/loongarch.h>
+
+#ifndef IO_BASE
+#define IO_BASE                        CSR_DMW0_BASE
+#endif
+
+#ifndef CACHE_BASE
+#define CACHE_BASE             CSR_DMW1_BASE
+#endif
+
+#ifndef UNCACHE_BASE
+#define UNCACHE_BASE           CSR_DMW0_BASE
+#endif
+
+#define DMW_PABITS     48
+#define TO_PHYS_MASK   ((1ULL << DMW_PABITS) - 1)
+
+
+#define TO_PHYS(x)             (               ((x) & TO_PHYS_MASK))
+#define TO_CACHE(x)            (CACHE_BASE   | ((x) & TO_PHYS_MASK))
+#define TO_UNCACHE(x)          (UNCACHE_BASE | ((x) & TO_PHYS_MASK))
+
+#ifdef __ASSEMBLY__
+#define _ATYPE_
+#define _ATYPE32_
+#define _ATYPE64_
+#else
+#define _ATYPE_                __PTRDIFF_TYPE__
+#define _ATYPE32_      int
+#define _ATYPE64_      __s64
+#endif
+
+#ifdef CONFIG_64BIT
+#define _CONST64_(x)   _UL(x)
+#else
+#define _CONST64_(x)   _ULL(x)
+#endif
+
+/*
+ *  32/64-bit LoongArch address spaces
+ */
+#ifdef __ASSEMBLY__
+#define _ACAST32_
+#define _ACAST64_
+#else
+#define _ACAST32_              (_ATYPE_)(_ATYPE32_)    /* widen if necessary */
+#define _ACAST64_              (_ATYPE64_)             /* do _not_ narrow */
+#endif
+
+#ifdef CONFIG_32BIT
+
+#define UVRANGE                        0x00000000
+#define KPRANGE0               0x80000000
+#define KPRANGE1               0xa0000000
+#define KVRANGE                        0xc0000000
+
+#else
+
+#define XUVRANGE               _CONST64_(0x0000000000000000)
+#define XSPRANGE               _CONST64_(0x4000000000000000)
+#define XKPRANGE               _CONST64_(0x8000000000000000)
+#define XKVRANGE               _CONST64_(0xc000000000000000)
+
+#endif
+
+/*
+ * Returns the physical address of a KPRANGEx / XKPRANGE address
+ */
+#define PHYSADDR(a)            ((_ACAST64_(a)) & TO_PHYS_MASK)
+
+#endif /* _ASM_ADDRSPACE_H */
diff --git a/arch/loongarch/include/asm/arch-generic/entry-init.h 
b/arch/loongarch/include/asm/arch-generic/entry-init.h
new file mode 100644
index 000000000000..a618f66d0d7a
--- /dev/null
+++ b/arch/loongarch/include/asm/arch-generic/entry-init.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef __ASM_ENTRY_INIT_H
+#define __ASM_ENTRY_INIT_H
+
+       .macro  entry_setup
+       .endm
+
+       .macro  smp_secondary_setup
+       .endm
+
+#endif
diff --git a/arch/loongarch/include/asm/asm.h b/arch/loongarch/include/asm/asm.h
new file mode 100644
index 000000000000..ba379dac3d98
--- /dev/null
+++ b/arch/loongarch/include/asm/asm.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Some useful macros for LoongArch assembler code
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1995, 1996, 1997, 1999, 2001 by Ralf Baechle
+ * Copyright (C) 1999 by Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2002  Maciej W. Rozycki
+ */
+#ifndef __ASM_ASM_H
+#define __ASM_ASM_H
+
+#include <asm/regdef.h>
+
+/*
+ * Stack alignment
+ */
+#define STACK_ALIGN    ~(0xf)
+
+/*
+ * Macros to handle different pointer/register sizes for 32/64-bit code
+ */
+
+/*
+ * Size of a register
+ */
+#ifndef __loongarch64
+#define SZREG  4
+#else
+#define SZREG  8
+#endif
+
+/*
+ * Use the following macros in assembler code to load/store registers,
+ * pointers etc.
+ */
+#if (SZREG == 4)
+#define REG_L          ld.w
+#define REG_S          st.w
+#define REG_ADD                add.w
+#define REG_SUB                sub.w
+#else /* SZREG == 8 */
+#define REG_L          ld.d
+#define REG_S          st.d
+#define REG_ADD                add.d
+#define REG_SUB                sub.d
+#endif
+
+/*
+ * How to add/sub/load/store/shift C int variables.
+ */
+#if (__SIZEOF_INT__ == 4)
+#define INT_ADD                add.w
+#define INT_ADDI       addi.w
+#define INT_SUB                sub.w
+#define INT_L          ld.w
+#define INT_S          st.w
+#define INT_SLL                slli.w
+#define INT_SLLV       sll.w
+#define INT_SRL                srli.w
+#define INT_SRLV       srl.w
+#define INT_SRA                srai.w
+#define INT_SRAV       sra.w
+#endif
+
+#if (__SIZEOF_INT__ == 8)
+#define INT_ADD                add.d
+#define INT_ADDI       addi.d
+#define INT_SUB                sub.d
+#define INT_L          ld.d
+#define INT_S          st.d
+#define INT_SLL                slli.d
+#define INT_SLLV       sll.d
+#define INT_SRL                srli.d
+#define INT_SRLV       srl.d
+#define INT_SRA                srai.d
+#define INT_SRAV       sra.d
+#endif
+
+/*
+ * How to add/sub/load/store/shift C long variables.
+ */
+#if (__SIZEOF_LONG__ == 4)
+#define LONG_ADD       add.w
+#define LONG_ADDI      addi.w
+#define LONG_SUB       sub.w
+#define LONG_L         ld.w
+#define LONG_LI                li.w
+#define LONG_S         st.w
+#define LONG_SLL       slli.w
+#define LONG_SLLV      sll.w
+#define LONG_SRL       srli.w
+#define LONG_SRLV      srl.w
+#define LONG_SRA       srai.w
+#define LONG_SRAV      sra.w
+#define LONG_IOCSRRD   iocsrrd.w
+#define LONG_IOCSRWR   iocsrwr.w
+
+#ifdef __ASSEMBLY__
+#define LONG           .word
+#endif
+#define LONGSIZE       4
+#define LONGMASK       3
+#define LONGLOG                2
+#endif
+
+#if (__SIZEOF_LONG__ == 8)
+#define LONG_ADD       add.d
+#define LONG_ADDI      addi.d
+#define LONG_SUB       sub.d
+#define LONG_L         ld.d
+#define LONG_LI                li.d
+#define LONG_S         st.d
+#define LONG_SLL       slli.d
+#define LONG_SLLV      sll.d
+#define LONG_SRL       srli.d
+#define LONG_SRLV      srl.d
+#define LONG_SRA       srai.d
+#define LONG_SRAV      sra.d
+#define LONG_IOCSRRD   iocsrrd.w
+#define LONG_IOCSRWR   iocsrwr.w
+
+#ifdef __ASSEMBLY__
+#define LONG           .dword
+#endif
+#define LONGSIZE       8
+#define LONGMASK       7
+#define LONGLOG                3
+#endif
+
+/*
+ * How to add/sub/load/store/shift pointers.
+ */
+#if (__SIZEOF_POINTER__ == 4)
+#define PTR_ADD                add.w
+#define PTR_ADDI       addi.w
+#define PTR_SUB                sub.w
+#define PTR_L          ld.w
+#define PTR_S          st.w
+#define PTR_LI         li.w
+#define PTR_SLL                slli.w
+#define PTR_SLLV       sll.w
+#define PTR_SRL                srli.w
+#define PTR_SRLV       srl.w
+#define PTR_SRA                srai.w
+#define PTR_SRAV       sra.w
+#define PTR_MUL                mul.w
+
+#define PTR_SCALESHIFT 2
+
+#ifdef __ASSEMBLY__
+#define PTR            .word
+#endif
+#define PTRSIZE                4
+#define PTRLOG         2
+#endif
+
+#if (__SIZEOF_POINTER__ == 8)
+#define PTR_ADD                add.d
+#define PTR_ADDI       addi.d
+#define PTR_SUB                sub.d
+#define PTR_L          ld.d
+#define PTR_S          st.d
+#define PTR_LI         li.d
+#define PTR_SLL                slli.d
+#define PTR_SLLV       sll.d
+#define PTR_SRL                srli.d
+#define PTR_SRLV       srl.d
+#define PTR_SRA                srai.d
+#define PTR_SRAV       sra.d
+#define PTR_MUL                mul.d
+
+#define PTR_SCALESHIFT 3
+
+#ifdef __ASSEMBLY__
+#define PTR            .dword
+#endif
+#define PTRSIZE                8
+#define PTRLOG         3
+#endif
+
+#endif /* __ASM_ASM_H */
diff --git a/arch/loongarch/include/asm/atomic.h 
b/arch/loongarch/include/asm/atomic.h
new file mode 100644
index 000000000000..abd0b6f5f342
--- /dev/null
+++ b/arch/loongarch/include/asm/atomic.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef __LOONGARCH_ATOMIC_H
+#define __LOONGARCH_ATOMIC_H
+
+#include <asm/system.h>
+#include <asm-generic/atomic.h>
+
+#endif
diff --git a/arch/loongarch/include/asm/barrier.h 
b/arch/loongarch/include/asm/barrier.h
new file mode 100644
index 000000000000..952222116f50
--- /dev/null
+++ b/arch/loongarch/include/asm/barrier.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+/*
+ * Hint encoding:
+ *
+ * Bit4: ordering or completion (0: completion, 1: ordering)
+ * Bit3: barrier for previous read (0: true, 1: false)
+ * Bit2: barrier for previous write (0: true, 1: false)
+ * Bit1: barrier for succeeding read (0: true, 1: false)
+ * Bit0: barrier for succeeding write (0: true, 1: false)
+ *
+ * Hint 0x700: barrier for "read after read" from the same address
+ */
+
+#define DBAR(hint) __asm__ __volatile__("dbar %0 " : : "I"(hint) : "memory")
+
+#define crwrw          0b00000
+#define cr_r_          0b00101
+#define c_w_w          0b01010
+
+#define orwrw          0b10000
+#define or_r_          0b10101
+#define o_w_w          0b11010
+
+#define orw_w          0b10010
+#define or_rw          0b10100
+
+#define c_sync()       DBAR(crwrw)
+#define c_rsync()      DBAR(cr_r_)
+#define c_wsync()      DBAR(c_w_w)
+
+#define o_sync()       DBAR(orwrw)
+#define o_rsync()      DBAR(or_r_)
+#define o_wsync()      DBAR(o_w_w)
+
+#define ldacq_mb()     DBAR(or_rw)
+#define strel_mb()     DBAR(orw_w)
+
+#define mb()           c_sync()
+#define rmb()          c_rsync()
+#define wmb()          c_wsync()
+#define iob()          c_sync()
+#define wbflush()      c_sync()
+
+#define __smp_mb()     o_sync()
+#define __smp_rmb()    o_rsync()
+#define __smp_wmb()    o_wsync()
+
+#ifdef CONFIG_SMP
+#define __WEAK_LLSC_MB         "  dbar 0x700      \n"
+#else
+#define __WEAK_LLSC_MB         "                  \n"
+#endif
+
+#define __smp_mb__before_atomic()      barrier()
+#define __smp_mb__after_atomic()       barrier()
+
+/**
+ * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 
otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ *
+ * Returns:
+ *     0 - (@index < @size)
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+                                                   unsigned long size)
+{
+       unsigned long mask;
+
+       __asm__ __volatile__(
+               "sltu      %0, %1, %2\n\t"
+#if (__SIZEOF_LONG__ == 4)
+               "sub.w     %0, $zero, %0\n\t"
+#elif (__SIZEOF_LONG__ == 8)
+               "sub.d     %0, $zero, %0\n\t"
+#endif
+               : "=r" (mask)
+               : "r" (index), "r" (size)
+               :);
+
+       return mask;
+}
+
+#define __smp_load_acquire(p)                          \
+({                                                     \
+       typeof(*p) ___p1 = READ_ONCE(*p);               \
+       compiletime_assert_atomic_type(*p);             \
+       ldacq_mb();                                     \
+       ___p1;                                          \
+})
+
+#define __smp_store_release(p, v)                      \
+do {                                                   \
+       compiletime_assert_atomic_type(*p);             \
+       strel_mb();                                     \
+       WRITE_ONCE(*p, v);                              \
+} while (0)
+
+#define __smp_store_mb(p, v)                                                   
\
+do {                                                                           
\
+       union { typeof(p) __val; char __c[1]; } __u =                           
\
+               { .__val = (__force typeof(p)) (v) };                           
\
+       unsigned long __tmp;                                                    
\
+       switch (sizeof(p)) {                                                    
\
+       case 1:                                                                 
\
+               *(volatile __u8 *)&p = *(__u8 *)__u.__c;                    \
+               __smp_mb();                                                     
\
+               break;                                                          
\
+       case 2:                                                                 
\
+               *(volatile __u16 *)&p = *(__u16 *)__u.__c;                  \
+               __smp_mb();                                                     
\
+               break;                                                          
\
+       case 4:                                                                 
\
+               __asm__ __volatile__(                                           
\
+               "amswap_db.w %[tmp], %[val], %[mem]        \n"                  
      \
+               : [mem] "+ZB" (*(u32 *)&p), [tmp] "=&r" (__tmp)                 
    \
+               : [val] "r" (*(__u32 *)__u.__c)                                 
      \
+               : );                                                            
\
+               break;                                                          
\
+       case 8:                                                                 
\
+               __asm__ __volatile__(                                           
\
+               "amswap_db.d %[tmp], %[val], %[mem]        \n"                  
      \
+               : [mem] "+ZB" (*(u64 *)&p), [tmp] "=&r" (__tmp)                 
    \
+               : [val] "r" (*(__u64 *)__u.__c)                                 
      \
+               : );                                                            
\
+               break;                                                          
\
+       }                                                                       
\
+} while (0)
+
+#endif /* __ASM_BARRIER_H */
diff --git a/arch/loongarch/include/asm/bitops.h 
b/arch/loongarch/include/asm/bitops.h
new file mode 100644
index 000000000000..a5819aa90ced
--- /dev/null
+++ b/arch/loongarch/include/asm/bitops.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 1995, Russell King.
+ * Various bits and pieces copyrights include:
+ * Linus Torvalds (test_bit).
+ *
+ * Copyright (C) 2017 Andes Technology Corporation
+ * Rick Chen, Andes Technology Corporation <r...@andestech.com>
+ *
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ *
+ * Please note that the code in this file should never be included
+ * from user space.  Many of these are not implemented in assembler
+ * since they would be too costly.  Also, they require priviledged
+ * instructions (which are not available from user mode) to ensure
+ * that they are atomic.
+ */
+
+#ifndef __ASM_LOONGARCH_BITOPS_H
+#define __ASM_LOONGARCH_BITOPS_H
+
+#ifdef __KERNEL__
+
+#include <asm/barrier.h>
+
+#include <asm-generic/bitops/builtin-ffs.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-__fls.h>
+#include <asm-generic/bitops/fls64.h>
+
+#define PLATFORM_FFS
+
+static inline void __change_bit(int nr, void *addr)
+{
+       int mask;
+       unsigned long *ADDR = (unsigned long *)addr;
+
+       ADDR += nr >> 5;
+       mask = 1 << (nr & 31);
+       *ADDR ^= mask;
+}
+
+static inline int __test_and_set_bit(int nr, void *addr)
+{
+       int mask, retval;
+       unsigned int *a = (unsigned int *)addr;
+
+       a += nr >> 5;
+       mask = 1 << (nr & 0x1f);
+       retval = (mask & *a) != 0;
+       *a |= mask;
+       return retval;
+}
+
+static inline int __test_and_clear_bit(int nr, void *addr)
+{
+       int mask, retval;
+       unsigned int *a = (unsigned int *)addr;
+
+       a += nr >> 5;
+       mask = 1 << (nr & 0x1f);
+       retval = (mask & *a) != 0;
+       *a &= ~mask;
+       return retval;
+}
+
+static inline int __test_and_change_bit(int nr, void *addr)
+{
+       int mask, retval;
+       unsigned int *a = (unsigned int *)addr;
+
+       a += nr >> 5;
+       mask = 1 << (nr & 0x1f);
+       retval = (mask & *a) != 0;
+       *a ^= mask;
+       return retval;
+}
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static inline int test_bit(int nr, const void *addr)
+{
+       return ((unsigned char *)addr)[nr >> 3] & (1U << (nr & 7));
+}
+
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+       int k;
+
+       word = ~word;
+       k = 31;
+       if (word & 0x0000ffff) {
+               k -= 16; word <<= 16;
+       }
+       if (word & 0x00ff0000) {
+               k -= 8;  word <<= 8;
+       }
+       if (word & 0x0f000000) {
+               k -= 4;  word <<= 4;
+       }
+       if (word & 0x30000000) {
+               k -= 2;  word <<= 2;
+       }
+       if (word & 0x40000000)
+               k -= 1;
+
+       return k;
+}
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+/*
+ * redefined in include/linux/bitops.h
+ * #define ffs(x) generic_ffs(x)
+ */
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#define test_and_set_bit               __test_and_set_bit
+#define test_and_clear_bit             __test_and_clear_bit
+
+#define ext2_set_bit                   test_and_set_bit
+#define ext2_clear_bit                 test_and_clear_bit
+#define ext2_test_bit                  test_bit
+#define ext2_find_first_zero_bit       find_first_zero_bit
+#define ext2_find_next_zero_bit                find_next_zero_bit
+
+/* Bitmap functions for the minix filesystem. */
+#define minix_test_and_set_bit(nr, addr)       test_and_set_bit(nr, addr)
+#define minix_set_bit(nr, addr)                        set_bit(nr, addr)
+#define minix_test_and_clear_bit(nr, addr)     test_and_clear_bit(nr, addr)
+#define minix_test_bit(nr, addr)               test_bit(nr, addr)
+#define minix_find_first_zero_bit(addr, size)  find_first_zero_bit(addr, size)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_LOONGARCH_BITOPS_H */
diff --git a/arch/loongarch/include/asm/byteorder.h 
b/arch/loongarch/include/asm/byteorder.h
new file mode 100644
index 000000000000..ba25f25729ac
--- /dev/null
+++ b/arch/loongarch/include/asm/byteorder.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef __ASM_LOONGARCH_BYTEORDER_H
+#define __ASM_LOONGARCH_BYTEORDER_H
+
+#include <asm/types.h>
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+#  define __BYTEORDER_HAS_U64__
+#  define __SWAB_64_THRU_32__
+#endif
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#include <linux/byteorder/little_endian.h>
+#else
+#include <linux/byteorder/big_endian.h>
+#endif
+
+#endif
diff --git a/arch/loongarch/include/asm/cache.h 
b/arch/loongarch/include/asm/cache.h
new file mode 100644
index 000000000000..854dd0c0a02e
--- /dev/null
+++ b/arch/loongarch/include/asm/cache.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef _ASM_LOONGARCH_CACHE_H
+#define _ASM_LOONGARCH_CACHE_H
+
+/* cache */
+void cache_flush(void);
+
+#define cache_op(op, addr)                                             \
+       __asm__ __volatile__(                                           \
+       "  cacop   %0, %1                                  \n"        \
+       :                                                               \
+       : "i" (op), "ZC" (*(unsigned char *)(addr)))
+
+#ifdef CONFIG_SYS_CACHELINE_SIZE
+#define ARCH_DMA_MINALIGN      CONFIG_SYS_CACHELINE_SIZE
+#else
+#define ARCH_DMA_MINALIGN      32
+#endif
+
+#endif /* _ASM_LOONGARCH_CACHE_H */
diff --git a/arch/loongarch/include/asm/config.h 
b/arch/loongarch/include/asm/config.h
new file mode 100644
index 000000000000..23eb49847e7b
--- /dev/null
+++ b/arch/loongarch/include/asm/config.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef _ASM_CONFIG_H_
+#define _ASM_CONFIG_H_
+
+/* Dummy header */
+
+#endif
diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h
new file mode 100644
index 000000000000..e65ef273ed46
--- /dev/null
+++ b/arch/loongarch/include/asm/cpu.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cpu.h: Values of the PRID register used to match up
+ *       various LoongArch CPU types.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_CPU_H
+#define _ASM_CPU_H
+
+#include <linux/bitops.h>
+
+/*
+ * As described in LoongArch specs from Loongson Technology, the PRID register
+ * (CPUCFG.00) has the following layout:
+ *
+ * +---------------+----------------+------------+--------------------+
+ * | Reserved      | Company ID     | Series ID  |  Product ID        |
+ * +---------------+----------------+------------+--------------------+
+ *  31          24 23            16 15        12 11                 0
+ */
+
+/*
+ * Assigned Company values for bits 23:16 of the PRID register.
+ */
+
+#define PRID_COMP_MASK         0xff0000
+
+#define PRID_COMP_LOONGSON     0x140000
+
+/*
+ * Assigned Series ID values for bits 15:12 of the PRID register. In order
+ * to detect a certain CPU type exactly, additional registers may eventually
+ * need to be examined.
+ */
+
+#define PRID_SERIES_MASK       0xf000
+
+#define PRID_SERIES_LA132      0x8000  /* Loongson 32bit */
+#define PRID_SERIES_LA264      0xa000  /* Loongson 64bit, 2-issue */
+#define PRID_SERIES_LA364      0xb000  /* Loongson 64bit, 3-issue */
+#define PRID_SERIES_LA464      0xc000  /* Loongson 64bit, 4-issue */
+#define PRID_SERIES_LA664      0xd000  /* Loongson 64bit, 6-issue */
+
+/*
+ * Particular Product ID values for bits 11:0 of the PRID register.
+ */
+
+#define PRID_PRODUCT_MASK      0x0fff
+
+
+/*
+ * ISA Level encodings
+ *
+ */
+
+#define LOONGARCH_CPU_ISA_LA32R 0x00000001
+#define LOONGARCH_CPU_ISA_LA32S 0x00000002
+#define LOONGARCH_CPU_ISA_LA64  0x00000004
+
+#define LOONGARCH_CPU_ISA_32BIT (LOONGARCH_CPU_ISA_LA32R | 
LOONGARCH_CPU_ISA_LA32S)
+#define LOONGARCH_CPU_ISA_64BIT LOONGARCH_CPU_ISA_LA64
+
+/*
+ * CPU Option encodings
+ */
+#define CPU_FEATURE_CPUCFG             0       /* CPU has CPUCFG */
+#define CPU_FEATURE_LAM                        1       /* CPU has Atomic 
instructions */
+#define CPU_FEATURE_UAL                        2       /* CPU supports 
unaligned access */
+#define CPU_FEATURE_FPU                        3       /* CPU has FPU */
+#define CPU_FEATURE_LSX                        4       /* CPU has LSX (128-bit 
SIMD) */
+#define CPU_FEATURE_LASX               5       /* CPU has LASX (256-bit SIMD) 
*/
+#define CPU_FEATURE_CRC32              6       /* CPU has CRC32 instructions */
+#define CPU_FEATURE_COMPLEX            7       /* CPU has Complex instructions 
*/
+#define CPU_FEATURE_CRYPTO             8       /* CPU has Crypto instructions 
*/
+#define CPU_FEATURE_LVZ                        9       /* CPU has 
Virtualization extension */
+#define CPU_FEATURE_LBT_X86            10      /* CPU has X86 Binary 
Translation */
+#define CPU_FEATURE_LBT_ARM            11      /* CPU has ARM Binary 
Translation */
+#define CPU_FEATURE_LBT_MIPS           12      /* CPU has MIPS Binary 
Translation */
+#define CPU_FEATURE_TLB                        13      /* CPU has TLB */
+#define CPU_FEATURE_CSR                        14      /* CPU has CSR */
+#define CPU_FEATURE_WATCH              15      /* CPU has watchpoint registers 
*/
+#define CPU_FEATURE_VINT               16      /* CPU has vectored interrupts 
*/
+#define CPU_FEATURE_CSRIPI             17      /* CPU has CSR-IPI */
+#define CPU_FEATURE_EXTIOI             18      /* CPU has EXT-IOI */
+#define CPU_FEATURE_PREFETCH           19      /* CPU has prefetch 
instructions */
+#define CPU_FEATURE_PMP                        20      /* CPU has performance 
counter */
+#define CPU_FEATURE_SCALEFREQ          21      /* CPU supports cpufreq scaling 
*/
+#define CPU_FEATURE_FLATMODE           22      /* CPU has flat mode */
+#define CPU_FEATURE_EIODECODE          23      /* CPU has EXTIOI interrupt pin 
decode mode */
+#define CPU_FEATURE_GUESTID            24      /* CPU has GuestID feature */
+#define CPU_FEATURE_HYPERVISOR         25      /* CPU has hypervisor (running 
in VM) */
+#define CPU_FEATURE_PTW                        26      /* CPU has hardware 
page table walker */
+
+#define LOONGARCH_CPU_CPUCFG           BIT_ULL(CPU_FEATURE_CPUCFG)
+#define LOONGARCH_CPU_LAM              BIT_ULL(CPU_FEATURE_LAM)
+#define LOONGARCH_CPU_UAL              BIT_ULL(CPU_FEATURE_UAL)
+#define LOONGARCH_CPU_FPU              BIT_ULL(CPU_FEATURE_FPU)
+#define LOONGARCH_CPU_LSX              BIT_ULL(CPU_FEATURE_LSX)
+#define LOONGARCH_CPU_LASX             BIT_ULL(CPU_FEATURE_LASX)
+#define LOONGARCH_CPU_CRC32            BIT_ULL(CPU_FEATURE_CRC32)
+#define LOONGARCH_CPU_COMPLEX          BIT_ULL(CPU_FEATURE_COMPLEX)
+#define LOONGARCH_CPU_CRYPTO           BIT_ULL(CPU_FEATURE_CRYPTO)
+#define LOONGARCH_CPU_LVZ              BIT_ULL(CPU_FEATURE_LVZ)
+#define LOONGARCH_CPU_LBT_X86          BIT_ULL(CPU_FEATURE_LBT_X86)
+#define LOONGARCH_CPU_LBT_ARM          BIT_ULL(CPU_FEATURE_LBT_ARM)
+#define LOONGARCH_CPU_LBT_MIPS         BIT_ULL(CPU_FEATURE_LBT_MIPS)
+#define LOONGARCH_CPU_TLB              BIT_ULL(CPU_FEATURE_TLB)
+#define LOONGARCH_CPU_CSR              BIT_ULL(CPU_FEATURE_CSR)
+#define LOONGARCH_CPU_WATCH            BIT_ULL(CPU_FEATURE_WATCH)
+#define LOONGARCH_CPU_VINT             BIT_ULL(CPU_FEATURE_VINT)
+#define LOONGARCH_CPU_CSRIPI           BIT_ULL(CPU_FEATURE_CSRIPI)
+#define LOONGARCH_CPU_EXTIOI           BIT_ULL(CPU_FEATURE_EXTIOI)
+#define LOONGARCH_CPU_PREFETCH         BIT_ULL(CPU_FEATURE_PREFETCH)
+#define LOONGARCH_CPU_PMP              BIT_ULL(CPU_FEATURE_PMP)
+#define LOONGARCH_CPU_SCALEFREQ                BIT_ULL(CPU_FEATURE_SCALEFREQ)
+#define LOONGARCH_CPU_FLATMODE         BIT_ULL(CPU_FEATURE_FLATMODE)
+#define LOONGARCH_CPU_EIODECODE                BIT_ULL(CPU_FEATURE_EIODECODE)
+#define LOONGARCH_CPU_GUESTID          BIT_ULL(CPU_FEATURE_GUESTID)
+#define LOONGARCH_CPU_HYPERVISOR       BIT_ULL(CPU_FEATURE_HYPERVISOR)
+#define LOONGARCH_CPU_PTW              BIT_ULL(CPU_FEATURE_PTW)
+
+#endif /* _ASM_CPU_H */
diff --git a/arch/loongarch/include/asm/dma-mapping.h 
b/arch/loongarch/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..87088815b955
--- /dev/null
+++ b/arch/loongarch/include/asm/dma-mapping.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef __ASM_LOONGARCH_DMA_MAPPING_H
+#define __ASM_LOONGARCH_DMA_MAPPING_H
+
+#include <linux/types.h>
+#include <asm/cache.h>
+#include <cpu_func.h>
+#include <linux/dma-direction.h>
+#include <malloc.h>
+
+static inline void *dma_alloc_coherent(size_t len, unsigned long *handle)
+{
+       /* TODO: For non-coherent systems, allocate from DMW1 */
+       *handle = (unsigned long)memalign(ARCH_DMA_MINALIGN, len);
+       return (void *)*handle;
+}
+
+static inline void dma_free_coherent(void *addr)
+{
+       free(addr);
+}
+
+#endif
diff --git a/arch/loongarch/include/asm/global_data.h 
b/arch/loongarch/include/asm/global_data.h
new file mode 100644
index 000000000000..95b5f45bce2f
--- /dev/null
+++ b/arch/loongarch/include/asm/global_data.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2002
+ * Wolfgang Denk, DENX Software Engineering, w...@denx.de.
+ *
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef        __ASM_GBL_DATA_H
+#define __ASM_GBL_DATA_H
+
+#include <linux/types.h>
+#include <asm/u-boot.h>
+#include <compiler.h>
+
+/* Architecture-specific global data */
+struct arch_global_data {
+#if CONFIG_IS_ENABLED(ACPI)
+       ulong table_start;              /* Start address of ACPI tables */
+       ulong table_end;                /* End address of ACPI tables */
+       ulong table_start_high;         /* Start address of high ACPI tables */
+       ulong table_end_high;           /* End address of high ACPI tables */
+#endif
+#ifdef CONFIG_SMBIOS
+       ulong smbios_start;             /* Start address of SMBIOS table */
+#endif
+};
+
+#include <asm-generic/global_data.h>
+
+/* GD is stored in u0 (per CPU pointer) */
+#define DECLARE_GLOBAL_DATA_PTR register gd_t *gd asm ("$r21")
+
+static inline void set_gd(volatile gd_t *gd_ptr)
+{
+#ifdef CONFIG_64BIT
+       asm volatile("ld.d $r21, %0\n" : : "m"(gd_ptr));
+#else
+       asm volatile("ld.w $r21, %0\n" : : "m"(gd_ptr));
+#endif
+}
+
+#endif /* __ASM_GBL_DATA_H */
diff --git a/arch/loongarch/include/asm/gpio.h 
b/arch/loongarch/include/asm/gpio.h
new file mode 100644
index 000000000000..b2508fc2e9f8
--- /dev/null
+++ b/arch/loongarch/include/asm/gpio.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef __ASM_GPIO_H
+#define __ASM_GPIO_H
+
+#include <asm-generic/gpio.h>
+
+#endif
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
new file mode 100644
index 000000000000..1fd6ccd9f9a7
--- /dev/null
+++ b/arch/loongarch/include/asm/io.h
@@ -0,0 +1,399 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 Andes Technology Corporation
+ * Rick Chen, Andes Technology Corporation <r...@andestech.com>
+ *
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef __ASM_LOONGARCH_IO_H
+#define __ASM_LOONGARCH_IO_H
+
+#include <linux/types.h>
+#include <asm/addrspace.h>
+#include <asm/barrier.h>
+#include <asm/byteorder.h>
+
+static inline void sync(void)
+{
+}
+
+#define __arch_getb(a)                 (*(volatile unsigned char *)(a))
+#define __arch_getw(a)                 (*(volatile unsigned short *)(a))
+#define __arch_getl(a)                 (*(volatile unsigned int *)(a))
+#define __arch_getq(a)                 (*(volatile unsigned long long *)(a))
+
+#define __arch_putb(v, a)              (*(volatile unsigned char *)(a) = (v))
+#define __arch_putw(v, a)              (*(volatile unsigned short *)(a) = (v))
+#define __arch_putl(v, a)              (*(volatile unsigned int *)(a) = (v))
+#define __arch_putq(v, a)              (*(volatile unsigned long long *)(a) = 
(v))
+
+#define __raw_writeb(v, a)             __arch_putb(v, a)
+#define __raw_writew(v, a)             __arch_putw(v, a)
+#define __raw_writel(v, a)             __arch_putl(v, a)
+#define __raw_writeq(v, a)             __arch_putq(v, a)
+
+#define __raw_readb(a)                 __arch_getb(a)
+#define __raw_readw(a)                 __arch_getw(a)
+#define __raw_readl(a)                 __arch_getl(a)
+#define __raw_readq(a)                 __arch_getq(a)
+
+/* Added for cadence_qspi_apb.c */
+#define memcpy_fromio(a, c, l)         memcpy((a), (c), (l))
+#define memcpy_toio(c, a, l)           memcpy((c), (a), (l))
+
+#define dmb()          mb()
+#define __iormb()      rmb()
+#define __iowmb()      wmb()
+
+static inline void writeb(u8 val, volatile void __iomem *addr)
+{
+       __iowmb();
+       __arch_putb(val, addr);
+}
+
+static inline void writew(u16 val, volatile void __iomem *addr)
+{
+       __iowmb();
+       __arch_putw(val, addr);
+}
+
+static inline void writel(u32 val, volatile void __iomem *addr)
+{
+       __iowmb();
+       __arch_putl(val, addr);
+}
+
+static inline void writeq(u64 val, volatile void __iomem *addr)
+{
+       __iowmb();
+       __arch_putq(val, addr);
+}
+
+static inline u8 readb(const volatile void __iomem *addr)
+{
+       u8      val;
+
+       val = __arch_getb(addr);
+       __iormb();
+       return val;
+}
+
+static inline u16 readw(const volatile void __iomem *addr)
+{
+       u16     val;
+
+       val = __arch_getw(addr);
+       __iormb();
+       return val;
+}
+
+static inline u32 readl(const volatile void __iomem *addr)
+{
+       u32     val;
+
+       val = __arch_getl(addr);
+       __iormb();
+       return val;
+}
+
+static inline u64 readq(const volatile void __iomem *addr)
+{
+       u64     val;
+
+       val = __arch_getq(addr);
+       __iormb();
+       return val;
+}
+
+/*
+ * The compiler seems to be incapable of optimising constants
+ * properly.  Spell it out to the compiler in some cases.
+ * These are only valid for small values of "off" (< 1<<12)
+ */
+#define __raw_base_writeb(val, base, off)      __arch_base_putb(val, base, off)
+#define __raw_base_writew(val, base, off)      __arch_base_putw(val, base, off)
+#define __raw_base_writel(val, base, off)      __arch_base_putl(val, base, off)
+
+#define __raw_base_readb(base, off)    __arch_base_getb(base, off)
+#define __raw_base_readw(base, off)    __arch_base_getw(base, off)
+#define __raw_base_readl(base, off)    __arch_base_getl(base, off)
+
+#define out_arch(type, endian, a, v)   __raw_write##type(cpu_to_##endian(v), a)
+#define in_arch(type, endian, a)       endian##_to_cpu(__raw_read##type(a))
+
+#define out_le32(a, v)                 out_arch(l, le32, a, v)
+#define out_le16(a, v)                 out_arch(w, le16, a, v)
+
+#define in_le32(a)                     in_arch(l, le32, a)
+#define in_le16(a)                     in_arch(w, le16, a)
+
+#define out_be32(a, v)                 out_arch(l, be32, a, v)
+#define out_be16(a, v)                 out_arch(w, be16, a, v)
+
+#define in_be32(a)                     in_arch(l, be32, a)
+#define in_be16(a)                     in_arch(w, be16, a)
+
+#define out_8(a, v)                    __raw_writeb(v, a)
+#define in_8(a)                                __raw_readb(a)
+
+/*
+ * Clear and set bits in one shot. These macros can be used to clear and
+ * set multiple bits in a register using a single call. These macros can
+ * also be used to set a multiple-bit bit pattern using a mask, by
+ * specifying the mask in the 'clear' parameter and the new bit pattern
+ * in the 'set' parameter.
+ */
+
+#define clrbits(type, addr, clear) \
+       out_##type((addr), in_##type(addr) & ~(clear))
+
+#define setbits(type, addr, set) \
+       out_##type((addr), in_##type(addr) | (set))
+
+#define clrsetbits(type, addr, clear, set) \
+       out_##type((addr), (in_##type(addr) & ~(clear)) | (set))
+
+#define clrbits_be32(addr, clear) clrbits(be32, addr, clear)
+#define setbits_be32(addr, set) setbits(be32, addr, set)
+#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)
+
+#define clrbits_le32(addr, clear) clrbits(le32, addr, clear)
+#define setbits_le32(addr, set) setbits(le32, addr, set)
+#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)
+
+#define clrbits_be16(addr, clear) clrbits(be16, addr, clear)
+#define setbits_be16(addr, set) setbits(be16, addr, set)
+#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)
+
+#define clrbits_le16(addr, clear) clrbits(le16, addr, clear)
+#define setbits_le16(addr, set) setbits(le16, addr, set)
+#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)
+
+#define clrbits_8(addr, clear) clrbits(8, addr, clear)
+#define setbits_8(addr, set) setbits(8, addr, set)
+#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
+
+
+/*
+ *  IO port access primitives
+ *  -------------------------
+ *
+ * LoongArch doesn't have special IO access instructions; just like
+ * ARM and RISC-V, all IO is either memory mapped or IOCSR mapped.
+ *
+ * Note that these are defined to perform little endian accesses
+ * only.  Their primary purpose is to access PCI and ISA peripherals.
+ */
+#ifdef __io
+#define outb(v, p)                     __raw_writeb(v, __io(p))
+#define outw(v, p)                     __raw_writew(cpu_to_le16(v), __io(p))
+#define outl(v, p)                     __raw_writel(cpu_to_le32(v), __io(p))
+
+#define inb(p) ({ unsigned int __v = __raw_readb(__io(p)); __v; })
+#define inw(p) ({ unsigned int __v = le16_to_cpu(__raw_readw(__io(p))); __v; })
+#define inl(p) ({ unsigned int __v = le32_to_cpu(__raw_readl(__io(p))); __v; })
+
+#define outsb(p, d, l)                 writesb(__io(p), d, l)
+#define outsw(p, d, l)                 writesw(__io(p), d, l)
+#define outsl(p, d, l)                 writesl(__io(p), d, l)
+
+#define insb(p, d, l)                  readsb(__io(p), d, l)
+#define insw(p, d, l)                  readsw(__io(p), d, l)
+#define insl(p, d, l)                  readsl(__io(p), d, l)
+
+static inline void readsb(const volatile void __iomem *addr, void *data,
+                         unsigned int bytelen)
+{
+       unsigned char *ptr;
+       unsigned char *ptr2;
+
+       ptr = (unsigned char *)addr;
+       ptr2 = (unsigned char *)data;
+
+       while (bytelen) {
+               *ptr2 = *ptr;
+               ptr2++;
+               bytelen--;
+       }
+}
+
+static inline void readsw(const volatile void __iomem *addr, void *data,
+                         unsigned int wordlen)
+{
+       unsigned short *ptr;
+       unsigned short *ptr2;
+
+       ptr = (unsigned short *)addr;
+       ptr2 = (unsigned short *)data;
+
+       while (wordlen) {
+               *ptr2 = *ptr;
+               ptr2++;
+               wordlen--;
+       }
+}
+
+static inline void readsl(const volatile void __iomem *addr, void *data,
+                         unsigned int longlen)
+{
+       unsigned int *ptr;
+       unsigned int *ptr2;
+
+       ptr = (unsigned int *)addr;
+       ptr2 = (unsigned int *)data;
+
+       while (longlen) {
+               *ptr2 = *ptr;
+               ptr2++;
+               longlen--;
+       }
+}
+
+static inline void writesb(volatile void __iomem *addr, const void *data,
+                          unsigned int bytelen)
+{
+       unsigned char *ptr;
+       unsigned char *ptr2;
+
+       ptr = (unsigned char *)addr;
+       ptr2 = (unsigned char *)data;
+
+       while (bytelen) {
+               *ptr = *ptr2;
+               ptr2++;
+               bytelen--;
+       }
+}
+
+static inline void writesw(volatile void __iomem *addr, const void *data,
+                          unsigned int wordlen)
+{
+       unsigned short *ptr;
+       unsigned short *ptr2;
+
+       ptr = (unsigned short *)addr;
+       ptr2 = (unsigned short *)data;
+
+       while (wordlen) {
+               *ptr = *ptr2;
+               ptr2++;
+               wordlen--;
+       }
+}
+
+static inline void writesl(volatile void __iomem *addr, const void *data,
+                          unsigned int longlen)
+{
+       unsigned int *ptr;
+       unsigned int *ptr2;
+
+       ptr = (unsigned int *)addr;
+       ptr2 = (unsigned int *)data;
+
+       while (longlen) {
+               *ptr = *ptr2;
+               ptr2++;
+               longlen--;
+       }
+}
+
+#define readsb readsb
+#define readsw readsw
+#define readsl readsl
+#define writesb writesb
+#define writesw writesw
+#define writesl writesl
+
+#endif
+
+#define outb_p(val, port)              outb((val), (port))
+#define outw_p(val, port)              outw((val), (port))
+#define outl_p(val, port)              outl((val), (port))
+#define inb_p(port)                    inb((port))
+#define inw_p(port)                    inw((port))
+#define inl_p(port)                    inl((port))
+
+#define outsb_p(port, from, len)       outsb(port, from, len)
+#define outsw_p(port, from, len)       outsw(port, from, len)
+#define outsl_p(port, from, len)       outsl(port, from, len)
+#define insb_p(port, to, len)          insb(port, to, len)
+#define insw_p(port, to, len)          insw(port, to, len)
+#define insl_p(port, to, len)          insl(port, to, len)
+
+/*
+ * Unordered I/O memory access primitives.  These are even more relaxed than
+ * the relaxed versions, as they don't even order accesses between successive
+ * operations to the I/O regions.
+ */
+#define readb_cpu(c)           ({ u8  __r = __raw_readb(c); __r; })
+#define readw_cpu(c)           ({ u16 __r = le16_to_cpu((__force 
__le16)__raw_readw(c)); __r; })
+#define readl_cpu(c)           ({ u32 __r = le32_to_cpu((__force 
__le32)__raw_readl(c)); __r; })
+
+#define writeb_cpu(v, c)       ((void)__raw_writeb((v), (c)))
+#define writew_cpu(v, c)       ((void)__raw_writew((__force 
u16)cpu_to_le16(v), (c)))
+#define writel_cpu(v, c)       ((void)__raw_writel((__force 
u32)cpu_to_le32(v), (c)))
+
+#ifdef CONFIG_64BIT
+#define readq_cpu(c)           ({ u64 __r = le64_to_cpu((__force 
__le64)__raw_readq(c)); __r; })
+#define writeq_cpu(v, c)       ((void)__raw_writeq((__force 
u64)cpu_to_le64(v), (c)))
+#endif
+
+/*
+ * Relaxed I/O memory access primitives. These follow the Device memory
+ * ordering rules but do not guarantee any ordering relative to Normal memory
+ * accesses.  These are defined to order the indicated access (either a read or
+ * write) with all other I/O memory accesses to the same peripheral. I/O
+ * accesses are presumed strongly ordered on LoongArch, so no explicit fences
+ * are required here (TODO: confirm against the LoongArch memory model).
+ */
+/* FIXME: These are now the same as asm-generic */
+#define __io_rbr()             do {} while (0)
+#define __io_rar()             do {} while (0)
+#define __io_rbw()             do {} while (0)
+#define __io_raw()             do {} while (0)
+
+#define readb_relaxed(c)       ({ u8  __v; __io_rbr(); __v = readb_cpu(c); 
__io_rar(); __v; })
+#define readw_relaxed(c)       ({ u16 __v; __io_rbr(); __v = readw_cpu(c); 
__io_rar(); __v; })
+#define readl_relaxed(c)       ({ u32 __v; __io_rbr(); __v = readl_cpu(c); 
__io_rar(); __v; })
+
+#define writeb_relaxed(v, c)   ({ __io_rbw(); writeb_cpu((v), (c)); 
__io_raw(); })
+#define writew_relaxed(v, c)   ({ __io_rbw(); writew_cpu((v), (c)); 
__io_raw(); })
+#define writel_relaxed(v, c)   ({ __io_rbw(); writel_cpu((v), (c)); 
__io_raw(); })
+
+#ifdef CONFIG_64BIT
+#define readq_relaxed(c)       ({ u64 __v; __io_rbr(); __v = readq_cpu(c); 
__io_rar(); __v; })
+#define writeq_relaxed(v, c)   ({ __io_rbw(); writeq_cpu((v), (c)); 
__io_raw(); })
+#endif
+
+static inline phys_addr_t virt_to_phys(volatile const void *address)
+{
+       return TO_PHYS((unsigned long)address);
+}
+#define virt_to_phys virt_to_phys
+
+static inline void *phys_to_virt(phys_addr_t address)
+{
+       /* Assume it is always for U-Boot memory access */
+       return (void *)(address);
+}
+#define phys_to_virt phys_to_virt
+
+/* These two need to be uncached */
+#define MAP_NOCACHE            1
+#define MAP_WRCOMBINE  MAP_NOCACHE
+
+static inline void *map_physmem(phys_addr_t paddr, unsigned long len,
+                               unsigned long flags)
+{
+       if (flags == MAP_NOCACHE)
+               return (void *)TO_UNCACHE(paddr);
+
+       /* Assume cached mapping is always for U-Boot memory access */
+       return (void *)(paddr);
+}
+#define map_physmem map_physmem
+
+#include <asm-generic/io.h>
+
+#endif /* __ASM_LOONGARCH_IO_H */
diff --git a/arch/loongarch/include/asm/linkage.h 
b/arch/loongarch/include/asm/linkage.h
new file mode 100644
index 000000000000..f004bdd8efe3
--- /dev/null
+++ b/arch/loongarch/include/asm/linkage.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+/* Dummy header */
+
+#endif
diff --git a/arch/loongarch/include/asm/loongarch.h 
b/arch/loongarch/include/asm/loongarch.h
new file mode 100644
index 000000000000..f6c8cc372349
--- /dev/null
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -0,0 +1,1468 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+#ifndef _ASM_LOONGARCH_H
+#define _ASM_LOONGARCH_H
+
+#include <linux/bitops.h>
+#include <linux/const.h>
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <larchintrin.h>
+
+/* CPUCFG */
+#define read_cpucfg(reg) __cpucfg(reg)
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ *  Configure language
+ */
+#ifdef __ASSEMBLY__
+#define _ULCAST_
+#define _U64CAST_
+#else
+#define _ULCAST_ (unsigned long)
+#define _U64CAST_ (u64)
+#endif
+
+#ifdef __ASSEMBLY__
+
+/* LoongArch Registers */
+#define REG_ZERO       0x0
+#define REG_RA         0x1
+#define REG_TP         0x2
+#define REG_SP         0x3
+#define REG_A0         0x4 /* Reused as V0 for return value */
+#define REG_A1         0x5 /* Reused as V1 for return value */
+#define REG_A2         0x6
+#define REG_A3         0x7
+#define REG_A4         0x8
+#define REG_A5         0x9
+#define REG_A6         0xa
+#define REG_A7         0xb
+#define REG_T0         0xc
+#define REG_T1         0xd
+#define REG_T2         0xe
+#define REG_T3         0xf
+#define REG_T4         0x10
+#define REG_T5         0x11
+#define REG_T6         0x12
+#define REG_T7         0x13
+#define REG_T8         0x14
+#define REG_U0         0x15 /* Kernel uses it as percpu base */
+#define REG_FP         0x16
+#define REG_S0         0x17
+#define REG_S1         0x18
+#define REG_S2         0x19
+#define REG_S3         0x1a
+#define REG_S4         0x1b
+#define REG_S5         0x1c
+#define REG_S6         0x1d
+#define REG_S7         0x1e
+#define REG_S8         0x1f
+
+#endif /* __ASSEMBLY__ */
+
+/* Bit fields for CPUCFG registers */
+#define LOONGARCH_CPUCFG0              0x0
+#define  CPUCFG0_PRID                  GENMASK(31, 0)
+
+#define LOONGARCH_CPUCFG1              0x1
+#define  CPUCFG1_ISGR32                        BIT(0)
+#define  CPUCFG1_ISGR64                        BIT(1)
+#define  CPUCFG1_PAGING                        BIT(2)
+#define  CPUCFG1_IOCSR                 BIT(3)
+#define  CPUCFG1_PABITS                        GENMASK(11, 4)
+#define  CPUCFG1_VABITS                        GENMASK(19, 12)
+#define  CPUCFG1_UAL                   BIT(20)
+#define  CPUCFG1_RI                    BIT(21)
+#define  CPUCFG1_EP                    BIT(22)
+#define  CPUCFG1_RPLV                  BIT(23)
+#define  CPUCFG1_HUGEPG                        BIT(24)
+#define  CPUCFG1_CRC32                 BIT(25)
+#define  CPUCFG1_MSGINT                        BIT(26)
+
+#define LOONGARCH_CPUCFG2              0x2
+#define  CPUCFG2_FP                    BIT(0)
+#define  CPUCFG2_FPSP                  BIT(1)
+#define  CPUCFG2_FPDP                  BIT(2)
+#define  CPUCFG2_FPVERS                        GENMASK(5, 3)
+#define  CPUCFG2_LSX                   BIT(6)
+#define  CPUCFG2_LASX                  BIT(7)
+#define  CPUCFG2_COMPLEX               BIT(8)
+#define  CPUCFG2_CRYPTO                        BIT(9)
+#define  CPUCFG2_LVZP                  BIT(10)
+#define  CPUCFG2_LVZVER                        GENMASK(13, 11)
+#define  CPUCFG2_LLFTP                 BIT(14)
+#define  CPUCFG2_LLFTPREV              GENMASK(17, 15)
+#define  CPUCFG2_X86BT                 BIT(18)
+#define  CPUCFG2_ARMBT                 BIT(19)
+#define  CPUCFG2_MIPSBT                        BIT(20)
+#define  CPUCFG2_LSPW                  BIT(21)
+#define  CPUCFG2_LAM                   BIT(22)
+#define  CPUCFG2_PTW                   BIT(24)
+
+#define LOONGARCH_CPUCFG3              0x3
+#define  CPUCFG3_CCDMA                 BIT(0)
+#define  CPUCFG3_SFB                   BIT(1)
+#define  CPUCFG3_UCACC                 BIT(2)
+#define  CPUCFG3_LLEXC                 BIT(3)
+#define  CPUCFG3_SCDLY                 BIT(4)
+#define  CPUCFG3_LLDBAR                        BIT(5)
+#define  CPUCFG3_ITLBT                 BIT(6)
+#define  CPUCFG3_ICACHET               BIT(7)
+#define  CPUCFG3_SPW_LVL               GENMASK(10, 8)
+#define  CPUCFG3_SPW_HG_HF             BIT(11)
+#define  CPUCFG3_RVA                   BIT(12)
+#define  CPUCFG3_RVAMAX                        GENMASK(16, 13)
+
+#define LOONGARCH_CPUCFG4              0x4
+#define  CPUCFG4_CCFREQ                        GENMASK(31, 0)
+
+#define LOONGARCH_CPUCFG5              0x5
+#define  CPUCFG5_CCMUL                 GENMASK(15, 0)
+#define  CPUCFG5_CCDIV                 GENMASK(31, 16)
+
+#define LOONGARCH_CPUCFG6              0x6
+#define  CPUCFG6_PMP                   BIT(0)
+#define  CPUCFG6_PAMVER                        GENMASK(3, 1)
+#define  CPUCFG6_PMNUM                 GENMASK(7, 4)
+#define  CPUCFG6_PMBITS                        GENMASK(13, 8)
+#define  CPUCFG6_UPM                   BIT(14)
+
+#define LOONGARCH_CPUCFG16             0x10
+#define  CPUCFG16_L1_IUPRE             BIT(0)
+#define  CPUCFG16_L1_IUUNIFY           BIT(1)
+#define  CPUCFG16_L1_DPRE              BIT(2)
+#define  CPUCFG16_L2_IUPRE             BIT(3)
+#define  CPUCFG16_L2_IUUNIFY           BIT(4)
+#define  CPUCFG16_L2_IUPRIV            BIT(5)
+#define  CPUCFG16_L2_IUINCL            BIT(6)
+#define  CPUCFG16_L2_DPRE              BIT(7)
+#define  CPUCFG16_L2_DPRIV             BIT(8)
+#define  CPUCFG16_L2_DINCL             BIT(9)
+#define  CPUCFG16_L3_IUPRE             BIT(10)
+#define  CPUCFG16_L3_IUUNIFY           BIT(11)
+#define  CPUCFG16_L3_IUPRIV            BIT(12)
+#define  CPUCFG16_L3_IUINCL            BIT(13)
+#define  CPUCFG16_L3_DPRE              BIT(14)
+#define  CPUCFG16_L3_DPRIV             BIT(15)
+#define  CPUCFG16_L3_DINCL             BIT(16)
+
+#define LOONGARCH_CPUCFG17             0x11
+#define LOONGARCH_CPUCFG18             0x12
+#define LOONGARCH_CPUCFG19             0x13
+#define LOONGARCH_CPUCFG20             0x14
+#define  CPUCFG_CACHE_WAYS_M           GENMASK(15, 0)
+#define  CPUCFG_CACHE_SETS_M           GENMASK(23, 16)
+#define  CPUCFG_CACHE_LSIZE_M          GENMASK(30, 24)
+#define  CPUCFG_CACHE_WAYS             0
+#define  CPUCFG_CACHE_SETS             16
+#define  CPUCFG_CACHE_LSIZE            24
+
+#define LOONGARCH_CPUCFG48             0x30
+#define  CPUCFG48_MCSR_LCK             BIT(0)
+#define  CPUCFG48_NAP_EN               BIT(1)
+#define  CPUCFG48_VFPU_CG              BIT(2)
+#define  CPUCFG48_RAM_CG               BIT(3)
+
+/*
+ * CPUCFG index area: 0x40000000 -- 0x400000ff
+ * SW emulation for KVM hypervirsor
+ */
+#define CPUCFG_KVM_BASE                        0x40000000
+#define CPUCFG_KVM_SIZE                        0x100
+
+#define CPUCFG_KVM_SIG                 (CPUCFG_KVM_BASE + 0)
+#define  KVM_SIGNATURE                 "KVM\0"
+#define CPUCFG_KVM_FEATURE             (CPUCFG_KVM_BASE + 4)
+#define  KVM_FEATURE_IPI               BIT(1)
+
+#ifndef __ASSEMBLY__
+
+/* CSR */
+#define csr_read32(reg) __csrrd_w(reg)
+#define csr_read64(reg) __csrrd_d(reg)
+#define csr_write32(val, reg) __csrwr_w(val, reg)
+#define csr_write64(val, reg) __csrwr_d(val, reg)
+#define csr_xchg32(val, mask, reg) __csrxchg_w(val, mask, reg)
+#define csr_xchg64(val, mask, reg) __csrxchg_d(val, mask, reg)
+
+/* IOCSR */
+#define iocsr_read32(reg) __iocsrrd_w(reg)
+#define iocsr_read64(reg) __iocsrrd_d(reg)
+#define iocsr_write32(val, reg) __iocsrwr_w(val, reg)
+#define iocsr_write64(val, reg) __iocsrwr_d(val, reg)
+
+#endif /* !__ASSEMBLY__ */
+
+/* CSR register number */
+
+/* Basic CSR registers */
+#define LOONGARCH_CSR_CRMD             0x0     /* Current mode info */
+#define  CSR_CRMD_WE_SHIFT             9
+#define  CSR_CRMD_WE                   (_ULCAST_(0x1) << CSR_CRMD_WE_SHIFT)
+#define  CSR_CRMD_DACM_SHIFT           7
+#define  CSR_CRMD_DACM_WIDTH           2
+#define  CSR_CRMD_DACM                 (_ULCAST_(0x3) << CSR_CRMD_DACM_SHIFT)
+#define  CSR_CRMD_DACF_SHIFT           5
+#define  CSR_CRMD_DACF_WIDTH           2
+#define  CSR_CRMD_DACF                 (_ULCAST_(0x3) << CSR_CRMD_DACF_SHIFT)
+#define  CSR_CRMD_PG_SHIFT             4
+#define  CSR_CRMD_PG                   (_ULCAST_(0x1) << CSR_CRMD_PG_SHIFT)
+#define  CSR_CRMD_DA_SHIFT             3
+#define  CSR_CRMD_DA                   (_ULCAST_(0x1) << CSR_CRMD_DA_SHIFT)
+#define  CSR_CRMD_IE_SHIFT             2
+#define  CSR_CRMD_IE                   (_ULCAST_(0x1) << CSR_CRMD_IE_SHIFT)
+#define  CSR_CRMD_PLV_SHIFT            0
+#define  CSR_CRMD_PLV_WIDTH            2
+#define  CSR_CRMD_PLV                  (_ULCAST_(0x3) << CSR_CRMD_PLV_SHIFT)
+
+#define PLV_KERN                       0
+#define PLV_USER                       3
+#define PLV_MASK                       0x3
+
+#define LOONGARCH_CSR_PRMD             0x1     /* Prev-exception mode info */
+#define  CSR_PRMD_PWE_SHIFT            3
+#define  CSR_PRMD_PWE                  (_ULCAST_(0x1) << CSR_PRMD_PWE_SHIFT)
+#define  CSR_PRMD_PIE_SHIFT            2
+#define  CSR_PRMD_PIE                  (_ULCAST_(0x1) << CSR_PRMD_PIE_SHIFT)
+#define  CSR_PRMD_PPLV_SHIFT           0
+#define  CSR_PRMD_PPLV_WIDTH           2
+#define  CSR_PRMD_PPLV                 (_ULCAST_(0x3) << CSR_PRMD_PPLV_SHIFT)
+
+#define LOONGARCH_CSR_EUEN             0x2     /* Extended unit enable */
+#define  CSR_EUEN_LBTEN_SHIFT          3
+#define  CSR_EUEN_LBTEN                        (_ULCAST_(0x1) << 
CSR_EUEN_LBTEN_SHIFT)
+#define  CSR_EUEN_LASXEN_SHIFT         2
+#define  CSR_EUEN_LASXEN               (_ULCAST_(0x1) << CSR_EUEN_LASXEN_SHIFT)
+#define  CSR_EUEN_LSXEN_SHIFT          1
+#define  CSR_EUEN_LSXEN                        (_ULCAST_(0x1) << 
CSR_EUEN_LSXEN_SHIFT)
+#define  CSR_EUEN_FPEN_SHIFT           0
+#define  CSR_EUEN_FPEN                 (_ULCAST_(0x1) << CSR_EUEN_FPEN_SHIFT)
+
+#define LOONGARCH_CSR_MISC             0x3     /* Misc config */
+
+#define LOONGARCH_CSR_ECFG             0x4     /* Exception config */
+#define  CSR_ECFG_VS_SHIFT             16
+#define  CSR_ECFG_VS_WIDTH             3
+#define  CSR_ECFG_VS_SHIFT_END         (CSR_ECFG_VS_SHIFT + CSR_ECFG_VS_WIDTH 
- 1)
+#define  CSR_ECFG_VS                   (_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT)
+#define  CSR_ECFG_IM_SHIFT             0
+#define  CSR_ECFG_IM_WIDTH             14
+#define  CSR_ECFG_IM                   (_ULCAST_(0x3fff) << CSR_ECFG_IM_SHIFT)
+
+#define LOONGARCH_CSR_ESTAT            0x5     /* Exception status */
+#define  CSR_ESTAT_ESUBCODE_SHIFT      22
+#define  CSR_ESTAT_ESUBCODE_WIDTH      9
+#define  CSR_ESTAT_ESUBCODE            (_ULCAST_(0x1ff) << 
CSR_ESTAT_ESUBCODE_SHIFT)
+#define  CSR_ESTAT_EXC_SHIFT           16
+#define  CSR_ESTAT_EXC_WIDTH           6
+#define  CSR_ESTAT_EXC                 (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT)
+#define  CSR_ESTAT_IS_SHIFT            0
+#define  CSR_ESTAT_IS_WIDTH            14
+#define  CSR_ESTAT_IS                  (_ULCAST_(0x3fff) << CSR_ESTAT_IS_SHIFT)
+
+#define LOONGARCH_CSR_ERA              0x6     /* ERA */
+
+#define LOONGARCH_CSR_BADV             0x7     /* Bad virtual address */
+
+#define LOONGARCH_CSR_BADI             0x8     /* Bad instruction */
+
+#define LOONGARCH_CSR_EENTRY           0xc     /* Exception entry */
+
+/* TLB related CSR registers */
+#define LOONGARCH_CSR_TLBIDX           0x10    /* TLB Index, EHINV, PageSize, 
NP */
+#define  CSR_TLBIDX_EHINV_SHIFT                31
+#define  CSR_TLBIDX_EHINV              (_ULCAST_(1) << CSR_TLBIDX_EHINV_SHIFT)
+#define  CSR_TLBIDX_PS_SHIFT           24
+#define  CSR_TLBIDX_PS_WIDTH           6
+#define  CSR_TLBIDX_PS                 (_ULCAST_(0x3f) << CSR_TLBIDX_PS_SHIFT)
+#define  CSR_TLBIDX_IDX_SHIFT          0
+#define  CSR_TLBIDX_IDX_WIDTH          12
+#define  CSR_TLBIDX_IDX                        (_ULCAST_(0xfff) << 
CSR_TLBIDX_IDX_SHIFT)
+#define  CSR_TLBIDX_SIZEM              0x3f000000
+#define  CSR_TLBIDX_SIZE               CSR_TLBIDX_PS_SHIFT
+#define  CSR_TLBIDX_IDXM               0xfff
+#define  CSR_INVALID_ENTRY(e)          (CSR_TLBIDX_EHINV | e)
+
+#define LOONGARCH_CSR_TLBEHI           0x11    /* TLB EntryHi */
+
+#define LOONGARCH_CSR_TLBELO0          0x12    /* TLB EntryLo0 */
+#define  CSR_TLBLO0_RPLV_SHIFT         63
+#define  CSR_TLBLO0_RPLV               (_ULCAST_(0x1) << CSR_TLBLO0_RPLV_SHIFT)
+#define  CSR_TLBLO0_NX_SHIFT           62
+#define  CSR_TLBLO0_NX                 (_ULCAST_(0x1) << CSR_TLBLO0_NX_SHIFT)
+#define  CSR_TLBLO0_NR_SHIFT           61
+#define  CSR_TLBLO0_NR                 (_ULCAST_(0x1) << CSR_TLBLO0_NR_SHIFT)
+#define  CSR_TLBLO0_PFN_SHIFT          12
+#define  CSR_TLBLO0_PFN_WIDTH          36
+#define  CSR_TLBLO0_PFN                        (_ULCAST_(0xfffffffff) << 
CSR_TLBLO0_PFN_SHIFT)
+#define  CSR_TLBLO0_GLOBAL_SHIFT       6
+#define  CSR_TLBLO0_GLOBAL             (_ULCAST_(0x1) << 
CSR_TLBLO0_GLOBAL_SHIFT)
+#define  CSR_TLBLO0_CCA_SHIFT          4
+#define  CSR_TLBLO0_CCA_WIDTH          2
+#define  CSR_TLBLO0_CCA                        (_ULCAST_(0x3) << 
CSR_TLBLO0_CCA_SHIFT)
+#define  CSR_TLBLO0_PLV_SHIFT          2
+#define  CSR_TLBLO0_PLV_WIDTH          2
+#define  CSR_TLBLO0_PLV                        (_ULCAST_(0x3) << 
CSR_TLBLO0_PLV_SHIFT)
+#define  CSR_TLBLO0_WE_SHIFT           1
+#define  CSR_TLBLO0_WE                 (_ULCAST_(0x1) << CSR_TLBLO0_WE_SHIFT)
+#define  CSR_TLBLO0_V_SHIFT            0
+#define  CSR_TLBLO0_V                  (_ULCAST_(0x1) << CSR_TLBLO0_V_SHIFT)
+
+#define LOONGARCH_CSR_TLBELO1          0x13    /* TLB EntryLo1 */
+#define  CSR_TLBLO1_RPLV_SHIFT         63
+#define  CSR_TLBLO1_RPLV               (_ULCAST_(0x1) << CSR_TLBLO1_RPLV_SHIFT)
+#define  CSR_TLBLO1_NX_SHIFT           62
+#define  CSR_TLBLO1_NX                 (_ULCAST_(0x1) << CSR_TLBLO1_NX_SHIFT)
+#define  CSR_TLBLO1_NR_SHIFT           61
+#define  CSR_TLBLO1_NR                 (_ULCAST_(0x1) << CSR_TLBLO1_NR_SHIFT)
+#define  CSR_TLBLO1_PFN_SHIFT          12
+#define  CSR_TLBLO1_PFN_WIDTH          36
+#define  CSR_TLBLO1_PFN                        (_ULCAST_(0xfffffffff) << 
CSR_TLBLO1_PFN_SHIFT)
+#define  CSR_TLBLO1_GLOBAL_SHIFT       6
+#define  CSR_TLBLO1_GLOBAL             (_ULCAST_(0x1) << 
CSR_TLBLO1_GLOBAL_SHIFT)
+#define  CSR_TLBLO1_CCA_SHIFT          4
+#define  CSR_TLBLO1_CCA_WIDTH          2
+#define  CSR_TLBLO1_CCA                        (_ULCAST_(0x3) << 
CSR_TLBLO1_CCA_SHIFT)
+#define  CSR_TLBLO1_PLV_SHIFT          2
+#define  CSR_TLBLO1_PLV_WIDTH          2
+#define  CSR_TLBLO1_PLV                        (_ULCAST_(0x3) << 
CSR_TLBLO1_PLV_SHIFT)
+#define  CSR_TLBLO1_WE_SHIFT           1
+#define  CSR_TLBLO1_WE                 (_ULCAST_(0x1) << CSR_TLBLO1_WE_SHIFT)
+#define  CSR_TLBLO1_V_SHIFT            0
+#define  CSR_TLBLO1_V                  (_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT)
+
+#define LOONGARCH_CSR_GTLBC            0x15    /* Guest TLB control */
+#define  CSR_GTLBC_TGID_SHIFT          16
+#define  CSR_GTLBC_TGID_WIDTH          8
+#define  CSR_GTLBC_TGID_SHIFT_END      (CSR_GTLBC_TGID_SHIFT + 
CSR_GTLBC_TGID_WIDTH - 1)
+#define  CSR_GTLBC_TGID                        (_ULCAST_(0xff) << 
CSR_GTLBC_TGID_SHIFT)
+#define  CSR_GTLBC_TOTI_SHIFT          13
+#define  CSR_GTLBC_TOTI                        (_ULCAST_(0x1) << 
CSR_GTLBC_TOTI_SHIFT)
+#define  CSR_GTLBC_USETGID_SHIFT       12
+#define  CSR_GTLBC_USETGID             (_ULCAST_(0x1) << 
CSR_GTLBC_USETGID_SHIFT)
+#define  CSR_GTLBC_GMTLBSZ_SHIFT       0
+#define  CSR_GTLBC_GMTLBSZ_WIDTH       6
+#define  CSR_GTLBC_GMTLBSZ             (_ULCAST_(0x3f) << 
CSR_GTLBC_GMTLBSZ_SHIFT)
+
+#define LOONGARCH_CSR_TRGP             0x16    /* TLBR read guest info */
+#define  CSR_TRGP_RID_SHIFT            16
+#define  CSR_TRGP_RID_WIDTH            8
+#define  CSR_TRGP_RID                  (_ULCAST_(0xff) << CSR_TRGP_RID_SHIFT)
+#define  CSR_TRGP_GTLB_SHIFT           0
+#define  CSR_TRGP_GTLB                 (1 << CSR_TRGP_GTLB_SHIFT)
+
+#define LOONGARCH_CSR_ASID             0x18    /* ASID */
+#define  CSR_ASID_BIT_SHIFT            16      /* ASIDBits */
+#define  CSR_ASID_BIT_WIDTH            8
+#define  CSR_ASID_BIT                  (_ULCAST_(0xff) << CSR_ASID_BIT_SHIFT)
+#define  CSR_ASID_ASID_SHIFT           0
+#define  CSR_ASID_ASID_WIDTH           10
+#define  CSR_ASID_ASID                 (_ULCAST_(0x3ff) << CSR_ASID_ASID_SHIFT)
+
+#define LOONGARCH_CSR_PGDL             0x19    /* Page table base address when 
VA[VALEN-1] = 0 */
+
+#define LOONGARCH_CSR_PGDH             0x1a    /* Page table base address when 
VA[VALEN-1] = 1 */
+
+#define LOONGARCH_CSR_PGD              0x1b    /* Page table base */
+
+#define LOONGARCH_CSR_PWCTL0           0x1c    /* PWCtl0 */
+#define  CSR_PWCTL0_PTEW_SHIFT         30
+#define  CSR_PWCTL0_PTEW_WIDTH         2
+#define  CSR_PWCTL0_PTEW               (_ULCAST_(0x3) << CSR_PWCTL0_PTEW_SHIFT)
+#define  CSR_PWCTL0_DIR1WIDTH_SHIFT    25
+#define  CSR_PWCTL0_DIR1WIDTH_WIDTH    5
+#define  CSR_PWCTL0_DIR1WIDTH          (_ULCAST_(0x1f) << 
CSR_PWCTL0_DIR1WIDTH_SHIFT)
+#define  CSR_PWCTL0_DIR1BASE_SHIFT     20
+#define  CSR_PWCTL0_DIR1BASE_WIDTH     5
+#define  CSR_PWCTL0_DIR1BASE           (_ULCAST_(0x1f) << 
CSR_PWCTL0_DIR1BASE_SHIFT)
+#define  CSR_PWCTL0_DIR0WIDTH_SHIFT    15
+#define  CSR_PWCTL0_DIR0WIDTH_WIDTH    5
+#define  CSR_PWCTL0_DIR0WIDTH          (_ULCAST_(0x1f) << 
CSR_PWCTL0_DIR0WIDTH_SHIFT)
+#define  CSR_PWCTL0_DIR0BASE_SHIFT     10
+#define  CSR_PWCTL0_DIR0BASE_WIDTH     5
+#define  CSR_PWCTL0_DIR0BASE           (_ULCAST_(0x1f) << 
CSR_PWCTL0_DIR0BASE_SHIFT)
+#define  CSR_PWCTL0_PTWIDTH_SHIFT      5
+#define  CSR_PWCTL0_PTWIDTH_WIDTH      5
+#define  CSR_PWCTL0_PTWIDTH            (_ULCAST_(0x1f) << 
CSR_PWCTL0_PTWIDTH_SHIFT)
+#define  CSR_PWCTL0_PTBASE_SHIFT       0
+#define  CSR_PWCTL0_PTBASE_WIDTH       5
+#define  CSR_PWCTL0_PTBASE             (_ULCAST_(0x1f) << 
CSR_PWCTL0_PTBASE_SHIFT)
+
+#define LOONGARCH_CSR_PWCTL1           0x1d    /* PWCtl1 */
+#define  CSR_PWCTL1_PTW_SHIFT          24
+#define  CSR_PWCTL1_PTW_WIDTH          1
+#define  CSR_PWCTL1_PTW                        (_ULCAST_(0x1) << 
CSR_PWCTL1_PTW_SHIFT)
+#define  CSR_PWCTL1_DIR3WIDTH_SHIFT    18
+#define  CSR_PWCTL1_DIR3WIDTH_WIDTH    5
+#define  CSR_PWCTL1_DIR3WIDTH          (_ULCAST_(0x1f) << 
CSR_PWCTL1_DIR3WIDTH_SHIFT)
+#define  CSR_PWCTL1_DIR3BASE_SHIFT     12
+#define  CSR_PWCTL1_DIR3BASE_WIDTH     5
+#define  CSR_PWCTL1_DIR3BASE           (_ULCAST_(0x1f) << 
CSR_PWCTL1_DIR3BASE_SHIFT)
+#define  CSR_PWCTL1_DIR2WIDTH_SHIFT    6
+#define  CSR_PWCTL1_DIR2WIDTH_WIDTH    5
+#define  CSR_PWCTL1_DIR2WIDTH          (_ULCAST_(0x1f) << 
CSR_PWCTL1_DIR2WIDTH_SHIFT)
+#define  CSR_PWCTL1_DIR2BASE_SHIFT     0
+#define  CSR_PWCTL1_DIR2BASE_WIDTH     5
+#define  CSR_PWCTL1_DIR2BASE           (_ULCAST_(0x1f) << 
CSR_PWCTL1_DIR2BASE_SHIFT)
+
+#define LOONGARCH_CSR_STLBPGSIZE       0x1e
+#define  CSR_STLBPGSIZE_PS_WIDTH       6
+#define  CSR_STLBPGSIZE_PS             (_ULCAST_(0x3f))
+
+#define LOONGARCH_CSR_RVACFG           0x1f
+#define  CSR_RVACFG_RDVA_WIDTH         4
+#define  CSR_RVACFG_RDVA               (_ULCAST_(0xf))
+
+/* Config CSR registers */
+#define LOONGARCH_CSR_CPUID            0x20    /* CPU core id */
+#define  CSR_CPUID_COREID_WIDTH                9
+#define  CSR_CPUID_COREID              _ULCAST_(0x1ff)
+
+#define LOONGARCH_CSR_PRCFG1           0x21    /* Config1 */
+#define  CSR_CONF1_VSMAX_SHIFT         12
+#define  CSR_CONF1_VSMAX_WIDTH         3
+#define  CSR_CONF1_VSMAX               (_ULCAST_(7) << CSR_CONF1_VSMAX_SHIFT)
+#define  CSR_CONF1_TMRBITS_SHIFT       4
+#define  CSR_CONF1_TMRBITS_WIDTH       8
+#define  CSR_CONF1_TMRBITS             (_ULCAST_(0xff) << 
CSR_CONF1_TMRBITS_SHIFT)
+#define  CSR_CONF1_KSNUM_WIDTH         4
+#define  CSR_CONF1_KSNUM               _ULCAST_(0xf)
+
+#define LOONGARCH_CSR_PRCFG2           0x22    /* Config2 */
+#define  CSR_CONF2_PGMASK_SUPP         0x3ffff000
+
+#define LOONGARCH_CSR_PRCFG3           0x23    /* Config3 */
+#define  CSR_CONF3_STLBIDX_SHIFT       20
+#define  CSR_CONF3_STLBIDX_WIDTH       6
+#define  CSR_CONF3_STLBIDX             (_ULCAST_(0x3f) << 
CSR_CONF3_STLBIDX_SHIFT)
+#define  CSR_CONF3_STLBWAYS_SHIFT      12
+#define  CSR_CONF3_STLBWAYS_WIDTH      8
+#define  CSR_CONF3_STLBWAYS            (_ULCAST_(0xff) << 
CSR_CONF3_STLBWAYS_SHIFT)
+#define  CSR_CONF3_MTLBSIZE_SHIFT      4
+#define  CSR_CONF3_MTLBSIZE_WIDTH      8
+#define  CSR_CONF3_MTLBSIZE            (_ULCAST_(0xff) << 
CSR_CONF3_MTLBSIZE_SHIFT)
+#define  CSR_CONF3_TLBTYPE_SHIFT       0
+#define  CSR_CONF3_TLBTYPE_WIDTH       4
+#define  CSR_CONF3_TLBTYPE             (_ULCAST_(0xf) << 
CSR_CONF3_TLBTYPE_SHIFT)
+
+/* KSave registers */
+#define LOONGARCH_CSR_KS0              0x30
+#define LOONGARCH_CSR_KS1              0x31
+#define LOONGARCH_CSR_KS2              0x32
+#define LOONGARCH_CSR_KS3              0x33
+#define LOONGARCH_CSR_KS4              0x34
+#define LOONGARCH_CSR_KS5              0x35
+#define LOONGARCH_CSR_KS6              0x36
+#define LOONGARCH_CSR_KS7              0x37
+#define LOONGARCH_CSR_KS8              0x38
+
+/* Exception allocated KS0, KS1 and KS2 statically */
+#define EXCEPTION_KS0                  LOONGARCH_CSR_KS0
+#define EXCEPTION_KS1                  LOONGARCH_CSR_KS1
+#define EXCEPTION_KS2                  LOONGARCH_CSR_KS2
+#define EXC_KSAVE_MASK                 (1 << 0 | 1 << 1 | 1 << 2)
+
+/* Percpu-data base allocated KS3 statically */
+#define PERCPU_BASE_KS                 LOONGARCH_CSR_KS3
+#define PERCPU_KSAVE_MASK              (1 << 3)
+
+/* KVM allocated KS4 and KS5 statically */
+#define KVM_VCPU_KS                    LOONGARCH_CSR_KS4
+#define KVM_TEMP_KS                    LOONGARCH_CSR_KS5
+#define KVM_KSAVE_MASK                 (1 << 4 | 1 << 5)
+
+/* Timer registers */
+#define LOONGARCH_CSR_TMID             0x40    /* Timer ID */
+
+#define LOONGARCH_CSR_TCFG             0x41    /* Timer config */
+#define  CSR_TCFG_VAL_SHIFT            2
+#define         CSR_TCFG_VAL_WIDTH             48
+#define  CSR_TCFG_VAL                  (_ULCAST_(0x3fffffffffff) << 
CSR_TCFG_VAL_SHIFT)
+#define  CSR_TCFG_PERIOD_SHIFT         1
+#define  CSR_TCFG_PERIOD               (_ULCAST_(0x1) << CSR_TCFG_PERIOD_SHIFT)
+#define  CSR_TCFG_EN                   (_ULCAST_(0x1))
+
+#define LOONGARCH_CSR_TVAL             0x42    /* Timer value */
+
+#define LOONGARCH_CSR_CNTC             0x43    /* Timer offset */
+
+#define LOONGARCH_CSR_TINTCLR          0x44    /* Timer interrupt clear */
+#define  CSR_TINTCLR_TI_SHIFT          0
+#define  CSR_TINTCLR_TI                        (1 << CSR_TINTCLR_TI_SHIFT)
+
+/* Guest registers */
+#define LOONGARCH_CSR_GSTAT            0x50    /* Guest status */
+#define  CSR_GSTAT_GID_SHIFT           16
+#define  CSR_GSTAT_GID_WIDTH           8
+#define  CSR_GSTAT_GID_SHIFT_END       (CSR_GSTAT_GID_SHIFT + 
CSR_GSTAT_GID_WIDTH - 1)
+#define  CSR_GSTAT_GID                 (_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT)
+#define  CSR_GSTAT_GIDBIT_SHIFT                4
+#define  CSR_GSTAT_GIDBIT_WIDTH                6
+#define  CSR_GSTAT_GIDBIT              (_ULCAST_(0x3f) << 
CSR_GSTAT_GIDBIT_SHIFT)
+#define  CSR_GSTAT_PVM_SHIFT           1
+#define  CSR_GSTAT_PVM                 (_ULCAST_(0x1) << CSR_GSTAT_PVM_SHIFT)
+#define  CSR_GSTAT_VM_SHIFT            0
+#define  CSR_GSTAT_VM                  (_ULCAST_(0x1) << CSR_GSTAT_VM_SHIFT)
+
+#define LOONGARCH_CSR_GCFG             0x51    /* Guest config */
+#define  CSR_GCFG_GPERF_SHIFT          24
+#define  CSR_GCFG_GPERF_WIDTH          3
+#define  CSR_GCFG_GPERF                        (_ULCAST_(0x7) << 
CSR_GCFG_GPERF_SHIFT)
+#define  CSR_GCFG_GCI_SHIFT            20
+#define  CSR_GCFG_GCI_WIDTH            2
+#define  CSR_GCFG_GCI                  (_ULCAST_(0x3) << CSR_GCFG_GCI_SHIFT)
+#define  CSR_GCFG_GCI_ALL              (_ULCAST_(0x0) << CSR_GCFG_GCI_SHIFT)
+#define  CSR_GCFG_GCI_HIT              (_ULCAST_(0x1) << CSR_GCFG_GCI_SHIFT)
+#define  CSR_GCFG_GCI_SECURE           (_ULCAST_(0x2) << CSR_GCFG_GCI_SHIFT)
+#define  CSR_GCFG_GCIP_SHIFT           16
+#define  CSR_GCFG_GCIP                 (_ULCAST_(0xf) << CSR_GCFG_GCIP_SHIFT)
+#define  CSR_GCFG_GCIP_ALL             (_ULCAST_(0x1) << CSR_GCFG_GCIP_SHIFT)
+#define  CSR_GCFG_GCIP_HIT             (_ULCAST_(0x1) << (CSR_GCFG_GCIP_SHIFT 
+ 1))
+#define  CSR_GCFG_GCIP_SECURE          (_ULCAST_(0x1) << (CSR_GCFG_GCIP_SHIFT 
+ 2))
+#define  CSR_GCFG_TORU_SHIFT           15
+#define  CSR_GCFG_TORU                 (_ULCAST_(0x1) << CSR_GCFG_TORU_SHIFT)
+#define  CSR_GCFG_TORUP_SHIFT          14
+#define  CSR_GCFG_TORUP                        (_ULCAST_(0x1) << 
CSR_GCFG_TORUP_SHIFT)
+#define  CSR_GCFG_TOP_SHIFT            13
+#define  CSR_GCFG_TOP                  (_ULCAST_(0x1) << CSR_GCFG_TOP_SHIFT)
+#define  CSR_GCFG_TOPP_SHIFT           12
+#define  CSR_GCFG_TOPP                 (_ULCAST_(0x1) << CSR_GCFG_TOPP_SHIFT)
+#define  CSR_GCFG_TOE_SHIFT            11
+#define  CSR_GCFG_TOE                  (_ULCAST_(0x1) << CSR_GCFG_TOE_SHIFT)
+#define  CSR_GCFG_TOEP_SHIFT           10
+#define  CSR_GCFG_TOEP                 (_ULCAST_(0x1) << CSR_GCFG_TOEP_SHIFT)
+#define  CSR_GCFG_TIT_SHIFT            9
+#define  CSR_GCFG_TIT                  (_ULCAST_(0x1) << CSR_GCFG_TIT_SHIFT)
+#define  CSR_GCFG_TITP_SHIFT           8
+#define  CSR_GCFG_TITP                 (_ULCAST_(0x1) << CSR_GCFG_TITP_SHIFT)
+#define  CSR_GCFG_SIT_SHIFT            7
+#define  CSR_GCFG_SIT                  (_ULCAST_(0x1) << CSR_GCFG_SIT_SHIFT)
+#define  CSR_GCFG_SITP_SHIFT           6
+#define  CSR_GCFG_SITP                 (_ULCAST_(0x1) << CSR_GCFG_SITP_SHIFT)
+#define  CSR_GCFG_MATC_SHITF           4
+#define  CSR_GCFG_MATC_WIDTH           2
+#define  CSR_GCFG_MATC_MASK            (_ULCAST_(0x3) << CSR_GCFG_MATC_SHITF)
+#define  CSR_GCFG_MATC_GUEST           (_ULCAST_(0x0) << CSR_GCFG_MATC_SHITF)
+#define  CSR_GCFG_MATC_ROOT            (_ULCAST_(0x1) << CSR_GCFG_MATC_SHITF)
+#define  CSR_GCFG_MATC_NEST            (_ULCAST_(0x2) << CSR_GCFG_MATC_SHITF)
+#define  CSR_GCFG_MATP_NEST_SHIFT      2
+#define  CSR_GCFG_MATP_NEST            (_ULCAST_(0x1) << 
CSR_GCFG_MATP_NEST_SHIFT)
+#define  CSR_GCFG_MATP_ROOT_SHIFT      1
+#define  CSR_GCFG_MATP_ROOT            (_ULCAST_(0x1) << 
CSR_GCFG_MATP_ROOT_SHIFT)
+#define  CSR_GCFG_MATP_GUEST_SHIFT     0
+#define  CSR_GCFG_MATP_GUEST           (_ULCAST_(0x1) << 
CSR_GCFG_MATP_GUEST_SHIFT)
+
+#define LOONGARCH_CSR_GINTC            0x52    /* Guest interrupt control */
+#define  CSR_GINTC_HC_SHIFT            16
+#define  CSR_GINTC_HC_WIDTH            8
+#define  CSR_GINTC_HC                  (_ULCAST_(0xff) << CSR_GINTC_HC_SHIFT)
+#define  CSR_GINTC_PIP_SHIFT           8
+#define  CSR_GINTC_PIP_WIDTH           8
+#define  CSR_GINTC_PIP                 (_ULCAST_(0xff) << CSR_GINTC_PIP_SHIFT)
+#define  CSR_GINTC_VIP_SHIFT           0
+#define  CSR_GINTC_VIP_WIDTH           8
+#define  CSR_GINTC_VIP                 (_ULCAST_(0xff))
+
+#define LOONGARCH_CSR_GCNTC            0x53    /* Guest timer offset */
+
+/* LLBCTL register */
+#define LOONGARCH_CSR_LLBCTL           0x60    /* LLBit control */
+#define  CSR_LLBCTL_ROLLB_SHIFT                0
+#define  CSR_LLBCTL_ROLLB              (_ULCAST_(1) << CSR_LLBCTL_ROLLB_SHIFT)
+#define  CSR_LLBCTL_WCLLB_SHIFT                1
+#define  CSR_LLBCTL_WCLLB              (_ULCAST_(1) << CSR_LLBCTL_WCLLB_SHIFT)
+#define  CSR_LLBCTL_KLO_SHIFT          2
+#define  CSR_LLBCTL_KLO                        (_ULCAST_(1) << 
CSR_LLBCTL_KLO_SHIFT)
+
+/* Implement dependent */
+#define LOONGARCH_CSR_IMPCTL1          0x80    /* Loongson config1 */
+#define  CSR_MISPEC_SHIFT              20
+#define  CSR_MISPEC_WIDTH              8
+#define  CSR_MISPEC                    (_ULCAST_(0xff) << CSR_MISPEC_SHIFT)
+#define  CSR_SSEN_SHIFT                        18
+#define  CSR_SSEN                      (_ULCAST_(1) << CSR_SSEN_SHIFT)
+#define  CSR_SCRAND_SHIFT              17
+#define  CSR_SCRAND                    (_ULCAST_(1) << CSR_SCRAND_SHIFT)
+#define  CSR_LLEXCL_SHIFT              16
+#define  CSR_LLEXCL                    (_ULCAST_(1) << CSR_LLEXCL_SHIFT)
+#define  CSR_DISVC_SHIFT               15
+#define  CSR_DISVC                     (_ULCAST_(1) << CSR_DISVC_SHIFT)
+#define  CSR_VCLRU_SHIFT               14
+#define  CSR_VCLRU                     (_ULCAST_(1) << CSR_VCLRU_SHIFT)
+#define  CSR_DCLRU_SHIFT               13
+#define  CSR_DCLRU                     (_ULCAST_(1) << CSR_DCLRU_SHIFT)
+#define  CSR_FASTLDQ_SHIFT             12
+#define  CSR_FASTLDQ                   (_ULCAST_(1) << CSR_FASTLDQ_SHIFT)
+#define  CSR_USERCAC_SHIFT             11
+#define  CSR_USERCAC                   (_ULCAST_(1) << CSR_USERCAC_SHIFT)
+#define  CSR_ANTI_MISPEC_SHIFT         10
+#define  CSR_ANTI_MISPEC               (_ULCAST_(1) << CSR_ANTI_MISPEC_SHIFT)
+#define  CSR_AUTO_FLUSHSFB_SHIFT       9
+#define  CSR_AUTO_FLUSHSFB             (_ULCAST_(1) << CSR_AUTO_FLUSHSFB_SHIFT)
+#define  CSR_STFILL_SHIFT              8
+#define  CSR_STFILL                    (_ULCAST_(1) << CSR_STFILL_SHIFT)
+#define  CSR_LIFEP_SHIFT               7
+#define  CSR_LIFEP                     (_ULCAST_(1) << CSR_LIFEP_SHIFT)
+#define  CSR_LLSYNC_SHIFT              6
+#define  CSR_LLSYNC                    (_ULCAST_(1) << CSR_LLSYNC_SHIFT)
+#define  CSR_BRBTDIS_SHIFT             5
+#define  CSR_BRBTDIS                   (_ULCAST_(1) << CSR_BRBTDIS_SHIFT)
+#define  CSR_RASDIS_SHIFT              4
+#define  CSR_RASDIS                    (_ULCAST_(1) << CSR_RASDIS_SHIFT)
+#define  CSR_STPRE_SHIFT               2
+#define  CSR_STPRE_WIDTH               2
+#define  CSR_STPRE                     (_ULCAST_(3) << CSR_STPRE_SHIFT)
+#define  CSR_INSTPRE_SHIFT             1
+#define  CSR_INSTPRE                   (_ULCAST_(1) << CSR_INSTPRE_SHIFT)
+#define  CSR_DATAPRE_SHIFT             0
+#define  CSR_DATAPRE                   (_ULCAST_(1) << CSR_DATAPRE_SHIFT)
+
+#define LOONGARCH_CSR_IMPCTL2          0x81    /* Loongson config2 */
+#define  CSR_FLUSH_MTLB_SHIFT          0
+#define  CSR_FLUSH_MTLB                        (_ULCAST_(1) << 
CSR_FLUSH_MTLB_SHIFT)
+#define  CSR_FLUSH_STLB_SHIFT          1
+#define  CSR_FLUSH_STLB                        (_ULCAST_(1) << 
CSR_FLUSH_STLB_SHIFT)
+#define  CSR_FLUSH_DTLB_SHIFT          2
+#define  CSR_FLUSH_DTLB                        (_ULCAST_(1) << 
CSR_FLUSH_DTLB_SHIFT)
+#define  CSR_FLUSH_ITLB_SHIFT          3
+#define  CSR_FLUSH_ITLB                        (_ULCAST_(1) << 
CSR_FLUSH_ITLB_SHIFT)
+#define  CSR_FLUSH_BTAC_SHIFT          4
+#define  CSR_FLUSH_BTAC                        (_ULCAST_(1) << 
CSR_FLUSH_BTAC_SHIFT)
+
+#define LOONGARCH_CSR_GNMI             0x82
+
+/* TLB Refill registers */
+#define LOONGARCH_CSR_TLBRENTRY                0x88    /* TLB refill exception 
entry */
+#define LOONGARCH_CSR_TLBRBADV         0x89    /* TLB refill badvaddr */
+#define LOONGARCH_CSR_TLBRERA          0x8a    /* TLB refill ERA */
+#define LOONGARCH_CSR_TLBRSAVE         0x8b    /* KSave for TLB refill 
exception */
+#define LOONGARCH_CSR_TLBRELO0         0x8c    /* TLB refill entrylo0 */
+#define LOONGARCH_CSR_TLBRELO1         0x8d    /* TLB refill entrylo1 */
+#define LOONGARCH_CSR_TLBREHI          0x8e    /* TLB refill entryhi */
+#define  CSR_TLBREHI_PS_SHIFT          0
+#define  CSR_TLBREHI_PS                        (_ULCAST_(0x3f) << 
CSR_TLBREHI_PS_SHIFT)
+#define LOONGARCH_CSR_TLBRPRMD         0x8f    /* TLB refill mode info */
+
+/* Machine Error registers */
+#define LOONGARCH_CSR_MERRCTL          0x90    /* MERRCTL */
+#define LOONGARCH_CSR_MERRINFO1                0x91    /* MError info1 */
+#define LOONGARCH_CSR_MERRINFO2                0x92    /* MError info2 */
+#define LOONGARCH_CSR_MERRENTRY                0x93    /* MError exception 
entry */
+#define LOONGARCH_CSR_MERRERA          0x94    /* MError exception ERA */
+#define LOONGARCH_CSR_MERRSAVE         0x95    /* KSave for machine error 
exception */
+
+#define LOONGARCH_CSR_CTAG             0x98    /* TagLo + TagHi */
+
+#define LOONGARCH_CSR_PRID             0xc0
+
+/* Shadow MCSR : 0xc0 ~ 0xff */
+#define LOONGARCH_CSR_MCSR0            0xc0    /* CPUCFG0 and CPUCFG1 */
+#define  MCSR0_INT_IMPL_SHIFT          58
+#define  MCSR0_INT_IMPL                        0
+#define  MCSR0_IOCSR_BRD_SHIFT         57
+#define  MCSR0_IOCSR_BRD               (_ULCAST_(1) << MCSR0_IOCSR_BRD_SHIFT)
+#define  MCSR0_HUGEPG_SHIFT            56
+#define  MCSR0_HUGEPG                  (_ULCAST_(1) << MCSR0_HUGEPG_SHIFT)
+#define  MCSR0_RPLMTLB_SHIFT           55
+#define  MCSR0_RPLMTLB                 (_ULCAST_(1) << MCSR0_RPLMTLB_SHIFT)
+#define  MCSR0_EP_SHIFT                        54
+#define  MCSR0_EP                      (_ULCAST_(1) << MCSR0_EP_SHIFT)
+#define  MCSR0_RI_SHIFT                        53
+#define  MCSR0_RI                      (_ULCAST_(1) << MCSR0_RI_SHIFT)
+#define  MCSR0_UAL_SHIFT               52
+#define  MCSR0_UAL                     (_ULCAST_(1) << MCSR0_UAL_SHIFT)
+#define  MCSR0_VABIT_SHIFT             44
+#define  MCSR0_VABIT_WIDTH             8
+#define  MCSR0_VABIT                   (_ULCAST_(0xff) << MCSR0_VABIT_SHIFT)
+#define  VABIT_DEFAULT                 0x2f
+#define  MCSR0_PABIT_SHIFT             36
+#define  MCSR0_PABIT_WIDTH             8
+#define  MCSR0_PABIT                   (_ULCAST_(0xff) << MCSR0_PABIT_SHIFT)
+#define  PABIT_DEFAULT                 0x2f
+#define  MCSR0_IOCSR_SHIFT             35
+#define  MCSR0_IOCSR                   (_ULCAST_(1) << MCSR0_IOCSR_SHIFT)
+#define  MCSR0_PAGING_SHIFT            34
+#define  MCSR0_PAGING                  (_ULCAST_(1) << MCSR0_PAGING_SHIFT)
+#define  MCSR0_GR64_SHIFT              33
+#define  MCSR0_GR64                    (_ULCAST_(1) << MCSR0_GR64_SHIFT)
+#define  GR64_DEFAULT                  1
+#define  MCSR0_GR32_SHIFT              32
+#define  MCSR0_GR32                    (_ULCAST_(1) << MCSR0_GR32_SHIFT)
+#define  GR32_DEFAULT                  0
+#define  MCSR0_PRID_WIDTH              32
+#define  MCSR0_PRID                    0x14C010
+
+#define LOONGARCH_CSR_MCSR1            0xc1    /* CPUCFG2 and CPUCFG3 */
+#define  MCSR1_HPFOLD_SHIFT            43
+#define  MCSR1_HPFOLD                  (_ULCAST_(1) << MCSR1_HPFOLD_SHIFT)
+#define  MCSR1_SPW_LVL_SHIFT           40
+#define  MCSR1_SPW_LVL_WIDTH           3
+#define  MCSR1_SPW_LVL                 (_ULCAST_(7) << MCSR1_SPW_LVL_SHIFT)
+#define  MCSR1_ICACHET_SHIFT           39
+#define  MCSR1_ICACHET                 (_ULCAST_(1) << MCSR1_ICACHET_SHIFT)
+#define  MCSR1_ITLBT_SHIFT             38
+#define  MCSR1_ITLBT                   (_ULCAST_(1) << MCSR1_ITLBT_SHIFT)
+#define  MCSR1_LLDBAR_SHIFT            37
+#define  MCSR1_LLDBAR                  (_ULCAST_(1) << MCSR1_LLDBAR_SHIFT)
+#define  MCSR1_SCDLY_SHIFT             36
+#define  MCSR1_SCDLY                   (_ULCAST_(1) << MCSR1_SCDLY_SHIFT)
+#define  MCSR1_LLEXC_SHIFT             35
+#define  MCSR1_LLEXC                   (_ULCAST_(1) << MCSR1_LLEXC_SHIFT)
+#define  MCSR1_UCACC_SHIFT             34
+#define  MCSR1_UCACC                   (_ULCAST_(1) << MCSR1_UCACC_SHIFT)
+#define  MCSR1_SFB_SHIFT               33
+#define  MCSR1_SFB                     (_ULCAST_(1) << MCSR1_SFB_SHIFT)
+#define  MCSR1_CCDMA_SHIFT             32
+#define  MCSR1_CCDMA                   (_ULCAST_(1) << MCSR1_CCDMA_SHIFT)
+#define  MCSR1_LAMO_SHIFT              22
+#define  MCSR1_LAMO                    (_ULCAST_(1) << MCSR1_LAMO_SHIFT)
+#define  MCSR1_LSPW_SHIFT              21
+#define  MCSR1_LSPW                    (_ULCAST_(1) << MCSR1_LSPW_SHIFT)
+#define  MCSR1_MIPSBT_SHIFT            20
+#define  MCSR1_MIPSBT                  (_ULCAST_(1) << MCSR1_MIPSBT_SHIFT)
+#define  MCSR1_ARMBT_SHIFT             19
+#define  MCSR1_ARMBT                   (_ULCAST_(1) << MCSR1_ARMBT_SHIFT)
+#define  MCSR1_X86BT_SHIFT             18
+#define  MCSR1_X86BT                   (_ULCAST_(1) << MCSR1_X86BT_SHIFT)
+#define  MCSR1_LLFTPVERS_SHIFT         15
+#define  MCSR1_LLFTPVERS_WIDTH         3
+#define  MCSR1_LLFTPVERS               (_ULCAST_(7) << MCSR1_LLFTPVERS_SHIFT)
+#define  MCSR1_LLFTP_SHIFT             14
+#define  MCSR1_LLFTP                   (_ULCAST_(1) << MCSR1_LLFTP_SHIFT)
+#define  MCSR1_VZVERS_SHIFT            11
+#define  MCSR1_VZVERS_WIDTH            3
+#define  MCSR1_VZVERS                  (_ULCAST_(7) << MCSR1_VZVERS_SHIFT)
+#define  MCSR1_VZ_SHIFT                        10
+#define  MCSR1_VZ                      (_ULCAST_(1) << MCSR1_VZ_SHIFT)
+#define  MCSR1_CRYPTO_SHIFT            9
+#define  MCSR1_CRYPTO                  (_ULCAST_(1) << MCSR1_CRYPTO_SHIFT)
+#define  MCSR1_COMPLEX_SHIFT           8
+#define  MCSR1_COMPLEX                 (_ULCAST_(1) << MCSR1_COMPLEX_SHIFT)
+#define  MCSR1_LASX_SHIFT              7
+#define  MCSR1_LASX                    (_ULCAST_(1) << MCSR1_LASX_SHIFT)
+#define  MCSR1_LSX_SHIFT               6
+#define  MCSR1_LSX                     (_ULCAST_(1) << MCSR1_LSX_SHIFT)
+#define  MCSR1_FPVERS_SHIFT            3
+#define  MCSR1_FPVERS_WIDTH            3
+#define  MCSR1_FPVERS                  (_ULCAST_(7) << MCSR1_FPVERS_SHIFT)
+#define  MCSR1_FPDP_SHIFT              2
+#define  MCSR1_FPDP                    (_ULCAST_(1) << MCSR1_FPDP_SHIFT)
+#define  MCSR1_FPSP_SHIFT              1
+#define  MCSR1_FPSP                    (_ULCAST_(1) << MCSR1_FPSP_SHIFT)
+#define  MCSR1_FP_SHIFT                        0
+#define  MCSR1_FP                      (_ULCAST_(1) << MCSR1_FP_SHIFT)
+
+#define LOONGARCH_CSR_MCSR2            0xc2    /* CPUCFG4 and CPUCFG5 */
+#define  MCSR2_CCDIV_SHIFT             48
+#define  MCSR2_CCDIV_WIDTH             16
+#define  MCSR2_CCDIV                   (_ULCAST_(0xffff) << MCSR2_CCDIV_SHIFT)
+#define  MCSR2_CCMUL_SHIFT             32
+#define  MCSR2_CCMUL_WIDTH             16
+#define  MCSR2_CCMUL                   (_ULCAST_(0xffff) << MCSR2_CCMUL_SHIFT)
+#define  MCSR2_CCFREQ_WIDTH            32
+#define  MCSR2_CCFREQ                  (_ULCAST_(0xffffffff))
+#define  CCFREQ_DEFAULT                        0x5f5e100       /* 100MHz */
+
+#define LOONGARCH_CSR_MCSR3            0xc3    /* CPUCFG6 */
+#define  MCSR3_UPM_SHIFT               14
+#define  MCSR3_UPM                     (_ULCAST_(1) << MCSR3_UPM_SHIFT)
+#define  MCSR3_PMBITS_SHIFT            8
+#define  MCSR3_PMBITS_WIDTH            6
+#define  MCSR3_PMBITS                  (_ULCAST_(0x3f) << MCSR3_PMBITS_SHIFT)
+#define  PMBITS_DEFAULT                        0x40
+#define  MCSR3_PMNUM_SHIFT             4
+#define  MCSR3_PMNUM_WIDTH             4
+#define  MCSR3_PMNUM                   (_ULCAST_(0xf) << MCSR3_PMNUM_SHIFT)
+#define  MCSR3_PAMVER_SHIFT            1
+#define  MCSR3_PAMVER_WIDTH            3
+#define  MCSR3_PAMVER                  (_ULCAST_(0x7) << MCSR3_PAMVER_SHIFT)
+#define  MCSR3_PMP_SHIFT               0
+#define  MCSR3_PMP                     (_ULCAST_(1) << MCSR3_PMP_SHIFT)
+
/*
 * MCSR8: machine CSR mirroring CPUCFG16 and CPUCFG17 (cache hierarchy
 * description).  *_SHIFT/*_WIDTH describe a bit field's position and
 * size; the unsuffixed name is the corresponding field mask.
 */
#define LOONGARCH_CSR_MCSR8		0xc8	/* CPUCFG16 and CPUCFG17 */
/* L1 I-cache geometry: size, index bits and way count */
#define  MCSR8_L1I_SIZE_SHIFT		56
#define  MCSR8_L1I_SIZE_WIDTH		7
#define  MCSR8_L1I_SIZE			(_ULCAST_(0x7f) << MCSR8_L1I_SIZE_SHIFT)
#define  MCSR8_L1I_IDX_SHIFT		48
#define  MCSR8_L1I_IDX_WIDTH		8
#define  MCSR8_L1I_IDX			(_ULCAST_(0xff) << MCSR8_L1I_IDX_SHIFT)
#define  MCSR8_L1I_WAY_SHIFT		32
#define  MCSR8_L1I_WAY_WIDTH		16
#define  MCSR8_L1I_WAY			(_ULCAST_(0xffff) << MCSR8_L1I_WAY_SHIFT)
/* Per-cache attribute flags: INCL(usive), PRIV(ate), PRE(sent), UNIFY(ied) */
#define  MCSR8_L3DINCL_SHIFT		16
#define  MCSR8_L3DINCL			(_ULCAST_(1) << MCSR8_L3DINCL_SHIFT)
#define  MCSR8_L3DPRIV_SHIFT		15
#define  MCSR8_L3DPRIV			(_ULCAST_(1) << MCSR8_L3DPRIV_SHIFT)
#define  MCSR8_L3DPRE_SHIFT		14
#define  MCSR8_L3DPRE			(_ULCAST_(1) << MCSR8_L3DPRE_SHIFT)
#define  MCSR8_L3IUINCL_SHIFT		13
#define  MCSR8_L3IUINCL			(_ULCAST_(1) << MCSR8_L3IUINCL_SHIFT)
#define  MCSR8_L3IUPRIV_SHIFT		12
#define  MCSR8_L3IUPRIV			(_ULCAST_(1) << MCSR8_L3IUPRIV_SHIFT)
#define  MCSR8_L3IUUNIFY_SHIFT		11
#define  MCSR8_L3IUUNIFY		(_ULCAST_(1) << MCSR8_L3IUUNIFY_SHIFT)
#define  MCSR8_L3IUPRE_SHIFT		10
#define  MCSR8_L3IUPRE			(_ULCAST_(1) << MCSR8_L3IUPRE_SHIFT)
#define  MCSR8_L2DINCL_SHIFT		9
#define  MCSR8_L2DINCL			(_ULCAST_(1) << MCSR8_L2DINCL_SHIFT)
#define  MCSR8_L2DPRIV_SHIFT		8
#define  MCSR8_L2DPRIV			(_ULCAST_(1) << MCSR8_L2DPRIV_SHIFT)
#define  MCSR8_L2DPRE_SHIFT		7
#define  MCSR8_L2DPRE			(_ULCAST_(1) << MCSR8_L2DPRE_SHIFT)
#define  MCSR8_L2IUINCL_SHIFT		6
#define  MCSR8_L2IUINCL			(_ULCAST_(1) << MCSR8_L2IUINCL_SHIFT)
#define  MCSR8_L2IUPRIV_SHIFT		5
#define  MCSR8_L2IUPRIV			(_ULCAST_(1) << MCSR8_L2IUPRIV_SHIFT)
#define  MCSR8_L2IUUNIFY_SHIFT		4
#define  MCSR8_L2IUUNIFY		(_ULCAST_(1) << MCSR8_L2IUUNIFY_SHIFT)
#define  MCSR8_L2IUPRE_SHIFT		3
#define  MCSR8_L2IUPRE			(_ULCAST_(1) << MCSR8_L2IUPRE_SHIFT)
#define  MCSR8_L1DPRE_SHIFT		2
#define  MCSR8_L1DPRE			(_ULCAST_(1) << MCSR8_L1DPRE_SHIFT)
#define  MCSR8_L1IUUNIFY_SHIFT		1
#define  MCSR8_L1IUUNIFY		(_ULCAST_(1) << MCSR8_L1IUUNIFY_SHIFT)
#define  MCSR8_L1IUPRE_SHIFT		0
#define  MCSR8_L1IUPRE			(_ULCAST_(1) << MCSR8_L1IUPRE_SHIFT)
+
/*
 * MCSR9: machine CSR mirroring CPUCFG18 and CPUCFG19 (L2 unified and
 * L1 D-cache geometry).
 */
#define LOONGARCH_CSR_MCSR9		0xc9	/* CPUCFG18 and CPUCFG19 */
#define  MCSR9_L2U_SIZE_SHIFT		56
#define  MCSR9_L2U_SIZE_WIDTH		7
#define  MCSR9_L2U_SIZE			(_ULCAST_(0x7f) << MCSR9_L2U_SIZE_SHIFT)
#define  MCSR9_L2U_IDX_SHIFT		48
#define  MCSR9_L2U_IDX_WIDTH		8
/*
 * Fix: the original shifted by MCSR9_IDX_LOG_SHIFT, which is defined
 * nowhere in this header; use MCSR9_L2U_IDX_SHIFT (defined just above),
 * matching the pattern of every sibling *_IDX field mask.
 */
#define  MCSR9_L2U_IDX			(_ULCAST_(0xff) << MCSR9_L2U_IDX_SHIFT)
#define  MCSR9_L2U_WAY_SHIFT		32
#define  MCSR9_L2U_WAY_WIDTH		16
#define  MCSR9_L2U_WAY			(_ULCAST_(0xffff) << MCSR9_L2U_WAY_SHIFT)
#define  MCSR9_L1D_SIZE_SHIFT		24
#define  MCSR9_L1D_SIZE_WIDTH		7
#define  MCSR9_L1D_SIZE			(_ULCAST_(0x7f) << MCSR9_L1D_SIZE_SHIFT)
#define  MCSR9_L1D_IDX_SHIFT		16
#define  MCSR9_L1D_IDX_WIDTH		8
#define  MCSR9_L1D_IDX			(_ULCAST_(0xff) << MCSR9_L1D_IDX_SHIFT)
#define  MCSR9_L1D_WAY_SHIFT		0
#define  MCSR9_L1D_WAY_WIDTH		16
#define  MCSR9_L1D_WAY			(_ULCAST_(0xffff) << MCSR9_L1D_WAY_SHIFT)
+
/* MCSR10: machine CSR mirroring CPUCFG20 (L3 unified cache geometry) */
#define LOONGARCH_CSR_MCSR10		0xca	/* CPUCFG20 */
#define  MCSR10_L3U_SIZE_SHIFT		24
#define  MCSR10_L3U_SIZE_WIDTH		7
#define  MCSR10_L3U_SIZE		(_ULCAST_(0x7f) << MCSR10_L3U_SIZE_SHIFT)
#define  MCSR10_L3U_IDX_SHIFT		16
#define  MCSR10_L3U_IDX_WIDTH		8
#define  MCSR10_L3U_IDX			(_ULCAST_(0xff) << MCSR10_L3U_IDX_SHIFT)
#define  MCSR10_L3U_WAY_SHIFT		0
#define  MCSR10_L3U_WAY_WIDTH		16
#define  MCSR10_L3U_WAY			(_ULCAST_(0xffff) << MCSR10_L3U_WAY_SHIFT)

/* MCSR24: machine CSR mirroring CPUCFG48 (misc machine control bits) */
#define LOONGARCH_CSR_MCSR24		0xf0	/* cpucfg48 */
#define  MCSR24_RAMCG_SHIFT		3
#define  MCSR24_RAMCG			(_ULCAST_(1) << MCSR24_RAMCG_SHIFT)
#define  MCSR24_VFPUCG_SHIFT		2
#define  MCSR24_VFPUCG			(_ULCAST_(1) << MCSR24_VFPUCG_SHIFT)
#define  MCSR24_NAPEN_SHIFT		1
#define  MCSR24_NAPEN			(_ULCAST_(1) << MCSR24_NAPEN_SHIFT)
#define  MCSR24_MCSRLOCK_SHIFT		0
#define  MCSR24_MCSRLOCK		(_ULCAST_(1) << MCSR24_MCSRLOCK_SHIFT)

/* Uncached accelerate windows registers */
#define LOONGARCH_CSR_UCAWIN		0x100
#define LOONGARCH_CSR_UCAWIN0_LO	0x102
#define LOONGARCH_CSR_UCAWIN0_HI	0x103
#define LOONGARCH_CSR_UCAWIN1_LO	0x104
#define LOONGARCH_CSR_UCAWIN1_HI	0x105
#define LOONGARCH_CSR_UCAWIN2_LO	0x106
#define LOONGARCH_CSR_UCAWIN2_HI	0x107
#define LOONGARCH_CSR_UCAWIN3_LO	0x108
#define LOONGARCH_CSR_UCAWIN3_HI	0x109

/* Direct Map windows registers */
#define LOONGARCH_CSR_DMWIN0		0x180	/* 64 direct map win0: MEM & IF */
#define LOONGARCH_CSR_DMWIN1		0x181	/* 64 direct map win1: MEM & IF */
#define LOONGARCH_CSR_DMWIN2		0x182	/* 64 direct map win2: MEM */
#define LOONGARCH_CSR_DMWIN3		0x183	/* 64 direct map win3: MEM */

/*
 * Direct Map window 0/1 preset values.  Window 0 maps the 0x8000... virtual
 * segment at PLV0; window 1 maps the 0x9000... segment with the MAT
 * (memory access type) bit set.  DMW_PABITS is defined elsewhere in this
 * header — presumably the number of physical address bits; verify there.
 */
#define CSR_DMW0_PLV0		_CONST64_(1 << 0)
#define CSR_DMW0_VSEG		_CONST64_(0x8000)
#define CSR_DMW0_BASE		(CSR_DMW0_VSEG << DMW_PABITS)
#define CSR_DMW0_INIT		(CSR_DMW0_BASE | CSR_DMW0_PLV0)

#define CSR_DMW1_PLV0		_CONST64_(1 << 0)
#define CSR_DMW1_MAT		_CONST64_(1 << 4)
#define CSR_DMW1_VSEG		_CONST64_(0x9000)
#define CSR_DMW1_BASE		(CSR_DMW1_VSEG << DMW_PABITS)
#define CSR_DMW1_INIT		(CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0)
+
/* Performance Counter registers: four config/counter pairs */
#define LOONGARCH_CSR_PERFCTRL0		0x200	/* 32 perf event 0 config */
#define LOONGARCH_CSR_PERFCNTR0		0x201	/* 64 perf event 0 count value */
#define LOONGARCH_CSR_PERFCTRL1		0x202	/* 32 perf event 1 config */
#define LOONGARCH_CSR_PERFCNTR1		0x203	/* 64 perf event 1 count value */
#define LOONGARCH_CSR_PERFCTRL2		0x204	/* 32 perf event 2 config */
#define LOONGARCH_CSR_PERFCNTR2		0x205	/* 64 perf event 2 count value */
#define LOONGARCH_CSR_PERFCTRL3		0x206	/* 32 perf event 3 config */
#define LOONGARCH_CSR_PERFCNTR3		0x207	/* 64 perf event 3 count value */
/* PERFCTRLx bits: count at privilege level 0-3, interrupt enable, event id */
#define  CSR_PERFCTRL_PLV0		(_ULCAST_(1) << 16)
#define  CSR_PERFCTRL_PLV1		(_ULCAST_(1) << 17)
#define  CSR_PERFCTRL_PLV2		(_ULCAST_(1) << 18)
#define  CSR_PERFCTRL_PLV3		(_ULCAST_(1) << 19)
#define  CSR_PERFCTRL_IE		(_ULCAST_(1) << 20)
#define  CSR_PERFCTRL_EVENT		0x3ff
+
/* Debug registers */
#define LOONGARCH_CSR_MWPC		0x300	/* data breakpoint config */
#define LOONGARCH_CSR_MWPS		0x301	/* data breakpoint status */

/* Data (memory watch) breakpoints 0-7: address/mask/control/asid each */
#define LOONGARCH_CSR_DB0ADDR		0x310	/* data breakpoint 0 address */
#define LOONGARCH_CSR_DB0MASK		0x311	/* data breakpoint 0 mask */
#define LOONGARCH_CSR_DB0CTRL		0x312	/* data breakpoint 0 control */
#define LOONGARCH_CSR_DB0ASID		0x313	/* data breakpoint 0 asid */

#define LOONGARCH_CSR_DB1ADDR		0x318	/* data breakpoint 1 address */
#define LOONGARCH_CSR_DB1MASK		0x319	/* data breakpoint 1 mask */
#define LOONGARCH_CSR_DB1CTRL		0x31a	/* data breakpoint 1 control */
#define LOONGARCH_CSR_DB1ASID		0x31b	/* data breakpoint 1 asid */

#define LOONGARCH_CSR_DB2ADDR		0x320	/* data breakpoint 2 address */
#define LOONGARCH_CSR_DB2MASK		0x321	/* data breakpoint 2 mask */
#define LOONGARCH_CSR_DB2CTRL		0x322	/* data breakpoint 2 control */
#define LOONGARCH_CSR_DB2ASID		0x323	/* data breakpoint 2 asid */

#define LOONGARCH_CSR_DB3ADDR		0x328	/* data breakpoint 3 address */
#define LOONGARCH_CSR_DB3MASK		0x329	/* data breakpoint 3 mask */
#define LOONGARCH_CSR_DB3CTRL		0x32a	/* data breakpoint 3 control */
#define LOONGARCH_CSR_DB3ASID		0x32b	/* data breakpoint 3 asid */

#define LOONGARCH_CSR_DB4ADDR		0x330	/* data breakpoint 4 address */
#define LOONGARCH_CSR_DB4MASK		0x331	/* data breakpoint 4 mask */
#define LOONGARCH_CSR_DB4CTRL		0x332	/* data breakpoint 4 control */
#define LOONGARCH_CSR_DB4ASID		0x333	/* data breakpoint 4 asid */

#define LOONGARCH_CSR_DB5ADDR		0x338	/* data breakpoint 5 address */
#define LOONGARCH_CSR_DB5MASK		0x339	/* data breakpoint 5 mask */
#define LOONGARCH_CSR_DB5CTRL		0x33a	/* data breakpoint 5 control */
#define LOONGARCH_CSR_DB5ASID		0x33b	/* data breakpoint 5 asid */

#define LOONGARCH_CSR_DB6ADDR		0x340	/* data breakpoint 6 address */
#define LOONGARCH_CSR_DB6MASK		0x341	/* data breakpoint 6 mask */
#define LOONGARCH_CSR_DB6CTRL		0x342	/* data breakpoint 6 control */
#define LOONGARCH_CSR_DB6ASID		0x343	/* data breakpoint 6 asid */

#define LOONGARCH_CSR_DB7ADDR		0x348	/* data breakpoint 7 address */
#define LOONGARCH_CSR_DB7MASK		0x349	/* data breakpoint 7 mask */
#define LOONGARCH_CSR_DB7CTRL		0x34a	/* data breakpoint 7 control */
#define LOONGARCH_CSR_DB7ASID		0x34b	/* data breakpoint 7 asid */

#define LOONGARCH_CSR_FWPC		0x380	/* instruction breakpoint config */
#define LOONGARCH_CSR_FWPS		0x381	/* instruction breakpoint status */

/* Instruction (fetch watch) breakpoints 0-7: address/mask/control/asid each */
#define LOONGARCH_CSR_IB0ADDR		0x390	/* inst breakpoint 0 address */
#define LOONGARCH_CSR_IB0MASK		0x391	/* inst breakpoint 0 mask */
#define LOONGARCH_CSR_IB0CTRL		0x392	/* inst breakpoint 0 control */
#define LOONGARCH_CSR_IB0ASID		0x393	/* inst breakpoint 0 asid */

#define LOONGARCH_CSR_IB1ADDR		0x398	/* inst breakpoint 1 address */
#define LOONGARCH_CSR_IB1MASK		0x399	/* inst breakpoint 1 mask */
#define LOONGARCH_CSR_IB1CTRL		0x39a	/* inst breakpoint 1 control */
#define LOONGARCH_CSR_IB1ASID		0x39b	/* inst breakpoint 1 asid */

#define LOONGARCH_CSR_IB2ADDR		0x3a0	/* inst breakpoint 2 address */
#define LOONGARCH_CSR_IB2MASK		0x3a1	/* inst breakpoint 2 mask */
#define LOONGARCH_CSR_IB2CTRL		0x3a2	/* inst breakpoint 2 control */
#define LOONGARCH_CSR_IB2ASID		0x3a3	/* inst breakpoint 2 asid */

#define LOONGARCH_CSR_IB3ADDR		0x3a8	/* inst breakpoint 3 address */
#define LOONGARCH_CSR_IB3MASK		0x3a9	/* inst breakpoint 3 mask */
#define LOONGARCH_CSR_IB3CTRL		0x3aa	/* inst breakpoint 3 control */
#define LOONGARCH_CSR_IB3ASID		0x3ab	/* inst breakpoint 3 asid */

#define LOONGARCH_CSR_IB4ADDR		0x3b0	/* inst breakpoint 4 address */
#define LOONGARCH_CSR_IB4MASK		0x3b1	/* inst breakpoint 4 mask */
#define LOONGARCH_CSR_IB4CTRL		0x3b2	/* inst breakpoint 4 control */
#define LOONGARCH_CSR_IB4ASID		0x3b3	/* inst breakpoint 4 asid */

#define LOONGARCH_CSR_IB5ADDR		0x3b8	/* inst breakpoint 5 address */
#define LOONGARCH_CSR_IB5MASK		0x3b9	/* inst breakpoint 5 mask */
#define LOONGARCH_CSR_IB5CTRL		0x3ba	/* inst breakpoint 5 control */
#define LOONGARCH_CSR_IB5ASID		0x3bb	/* inst breakpoint 5 asid */

#define LOONGARCH_CSR_IB6ADDR		0x3c0	/* inst breakpoint 6 address */
#define LOONGARCH_CSR_IB6MASK		0x3c1	/* inst breakpoint 6 mask */
#define LOONGARCH_CSR_IB6CTRL		0x3c2	/* inst breakpoint 6 control */
#define LOONGARCH_CSR_IB6ASID		0x3c3	/* inst breakpoint 6 asid */

#define LOONGARCH_CSR_IB7ADDR		0x3c8	/* inst breakpoint 7 address */
#define LOONGARCH_CSR_IB7MASK		0x3c9	/* inst breakpoint 7 mask */
#define LOONGARCH_CSR_IB7CTRL		0x3ca	/* inst breakpoint 7 control */
#define LOONGARCH_CSR_IB7ASID		0x3cb	/* inst breakpoint 7 asid */

#define LOONGARCH_CSR_DEBUG		0x500	/* debug config */
#define LOONGARCH_CSR_DERA		0x501	/* debug era */
#define LOONGARCH_CSR_DESAVE		0x502	/* debug save */

#define CSR_FWPC_SKIP_SHIFT		16
#define CSR_FWPC_SKIP			(_ULCAST_(1) << CSR_FWPC_SKIP_SHIFT)
+
/*
 * CSR.ECFG interrupt mask bits.  ECFGB_* are bit numbers, ECFGF_* the
 * corresponding single-bit masks; ECFGF() maps a hardware IRQ number to
 * its mask bit.
 */
#define ECFG0_IM		0x00001fff
#define ECFGB_SIP0		0
#define ECFGF_SIP0		(_ULCAST_(1) << ECFGB_SIP0)
#define ECFGB_SIP1		1
#define ECFGF_SIP1		(_ULCAST_(1) << ECFGB_SIP1)
#define ECFGB_IP0		2
#define ECFGF_IP0		(_ULCAST_(1) << ECFGB_IP0)
#define ECFGB_IP1		3
#define ECFGF_IP1		(_ULCAST_(1) << ECFGB_IP1)
#define ECFGB_IP2		4
#define ECFGF_IP2		(_ULCAST_(1) << ECFGB_IP2)
#define ECFGB_IP3		5
#define ECFGF_IP3		(_ULCAST_(1) << ECFGB_IP3)
#define ECFGB_IP4		6
#define ECFGF_IP4		(_ULCAST_(1) << ECFGB_IP4)
#define ECFGB_IP5		7
#define ECFGF_IP5		(_ULCAST_(1) << ECFGB_IP5)
#define ECFGB_IP6		8
#define ECFGF_IP6		(_ULCAST_(1) << ECFGB_IP6)
#define ECFGB_IP7		9
#define ECFGF_IP7		(_ULCAST_(1) << ECFGB_IP7)
#define ECFGB_PMC		10
#define ECFGF_PMC		(_ULCAST_(1) << ECFGB_PMC)
#define ECFGB_TIMER		11
#define ECFGF_TIMER		(_ULCAST_(1) << ECFGB_TIMER)
#define ECFGB_IPI		12
#define ECFGF_IPI		(_ULCAST_(1) << ECFGB_IPI)
/* Parenthesize the parameter so expressions like ECFGF(base + n) expand safely */
#define ECFGF(hwirq)		(_ULCAST_(1) << (hwirq))

#define ESTATF_IP		0x00003fff
+
/* IOCSR space: feature discovery and chip identification registers */
#define LOONGARCH_IOCSR_FEATURES	0x8
#define  IOCSRF_TEMP			BIT_ULL(0)
#define  IOCSRF_NODECNT			BIT_ULL(1)
#define  IOCSRF_MSI			BIT_ULL(2)
#define  IOCSRF_EXTIOI			BIT_ULL(3)
#define  IOCSRF_CSRIPI			BIT_ULL(4)
#define  IOCSRF_FREQCSR			BIT_ULL(5)
#define  IOCSRF_FREQSCALE		BIT_ULL(6)
#define  IOCSRF_DVFSV1			BIT_ULL(7)
#define  IOCSRF_EIODECODE		BIT_ULL(9)
#define  IOCSRF_FLATMODE		BIT_ULL(10)
#define  IOCSRF_VM			BIT_ULL(11)

#define LOONGARCH_IOCSR_VENDOR		0x10

#define LOONGARCH_IOCSR_CPUNAME		0x20

#define LOONGARCH_IOCSR_NODECNT		0x408

#define LOONGARCH_IOCSR_MISC_FUNC	0x420
#define  IOCSR_MISC_FUNC_TIMER_RESET	BIT_ULL(21)
#define  IOCSR_MISC_FUNC_EXT_IOI_EN	BIT_ULL(48)

#define LOONGARCH_IOCSR_CPUTEMP		0x428

/* PerCore CSR, only accessible by local cores */
#define LOONGARCH_IOCSR_IPI_STATUS	0x1000
#define LOONGARCH_IOCSR_IPI_EN		0x1004
#define LOONGARCH_IOCSR_IPI_SET		0x1008
#define LOONGARCH_IOCSR_IPI_CLEAR	0x100c
#define LOONGARCH_IOCSR_MBUF0		0x1020
#define LOONGARCH_IOCSR_MBUF1		0x1028
#define LOONGARCH_IOCSR_MBUF2		0x1030
#define LOONGARCH_IOCSR_MBUF3		0x1038

#define LOONGARCH_IOCSR_IPI_SEND	0x1040
#define  IOCSR_IPI_SEND_IP_SHIFT	0
#define  IOCSR_IPI_SEND_CPU_SHIFT	16
#define  IOCSR_IPI_SEND_BLOCKING	BIT(31)

#define LOONGARCH_IOCSR_MBUF_SEND	0x1048
#define  IOCSR_MBUF_SEND_BLOCKING	BIT_ULL(31)
#define  IOCSR_MBUF_SEND_BOX_SHIFT	2
/*
 * Fix: parenthesize the macro parameter so a non-trivial argument such
 * as IOCSR_MBUF_SEND_BOX_LO(a + b) expands with the intended grouping.
 */
#define  IOCSR_MBUF_SEND_BOX_LO(box)	((box) << 1)
#define  IOCSR_MBUF_SEND_BOX_HI(box)	(((box) << 1) + 1)
#define  IOCSR_MBUF_SEND_CPU_SHIFT	16
#define  IOCSR_MBUF_SEND_BUF_SHIFT	32
#define  IOCSR_MBUF_SEND_H32_MASK	0xFFFFFFFF00000000ULL

#define LOONGARCH_IOCSR_ANY_SEND	0x1158
#define  IOCSR_ANY_SEND_BLOCKING	BIT_ULL(31)
#define  IOCSR_ANY_SEND_CPU_SHIFT	16
#define  IOCSR_ANY_SEND_MASK_SHIFT	27
#define  IOCSR_ANY_SEND_BUF_SHIFT	32
#define  IOCSR_ANY_SEND_H32_MASK	0xFFFFFFFF00000000ULL

/* Register offset and bit definition for CSR access */
#define LOONGARCH_IOCSR_TIMER_CFG	0x1060
#define LOONGARCH_IOCSR_TIMER_TICK	0x1070
#define  IOCSR_TIMER_CFG_RESERVED	(_ULCAST_(1) << 63)
#define  IOCSR_TIMER_CFG_PERIODIC	(_ULCAST_(1) << 62)
#define  IOCSR_TIMER_CFG_EN		(_ULCAST_(1) << 61)
#define  IOCSR_TIMER_MASK		0x0ffffffffffffULL
#define  IOCSR_TIMER_INITVAL_RST	(_ULCAST_(0xffff) << 48)

/* Extended I/O interrupt controller register banks */
#define LOONGARCH_IOCSR_EXTIOI_NODEMAP_BASE	0x14a0
#define LOONGARCH_IOCSR_EXTIOI_IPMAP_BASE	0x14c0
#define LOONGARCH_IOCSR_EXTIOI_EN_BASE		0x1600
#define LOONGARCH_IOCSR_EXTIOI_BOUNCE_BASE	0x1680
#define LOONGARCH_IOCSR_EXTIOI_ISR_BASE		0x1800
#define LOONGARCH_IOCSR_EXTIOI_ROUTE_BASE	0x1c00
#define IOCSR_EXTIOI_VECTOR_NUM			256
+
+#ifndef __ASSEMBLY__
+
+static __always_inline u64 drdtime(void)
+{
+       u64 val = 0;
+
+       __asm__ __volatile__(
+               "rdtime.d %0, $zero\n\t"
+               : "=r"(val)
+               :
+               );
+       return val;
+}
+
+static __always_inline u32 rdtimeh(void)
+{
+       u32 val = 0;
+
+       __asm__ __volatile__(
+               "rdtimeh.w %0, $zero\n\t"
+               : "=r"(val)
+               :
+               );
+       return val;
+}
+
+static __always_inline u32 rdtimel(void)
+{
+       u32 val = 0;
+
+       __asm__ __volatile__(
+               "rdtimel.w %0, $zero\n\t"
+               : "=r"(val)
+               :
+               );
+       return val;
+}
+
/* Return this core's ID from the read-only CPUID CSR. */
static inline unsigned int get_csr_cpuid(void)
{
	return csr_read32(LOONGARCH_CSR_CPUID);
}
+
+static inline void csr_any_send(unsigned int addr, unsigned int data,
+                               unsigned int data_mask, unsigned int cpu)
+{
+       uint64_t val = 0;
+
+       val = IOCSR_ANY_SEND_BLOCKING | addr;
+       val |= (cpu << IOCSR_ANY_SEND_CPU_SHIFT);
+       val |= (data_mask << IOCSR_ANY_SEND_MASK_SHIFT);
+       val |= ((uint64_t)data << IOCSR_ANY_SEND_BUF_SHIFT);
+       iocsr_write64(val, LOONGARCH_IOCSR_ANY_SEND);
+}
+
/* Extract the exception code (Ecode) field from CSR.ESTAT. */
static inline unsigned int read_csr_excode(void)
{
	return (csr_read32(LOONGARCH_CSR_ESTAT) & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
}
+
/* Select TLB entry @idx: exchange only the index field of CSR.TLBIDX. */
static inline void write_csr_index(unsigned int idx)
{
	csr_xchg32(idx, CSR_TLBIDX_IDXM, LOONGARCH_CSR_TLBIDX);
}
+
/* Return the page-size field of CSR.TLBIDX (CSR_TLBIDX_SIZE is its shift). */
static inline unsigned int read_csr_pagesize(void)
{
	return (csr_read32(LOONGARCH_CSR_TLBIDX) & CSR_TLBIDX_SIZEM) >> CSR_TLBIDX_SIZE;
}
+
/* Set the page-size field of CSR.TLBIDX, leaving the other fields intact. */
static inline void write_csr_pagesize(unsigned int size)
{
	csr_xchg32(size << CSR_TLBIDX_SIZE, CSR_TLBIDX_SIZEM, LOONGARCH_CSR_TLBIDX);
}
+
/* Return the page-size (PS) field of the TLB-refill entry-high CSR. */
static inline unsigned int read_csr_tlbrefill_pagesize(void)
{
	return (csr_read64(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREHI_PS_SHIFT;
}
+
/* Set the page-size (PS) field of the TLB-refill entry-high CSR. */
static inline void write_csr_tlbrefill_pagesize(unsigned int size)
{
	csr_xchg64(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TLBREHI);
}
+
/* Convenience per-CSR accessors (32- or 64-bit per the register's width) */
#define read_csr_asid()			csr_read32(LOONGARCH_CSR_ASID)
#define write_csr_asid(val)		csr_write32(val, LOONGARCH_CSR_ASID)
#define read_csr_entryhi()		csr_read64(LOONGARCH_CSR_TLBEHI)
#define write_csr_entryhi(val)		csr_write64(val, LOONGARCH_CSR_TLBEHI)
#define read_csr_entrylo0()		csr_read64(LOONGARCH_CSR_TLBELO0)
#define write_csr_entrylo0(val)		csr_write64(val, LOONGARCH_CSR_TLBELO0)
#define read_csr_entrylo1()		csr_read64(LOONGARCH_CSR_TLBELO1)
#define write_csr_entrylo1(val)		csr_write64(val, LOONGARCH_CSR_TLBELO1)
#define read_csr_ecfg()			csr_read32(LOONGARCH_CSR_ECFG)
#define write_csr_ecfg(val)		csr_write32(val, LOONGARCH_CSR_ECFG)
#define read_csr_estat()		csr_read32(LOONGARCH_CSR_ESTAT)
#define write_csr_estat(val)		csr_write32(val, LOONGARCH_CSR_ESTAT)
#define read_csr_tlbidx()		csr_read32(LOONGARCH_CSR_TLBIDX)
#define write_csr_tlbidx(val)		csr_write32(val, LOONGARCH_CSR_TLBIDX)
#define read_csr_euen()			csr_read32(LOONGARCH_CSR_EUEN)
#define write_csr_euen(val)		csr_write32(val, LOONGARCH_CSR_EUEN)
#define read_csr_cpuid()		csr_read32(LOONGARCH_CSR_CPUID)
#define read_csr_prcfg1()		csr_read64(LOONGARCH_CSR_PRCFG1)
#define write_csr_prcfg1(val)		csr_write64(val, LOONGARCH_CSR_PRCFG1)
#define read_csr_prcfg2()		csr_read64(LOONGARCH_CSR_PRCFG2)
#define write_csr_prcfg2(val)		csr_write64(val, LOONGARCH_CSR_PRCFG2)
#define read_csr_prcfg3()		csr_read64(LOONGARCH_CSR_PRCFG3)
#define write_csr_prcfg3(val)		csr_write64(val, LOONGARCH_CSR_PRCFG3)
#define read_csr_stlbpgsize()		csr_read32(LOONGARCH_CSR_STLBPGSIZE)
#define write_csr_stlbpgsize(val)	csr_write32(val, LOONGARCH_CSR_STLBPGSIZE)
#define read_csr_rvacfg()		csr_read32(LOONGARCH_CSR_RVACFG)
#define write_csr_rvacfg(val)		csr_write32(val, LOONGARCH_CSR_RVACFG)
#define write_csr_tintclear(val)	csr_write32(val, LOONGARCH_CSR_TINTCLR)
#define read_csr_impctl1()		csr_read64(LOONGARCH_CSR_IMPCTL1)
#define write_csr_impctl1(val)		csr_write64(val, LOONGARCH_CSR_IMPCTL1)
#define write_csr_impctl2(val)		csr_write64(val, LOONGARCH_CSR_IMPCTL2)

/* Performance counter accessors (all 64-bit) */
#define read_csr_perfctrl0()		csr_read64(LOONGARCH_CSR_PERFCTRL0)
#define read_csr_perfcntr0()		csr_read64(LOONGARCH_CSR_PERFCNTR0)
#define read_csr_perfctrl1()		csr_read64(LOONGARCH_CSR_PERFCTRL1)
#define read_csr_perfcntr1()		csr_read64(LOONGARCH_CSR_PERFCNTR1)
#define read_csr_perfctrl2()		csr_read64(LOONGARCH_CSR_PERFCTRL2)
#define read_csr_perfcntr2()		csr_read64(LOONGARCH_CSR_PERFCNTR2)
#define read_csr_perfctrl3()		csr_read64(LOONGARCH_CSR_PERFCTRL3)
#define read_csr_perfcntr3()		csr_read64(LOONGARCH_CSR_PERFCNTR3)
#define write_csr_perfctrl0(val)	csr_write64(val, LOONGARCH_CSR_PERFCTRL0)
#define write_csr_perfcntr0(val)	csr_write64(val, LOONGARCH_CSR_PERFCNTR0)
#define write_csr_perfctrl1(val)	csr_write64(val, LOONGARCH_CSR_PERFCTRL1)
#define write_csr_perfcntr1(val)	csr_write64(val, LOONGARCH_CSR_PERFCNTR1)
#define write_csr_perfctrl2(val)	csr_write64(val, LOONGARCH_CSR_PERFCTRL2)
#define write_csr_perfcntr2(val)	csr_write64(val, LOONGARCH_CSR_PERFCNTR2)
#define write_csr_perfctrl3(val)	csr_write64(val, LOONGARCH_CSR_PERFCTRL3)
#define write_csr_perfcntr3(val)	csr_write64(val, LOONGARCH_CSR_PERFCNTR3)
+
/*
 * Manipulate bits in a register: generate set_<name>(), clear_<name>()
 * and change_<name>() helpers on top of read_<name>()/write_<name>().
 * Each helper returns the register's previous value.
 */
#define __BUILD_CSR_COMMON(name)				\
static inline unsigned long					\
set_##name(unsigned long set)					\
{								\
	unsigned long res, new;					\
								\
	res = read_##name();					\
	new = res | set;					\
	write_##name(new);					\
								\
	return res;						\
}								\
								\
static inline unsigned long					\
clear_##name(unsigned long clear)				\
{								\
	unsigned long res, new;					\
								\
	res = read_##name();					\
	new = res & ~clear;					\
	write_##name(new);					\
								\
	return res;						\
}								\
								\
static inline unsigned long					\
change_##name(unsigned long change, unsigned long val)		\
{								\
	unsigned long res, new;					\
								\
	res = read_##name();					\
	new = res & ~change;					\
	new |= (val & change);					\
	write_##name(new);					\
								\
	return res;						\
}
+
/* Instantiate set_/clear_/change_ helpers for the csr_* accessors above */
#define __BUILD_CSR_OP(name)	__BUILD_CSR_COMMON(csr_##name)

__BUILD_CSR_OP(euen)
__BUILD_CSR_OP(ecfg)
__BUILD_CSR_OP(tlbidx)

/* ESTAT bits are set/cleared via an atomic exchange, not read-modify-write */
#define set_csr_estat(val)	\
	csr_xchg32(val, val, LOONGARCH_CSR_ESTAT)
#define clear_csr_estat(val)	\
	csr_xchg32(~(val), val, LOONGARCH_CSR_ESTAT)
+
+#endif /* __ASSEMBLY__ */
+
/* Generic EntryLo bit definitions */
#define ENTRYLO_V		(_ULCAST_(1) << 0)
#define ENTRYLO_D		(_ULCAST_(1) << 1)
#define ENTRYLO_PLV_SHIFT	2
#define ENTRYLO_PLV		(_ULCAST_(3) << ENTRYLO_PLV_SHIFT)
#define ENTRYLO_C_SHIFT		4
#define ENTRYLO_C		(_ULCAST_(3) << ENTRYLO_C_SHIFT)
#define ENTRYLO_G		(_ULCAST_(1) << 6)
#define ENTRYLO_NR		(_ULCAST_(1) << 61)
#define ENTRYLO_NX		(_ULCAST_(1) << 62)

/* Values for PageSize register (log2 of the page size, e.g. 0xc = 4K) */
#define PS_4K		0x0000000c
#define PS_8K		0x0000000d
#define PS_16K		0x0000000e
#define PS_32K		0x0000000f
#define PS_64K		0x00000010
#define PS_128K		0x00000011
#define PS_256K		0x00000012
#define PS_512K		0x00000013
#define PS_1M		0x00000014
#define PS_2M		0x00000015
#define PS_4M		0x00000016
#define PS_8M		0x00000017
#define PS_16M		0x00000018
#define PS_32M		0x00000019
#define PS_64M		0x0000001a
#define PS_128M		0x0000001b
#define PS_256M		0x0000001c
#define PS_512M		0x0000001d
#define PS_1G		0x0000001e

/* ExStatus.ExcCode (indented EXSUBCODE_*/EXCSUBCODE_* refine their parent) */
#define EXCCODE_RSV		0	/* Reserved */
#define EXCCODE_TLBL		1	/* TLB miss on a load */
#define EXCCODE_TLBS		2	/* TLB miss on a store */
#define EXCCODE_TLBI		3	/* TLB miss on a ifetch */
#define EXCCODE_TLBM		4	/* TLB modified fault */
#define EXCCODE_TLBNR		5	/* TLB Read-Inhibit exception */
#define EXCCODE_TLBNX		6	/* TLB Execution-Inhibit exception */
#define EXCCODE_TLBPE		7	/* TLB Privilege Error */
#define EXCCODE_ADE		8	/* Address Error */
	#define EXSUBCODE_ADEF		0	/* Fetch Instruction */
	#define EXSUBCODE_ADEM		1	/* Access Memory */
#define EXCCODE_ALE		9	/* Unalign Access */
#define EXCCODE_BCE		10	/* Bounds Check Error */
#define EXCCODE_SYS		11	/* System call */
#define EXCCODE_BP		12	/* Breakpoint */
#define EXCCODE_INE		13	/* Inst. Not Exist */
#define EXCCODE_IPE		14	/* Inst. Privileged Error */
#define EXCCODE_FPDIS		15	/* FPU Disabled */
#define EXCCODE_LSXDIS		16	/* LSX Disabled */
#define EXCCODE_LASXDIS		17	/* LASX Disabled */
#define EXCCODE_FPE		18	/* Floating Point Exception */
	#define EXCSUBCODE_FPE		0	/* Floating Point Exception */
	#define EXCSUBCODE_VFPE		1	/* Vector Exception */
#define EXCCODE_WATCH		19	/* WatchPoint Exception */
	#define EXCSUBCODE_WPEF		0	/* ... on Instruction Fetch */
	#define EXCSUBCODE_WPEM		1	/* ... on Memory Accesses */
#define EXCCODE_BTDIS		20	/* Binary Trans. Disabled */
#define EXCCODE_BTE		21	/* Binary Trans. Exception */
#define EXCCODE_GSPR		22	/* Guest Privileged Error */
#define EXCCODE_HVC		23	/* Hypercall */
#define EXCCODE_GCM		24	/* Guest CSR modified */
	#define EXCSUBCODE_GCSC		0	/* Software caused */
	#define EXCSUBCODE_GCHC		1	/* Hardware caused */
#define EXCCODE_SE		25	/* Security */

/* Interrupt numbers */
#define INT_SWI0	0	/* Software Interrupts */
#define INT_SWI1	1
#define INT_HWI0	2	/* Hardware Interrupts */
#define INT_HWI1	3
#define INT_HWI2	4
#define INT_HWI3	5
#define INT_HWI4	6
#define INT_HWI5	7
#define INT_HWI6	8
#define INT_HWI7	9
#define INT_PCOV	10	/* Performance Counter Overflow */
#define INT_TI		11	/* Timer */
#define INT_IPI		12
#define INT_NMI		13

/* ExcCodes corresponding to interrupts: INT_* mapped into 64..77 */
#define EXCCODE_INT_NUM		(INT_NMI + 1)
#define EXCCODE_INT_START	64
#define EXCCODE_INT_END		(EXCCODE_INT_START + EXCCODE_INT_NUM - 1)
+
/* FPU Status Register Names */
#ifndef CONFIG_AS_HAS_FCSR_CLASS
/* Assemblers without FCSR register-class support take plain $rN names */
#define LOONGARCH_FCSR0	$r0
#define LOONGARCH_FCSR1	$r1
#define LOONGARCH_FCSR2	$r2
#define LOONGARCH_FCSR3	$r3
#else
#define LOONGARCH_FCSR0	$fcsr0
#define LOONGARCH_FCSR1	$fcsr1
#define LOONGARCH_FCSR2	$fcsr2
#define LOONGARCH_FCSR3	$fcsr3
#endif

/* FPU Status Register Values */
#define FPU_CSR_RSVD	0xe0e0fce0

/*
 * X the exception cause indicator
 * E the exception enable
 * S the sticky/flag bit
 */
#define FPU_CSR_ALL_X	0x1f000000
#define FPU_CSR_INV_X	0x10000000
#define FPU_CSR_DIV_X	0x08000000
#define FPU_CSR_OVF_X	0x04000000
#define FPU_CSR_UDF_X	0x02000000
#define FPU_CSR_INE_X	0x01000000

#define FPU_CSR_ALL_S	0x001f0000
#define FPU_CSR_INV_S	0x00100000
#define FPU_CSR_DIV_S	0x00080000
#define FPU_CSR_OVF_S	0x00040000
#define FPU_CSR_UDF_S	0x00020000
#define FPU_CSR_INE_S	0x00010000

#define FPU_CSR_ALL_E	0x0000001f
#define FPU_CSR_INV_E	0x00000010
#define FPU_CSR_DIV_E	0x00000008
#define FPU_CSR_OVF_E	0x00000004
#define FPU_CSR_UDF_E	0x00000002
#define FPU_CSR_INE_E	0x00000001

/* Bits 8 and 9 of FPU Status Register specify the rounding mode */
#define FPU_CSR_RM	0x300
#define FPU_CSR_RN	0x000	/* nearest */
#define FPU_CSR_RZ	0x100	/* towards zero */
#define FPU_CSR_RU	0x200	/* towards +Infinity */
#define FPU_CSR_RD	0x300	/* towards -Infinity */

/* Bit 6 of FPU Status Register specify the LBT TOP simulation mode */
#define FPU_CSR_TM_SHIFT	0x6
#define FPU_CSR_TM		(_ULCAST_(1) << FPU_CSR_TM_SHIFT)
+
/* Read FP control/status register @source (one of the LOONGARCH_FCSRx names). */
#define read_fcsr(source)	\
({	\
	unsigned int __res;	\
\
	__asm__ __volatile__(	\
	"  movfcsr2gr      %0, "__stringify(source)" \n"    \
	: "=r" (__res));      \
	__res;	\
})
+
/* Write @val to FP control/status register @dest (a LOONGARCH_FCSRx name). */
#define write_fcsr(dest, val) \
do {	\
	__asm__ __volatile__(	\
	"  movgr2fcsr      "__stringify(dest)", %0       \n"        \
	: : "r" (val));       \
} while (0)
+
+#endif /* _ASM_LOONGARCH_H */
diff --git a/arch/loongarch/include/asm/posix_types.h 
b/arch/loongarch/include/asm/posix_types.h
new file mode 100644
index 000000000000..0ed5fa4c5fae
--- /dev/null
+++ b/arch/loongarch/include/asm/posix_types.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * linux/include/asm-arm/posix_types.h
+ *
+ * Copyright (C) 1996-1998 Russell King.
+ *
+ * Copyright (C) 2011 Andes Technology Corporation
+ * Copyright (C) 2010 Shawn Lin (nobuh...@andestech.com)
+ * Copyright (C) 2011 Macpaul Lin (macp...@andestech.com)
+ * Copyright (C) 2017 Rick Chen (r...@andestech.com)
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+#ifndef __ARCH_LOONGARCH_POSIX_TYPES_H
+#define __ARCH_LOONGARCH_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc.  Also, we cannot
+ * assume GCC is being used.
+ */
+
+/* Basic file-system and process identifier types. */
+typedef unsigned short         __kernel_dev_t;
+typedef unsigned long          __kernel_ino_t;
+typedef unsigned short         __kernel_mode_t;
+typedef unsigned short         __kernel_nlink_t;
+typedef long                   __kernel_off_t;
+typedef int                    __kernel_pid_t;
+typedef unsigned short         __kernel_ipc_pid_t;
+typedef unsigned short         __kernel_uid_t;
+typedef unsigned short         __kernel_gid_t;
+/* Prefer the compiler-provided size type when building with GCC. */
+#ifdef __GNUC__
+typedef __SIZE_TYPE__          __kernel_size_t;
+#else
+typedef unsigned long          __kernel_size_t;
+#endif
+typedef long                   __kernel_ssize_t;
+typedef long                   __kernel_ptrdiff_t;
+typedef long                   __kernel_time_t;
+typedef long                   __kernel_suseconds_t;
+typedef long                   __kernel_clock_t;
+typedef int                    __kernel_daddr_t;
+typedef char                   *__kernel_caddr_t;
+/* Fixed-width uid/gid variants kept for compatibility interfaces. */
+typedef unsigned short         __kernel_uid16_t;
+typedef unsigned short         __kernel_gid16_t;
+typedef unsigned int           __kernel_uid32_t;
+typedef unsigned int           __kernel_gid32_t;
+
+typedef unsigned short         __kernel_old_uid_t;
+typedef unsigned short         __kernel_old_gid_t;
+
+#ifdef __GNUC__
+typedef long long              __kernel_loff_t;
+#endif
+
+/* File-system ID; the field name differs between kernel and user views. */
+typedef struct {
+#if defined(__KERNEL__) || defined(__USE_ALL)
+       int     val[2];
+#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+       int     __val[2];
+#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+} __kernel_fsid_t;
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+
+#undef __FD_SET
+#define __FD_SET(_fd, fdsetp) \
+       typeof(_fd) (fd) = (_fd); \
+       (((fd_set *)fdsetp)->fds_bits[fd >> 5] |= (1 << (fd & 31)))
+
+#undef __FD_CLR
+#define __FD_CLR(_fd, fdsetp) \
+       typeof(_fd) (fd) = (_fd); \
+       (((fd_set *)fdsetp)->fds_bits[fd >> 5] &= ~(1 << (fd & 31)))
+
+#undef __FD_ISSET
+#define __FD_ISSET(_fd, fdsetp) \
+       typeof(_fd) (fd) = (_fd); \
+       ((((fd_set *)fdsetp)->fds_bits[fd >> 5] & (1 << (fd & 31))) != 0)
+
+#undef __FD_ZERO
+#define __FD_ZERO(_fdsetp) \
+       typeof(_fdsetp) (fd) = (_fdsetp); \
+       (memset(fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
+
+#endif
+
+#endif /* __ARCH_LOONGARCH_POSIX_TYPES_H */
diff --git a/arch/loongarch/include/asm/processor.h 
b/arch/loongarch/include/asm/processor.h
new file mode 100644
index 000000000000..7242a3e6ca7e
--- /dev/null
+++ b/arch/loongarch/include/asm/processor.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef __ASM_LOONGARCH_PROCESSOR_H
+#define __ASM_LOONGARCH_PROCESSOR_H
+
+/* Dummy header */
+
+#endif /* __ASM_LOONGARCH_PROCESSOR_H */
diff --git a/arch/loongarch/include/asm/ptrace.h 
b/arch/loongarch/include/asm/ptrace.h
new file mode 100644
index 000000000000..1b3c2b5b8f16
--- /dev/null
+++ b/arch/loongarch/include/asm/ptrace.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef __ASM_LOONGARCH_PTRACE_H
+#define __ASM_LOONGARCH_PTRACE_H
+
+/*
+ * struct pt_regs - CPU register state saved on exception/interrupt entry.
+ *
+ * NOTE(review): the field order presumably mirrors the assembly
+ * save/restore code — confirm before reordering anything here.
+ */
+struct pt_regs {
+       /* Main processor registers. */
+       unsigned long regs[32];
+
+       /* Original syscall arg0. */
+       unsigned long orig_a0;
+
+       /* Special CSR registers. */
+       unsigned long csr_era;
+       unsigned long csr_badvaddr;
+       unsigned long csr_crmd;
+       unsigned long csr_prmd;
+       unsigned long csr_euen;
+       unsigned long csr_ecfg;
+       unsigned long csr_estat;
+       /* Flexible array marking the end of the fixed layout. */
+       unsigned long __last[];
+} __aligned(8);
+
+#ifdef CONFIG_64BIT
+#define REG_FMT "%016lx"
+#else
+#define REG_FMT "%08lx"
+#endif
+
+#endif /* __ASM_LOONGARCH_PTRACE_H */
diff --git a/arch/loongarch/include/asm/regdef.h 
b/arch/loongarch/include/asm/regdef.h
new file mode 100644
index 000000000000..5fbc0d4757e3
--- /dev/null
+++ b/arch/loongarch/include/asm/regdef.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+#ifndef _ASM_REGDEF_H
+#define _ASM_REGDEF_H
+
+/* ABI mnemonics for the LoongArch general-purpose registers $r0..$r31. */
+#define zero   $r0     /* wired zero */
+#define ra     $r1     /* return address */
+#define tp     $r2     /* thread pointer */
+#define sp     $r3     /* stack pointer */
+#define a0     $r4     /* argument registers, a0/a1 reused as v0/v1 for return value */
+#define a1     $r5
+#define a2     $r6
+#define a3     $r7
+#define a4     $r8
+#define a5     $r9
+#define a6     $r10
+#define a7     $r11
+#define t0     $r12    /* caller saved */
+#define t1     $r13
+#define t2     $r14
+#define t3     $r15
+#define t4     $r16
+#define t5     $r17
+#define t6     $r18
+#define t7     $r19
+#define t8     $r20
+#define u0     $r21
+#define fp     $r22    /* frame pointer */
+#define s0     $r23    /* callee saved */
+#define s1     $r24
+#define s2     $r25
+#define s3     $r26
+#define s4     $r27
+#define s5     $r28
+#define s6     $r29
+#define s7     $r30
+#define s8     $r31
+
+#endif /* _ASM_REGDEF_H */
diff --git a/arch/loongarch/include/asm/sections.h 
b/arch/loongarch/include/asm/sections.h
new file mode 100644
index 000000000000..9f5009a1f235
--- /dev/null
+++ b/arch/loongarch/include/asm/sections.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __ASM_LOONGARCH_SECTIONS_H
+#define __ASM_LOONGARCH_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+#endif
diff --git a/arch/loongarch/include/asm/setjmp.h 
b/arch/loongarch/include/asm/setjmp.h
new file mode 100644
index 000000000000..295198a38964
--- /dev/null
+++ b/arch/loongarch/include/asm/setjmp.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef _SETJMP_H_
+#define _SETJMP_H_
+
+/*
+ * This really should be opaque, but the EFI implementation wrongly
+ * assumes that a 'struct jmp_buf_data' is defined.
+ */
+/* Callee-saved context captured by setjmp() and replayed by longjmp(). */
+struct jmp_buf_data {
+       unsigned long s_regs[9];        /* callee-saved s0 - s8 */
+       unsigned long fp;               /* frame pointer ($r22) */
+       unsigned long sp;               /* stack pointer */
+       unsigned long ra;               /* return address */
+};
+
+typedef struct jmp_buf_data jmp_buf[1];
+
+/* Standard C semantics: setjmp() returns 0 on the direct call and
+ * longjmp()'s ret value (forced non-zero) on the jump back. */
+int setjmp(jmp_buf jmp);
+void longjmp(jmp_buf jmp, int ret);
+
+#endif /* _SETJMP_H_ */
diff --git a/arch/loongarch/include/asm/spl.h b/arch/loongarch/include/asm/spl.h
new file mode 100644
index 000000000000..b8b19f65cb36
--- /dev/null
+++ b/arch/loongarch/include/asm/spl.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef __ASM_SPL_H
+#define __ASM_SPL_H
+
+/* Dummy header */
+
+#endif
diff --git a/arch/loongarch/include/asm/string.h 
b/arch/loongarch/include/asm/string.h
new file mode 100644
index 000000000000..09877956de9a
--- /dev/null
+++ b/arch/loongarch/include/asm/string.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef __ASM_STRING_H
+#define __ASM_STRING_H
+
+/* Dummy header */
+
+#endif
diff --git a/arch/loongarch/include/asm/system.h 
b/arch/loongarch/include/asm/system.h
new file mode 100644
index 000000000000..79c47418d52c
--- /dev/null
+++ b/arch/loongarch/include/asm/system.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef __ASM_LOONGARCH_SYSTEM_H
+#define __ASM_LOONGARCH_SYSTEM_H
+
+#include <asm/loongarch.h>
+
+struct event;
+
+/*
+ * Interrupt configuration macros
+ */
+
+/*
+ * Enable interrupts: csrxchg writes the IE bit (under the CSR_CRMD_IE
+ * mask) into the CRMD control register in a single atomic exchange.
+ */
+static inline void arch_local_irq_enable(void)
+{
+       u32 flags = CSR_CRMD_IE;
+
+       __asm__ __volatile__(
+               "csrxchg %[val], %[mask], %[reg]\n\t"
+               : [val] "+r" (flags)
+               : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+               : "memory");
+}
+
+#define local_irq_enable arch_local_irq_enable
+
+/*
+ * Disable interrupts: exchange a zero value under the CSR_CRMD_IE mask,
+ * atomically clearing the IE bit in CRMD.
+ */
+static inline void arch_local_irq_disable(void)
+{
+       u32 flags = 0;
+
+       __asm__ __volatile__(
+               "csrxchg %[val], %[mask], %[reg]\n\t"
+               : [val] "+r" (flags)
+               : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+               : "memory");
+}
+
+#define local_irq_disable arch_local_irq_disable
+
+/*
+ * Disable interrupts and return the previous state.  csrxchg with a
+ * zero value under the CSR_CRMD_IE mask atomically clears CRMD.IE and
+ * leaves the old CRMD contents in @flags, suitable for passing to
+ * arch_local_irq_restore().
+ */
+static inline unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags = 0;
+
+       __asm__ __volatile__(
+               "csrxchg %[val], %[mask], %[reg]\n\t"
+               : [val] "+r" (flags)
+               : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+               : "memory");
+       return flags;
+}
+
+/*
+ * The original expansion called arch_local_irq_save(CSR_SSTATUS, SR_SIE)
+ * — two arguments to a zero-argument function, using RISC-V CSR names
+ * that do not exist on LoongArch.  Call it properly and keep only the
+ * IE bit in the saved flags.
+ */
+#define local_irq_save(__flags)                                 \
+       do {                                                        \
+               __flags = arch_local_irq_save() & CSR_CRMD_IE; \
+       } while (0)
+
+/*
+ * Restore the interrupt state previously returned by
+ * arch_local_irq_save(): only the bits selected by CSR_CRMD_IE are
+ * written back into CRMD, so any extra bits in @flags are ignored.
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+       __asm__ __volatile__(
+               "csrxchg %[val], %[mask], %[reg]\n\t"
+               : [val] "+r" (flags)
+               : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+               : "memory");
+}
+
+#define local_irq_restore(__flags)              \
+       do {                                        \
+               arch_local_irq_restore(__flags); \
+       } while (0)
+
+#endif
diff --git a/arch/loongarch/include/asm/types.h 
b/arch/loongarch/include/asm/types.h
new file mode 100644
index 000000000000..414e8f9160a0
--- /dev/null
+++ b/arch/loongarch/include/asm/types.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2011 Andes Technology Corporation
+ * Copyright (C) 2010 Shawn Lin (nobuh...@andestech.com)
+ * Copyright (C) 2011 Macpaul Lin (macp...@andestech.com)
+ * Copyright (C) 2017 Rick Chen (r...@andestech.com)
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef __ASM_LOONGARCH_TYPES_H
+#define __ASM_LOONGARCH_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+
+/* File mode bits type used by generic code. */
+typedef unsigned short umode_t;
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+/* _LOONGARCH_SZLONG is presumably predefined by the LoongArch
+ * toolchain (32 or 64) — confirm against the compiler in use. */
+#define BITS_PER_LONG _LOONGARCH_SZLONG
+
+#include <stddef.h>
+
+/* DMA addresses may be wider than CPU pointers when 64-bit DMA is
+ * configured. */
+#ifdef CONFIG_DMA_ADDR_T_64BIT
+typedef u64 dma_addr_t;
+#else
+typedef u32 dma_addr_t;
+#endif
+
+/* Physical addresses/sizes are always 64-bit, even in 32-bit builds. */
+typedef unsigned long long phys_addr_t;
+typedef unsigned long long phys_size_t;
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/loongarch/include/asm/u-boot-loongarch.h 
b/arch/loongarch/include/asm/u-boot-loongarch.h
new file mode 100644
index 000000000000..7f21d2a3e963
--- /dev/null
+++ b/arch/loongarch/include/asm/u-boot-loongarch.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2002
+ * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
+ * Marius Groeger <mgroe...@sysgo.de>
+ *
+ * Copyright (C) 2017 Andes Technology Corporation
+ * Rick Chen, Andes Technology Corporation <r...@andestech.com>
+ *
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef _U_BOOT_LOONGARCH_H_
+#define _U_BOOT_LOONGARCH_H_
+
+/* cpu/.../cpu.c */
+int cleanup_before_linux(void);
+
+/* board/.../... */
+int board_init(void);
+void board_quiesce_devices(void);
+
+#endif
diff --git a/arch/loongarch/include/asm/u-boot.h 
b/arch/loongarch/include/asm/u-boot.h
new file mode 100644
index 000000000000..123a7f09a01c
--- /dev/null
+++ b/arch/loongarch/include/asm/u-boot.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2002
+ * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
+ * Marius Groeger <mgroe...@sysgo.de>
+ *
+ * Copyright (C) 2017 Andes Technology Corporation
+ * Rick Chen, Andes Technology Corporation <r...@andestech.com>
+ *
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ *
+ ********************************************************************
+ * NOTE: This header file defines an interface to U-Boot. Including
+ * this (unmodified) header file in another file is considered normal
+ * use of U-Boot, and does *not* fall under the heading of "derived
+ * work".
+ ********************************************************************
+ */
+
+#ifndef _U_BOOT_H_
+#define _U_BOOT_H_     1
+
+/* Use the generic board which requires a unified bd_info */
+#include <asm-generic/u-boot.h>
+#include <asm/u-boot-loongarch.h>
+
+/* For image.h:image_check_target_arch() */
+#define IH_ARCH_DEFAULT IH_ARCH_LOONGARCH
+
+#endif /* _U_BOOT_H_ */
diff --git a/arch/loongarch/include/asm/unaligned.h 
b/arch/loongarch/include/asm/unaligned.h
new file mode 100644
index 000000000000..65cd4340ce93
--- /dev/null
+++ b/arch/loongarch/include/asm/unaligned.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2024 Jiaxun Yang <jiaxun.y...@flygoat.com>
+ */
+
+#ifndef __ASM_LOONGARCH_UNALIGNED_H
+#define __ASM_LOONGARCH_UNALIGNED_H
+
+#include <asm-generic/unaligned.h>
+
+#endif
diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile
new file mode 100644
index 000000000000..3dbed94cc624
--- /dev/null
+++ b/arch/loongarch/lib/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2024 Jiaxun yang <jiaxun.y...@flygoat.com>
+#
+



Reply via email to