Merge "fix(xilinx): fix coding style violations" into integration
diff --git a/Makefile b/Makefile
index cac3e12..fb50f0c 100644
--- a/Makefile
+++ b/Makefile
@@ -945,6 +945,9 @@
 # Variables for use with documentation build using Sphinx tool
 DOCS_PATH		?=	docs
 
+# Definition of the SIMICS_BUILD flag
+SIMICS_BUILD	?=	0
+
 ################################################################################
 # Include BL specific makefiles
 ################################################################################
@@ -1055,6 +1058,7 @@
         ENABLE_FEAT_FGT \
         ENABLE_FEAT_AMUv1 \
         ENABLE_FEAT_ECV \
+        SIMICS_BUILD \
 )))
 
 $(eval $(call assert_numerics,\
@@ -1172,6 +1176,7 @@
         ENABLE_FEAT_FGT \
         ENABLE_FEAT_AMUv1 \
         ENABLE_FEAT_ECV \
+        SIMICS_BUILD \
 )))
 
 ifeq (${SANITIZE_UB},trap)
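
SIMICS_BUILD is asserted as a boolean and registered with the define lists
above, so every translation unit sees it as a 0/1 preprocessor symbol. A
minimal sketch of how platform code might consume it (the guarded option
below is a hypothetical example, not part of this patch):

    /* Gate behaviour that differs on the Simics model. */
    #if SIMICS_BUILD
    #define PLAT_SKIP_DDR_TRAINING	1	/* hypothetical: model has no DDR PHY */
    #else
    #define PLAT_SKIP_DDR_TRAINING	0
    #endif
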
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index 9401811..b7d1168 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -29,6 +29,10 @@
    platform contains at least 1 CPU that requires dynamic mitigation.
    Defaults to 0.
 
+-  ``WORKAROUND_CVE_2022_23960``: Enables mitigation for `CVE-2022-23960`_.
+   This build option should be set to 1 if the target platform contains at
+   least 1 CPU that requires this mitigation. Defaults to 1.
+
 .. _arm_cpu_macros_errata_workarounds:
 
 CPU Errata Workarounds
@@ -585,6 +589,7 @@
 
 .. _CVE-2017-5715: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-5715
 .. _CVE-2018-3639: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-3639
+.. _CVE-2022-23960: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23960
 .. _Cortex-A53 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm048406/index.html
 .. _Cortex-A57 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm049219/index.html
 .. _Cortex-A72 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm012079/index.html
diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h
index 009eb90..10b0a0b 100644
--- a/include/arch/aarch64/arch_helpers.h
+++ b/include/arch/aarch64/arch_helpers.h
@@ -224,6 +224,7 @@
 DEFINE_SYSOP_PARAM_FUNC(xpaci)
 
 void flush_dcache_range(uintptr_t addr, size_t size);
+void flush_dcache_to_popa_range(uintptr_t addr, size_t size);
 void clean_dcache_range(uintptr_t addr, size_t size);
 void inv_dcache_range(uintptr_t addr, size_t size);
 bool is_dcache_enabled(void);
@@ -274,8 +275,10 @@
 DEFINE_SYSOP_TYPE_FUNC(dmb, st)
 DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
 DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dsb, osh)
 DEFINE_SYSOP_TYPE_FUNC(dsb, nsh)
 DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
+DEFINE_SYSOP_TYPE_FUNC(dsb, oshst)
 DEFINE_SYSOP_TYPE_FUNC(dmb, oshld)
 DEFINE_SYSOP_TYPE_FUNC(dmb, oshst)
 DEFINE_SYSOP_TYPE_FUNC(dmb, osh)
@@ -610,14 +613,13 @@
 }
 
 /*
- * Invalidate cached copies of GPT entries
- * from TLBs by physical address
+ * Invalidate TLBs of GPT entries by physical address, last level.
  *
  * @pa: the starting address for the range
  *      of invalidation
  * @size: size of the range of invalidation
  */
-void gpt_tlbi_by_pa(uint64_t pa, size_t size);
+void gpt_tlbi_by_pa_ll(uint64_t pa, size_t size);
 
 
 /* Previously defined accessor functions with incomplete register names  */
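
The two helpers added above combine in the granule transition path. A
condensed sketch of the intended sequence (mirroring the gpt_rme.c changes
later in this patch; GRANULE_SIZE and the function name are illustrative):

    /* Sketch: scrub the granule, update its GPT entry, then drop
     * stale GPT TLB entries for it. */
    static void transition_granule_sketch(uint64_t nse, uint64_t base)
    {
    	flush_dcache_to_popa_range(nse | base, GRANULE_SIZE);
    	/* ... update the L1 GPT descriptor for 'base' here ... */
    	dsboshst();	/* order the GPT write before the TLBI */
    	gpt_tlbi_by_pa_ll(base, GRANULE_SIZE);
    	dsbosh();	/* ensure the invalidation completes */
    }
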
diff --git a/include/lib/cpus/aarch64/cortex_a710.h b/include/lib/cpus/aarch64/cortex_a710.h
index ec62421..09614ee 100644
--- a/include/lib/cpus/aarch64/cortex_a710.h
+++ b/include/lib/cpus/aarch64/cortex_a710.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,9 @@
 
 #define CORTEX_A710_MIDR					U(0x410FD470)
 
+/* Cortex-A710 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A710_BHB_LOOP_COUNT				U(32)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_a76.h b/include/lib/cpus/aarch64/cortex_a76.h
index a61825f..74fb6e9 100644
--- a/include/lib/cpus/aarch64/cortex_a76.h
+++ b/include/lib/cpus/aarch64/cortex_a76.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,38 +10,41 @@
 #include <lib/utils_def.h>
 
 /* Cortex-A76 MIDR for revision 0 */
-#define CORTEX_A76_MIDR		U(0x410fd0b0)
+#define CORTEX_A76_MIDR						U(0x410fd0b0)
+
+/* Cortex-A76 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A76_BHB_LOOP_COUNT				U(24)
 
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
-#define CORTEX_A76_CPUPWRCTLR_EL1	S3_0_C15_C2_7
-#define CORTEX_A76_CPUECTLR_EL1		S3_0_C15_C1_4
+#define CORTEX_A76_CPUPWRCTLR_EL1				S3_0_C15_C2_7
+#define CORTEX_A76_CPUECTLR_EL1					S3_0_C15_C1_4
 
-#define CORTEX_A76_CPUECTLR_EL1_WS_THR_L2	(ULL(3) << 24)
-#define CORTEX_A76_CPUECTLR_EL1_BIT_51		(ULL(1) << 51)
+#define CORTEX_A76_CPUECTLR_EL1_WS_THR_L2			(ULL(3) << 24)
+#define CORTEX_A76_CPUECTLR_EL1_BIT_51				(ULL(1) << 51)
 
 /*******************************************************************************
  * CPU Auxiliary Control register specific definitions.
  ******************************************************************************/
-#define CORTEX_A76_CPUACTLR_EL1		S3_0_C15_C1_0
+#define CORTEX_A76_CPUACTLR_EL1					S3_0_C15_C1_0
 
 #define CORTEX_A76_CPUACTLR_EL1_DISABLE_STATIC_PREDICTION	(ULL(1) << 6)
 
-#define CORTEX_A76_CPUACTLR_EL1_BIT_13	(ULL(1) << 13)
+#define CORTEX_A76_CPUACTLR_EL1_BIT_13				(ULL(1) << 13)
 
-#define CORTEX_A76_CPUACTLR2_EL1	S3_0_C15_C1_1
+#define CORTEX_A76_CPUACTLR2_EL1				S3_0_C15_C1_1
 
-#define CORTEX_A76_CPUACTLR2_EL1_BIT_2	(ULL(1) << 2)
+#define CORTEX_A76_CPUACTLR2_EL1_BIT_2				(ULL(1) << 2)
 
 #define CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE	(ULL(1) << 16)
 
-#define CORTEX_A76_CPUACTLR3_EL1	S3_0_C15_C1_2
+#define CORTEX_A76_CPUACTLR3_EL1				S3_0_C15_C1_2
 
-#define CORTEX_A76_CPUACTLR3_EL1_BIT_10	(ULL(1) << 10)
+#define CORTEX_A76_CPUACTLR3_EL1_BIT_10				(ULL(1) << 10)
 
 
 /* Definitions of register field mask in CORTEX_A76_CPUPWRCTLR_EL1 */
-#define CORTEX_A76_CORE_PWRDN_EN_MASK	U(0x1)
+#define CORTEX_A76_CORE_PWRDN_EN_MASK				U(0x1)
 
 #endif /* CORTEX_A76_H */
diff --git a/include/lib/cpus/aarch64/cortex_a77.h b/include/lib/cpus/aarch64/cortex_a77.h
index 5753e90..4a87168 100644
--- a/include/lib/cpus/aarch64/cortex_a77.h
+++ b/include/lib/cpus/aarch64/cortex_a77.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -12,6 +12,9 @@
 /* Cortex-A77 MIDR */
 #define CORTEX_A77_MIDR					U(0x410FD0D0)
 
+/* Cortex-A77 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A77_BHB_LOOP_COUNT			U(24)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_a78.h b/include/lib/cpus/aarch64/cortex_a78.h
index 42b0833..f3cb39f 100644
--- a/include/lib/cpus/aarch64/cortex_a78.h
+++ b/include/lib/cpus/aarch64/cortex_a78.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -11,6 +11,9 @@
 
 #define CORTEX_A78_MIDR					U(0x410FD410)
 
+/* Cortex-A78 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A78_BHB_LOOP_COUNT			U(32)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_x2.h b/include/lib/cpus/aarch64/cortex_x2.h
index e3d0fa9..62530e2 100644
--- a/include/lib/cpus/aarch64/cortex_x2.h
+++ b/include/lib/cpus/aarch64/cortex_x2.h
@@ -9,6 +9,9 @@
 
 #define CORTEX_X2_MIDR						U(0x410FD480)
 
+/* Cortex-X2 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_X2_BHB_LOOP_COUNT				U(32)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/neoverse_n1.h b/include/lib/cpus/aarch64/neoverse_n1.h
index b50befa..b6b8d8d 100644
--- a/include/lib/cpus/aarch64/neoverse_n1.h
+++ b/include/lib/cpus/aarch64/neoverse_n1.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,58 +10,61 @@
 #include <lib/utils_def.h>
 
 /* Neoverse N1 MIDR for revision 0 */
-#define NEOVERSE_N1_MIDR		U(0x410fd0c0)
+#define NEOVERSE_N1_MIDR				U(0x410fd0c0)
+
+/* Neoverse N1 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_N1_BHB_LOOP_COUNT			U(24)
 
 /* Exception Syndrome register EC code for IC Trap */
-#define NEOVERSE_N1_EC_IC_TRAP		U(0x1f)
+#define NEOVERSE_N1_EC_IC_TRAP				U(0x1f)
 
 /*******************************************************************************
  * CPU Power Control register specific definitions.
  ******************************************************************************/
-#define NEOVERSE_N1_CPUPWRCTLR_EL1	S3_0_C15_C2_7
+#define NEOVERSE_N1_CPUPWRCTLR_EL1			S3_0_C15_C2_7
 
 /* Definitions of register field mask in NEOVERSE_N1_CPUPWRCTLR_EL1 */
-#define NEOVERSE_N1_CORE_PWRDN_EN_MASK	U(0x1)
+#define NEOVERSE_N1_CORE_PWRDN_EN_MASK			U(0x1)
 
-#define NEOVERSE_N1_ACTLR_AMEN_BIT	(U(1) << 4)
+#define NEOVERSE_N1_ACTLR_AMEN_BIT			(U(1) << 4)
 
-#define NEOVERSE_N1_AMU_NR_COUNTERS	U(5)
-#define NEOVERSE_N1_AMU_GROUP0_MASK	U(0x1f)
+#define NEOVERSE_N1_AMU_NR_COUNTERS			U(5)
+#define NEOVERSE_N1_AMU_GROUP0_MASK			U(0x1f)
 
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
-#define NEOVERSE_N1_CPUECTLR_EL1	S3_0_C15_C1_4
+#define NEOVERSE_N1_CPUECTLR_EL1			S3_0_C15_C1_4
 
-#define NEOVERSE_N1_WS_THR_L2_MASK	(ULL(3) << 24)
+#define NEOVERSE_N1_WS_THR_L2_MASK			(ULL(3) << 24)
 #define NEOVERSE_N1_CPUECTLR_EL1_MM_TLBPF_DIS_BIT	(ULL(1) << 51)
 #define NEOVERSE_N1_CPUECTLR_EL1_EXTLLC_BIT		(ULL(1) << 0)
 
 /*******************************************************************************
  * CPU Auxiliary Control register specific definitions.
  ******************************************************************************/
-#define NEOVERSE_N1_CPUACTLR_EL1	S3_0_C15_C1_0
+#define NEOVERSE_N1_CPUACTLR_EL1			S3_0_C15_C1_0
 
-#define NEOVERSE_N1_CPUACTLR_EL1_BIT_6	(ULL(1) << 6)
-#define NEOVERSE_N1_CPUACTLR_EL1_BIT_13	(ULL(1) << 13)
+#define NEOVERSE_N1_CPUACTLR_EL1_BIT_6			(ULL(1) << 6)
+#define NEOVERSE_N1_CPUACTLR_EL1_BIT_13			(ULL(1) << 13)
 
-#define NEOVERSE_N1_CPUACTLR2_EL1	S3_0_C15_C1_1
+#define NEOVERSE_N1_CPUACTLR2_EL1			S3_0_C15_C1_1
 
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_0		(ULL(1) << 0)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_2		(ULL(1) << 2)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_11	(ULL(1) << 11)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_15	(ULL(1) << 15)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_16	(ULL(1) << 16)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_59	(ULL(1) << 59)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_0			(ULL(1) << 0)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_2			(ULL(1) << 2)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_11		(ULL(1) << 11)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_15		(ULL(1) << 15)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_16		(ULL(1) << 16)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_59		(ULL(1) << 59)
 
-#define NEOVERSE_N1_CPUACTLR3_EL1	S3_0_C15_C1_2
+#define NEOVERSE_N1_CPUACTLR3_EL1			S3_0_C15_C1_2
 
-#define NEOVERSE_N1_CPUACTLR3_EL1_BIT_10	(ULL(1) << 10)
+#define NEOVERSE_N1_CPUACTLR3_EL1_BIT_10		(ULL(1) << 10)
 
 /* Instruction patching registers */
-#define CPUPSELR_EL3	S3_6_C15_C8_0
-#define CPUPCR_EL3	S3_6_C15_C8_1
-#define CPUPOR_EL3	S3_6_C15_C8_2
-#define CPUPMR_EL3	S3_6_C15_C8_3
+#define CPUPSELR_EL3					S3_6_C15_C8_0
+#define CPUPCR_EL3					S3_6_C15_C8_1
+#define CPUPOR_EL3					S3_6_C15_C8_2
+#define CPUPMR_EL3					S3_6_C15_C8_3
 
 #endif /* NEOVERSE_N1_H */
diff --git a/include/lib/cpus/aarch64/neoverse_n2.h b/include/lib/cpus/aarch64/neoverse_n2.h
index a1e676e..0452b39 100644
--- a/include/lib/cpus/aarch64/neoverse_n2.h
+++ b/include/lib/cpus/aarch64/neoverse_n2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,9 @@
 /* Neoverse N2 ID register for revision r0p0 */
 #define NEOVERSE_N2_MIDR				U(0x410FD490)
 
+/* Neoverse N2 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_N2_BHB_LOOP_COUNT			U(32)
+
 /*******************************************************************************
  * CPU Power control register
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/neoverse_v1.h b/include/lib/cpus/aarch64/neoverse_v1.h
index e43c907..a904c04 100644
--- a/include/lib/cpus/aarch64/neoverse_v1.h
+++ b/include/lib/cpus/aarch64/neoverse_v1.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,9 @@
 
 #define NEOVERSE_V1_MIDR					U(0x410FD400)
 
+/* Neoverse V1 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_V1_BHB_LOOP_COUNT				U(32)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
diff --git a/include/lib/gpt_rme/gpt_rme.h b/include/lib/gpt_rme/gpt_rme.h
index 379b915..94a88b0 100644
--- a/include/lib/gpt_rme/gpt_rme.h
+++ b/include/lib/gpt_rme/gpt_rme.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -35,6 +35,13 @@
 #define GPT_GPI_ANY			U(0xF)
 #define GPT_GPI_VAL_MASK		UL(0xF)
 
+#define GPT_NSE_SECURE			U(0b00)
+#define GPT_NSE_ROOT			U(0b01)
+#define GPT_NSE_NS			U(0b10)
+#define GPT_NSE_REALM			U(0b11)
+
+#define GPT_NSE_SHIFT                   U(62)
+
 /* PAS attribute GPI definitions. */
 #define GPT_PAS_ATTR_GPI_SHIFT		U(0)
 #define GPT_PAS_ATTR_GPI_MASK		U(0xF)
@@ -262,15 +269,12 @@
  *   base: Base address of the region to transition, must be aligned to granule
  *         size.
  *   size: Size of region to transition, must be aligned to granule size.
- *   src_sec_state: Security state of the caller.
- *   target_pas: Target PAS of the specified memory region.
+ *   src_sec_state: Security state of the originating SMC invoking the API.
  *
  * Return
  *    Negative Linux error code in the event of a failure, 0 for success.
  */
-int gpt_transition_pas(uint64_t base,
-		       size_t size,
-		       unsigned int src_sec_state,
-		       unsigned int target_pas);
+int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state);
+int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state);
 
 #endif /* GPT_RME_H */
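
A sketch of how a dispatcher might route an SMC onto the split entry points
(the FID name and the handler itself are hypothetical, not part of this
patch):

    /* Hypothetical routing: pick delegate vs. undelegate by SMC FID. */
    static int handle_granule_smc(uint32_t fid, uint64_t base, size_t size,
    			      unsigned int src_sec_state)
    {
    	if (fid == FID_GRANULE_DELEGATE)	/* hypothetical FID */
    		return gpt_delegate_pas(base, size, src_sec_state);

    	return gpt_undelegate_pas(base, size, src_sec_state);
    }
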
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
index d1f3847..6faf545 100644
--- a/lib/aarch64/cache_helpers.S
+++ b/lib/aarch64/cache_helpers.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,6 +8,7 @@
 #include <asm_macros.S>
 
 	.globl	flush_dcache_range
+	.globl	flush_dcache_to_popa_range
 	.globl	clean_dcache_range
 	.globl	inv_dcache_range
 	.globl	dcsw_op_louis
@@ -63,6 +64,35 @@
 endfunc inv_dcache_range
 
 
+	/*
+	 * On implementations with FEAT_MTE2, Root firmware must issue
+	 * DC CIGDPAPA instead of DC CIPAPA in order to additionally
+	 * clean and invalidate Allocation Tags associated with the
+	 * affected locations.
+	 *
+	 * ------------------------------------------
+	 * Clean+Invalidate by PA to POPA
+	 * for 'size' bytes starting at 'addr'.
+	 * 'x0' = addr, 'x1' = size
+	 * ------------------------------------------
+	 */
+func flush_dcache_to_popa_range
+	/* Exit early if size is zero */
+	cbz	x1, exit_loop_dc_cipapa
+	dcache_line_size x2, x3
+	sub	x3, x2, #1
+	bic	x0, x0, x3
+	add	x1, x1, x0
+loop_dc_cipapa:
+	sys	#6, c7, c14, #1, x0 /* DC CIPAPA,<Xt> */
+	add	x0, x0, x2
+	cmp	x0, x1
+	b.lo	loop_dc_cipapa
+	dsb	osh
+exit_loop_dc_cipapa:
+	ret
+endfunc	flush_dcache_to_popa_range
+
 	/* ---------------------------------------------------------------
 	 * Data cache operations by set/way to the level specified
 	 *
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index 01531ca..e8110b0 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -15,7 +15,7 @@
 	.globl	zero_normalmem
 	.globl	zeromem
 	.globl	memcpy16
-	.globl	gpt_tlbi_by_pa
+	.globl	gpt_tlbi_by_pa_ll
 
 	.globl	disable_mmu_el1
 	.globl	disable_mmu_el3
@@ -599,7 +599,7 @@
  * TODO: Currently only supports size of 4KB,
  * support other sizes as well.
  */
-func gpt_tlbi_by_pa
+func gpt_tlbi_by_pa_ll
 #if ENABLE_ASSERTIONS
 	cmp	x1, #PAGE_SIZE_4KB
 	ASM_ASSERT(eq)
@@ -607,7 +607,7 @@
 	ASM_ASSERT(eq)
 #endif
 	lsr	x0, x0, #FOUR_KB_SHIFT	/* 4KB size encoding is zero */
-	sys	#6, c8, c4, #3, x0 	/* TLBI RPAOS, <Xt> */
+	sys	#6, c8, c4, #7, x0 	/* TLBI RPALOS, <Xt> */
 	dsb	sy
 	ret
-endfunc gpt_tlbi_by_pa
+endfunc gpt_tlbi_by_pa_ll
diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S
index 4d5d949..aea62ae 100644
--- a/lib/cpus/aarch64/cortex_a710.S
+++ b/lib/cpus/aarch64/cortex_a710.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <cortex_a710.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Cortex A710 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_A710_BHB_LOOP_COUNT, cortex_a710
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 /* --------------------------------------------------
  * Errata Workaround for Cortex-A710 Erratum 1987031.
  * This applies to revision r0p0, r1p0 and r2p0 of Cortex-A710. It is still
@@ -305,6 +310,15 @@
 	b       cpu_rev_var_ls
 endfunc check_errata_2282622
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
@@ -344,6 +358,7 @@
 	report_errata ERRATA_A710_2267065, cortex_a710, 2267065
 	report_errata ERRATA_A710_2136059, cortex_a710, 2136059
 	report_errata ERRATA_A710_2282622, cortex_a710, 2282622
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a710, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -404,6 +419,15 @@
 	bl	errata_a710_2282622_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-A710 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_a710
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret	x19
 endfunc cortex_a710_reset_func
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index 4f7f4bb..114d0f5 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -7,11 +7,11 @@
 #include <arch.h>
 #include <asm_macros.S>
 #include <common/bl_common.h>
-#include <context.h>
 #include <cortex_a76.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
 #include <services/arm_arch_svc.h>
+#include "wa_cve_2022_23960_bhb.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -35,59 +35,17 @@
 	 *
 	 * The macro saves x2-x3 to the context. In the fast path
 	 * x0-x3 registers do not need to be restored as the calling
-	 * context will have saved them.
+	 * context will have saved them. The macro also saves
+	 * x29-x30 to the context in the sync_exception path.
 	 */
 	.macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
 	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-
 	.if \_is_sync_exception
-		/*
-		 * Ensure SMC is coming from A64/A32 state on #0
-		 * with W0 = SMCCC_ARCH_WORKAROUND_2
-		 *
-		 * This sequence evaluates as:
-		 *    (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
-		 * allowing use of a single branch operation
-		 */
-		orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_2
-		cmp	x0, x2
-		mrs	x3, esr_el3
-		mov_imm	w2, \_esr_el3_val
-		ccmp	w2, w3, #0, eq
-		/*
-		 * Static predictor will predict a fall-through, optimizing
-		 * the `SMCCC_ARCH_WORKAROUND_2` fast path.
-		 */
-		bne	1f
-
-		/*
-		 * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
-		 * fast path.
-		 */
-		cmp	x1, xzr /* enable/disable check */
-
-		/*
-		 * When the calling context wants mitigation disabled,
-		 * we program the mitigation disable function in the
-		 * CPU context, which gets invoked on subsequent exits from
-		 * EL3 via the `el3_exit` function. Otherwise NULL is
-		 * programmed in the CPU context, which results in caller's
-		 * inheriting the EL3 mitigation state (enabled) on subsequent
-		 * `el3_exit`.
-		 */
-		mov	x0, xzr
-		adr	x1, cortex_a76_disable_wa_cve_2018_3639
-		csel	x1, x1, x0, eq
-		str	x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
-
-		mrs	x2, CORTEX_A76_CPUACTLR2_EL1
-		orr	x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
-		bic	x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
-		csel	x3, x3, x1, eq
-		msr	CORTEX_A76_CPUACTLR2_EL1, x3
-		exception_return /* exception_return contains ISB */
+	stp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+	mov_imm	w2, \_esr_el3_val
+	bl	apply_cve_2018_3639_sync_wa
+	ldp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
 	.endif
-1:
 	/*
 	 * Always enable v4 mitigation during EL3 execution. This is not
 	 * required for the fast path above because it does not perform any
@@ -105,8 +63,10 @@
 	 */
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	.endm
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
 
-vector_base cortex_a76_wa_cve_2018_3639_a76_vbar
+#if DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960
+vector_base cortex_a76_wa_cve_vbar
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_EL0 : 0x0 - 0x200
@@ -153,22 +113,54 @@
 	 * ---------------------------------------------------------------------
 	 */
 vector_entry cortex_a76_sync_exception_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	sync_exception_aarch64
 end_vector_entry cortex_a76_sync_exception_aarch64
 
 vector_entry cortex_a76_irq_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	irq_aarch64
 end_vector_entry cortex_a76_irq_aarch64
 
 vector_entry cortex_a76_fiq_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	fiq_aarch64
 end_vector_entry cortex_a76_fiq_aarch64
 
 vector_entry cortex_a76_serror_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	serror_aarch64
 end_vector_entry cortex_a76_serror_aarch64
 
@@ -177,24 +169,130 @@
 	 * ---------------------------------------------------------------------
 	 */
 vector_entry cortex_a76_sync_exception_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	sync_exception_aarch32
 end_vector_entry cortex_a76_sync_exception_aarch32
 
 vector_entry cortex_a76_irq_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	irq_aarch32
 end_vector_entry cortex_a76_irq_aarch32
 
 vector_entry cortex_a76_fiq_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	fiq_aarch32
 end_vector_entry cortex_a76_fiq_aarch32
 
 vector_entry cortex_a76_serror_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
 	b	serror_aarch32
 end_vector_entry cortex_a76_serror_aarch32
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+	/*
+	 * -----------------------------------------------------------------
+	 * This function applies the mitigation for CVE-2018-3639
+	 * specifically for sync exceptions. It implements a fast path
+	 * where `SMCCC_ARCH_WORKAROUND_2` SMC calls from a lower EL
+	 * running in AArch64 will go through the fast and return early.
+	 *
+	 * In the fast path x0-x3 registers do not need to be restored as the
+	 * calling context will have saved them.
+	 *
+	 * The caller must pass the esr_el3 value to compare against in x2.
+	 * Save and restore these registers outside of this function, from
+	 * the context, before jumping to the main runtime vector table entry.
+	 *
+	 * Shall clobber: x0-x3, x30
+	 * -----------------------------------------------------------------
+	 */
+func apply_cve_2018_3639_sync_wa
+	/*
+	 * Ensure SMC is coming from A64/A32 state on #0
+	 * with W0 = SMCCC_ARCH_WORKAROUND_2
+	 *
+	 * This sequence evaluates as:
+	 *    (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
+	 * allowing use of a single branch operation.
+	 * X2 is populated outside this function with the SMC FID.
+	 */
+	orr	w3, wzr, #SMCCC_ARCH_WORKAROUND_2
+	cmp	x0, x3
+	mrs	x3, esr_el3
+
+	ccmp	w2, w3, #0, eq
+	/*
+	 * Static predictor will predict a fall-through, optimizing
+	 * the `SMCCC_ARCH_WORKAROUND_2` fast path.
+	 */
+	bne	1f
+
+	/*
+	 * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
+	 * fast path.
+	 */
+	cmp	x1, xzr /* enable/disable check */
+
+	/*
+	 * When the calling context wants mitigation disabled,
+	 * we program the mitigation disable function in the
+	 * CPU context, which gets invoked on subsequent exits from
+	 * EL3 via the `el3_exit` function. Otherwise NULL is
+	 * programmed in the CPU context, which results in caller's
+	 * inheriting the EL3 mitigation state (enabled) on subsequent
+	 * `el3_exit`.
+	 */
+	mov	x0, xzr
+	adr	x1, cortex_a76_disable_wa_cve_2018_3639
+	csel	x1, x1, x0, eq
+	str	x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
+
+	mrs	x2, CORTEX_A76_CPUACTLR2_EL1
+	orr	x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+	bic	x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+	csel	x3, x3, x1, eq
+	msr	CORTEX_A76_CPUACTLR2_EL1, x3
+	ldp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+	/*
+	 * `SMCCC_ARCH_WORKAROUND_2` fast path return to lower EL.
+	 */
+	exception_return /* exception_return contains ISB */
+1:
+	ret
+endfunc apply_cve_2018_3639_sync_wa
 #endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
 
 	/* --------------------------------------------------
@@ -519,6 +617,15 @@
 #endif
 endfunc check_errata_1165522
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif /* WORKAROUND_CVE_2022_23960 */
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A76.
 	 * Shall clobber: x0-x19
@@ -590,16 +697,31 @@
 	 * The Cortex-A76 generic vectors are overwritten to use the vectors
 	 * defined above. This is required in order to apply mitigation
 	 * against CVE-2018-3639 on exception entry from lower ELs.
+	 * If the below vector table is used, skip overriding it again for
+	 * CVE_2022_23960 as both use the same vbar.
 	 */
-	adr	x0, cortex_a76_wa_cve_2018_3639_a76_vbar
+	adr	x0, cortex_a76_wa_cve_vbar
 	msr	vbar_el3, x0
 	isb
+	b	2f
 #endif /* IMAGE_BL31 */
 
 1:
 #endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
 #endif /* WORKAROUND_CVE_2018_3639 */
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-A76 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs. This will be bypassed
+	 * if DYNAMIC_WORKAROUND_CVE_2018_3639 has overridden the vectors.
+	 */
+	adr	x0, cortex_a76_wa_cve_vbar
+	msr	vbar_el3, x0
+	isb
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+2:
+
 #if ERRATA_DSU_798953
 	bl	errata_dsu_798953_wa
 #endif
@@ -656,6 +778,7 @@
 	report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
 	report_errata ERRATA_DSU_798953, cortex_a76, dsu_798953
 	report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a76, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S
index 8c8f4d3..e7365e2 100644
--- a/lib/cpus/aarch64/cortex_a77.S
+++ b/lib/cpus/aarch64/cortex_a77.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <cortex_a77.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Cortex-A77 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_A77_BHB_LOOP_COUNT, cortex_a77
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 	/* --------------------------------------------------
 	 * Errata Workaround for Cortex A77 Errata #1508412.
 	 * This applies only to revision <= r1p0 of Cortex A77.
@@ -194,6 +199,15 @@
 	b	cpu_rev_var_ls
 endfunc check_errata_1791578
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A77.
 	 * Shall clobber: x0-x19
@@ -224,6 +238,16 @@
 	bl	errata_a77_1791578_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-A77 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_a77
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+	isb
 	ret	x19
 endfunc cortex_a77_reset_func
 
@@ -261,6 +285,7 @@
 	report_errata ERRATA_A77_1925769, cortex_a77, 1925769
 	report_errata ERRATA_A77_1946167, cortex_a77, 1946167
 	report_errata ERRATA_A77_1791578, cortex_a77, 1791578
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a77, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
index a1288ba..1a6f848 100644
--- a/lib/cpus/aarch64/cortex_a78.S
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,12 +10,16 @@
 #include <cortex_a78.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
 #error "cortex_a78 must be compiled with HW_ASSISTED_COHERENCY enabled"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_A78_BHB_LOOP_COUNT, cortex_a78
+#endif /* WORKAROUND_CVE_2022_23960 */
 
 /* --------------------------------------------------
  * Errata Workaround for A78 Erratum 1688305.
@@ -263,6 +267,15 @@
 	b	cpu_rev_var_range
 endfunc check_errata_2242635
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A78
 	 * -------------------------------------------------
@@ -327,6 +340,15 @@
 	msr	CPUAMCNTENSET1_EL0, x0
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-A78 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_a78
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret	x19
 endfunc cortex_a78_reset_func
@@ -368,6 +390,7 @@
 	report_errata ERRATA_A78_1952683, cortex_a78, 1952683
 	report_errata ERRATA_A78_2132060, cortex_a78, 2132060
 	report_errata ERRATA_A78_2242635, cortex_a78, 2242635
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a78, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S
index 2ecfbbb..9586a5b 100644
--- a/lib/cpus/aarch64/cortex_x2.S
+++ b/lib/cpus/aarch64/cortex_x2.S
@@ -10,6 +10,7 @@
 #include <cortex_x2.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Cortex X2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_X2_BHB_LOOP_COUNT, cortex_x2
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 	/* --------------------------------------------------
 	 * Errata Workaround for Cortex X2 Errata #2002765.
 	 * This applies to revisions r0p0, r1p0, and r2p0 and
@@ -222,6 +227,16 @@
 	mov	x1, #0x20
 	b	cpu_rev_var_ls
 endfunc check_errata_2216384
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
@@ -258,6 +273,7 @@
 	report_errata ERRATA_X2_2017096, cortex_x2, 2017096
 	report_errata ERRATA_X2_2081180, cortex_x2, 2081180
 	report_errata ERRATA_X2_2216384, cortex_x2, 2216384
+	report_errata WORKAROUND_CVE_2022_23960, cortex_x2, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -305,6 +321,16 @@
 	bl	errata_x2_2216384_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-X2 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_x2
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+	isb
 	ret x19
 endfunc cortex_x2_reset_func
 
diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S
index 9c97cf6..b75b0c1 100644
--- a/lib/cpus/aarch64/neoverse_n1.S
+++ b/lib/cpus/aarch64/neoverse_n1.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,8 +8,8 @@
 #include <asm_macros.S>
 #include <cpuamu.h>
 #include <cpu_macros.S>
-#include <context.h>
 #include <neoverse_n1.h>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -23,6 +23,10 @@
 
 	.global neoverse_n1_errata_ic_trap_handler
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table NEOVERSE_N1_BHB_LOOP_COUNT, neoverse_n1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 /* --------------------------------------------------
  * Errata Workaround for Neoverse N1 Erratum 1043202.
  * This applies to revision r0p0 and r1p0 of Neoverse N1.
@@ -464,6 +468,15 @@
 	b	cpu_rev_var_range
 endfunc check_errata_1946160
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 func neoverse_n1_reset_func
 	mov	x19, x30
 
@@ -575,6 +588,15 @@
 	bl	errata_dsu_936184_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Neoverse-N1 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_neoverse_n1
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret	x19
 endfunc neoverse_n1_reset_func
@@ -624,6 +646,7 @@
 	report_errata ERRATA_N1_1868343, neoverse_n1, 1868343
 	report_errata ERRATA_N1_1946160, neoverse_n1, 1946160
 	report_errata ERRATA_DSU_936184, neoverse_n1, dsu_936184
+	report_errata WORKAROUND_CVE_2022_23960, neoverse_n1, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
index 621aded..b93f2a6 100644
--- a/lib/cpus/aarch64/neoverse_n2.S
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,6 +8,7 @@
 #include <asm_macros.S>
 #include <cpu_macros.S>
 #include <neoverse_n2.h>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -19,6 +20,10 @@
 #error "Neoverse-N2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table NEOVERSE_N2_BHB_LOOP_COUNT, neoverse_n2
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 /* --------------------------------------------------
  * Errata Workaround for Neoverse N2 Erratum 2002655.
  * This applies to revision r0p0 of Neoverse N2. It is still open.
@@ -333,6 +338,15 @@
 	b	cpu_rev_var_ls
 endfunc check_errata_2280757
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------
 	 * The CPU Ops reset function for Neoverse N2.
 	 * -------------------------------------------
@@ -428,6 +442,15 @@
 	bl	errata_n2_2002655_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Neoverse-N2 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_neoverse_n2
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret	x19
 endfunc neoverse_n2_reset_func
@@ -469,6 +492,7 @@
 	report_errata ERRATA_N2_2138958, neoverse_n2, 2138958
 	report_errata ERRATA_N2_2242400, neoverse_n2, 2242400
 	report_errata ERRATA_N2_2280757, neoverse_n2, 2280757
+	report_errata WORKAROUND_CVE_2022_23960, neoverse_n2, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/neoverse_v1.S b/lib/cpus/aarch64/neoverse_v1.S
index 62a7a30..6adb3a8 100644
--- a/lib/cpus/aarch64/neoverse_v1.S
+++ b/lib/cpus/aarch64/neoverse_v1.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <neoverse_v1.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Neoverse-V1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table NEOVERSE_V1_BHB_LOOP_COUNT, neoverse_v1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 	/* --------------------------------------------------
 	 * Errata Workaround for Neoverse V1 Errata #1774420.
 	 * This applies to revisions r0p0 and r1p0, fixed in r1p1.
@@ -325,6 +330,15 @@
 	b	cpu_rev_var_range
 endfunc check_errata_2216392
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ---------------------------------------------
@@ -364,6 +378,7 @@
 	report_errata ERRATA_V1_2139242, neoverse_v1, 2139242
 	report_errata ERRATA_V1_2108267, neoverse_v1, 2108267
 	report_errata ERRATA_V1_2216392, neoverse_v1, 2216392
+	report_errata WORKAROUND_CVE_2022_23960, neoverse_v1, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -422,6 +437,16 @@
 	bl	errata_neoverse_v1_2216392_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Neoverse-V1 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_neoverse_v1
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+	isb
 	ret	x19
 endfunc neoverse_v1_reset_func
 
diff --git a/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S b/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S
new file mode 100644
index 0000000..e0e41cc
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <context.h>
+
+#if WORKAROUND_CVE_2022_23960
+	/*
+	 * This macro applies the mitigation for CVE-2022-23960.
+	 * The macro saves x2-x3 to the CPU context.
+	 * SP should point to the CPU context.
+	 */
+	.macro	apply_cve_2022_23960_bhb_wa _bhb_loop_count
+	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+
+	/* CVE-BHB-NUM loop count */
+	mov	x2, \_bhb_loop_count
+
+1:
+	/* b pc+4 part of the workaround */
+	b	2f
+2:
+	subs	x2, x2, #1
+	bne	1b
+	dsb	sy
+	isb
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	.endm
+#endif /* WORKAROUND_CVE_2022_23960 */
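
The loop above retires a CPU-specific number of taken branches to displace
prior branch history before the real vector runs. A behavioural C model
(illustration only):

    /* Model of apply_cve_2022_23960_bhb_wa: execute 'count' taken
     * branches, then synchronize before continuing. */
    static inline void bhb_loop_model(unsigned int count)
    {
    	while (count-- != 0U) {
    		__asm__ volatile("b 1f\n1:");	/* the 'b pc+4' element */
    	}
    	__asm__ volatile("dsb sy\n\tisb" ::: "memory");
    }
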
diff --git a/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S b/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S
new file mode 100644
index 0000000..220fa11
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <services/arm_arch_svc.h>
+#include "wa_cve_2022_23960_bhb.S"
+
+	/*
+	 * This macro generates a dedicated vector table for each of the
+	 * CPUs that require the mitigation for CVE_2022_23960.
+	 */
+	.macro wa_cve_2022_23960_bhb_vector_table _bhb_loop_count, _cpu
+
+	.globl	wa_cve_vbar_\_cpu
+
+vector_base wa_cve_vbar_\_cpu
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_EL0 : 0x0 - 0x200
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry bhb_sync_exception_sp_el0_\_cpu
+	b	sync_exception_sp_el0
+end_vector_entry bhb_sync_exception_sp_el0_\_cpu
+
+vector_entry bhb_irq_sp_el0_\_cpu
+	b	irq_sp_el0
+end_vector_entry bhb_irq_sp_el0_\_cpu
+
+vector_entry bhb_fiq_sp_el0_\_cpu
+	b	fiq_sp_el0
+end_vector_entry bhb_fiq_sp_el0_\_cpu
+
+vector_entry bhb_serror_sp_el0_\_cpu
+	b	serror_sp_el0
+end_vector_entry bhb_serror_sp_el0_\_cpu
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_ELx: 0x200 - 0x400
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry bhb_sync_exception_sp_elx_\_cpu
+	b	sync_exception_sp_elx
+end_vector_entry bhb_sync_exception_sp_elx_\_cpu
+
+vector_entry bhb_irq_sp_elx_\_cpu
+	b	irq_sp_elx
+end_vector_entry bhb_irq_sp_elx_\_cpu
+
+vector_entry bhb_fiq_sp_elx_\_cpu
+	b	fiq_sp_elx
+end_vector_entry bhb_fiq_sp_elx_\_cpu
+
+vector_entry bhb_serror_sp_elx_\_cpu
+	b	serror_sp_elx
+end_vector_entry bhb_serror_sp_elx_\_cpu
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x600
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry bhb_sync_exception_aarch64_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	sync_exception_aarch64
+end_vector_entry bhb_sync_exception_aarch64_\_cpu
+
+vector_entry bhb_irq_aarch64_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	irq_aarch64
+end_vector_entry bhb_irq_aarch64_\_cpu
+
+vector_entry bhb_fiq_aarch64_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	fiq_aarch64
+end_vector_entry bhb_fiq_aarch64_\_cpu
+
+vector_entry bhb_serror_aarch64_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	serror_aarch64
+end_vector_entry bhb_serror_aarch64_\_cpu
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x800
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry bhb_sync_exception_aarch32_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	sync_exception_aarch32
+end_vector_entry bhb_sync_exception_aarch32_\_cpu
+
+vector_entry bhb_irq_aarch32_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	irq_aarch32
+end_vector_entry bhb_irq_aarch32_\_cpu
+
+vector_entry bhb_fiq_aarch32_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	fiq_aarch32
+end_vector_entry bhb_fiq_aarch32_\_cpu
+
+vector_entry bhb_serror_aarch32_\_cpu
+	apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+	b	serror_aarch32
+end_vector_entry bhb_serror_aarch32_\_cpu
+	.endm
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index e812c07..c7630fb 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -24,6 +24,7 @@
 WORKAROUND_CVE_2017_5715		?=1
 WORKAROUND_CVE_2018_3639		?=1
 DYNAMIC_WORKAROUND_CVE_2018_3639	?=0
+WORKAROUND_CVE_2022_23960		?=1
 
 # Flags to indicate internal or external Last level cache
 # By default internal
@@ -56,6 +57,10 @@
 $(eval $(call assert_boolean,DYNAMIC_WORKAROUND_CVE_2018_3639))
 $(eval $(call add_define,DYNAMIC_WORKAROUND_CVE_2018_3639))
 
+# Process WORKAROUND_CVE_2022_23960 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2022_23960))
+$(eval $(call add_define,WORKAROUND_CVE_2022_23960))
+
 $(eval $(call assert_boolean,NEOVERSE_Nx_EXTERNAL_LLC))
 $(eval $(call add_define,NEOVERSE_Nx_EXTERNAL_LLC))
 
diff --git a/lib/gpt_rme/gpt_rme.c b/lib/gpt_rme/gpt_rme.c
index e424fe2..d6fbc04 100644
--- a/lib/gpt_rme/gpt_rme.c
+++ b/lib/gpt_rme/gpt_rme.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -95,9 +95,8 @@
 	if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
 	    ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
 		return true;
-	} else {
-		return false;
 	}
+	return false;
 }
 
 /*
@@ -117,9 +116,8 @@
 {
 	if (((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1)) {
 		return true;
-	} else {
-		return false;
 	}
+	return false;
 }
 
 /*
@@ -434,14 +432,14 @@
 	gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));
 
 	/* Start index of this region in L0 GPTs */
-	idx = pas->base_pa >> GPT_L0_IDX_SHIFT;
+	idx = GPT_L0_IDX(pas->base_pa);
 
 	/*
 	 * Determine number of L0 GPT descriptors covered by
 	 * this PAS region and use the count to populate these
 	 * descriptors.
 	 */
-	end_idx = (pas->base_pa + pas->size) >> GPT_L0_IDX_SHIFT;
+	end_idx = GPT_L0_IDX(pas->base_pa + pas->size);
 
 	/* Generate the needed block descriptors. */
 	for (; idx < end_idx; idx++) {
@@ -471,8 +469,8 @@
 	uintptr_t cur_idx;
 	uintptr_t end_idx;
 
-	cur_idx = cur_pa >> GPT_L0_IDX_SHIFT;
-	end_idx = end_pa >> GPT_L0_IDX_SHIFT;
+	cur_idx = GPT_L0_IDX(cur_pa);
+	end_idx = GPT_L0_IDX(end_pa);
 
 	assert(cur_idx <= end_idx);
 
@@ -770,7 +768,7 @@
 
 	/* Validate other parameters. */
 	ret = gpt_validate_l0_params(pps, l0_mem_base, l0_mem_size);
-	if (ret < 0) {
+	if (ret != 0) {
 		return ret;
 	}
 
@@ -849,7 +847,7 @@
 	if (l1_gpt_cnt > 0) {
 		ret = gpt_validate_l1_params(l1_mem_base, l1_mem_size,
 		      l1_gpt_cnt);
-		if (ret < 0) {
+		if (ret != 0) {
 			return ret;
 		}
 
@@ -958,55 +956,170 @@
 static spinlock_t gpt_lock;
 
 /*
- * Check if caller is allowed to transition a PAS.
+ * A helper to write the value (target_pas << gpi_shift) into
+ * gpt_l1_addr at the given index.
+ */
+static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
+			     unsigned int gpi_shift, unsigned int idx,
+			     unsigned int target_pas)
+{
+	*gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
+	*gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
+	gpt_l1_addr[idx] = *gpt_l1_desc;
+}
+
+/*
+ * Helper to retrieve the gpt_l1_* information for the given base
+ * address, returned in gpi_info.
+ */
+static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
+{
+	uint64_t gpt_l0_desc, *gpt_l0_base;
+
+	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
+	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
+	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
+		VERBOSE("[GPT] Granule is not covered by a table descriptor!\n");
+		VERBOSE("      Base=0x%" PRIx64 "\n", base);
+		return -EINVAL;
+	}
+
+	/* Get the table index and GPI shift from PA. */
+	gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
+	gpi_info->idx = GPT_L1_IDX(gpt_config.p, base);
+	gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;
+
+	gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];
+	gpi_info->gpi = (gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
+		GPT_L1_GRAN_DESC_GPI_MASK;
+	return 0;
+}
+
+/*
+ * This function is the granule transition delegate service. When a granule
+ * transition request occurs it is routed to this function where, if valid,
+ * it is fulfilled following section A1.1.1 (Delegate) of the RME supplement.
  *
- * - Secure world caller can only request S <-> NS transitions on a
- *   granule that is already in either S or NS PAS.
- *
- * - Realm world caller can only request R <-> NS transitions on a
- *   granule that is already in either R or NS PAS.
+ * TODO: implement support for transitioning multiple granules at once.
  *
  * Parameters
+ *   base		Base address of the region to transition, must be
+ *			aligned to granule size.
+ *   size		Size of region to transition, must be aligned to granule
+ *			size.
  *   src_sec_state	Security state of the caller.
- *   current_gpi	Current GPI of the granule.
- *   target_gpi		Requested new GPI for the granule.
  *
  * Return
  *   Negative Linux error code in the event of a failure, 0 for success.
  */
-static int gpt_check_transition_gpi(unsigned int src_sec_state,
-				    unsigned int current_gpi,
-				    unsigned int target_gpi)
+int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
 {
-	unsigned int check_gpi;
+	gpi_info_t gpi_info;
+	uint64_t nse;
+	int res;
+	unsigned int target_pas;
 
-	/* Cannot transition a granule to the state it is already in. */
-	if (current_gpi == target_gpi) {
+	/* Ensure that the tables have been set up before taking requests. */
+	assert(gpt_config.plat_gpt_l0_base != 0UL);
+
+	/* Ensure that caches are enabled. */
+	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
+
+	/* Delegate request can only come from REALM or SECURE */
+	assert(src_sec_state == SMC_FROM_REALM ||
+	       src_sec_state == SMC_FROM_SECURE);
+
+	/* Only a single granule can be transitioned at a time. */
+	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
 		return -EINVAL;
 	}
 
-	/* Check security state, only secure and realm can transition. */
-	if (src_sec_state == SMC_FROM_REALM) {
-		check_gpi = GPT_GPI_REALM;
-	} else if (src_sec_state == SMC_FROM_SECURE) {
-		check_gpi = GPT_GPI_SECURE;
+	/* Check for address range overflow. */
+	if ((ULONG_MAX - base) < size) {
+		VERBOSE("[GPT] Transition request address overflow!\n");
+		VERBOSE("      Base=0x%" PRIx64 "\n", base);
+		VERBOSE("      Size=0x%lx\n", size);
+		return -EINVAL;
+	}
+
+	/* Make sure base and size are valid. */
+	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
+	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
+	    (size == 0UL) ||
+	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
+		VERBOSE("[GPT] Invalid granule transition address range!\n");
+		VERBOSE("      Base=0x%" PRIx64 "\n", base);
+		VERBOSE("      Size=0x%lx\n", size);
+		return -EINVAL;
+	}
+
+	target_pas = GPT_GPI_REALM;
+	if (src_sec_state == SMC_FROM_SECURE) {
+		target_pas = GPT_GPI_SECURE;
+	}
+
+	/*
+	 * Access to L1 tables is controlled by a global lock to ensure
+	 * that no more than one CPU is allowed to make changes at any
+	 * given time.
+	 */
+	spin_lock(&gpt_lock);
+	res = get_gpi_params(base, &gpi_info);
+	if (res != 0) {
+		spin_unlock(&gpt_lock);
+		return res;
+	}
+
+	/* Check that the current address is in NS state */
+	if (gpi_info.gpi != GPT_GPI_NS) {
+		VERBOSE("[GPT] Only Granule in NS state can be delegated.\n");
+		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
+			gpi_info.gpi);
+		spin_unlock(&gpt_lock);
+		return -EINVAL;
+	}
+
+	if (src_sec_state == SMC_FROM_SECURE) {
+		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
 	} else {
-		return -EINVAL;
+		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
 	}
 
-	/* Make sure security state is allowed to make the transition. */
-	if ((target_gpi != check_gpi) && (target_gpi != GPT_GPI_NS)) {
-		return -EINVAL;
-	}
-	if ((current_gpi != check_gpi) && (current_gpi != GPT_GPI_NS)) {
-		return -EINVAL;
-	}
+	/*
+	 * In order to maintain mutual distrust between Realm and Secure
+	 * states, remove any data speculatively fetched into the target
+	 * physical address space by issuing DC CIPAPA over the address range.
+	 */
+	flush_dcache_to_popa_range(nse | base,
+				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+
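+	/* Write the new GPI and ensure the update is observed by the GPC. */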
+	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
+		  gpi_info.gpi_shift, gpi_info.idx, target_pas);
+	dsboshst();
+
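+	/* Invalidate stale GPT entries for this PA cached in the TLBs. */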
+	gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+	dsbosh();
+
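+	/*
+	 * Remove any copies of the granule that may still be cached in the
+	 * NS physical address space it has just left.
+	 */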
+	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
+
+	flush_dcache_to_popa_range(nse | base,
+				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+
+	/* Unlock access to the L1 tables. */
+	spin_unlock(&gpt_lock);
+
+	/*
+	 * The isb() will be done as part of context
+	 * synchronization when returning to lower EL
+	 */
+	VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
+		base, gpi_info.gpi, target_pas);
 
 	return 0;
 }
 
 /*
- * This function is the core of the granule transition service. When a granule
+ * This function implements the granule transition undelegate service. When a granule
  * transition request occurs it is routed to this function where the request is
  * validated then fulfilled if possible.
  *
@@ -1018,29 +1131,32 @@
  *   size		Size of region to transition, must be aligned to granule
  *			size.
  *   src_sec_state	Security state of the caller.
- *   target_pas		Target PAS of the specified memory region.
  *
  * Return
  *    Negative Linux error code in the event of a failure, 0 for success.
  */
-int gpt_transition_pas(uint64_t base, size_t size, unsigned int src_sec_state,
-	unsigned int target_pas)
+int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
 {
-	int idx;
-	unsigned int gpi_shift;
-	unsigned int gpi;
-	uint64_t gpt_l0_desc;
-	uint64_t gpt_l1_desc;
-	uint64_t *gpt_l1_addr;
-	uint64_t *gpt_l0_base;
+	gpi_info_t gpi_info;
+	uint64_t nse;
+	int res;
 
 	/* Ensure that the tables have been set up before taking requests. */
-	assert(gpt_config.plat_gpt_l0_base != 0U);
+	assert(gpt_config.plat_gpt_l0_base != 0UL);
 
-	/* Ensure that MMU and data caches are enabled. */
-	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
+	/* Ensure that caches are enabled. */
+	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
 
-	/* Check for address range overflow. */
+	/* Undelegate request can only come from REALM or SECURE */
+	assert(src_sec_state == SMC_FROM_REALM ||
+	       src_sec_state == SMC_FROM_SECURE);
+
+	/* Only a single-granule transition is supported at the moment. */
+	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
+		return -EINVAL;
+	}
+
+	/* Check that base and size do not overflow. */
 	if ((ULONG_MAX - base) < size) {
 		VERBOSE("[GPT] Transition request address overflow!\n");
 		VERBOSE("      Base=0x%" PRIx64 "\n", base);
@@ -1049,9 +1165,9 @@
 	}
 
 	/* Make sure base and size are valid. */
-	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0U) ||
-	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0U) ||
-	    (size == 0U) ||
+	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
+	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
+	    (size == 0UL) ||
 	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
 		VERBOSE("[GPT] Invalid granule transition address range!\n");
 		VERBOSE("      Base=0x%" PRIx64 "\n", base);
@@ -1059,66 +1175,81 @@
 		return -EINVAL;
 	}
 
-	/* See if this is a single granule transition or a range of granules. */
-	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
-		/*
-		 * TODO: Add support for transitioning multiple granules with a
-		 * single call to this function.
-		 */
-		panic();
-	}
-
-	/* Get the L0 descriptor and make sure it is for a table. */
-	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
-	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
-	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
-		VERBOSE("[GPT] Granule is not covered by a table descriptor!\n");
-		VERBOSE("      Base=0x%" PRIx64 "\n", base);
-		return -EINVAL;
-	}
-
-	/* Get the table index and GPI shift from PA. */
-	gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
-	idx = GPT_L1_IDX(gpt_config.p, base);
-	gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;
-
 	/*
 	 * Access to L1 tables is controlled by a global lock to ensure
 	 * that no more than one CPU is allowed to make changes at any
 	 * given time.
 	 */
 	spin_lock(&gpt_lock);
-	gpt_l1_desc = gpt_l1_addr[idx];
-	gpi = (gpt_l1_desc >> gpi_shift) & GPT_L1_GRAN_DESC_GPI_MASK;
 
-	/* Make sure caller state and source/target PAS are allowed. */
-	if (gpt_check_transition_gpi(src_sec_state, gpi, target_pas) < 0) {
+	res = get_gpi_params(base, &gpi_info);
+	if (res != 0) {
 		spin_unlock(&gpt_lock);
-			VERBOSE("[GPT] Invalid caller state and PAS combo!\n");
-		VERBOSE("      Caller: %u, Current GPI: %u, Target GPI: %u\n",
-			src_sec_state, gpi, target_pas);
-		return -EPERM;
+		return res;
 	}
 
-	/* Clear existing GPI encoding and transition granule. */
-	gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
-	gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
-	gpt_l1_addr[idx] = gpt_l1_desc;
+	/* Check that the granule is currently in the caller's PAS. */
+	if ((src_sec_state == SMC_FROM_REALM  &&
+	     gpi_info.gpi != GPT_GPI_REALM) ||
+	    (src_sec_state == SMC_FROM_SECURE &&
+	     gpi_info.gpi != GPT_GPI_SECURE)) {
+		VERBOSE("[GPT] Only a granule in REALM or SECURE state can be undelegated.\n");
+		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
+			gpi_info.gpi);
+		spin_unlock(&gpt_lock);
+		return -EINVAL;
+	}
 
-	/* Ensure that the write operation will be observed by GPC */
-	dsbishst();
+	/*
+	 * In order to maintain mutual distrust between Realm and Secure
+	 * states, remove access now to guarantee that writes to the
+	 * currently-accessible physical address space cannot become
+	 * observable later.
+	 */
+	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
+		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);
+	dsboshst();
+
+	gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+	dsbosh();
+
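+	/* Select the NSE encoding for the PAS the granule is leaving. */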
+	if (src_sec_state == SMC_FROM_SECURE) {
+		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
+	} else {
+		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
+	}
+
+	/* Ensure that the scrubbed data has made it past the PoPA */
+	flush_dcache_to_popa_range(nse | base,
+				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+
+	/*
+	 * Remove any data speculatively loaded into NS space before
+	 * the scrubbing completed.
+	 */
+	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
+
+	flush_dcache_to_popa_range(nse | base,
+				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+
+	/* Clear existing GPI encoding and transition granule. */
+	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
+		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);
+	dsboshst();
+
+	/* Ensure that all agents observe the new NS configuration */
+	gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+	dsbosh();
 
 	/* Unlock access to the L1 tables. */
 	spin_unlock(&gpt_lock);
 
-	gpt_tlbi_by_pa(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
-	dsbishst();
 	/*
 	 * The isb() will be done as part of context
 	 * synchronization when returning to lower EL
 	 */
-	VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n", base, gpi,
-		target_pas);
+	VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
+		base, gpi_info.gpi, GPT_GPI_NS);
 
 	return 0;
 }
diff --git a/lib/gpt_rme/gpt_rme_private.h b/lib/gpt_rme/gpt_rme_private.h
index 4203bba..3c817f3 100644
--- a/lib/gpt_rme/gpt_rme_private.h
+++ b/lib/gpt_rme/gpt_rme_private.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -106,6 +106,17 @@
 	PGS_64KB_P =	16U
 } gpt_p_val_e;
 
+/*
+ * Internal structure for returning the values retrieved by get_gpi_params().
+ */
+typedef struct gpi_info {
+	uint64_t gpt_l1_desc;
+	uint64_t *gpt_l1_addr;
+	unsigned int idx;
+	unsigned int gpi_shift;
+	unsigned int gpi;
+} gpi_info_t;
+
 /* Max valid value for PGS. */
 #define GPT_PGS_MAX			(2U)
 
diff --git a/plat/arm/board/fvp/fdts/optee_sp_manifest.dts b/plat/arm/board/fvp/fdts/optee_sp_manifest.dts
index 551efe6..b803340 100644
--- a/plat/arm/board/fvp/fdts/optee_sp_manifest.dts
+++ b/plat/arm/board/fvp/fdts/optee_sp_manifest.dts
@@ -40,11 +40,5 @@
 			pages-count = <1>;
 			attributes = <0x3>; /* read-write */
 		};
-
-		gicd {
-			base-address = <0x00000000 0x2f000000>;
-			pages-count = <16>;
-			attributes = <0x3>; /* read-write */
-		};
 	};
 };
diff --git a/plat/arm/board/fvp/fvp_bl31_setup.c b/plat/arm/board/fvp/fvp_bl31_setup.c
index f9ee449..a94a4f4 100644
--- a/plat/arm/board/fvp/fvp_bl31_setup.c
+++ b/plat/arm/board/fvp/fvp_bl31_setup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -20,6 +20,9 @@
 void __init bl31_early_platform_setup2(u_register_t arg0,
 		u_register_t arg1, u_register_t arg2, u_register_t arg3)
 {
+	/* Initialize the console to provide early debug support */
+	arm_console_boot_init();
+
 #if !RESET_TO_BL31 && !BL2_AT_EL3
 	const struct dyn_cfg_dtb_info_t *soc_fw_config_info;
 
diff --git a/plat/arm/common/arm_console.c b/plat/arm/common/arm_console.c
index af5f11e..51830c9 100644
--- a/plat/arm/common/arm_console.c
+++ b/plat/arm/common/arm_console.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -25,6 +25,11 @@
 /* Initialize the console to provide early debug support */
 void __init arm_console_boot_init(void)
 {
+	/* If the console was initialized already, don't initialize again */
+	if (arm_boot_console.base == PLAT_ARM_BOOT_UART_BASE) {
+		return;
+	}
+
 	int rc = console_pl011_register(PLAT_ARM_BOOT_UART_BASE,
 					PLAT_ARM_BOOT_UART_CLK_IN_HZ,
 					ARM_CONSOLE_BAUDRATE,
diff --git a/plat/brcm/board/common/board_common.mk b/plat/brcm/board/common/board_common.mk
index 3b3e92d..24a27ed 100644
--- a/plat/brcm/board/common/board_common.mk
+++ b/plat/brcm/board/common/board_common.mk
@@ -214,14 +214,12 @@
 endif
 
 # Include mbedtls if it can be located
-MBEDTLS_DIR := mbedtls
-MBEDTLS_CHECK := $(shell find ${MBEDTLS_DIR}/include -name '${MBEDTLS_DIR}')
+MBEDTLS_DIR ?= mbedtls
+MBEDTLS_CHECK := $(shell find ${MBEDTLS_DIR}/include -name '$(notdir ${MBEDTLS_DIR})')
 
 ifneq (${MBEDTLS_CHECK},)
 $(info Found mbedTLS at ${MBEDTLS_DIR})
 PLAT_INCLUDES += -I${MBEDTLS_DIR}/include/mbedtls
-# Specify mbedTLS configuration file
-MBEDTLS_CONFIG_FILE	:=	"<brcm_mbedtls_config.h>"
 
 # By default, use RSA keys
 KEY_ALG := rsa_1_5
diff --git a/plat/intel/soc/agilex/include/socfpga_plat_def.h b/plat/intel/soc/agilex/include/socfpga_plat_def.h
index 6c9d81c..9c87e45 100644
--- a/plat/intel/soc/agilex/include/socfpga_plat_def.h
+++ b/plat/intel/soc/agilex/include/socfpga_plat_def.h
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
- * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -14,6 +14,10 @@
 #define PLATFORM_MODEL				PLAT_SOCFPGA_AGILEX
 #define BOOT_SOURCE				BOOT_SOURCE_SDMMC
 
+/* FPGA config helpers */
+#define INTEL_SIP_SMC_FPGA_CONFIG_ADDR		0x400000
+#define INTEL_SIP_SMC_FPGA_CONFIG_SIZE		0x2000000
+
 /* Register Mapping */
 #define SOCFPGA_MMC_REG_BASE			0xff808000
 
diff --git a/plat/intel/soc/agilex/platform.mk b/plat/intel/soc/agilex/platform.mk
index bf5cc14..10a3eec 100644
--- a/plat/intel/soc/agilex/platform.mk
+++ b/plat/intel/soc/agilex/platform.mk
@@ -1,6 +1,6 @@
 #
-# Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
-# Copyright (c) 2019-2020, Intel Corporation. All rights reserved.
+# Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -65,6 +65,8 @@
 		plat/intel/soc/common/socfpga_psci.c			\
 		plat/intel/soc/common/socfpga_sip_svc.c			\
 		plat/intel/soc/common/socfpga_topology.c		\
+		plat/intel/soc/common/sip/socfpga_sip_ecc.c		\
+		plat/intel/soc/common/sip/socfpga_sip_fcs.c		\
 		plat/intel/soc/common/soc/socfpga_mailbox.c		\
 		plat/intel/soc/common/soc/socfpga_reset_manager.c
 
@@ -72,4 +74,5 @@
 BL2_AT_EL3			:= 1
 BL2_INV_DCACHE			:= 0
 MULTI_CONSOLE_API		:= 1
+SIMICS_BUILD			:= 0
 USE_COHERENT_MEM		:= 1
diff --git a/plat/intel/soc/common/drivers/ccu/ncore_ccu.c b/plat/intel/soc/common/drivers/ccu/ncore_ccu.c
index b4fce7b..d4716cf 100644
--- a/plat/intel/soc/common/drivers/ccu/ncore_ccu.c
+++ b/plat/intel/soc/common/drivers/ccu/ncore_ccu.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -107,6 +107,17 @@
 			OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK);
 }
 
+void ncore_enable_ocram_firewall(void)
+{
+	mmio_setbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF1),
+			OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK);
+	mmio_setbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF2),
+			OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK);
+	mmio_setbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF3),
+			OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK);
+	mmio_setbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF4),
+			OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK);
+}
+
 uint32_t init_ncore_ccu(void)
 {
 	uint32_t status;
diff --git a/plat/intel/soc/common/drivers/ccu/ncore_ccu.h b/plat/intel/soc/common/drivers/ccu/ncore_ccu.h
index d25ecac..3f662ff 100644
--- a/plat/intel/soc/common/drivers/ccu/ncore_ccu.h
+++ b/plat/intel/soc/common/drivers/ccu/ncore_ccu.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -104,5 +104,6 @@
 } coh_ss_id_t;
 
 uint32_t init_ncore_ccu(void);
+void ncore_enable_ocram_firewall(void);
 
 #endif
diff --git a/plat/intel/soc/common/include/platform_def.h b/plat/intel/soc/common/include/platform_def.h
index 55600ee..7859493 100644
--- a/plat/intel/soc/common/include/platform_def.h
+++ b/plat/intel/soc/common/include/platform_def.h
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
- * Copyright (c) 2019-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -13,8 +13,10 @@
 #include <common/tbbr/tbbr_img_def.h>
 #include <plat/common/common_def.h>
 
+/* Platform Type */
 #define PLAT_SOCFPGA_STRATIX10			1
 #define PLAT_SOCFPGA_AGILEX			2
+#define PLAT_SOCFPGA_N5X			3
 
 /* sysmgr.boot_scratch_cold4 & 5 used for CPU release address for SPL */
 #define PLAT_CPU_RELEASE_ADDR			0xffd12210
@@ -167,9 +169,16 @@
 
 #define CRASH_CONSOLE_BASE	PLAT_UART0_BASE
 
+#ifndef SIMICS_BUILD
 #define PLAT_BAUDRATE		(115200)
 #define PLAT_UART_CLOCK		(100000000)
 
+#else
+#define PLAT_BAUDRATE		(4800)
+#define PLAT_UART_CLOCK		(76800)
+
+#endif
+
 /*******************************************************************************
  * PHY related constants
  ******************************************************************************/
diff --git a/plat/intel/soc/common/include/socfpga_fcs.h b/plat/intel/soc/common/include/socfpga_fcs.h
new file mode 100644
index 0000000..ff10d36
--- /dev/null
+++ b/plat/intel/soc/common/include/socfpga_fcs.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2020-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOCFPGA_FCS_H
+#define SOCFPGA_FCS_H
+
+/* FCS Definitions */
+
+#define FCS_RANDOM_WORD_SIZE		8U
+#define FCS_PROV_DATA_WORD_SIZE		44U
+
+#define FCS_RANDOM_BYTE_SIZE		(FCS_RANDOM_WORD_SIZE * 4U)
+#define FCS_PROV_DATA_BYTE_SIZE		(FCS_PROV_DATA_WORD_SIZE * 4U)
+
+#define FCS_CRYPTION_DATA_0		0x10100
+
+/* FCS Payload Structure */
+
+typedef struct fcs_crypt_payload_t {
+	uint32_t first_word;
+	uint32_t src_addr;
+	uint32_t src_size;
+	uint32_t dst_addr;
+	uint32_t dst_size;
+} fcs_crypt_payload;
+
+/* Function Declarations */
+
+uint32_t intel_fcs_random_number_gen(uint64_t addr, uint64_t *ret_size,
+				uint32_t *mbox_error);
+uint32_t intel_fcs_send_cert(uint64_t addr, uint64_t size,
+				uint32_t *send_id);
+uint32_t intel_fcs_get_provision_data(uint32_t *send_id);
+uint32_t intel_fcs_cryption(uint32_t mode, uint32_t src_addr,
+			uint32_t src_size, uint32_t dst_addr,
+			uint32_t dst_size, uint32_t *send_id);
+
+#endif /* SOCFPGA_FCS_H */
diff --git a/plat/intel/soc/common/include/socfpga_mailbox.h b/plat/intel/soc/common/include/socfpga_mailbox.h
index bc10dd8..6b7e0fc 100644
--- a/plat/intel/soc/common/include/socfpga_mailbox.h
+++ b/plat/intel/soc/common/include/socfpga_mailbox.h
@@ -40,6 +40,7 @@
 #define MBOX_CMD_SYNC			0x01
 #define MBOX_CMD_RESTART		0x02
 #define MBOX_CMD_CANCEL			0x03
+#define MBOX_CMD_VAB_SRC_CERT		0x0B
 #define MBOX_CMD_GET_IDCODE		0x10
 #define MBOX_CMD_REBOOT_HPS		0x47
 
@@ -61,6 +62,11 @@
 #define MBOX_RSU_UPDATE			0x5C
 #define MBOX_HPS_STAGE_NOTIFY		0x5D
 
+/* FCS Command */
+#define MBOX_FCS_GET_PROVISION			0x7B
+#define MBOX_FCS_ENCRYPT_REQ			0x7E
+#define MBOX_FCS_DECRYPT_REQ			0x7F
+#define MBOX_FCS_RANDOM_GEN			0x80
 
 /* Mailbox Definitions */
 
diff --git a/plat/intel/soc/common/include/socfpga_reset_manager.h b/plat/intel/soc/common/include/socfpga_reset_manager.h
index 637f8df..a976df7 100644
--- a/plat/intel/soc/common/include/socfpga_reset_manager.h
+++ b/plat/intel/soc/common/include/socfpga_reset_manager.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
diff --git a/plat/intel/soc/common/include/socfpga_sip_svc.h b/plat/intel/soc/common/include/socfpga_sip_svc.h
index 92adfa3..0db71e2 100644
--- a/plat/intel/soc/common/include/socfpga_sip_svc.h
+++ b/plat/intel/soc/common/include/socfpga_sip_svc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -15,6 +15,8 @@
 #define INTEL_SIP_SMC_STATUS_ERROR			0x4
 #define INTEL_SIP_SMC_RSU_ERROR				0x7
 
+/* SiP mailbox error code */
+#define GENERIC_RESPONSE_ERROR				0x3FF
 
 /* SMC SiP service function identifier */
 
@@ -35,6 +37,12 @@
 #define INTEL_SIP_SMC_RSU_UPDATE			0xC200000C
 #define INTEL_SIP_SMC_RSU_NOTIFY			0xC200000E
 #define INTEL_SIP_SMC_RSU_RETRY_COUNTER			0xC200000F
+#define INTEL_SIP_SMC_RSU_DCMF_VERSION			0xC2000010
+#define INTEL_SIP_SMC_RSU_COPY_DCMF_VERSION		0xC2000011
+
+/* ECC */
+#define INTEL_SIP_SMC_ECC_DBE				0xC200000D
 
 /* Send Mailbox Command */
 #define INTEL_SIP_SMC_MBOX_SEND_CMD			0xC200001E
@@ -42,9 +50,11 @@
 
 /* SiP Definitions */
 
-/* FPGA config helpers */
-#define INTEL_SIP_SMC_FPGA_CONFIG_ADDR			0x400000
-#define INTEL_SIP_SMC_FPGA_CONFIG_SIZE			0x2000000
+/* ECC DBE */
+#define WARM_RESET_WFI_FLAG				BIT(31)
+#define SYSMGR_ECC_DBE_COLD_RST_MASK		(SYSMGR_ECC_OCRAM_MASK |\
+							SYSMGR_ECC_DDR0_MASK |\
+							SYSMGR_ECC_DDR1_MASK)
 
 /* SMC function IDs for SiP Service queries */
 #define SIP_SVC_CALL_COUNT	0x8200ff00
@@ -70,4 +80,8 @@
 
 bool is_address_in_ddr_range(uint64_t addr, uint64_t size);
 
+/* ECC DBE */
+bool cold_reset_for_ecc_dbe(void);
+uint32_t intel_ecc_dbe_notification(uint64_t dbe_value);
+
 #endif /* SOCFPGA_SIP_SVC_H */
diff --git a/plat/intel/soc/common/include/socfpga_system_manager.h b/plat/intel/soc/common/include/socfpga_system_manager.h
index 8b42d47..2b13f1f 100644
--- a/plat/intel/soc/common/include/socfpga_system_manager.h
+++ b/plat/intel/soc/common/include/socfpga_system_manager.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -30,6 +30,8 @@
 #define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_0		0x200
 #define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_1		0x204
 #define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_2		0x208
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_8		0x220
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_9		0x224
 
 /* Field Masking */
 
@@ -47,6 +49,10 @@
 					| SCR_MPU_MASK)
 #define DISABLE_BRIDGE_FIREWALL				0x0ffe0101
 
+#define SYSMGR_ECC_OCRAM_MASK				BIT(1)
+#define SYSMGR_ECC_DDR0_MASK				BIT(16)
+#define SYSMGR_ECC_DDR1_MASK				BIT(17)
+
 /* Macros */
 
 #define SOCFPGA_SYSMGR(_reg)		(SOCFPGA_SYSMGR_REG_BASE \
diff --git a/plat/intel/soc/common/sip/socfpga_sip_ecc.c b/plat/intel/soc/common/sip/socfpga_sip_ecc.c
new file mode 100644
index 0000000..c4e06a6
--- /dev/null
+++ b/plat/intel/soc/common/sip/socfpga_sip_ecc.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/mmio.h>
+#include <tools_share/uuid.h>
+
+#include "socfpga_fcs.h"
+#include "socfpga_mailbox.h"
+#include "socfpga_reset_manager.h"
+#include "socfpga_sip_svc.h"
+#include "socfpga_system_manager.h"
+
+uint32_t intel_ecc_dbe_notification(uint64_t dbe_value)
+{
+	dbe_value &= WARM_RESET_WFI_FLAG;
+
+	/* Trap CPUs in WFI if warm reset flag is set */
+	if (dbe_value > 0) {
+		while (1) {
+			wfi();
+		}
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+bool cold_reset_for_ecc_dbe(void)
+{
+	uint32_t dbe_int_status;
+
+	dbe_int_status = mmio_read_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_8));
+
+	/* Trigger cold reset only for error in critical memory (DDR/OCRAM) */
+	dbe_int_status &= SYSMGR_ECC_DBE_COLD_RST_MASK;
+
+	if (dbe_int_status > 0) {
+		return true;
+	}
+
+	return false;
+}
diff --git a/plat/intel/soc/common/sip/socfpga_sip_fcs.c b/plat/intel/soc/common/sip/socfpga_sip_fcs.c
new file mode 100644
index 0000000..fe5461b
--- /dev/null
+++ b/plat/intel/soc/common/sip/socfpga_sip_fcs.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2020-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <lib/mmio.h>
+
+#include "socfpga_fcs.h"
+#include "socfpga_mailbox.h"
+#include "socfpga_sip_svc.h"
+
+uint32_t intel_fcs_random_number_gen(uint64_t addr, uint64_t *ret_size,
+					uint32_t *mbox_error)
+{
+	int status;
+	unsigned int i;
+	unsigned int resp_len = FCS_RANDOM_WORD_SIZE;
+	uint32_t random_data[FCS_RANDOM_WORD_SIZE] = {0U};
+
+	if (!is_address_in_ddr_range(addr, FCS_RANDOM_BYTE_SIZE)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_RANDOM_GEN, NULL, 0U,
+			CMD_CASUAL, random_data, &resp_len);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	if (resp_len != FCS_RANDOM_WORD_SIZE) {
+		*mbox_error = GENERIC_RESPONSE_ERROR;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*ret_size = FCS_RANDOM_BYTE_SIZE;
+
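+	/* Copy the random words returned by the mailbox out to memory. */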
+	for (i = 0U; i < FCS_RANDOM_WORD_SIZE; i++) {
+		mmio_write_32(addr, random_data[i]);
+		addr += MBOX_WORD_BYTE;
+	}
+
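+	/* Flush the output so it is visible to the non-secure caller. */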
+	flush_dcache_range(addr - *ret_size, *ret_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+uint32_t intel_fcs_send_cert(uint64_t addr, uint64_t size,
+					uint32_t *send_id)
+{
+	int status;
+
+	if (!is_address_in_ddr_range(addr, size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd_async(send_id, MBOX_CMD_VAB_SRC_CERT,
+				(uint32_t *)addr, size / MBOX_WORD_BYTE,
+				CMD_DIRECT);
+
+	if (status < 0) {
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+uint32_t intel_fcs_get_provision_data(uint32_t *send_id)
+{
+	int status;
+
+	status = mailbox_send_cmd_async(send_id, MBOX_FCS_GET_PROVISION,
+				NULL, 0U, CMD_DIRECT);
+
+	if (status < 0) {
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+uint32_t intel_fcs_cryption(uint32_t mode, uint32_t src_addr,
+		uint32_t src_size, uint32_t dst_addr,
+		uint32_t dst_size, uint32_t *send_id)
+{
+	int status;
+	uint32_t cmd;
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
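+	/* Describe the source and destination buffers for the mailbox command. */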
+	fcs_crypt_payload payload = {
+		FCS_CRYPTION_DATA_0,
+		src_addr,
+		src_size,
+		dst_addr,
+		dst_size };
+
+	if (mode != 0U) {
+		cmd = MBOX_FCS_ENCRYPT_REQ;
+	} else {
+		cmd = MBOX_FCS_DECRYPT_REQ;
+	}
+
+	status = mailbox_send_cmd_async(send_id, cmd, (uint32_t *) &payload,
+				sizeof(fcs_crypt_payload) / MBOX_WORD_BYTE,
+				CMD_INDIRECT);
+	inv_dcache_range(dst_addr, dst_size);
+
+	if (status < 0) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
diff --git a/plat/intel/soc/common/soc/socfpga_reset_manager.c b/plat/intel/soc/common/soc/socfpga_reset_manager.c
index 32604c9..b0de60e 100644
--- a/plat/intel/soc/common/soc/socfpga_reset_manager.c
+++ b/plat/intel/soc/common/soc/socfpga_reset_manager.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -134,7 +134,7 @@
 #if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10
 	mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
 		~(RSTMGR_FIELD(BRG, DDRSCH) | RSTMGR_FIELD(BRG, FPGA2SOC)));
-#elif PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX
+#else
 	mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
 		~(RSTMGR_FIELD(BRG, MPFE) | RSTMGR_FIELD(BRG, FPGA2SOC)));
 #endif
diff --git a/plat/intel/soc/common/socfpga_psci.c b/plat/intel/soc/common/socfpga_psci.c
index 4b57b8f..5fd6559 100644
--- a/plat/intel/soc/common/socfpga_psci.c
+++ b/plat/intel/soc/common/socfpga_psci.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -14,7 +14,7 @@
 #include "socfpga_mailbox.h"
 #include "socfpga_plat_def.h"
 #include "socfpga_reset_manager.h"
-
+#include "socfpga_sip_svc.h"
 
 
 /*******************************************************************************
@@ -151,6 +151,9 @@
 static int socfpga_system_reset2(int is_vendor, int reset_type,
 					u_register_t cookie)
 {
+	if (cold_reset_for_ecc_dbe()) {
+		mailbox_reset_cold();
+	}
 	/* disable cpuif */
 	gicv2_cpuif_disable();
 
diff --git a/plat/intel/soc/common/socfpga_sip_svc.c b/plat/intel/soc/common/socfpga_sip_svc.c
index d53e8de..14cd9e0 100644
--- a/plat/intel/soc/common/socfpga_sip_svc.c
+++ b/plat/intel/soc/common/socfpga_sip_svc.c
@@ -10,6 +10,7 @@
 #include <lib/mmio.h>
 #include <tools_share/uuid.h>
 
+#include "socfpga_fcs.h"
 #include "socfpga_mailbox.h"
 #include "socfpga_reset_manager.h"
 #include "socfpga_sip_svc.h"
@@ -139,21 +140,23 @@
 		status = mailbox_read_response(job_id,
 				resp, &resp_len);
 
-		if (resp_len < 0)
+		if (status < 0) {
 			break;
+		}
 
 		max_blocks++;
 
 		if (mark_last_buffer_xfer_completed(
-			&completed_addr[*count]) == 0)
+			&completed_addr[*count]) == 0) {
 			*count = *count + 1;
-		else
+		} else {
 			break;
+		}
 	}
 
 	if (*count <= 0) {
-		if (resp_len != MBOX_NO_RESPONSE &&
-			resp_len != MBOX_TIMEOUT && resp_len != 0) {
+		if (status != MBOX_NO_RESPONSE &&
+			status != MBOX_TIMEOUT && resp_len != 0) {
 			mailbox_clear_response();
 			return INTEL_SIP_SMC_STATUS_ERROR;
 		}
@@ -430,9 +433,9 @@
 			 u_register_t flags)
 {
 	uint32_t retval = 0;
-	uint32_t status = INTEL_SIP_SMC_STATUS_OK;
 	uint32_t completed_addr[3];
 	uint64_t rsu_respbuf[9];
+	int status = INTEL_SIP_SMC_STATUS_OK;
 	int mbox_status;
 	unsigned int len_in_resp;
 	u_register_t x5, x6;
@@ -527,6 +530,10 @@
 			SMC_RET2(handle, status, retval);
 		}
 
+	case INTEL_SIP_SMC_ECC_DBE:
+		status = intel_ecc_dbe_notification(x1);
+		SMC_RET1(handle, status);
+
 	case INTEL_SIP_SMC_MBOX_SEND_CMD:
 		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
 		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
diff --git a/plat/intel/soc/n5x/bl31_plat_setup.c b/plat/intel/soc/n5x/bl31_plat_setup.c
new file mode 100644
index 0000000..2a8daa6
--- /dev/null
+++ b/plat/intel/soc/n5x/bl31_plat_setup.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2020-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <drivers/arm/gicv2.h>
+#include <drivers/ti/uart/uart_16550.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables.h>
+
+#include "ccu/ncore_ccu.h"
+#include "socfpga_mailbox.h"
+#include "socfpga_private.h"
+
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	entry_point_info_t *next_image_info;
+
+	next_image_info = (type == NON_SECURE) ?
+			  &bl33_image_ep_info : &bl32_image_ep_info;
+
+	/* None of the images on this platform can have 0x0 as the entrypoint */
+	if (next_image_info->pc) {
+		return next_image_info;
+	} else {
+		return NULL;
+	}
+}
+
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+				u_register_t arg2, u_register_t arg3)
+{
+	static console_t console;
+
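+	/* Clear the entrypoint used to release secondary CPUs. */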
+	mmio_write_64(PLAT_SEC_ENTRY, 0);
+
+	console_16550_register(PLAT_UART0_BASE, PLAT_UART_CLOCK, PLAT_BAUDRATE,
+		&console);
+	/*
+	 * Check that the params passed from BL2 are not NULL.
+	 */
+	void *from_bl2 = (void *) arg0;
+
+	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+
+	assert(params_from_bl2 != NULL);
+
+	/*
+	 * Copy BL32 (if populated by BL2) and BL33 entry point information.
+	 * They are stored in Secure RAM, in BL31's address space.
+	 */
+
+	if (params_from_bl2->h.type == PARAM_BL_PARAMS &&
+		params_from_bl2->h.version >= VERSION_2) {
+
+		bl_params_node_t *bl_params = params_from_bl2->head;
+
+		while (bl_params != NULL) {
+			if (bl_params->image_id == BL33_IMAGE_ID) {
+				bl33_image_ep_info = *bl_params->ep_info;
+			}
+
+			bl_params = bl_params->next_params_info;
+		}
+	} else {
+		struct socfpga_bl31_params *arg_from_bl2 =
+			(struct socfpga_bl31_params *) from_bl2;
+
+		assert(arg_from_bl2->h.type == PARAM_BL31);
+		assert(arg_from_bl2->h.version >= VERSION_1);
+
+		bl32_image_ep_info = *arg_from_bl2->bl32_ep_info;
+		bl33_image_ep_info = *arg_from_bl2->bl33_ep_info;
+	}
+	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+}
+
+static const interrupt_prop_t s10_interrupt_props[] = {
+	PLAT_INTEL_SOCFPGA_G1S_IRQ_PROPS(GICV2_INTR_GROUP0),
+	PLAT_INTEL_SOCFPGA_G0_IRQ_PROPS(GICV2_INTR_GROUP0)
+};
+
+static unsigned int target_mask_array[PLATFORM_CORE_COUNT];
+
+static const gicv2_driver_data_t plat_gicv2_gic_data = {
+	.gicd_base = PLAT_INTEL_SOCFPGA_GICD_BASE,
+	.gicc_base = PLAT_INTEL_SOCFPGA_GICC_BASE,
+	.interrupt_props = s10_interrupt_props,
+	.interrupt_props_num = ARRAY_SIZE(s10_interrupt_props),
+	.target_masks = target_mask_array,
+	.target_masks_num = ARRAY_SIZE(target_mask_array),
+};
+
+/*******************************************************************************
+ * Perform any BL3-1 platform setup code
+ ******************************************************************************/
+void bl31_platform_setup(void)
+{
+	socfpga_delay_timer_init();
+
+	/* Initialize the gic cpu and distributor interfaces */
+	gicv2_driver_init(&plat_gicv2_gic_data);
+	gicv2_distif_init();
+	gicv2_pcpu_distif_init();
+	gicv2_cpuif_enable();
+
+	/* Signal secondary CPUs to jump to BL31 (BL2 = U-boot SPL) */
+	mmio_write_64(PLAT_CPU_RELEASE_ADDR,
+		(uint64_t)plat_secondary_cpus_bl31_entry);
+
+	mailbox_hps_stage_notify(HPS_EXECUTION_STATE_SSBL);
+
+	ncore_enable_ocram_firewall();
+}
+
+const mmap_region_t plat_dm_mmap[] = {
+	MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE,
+		MT_MEMORY | MT_RW | MT_NS),
+	MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE,
+		MT_DEVICE | MT_RW | MT_NS),
+	MAP_REGION_FLAT(DEVICE2_BASE, DEVICE2_SIZE,
+		MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE,
+		MT_NON_CACHEABLE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(DEVICE3_BASE, DEVICE3_SIZE,
+		MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE,
+		MT_DEVICE | MT_RW | MT_NS),
+	MAP_REGION_FLAT(DEVICE4_BASE, DEVICE4_SIZE,
+		MT_DEVICE | MT_RW | MT_NS),
+	{0}
+};
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ ******************************************************************************/
+void bl31_plat_arch_setup(void)
+{
+	const mmap_region_t bl_regions[] = {
+		MAP_REGION_FLAT(BL31_BASE, BL31_END - BL31_BASE,
+			MT_MEMORY | MT_RW | MT_SECURE),
+		MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
+			MT_CODE | MT_SECURE),
+		MAP_REGION_FLAT(BL_RO_DATA_BASE,
+			BL_RO_DATA_END - BL_RO_DATA_BASE,
+			MT_RO_DATA | MT_SECURE),
+#if USE_COHERENT_MEM
+		MAP_REGION_FLAT(BL_COHERENT_RAM_BASE,
+			BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+#endif
+		{0}
+	};
+
+	setup_page_tables(bl_regions, plat_dm_mmap);
+	enable_mmu_el3(0);
+}
diff --git a/plat/intel/soc/n5x/include/socfpga_plat_def.h b/plat/intel/soc/n5x/include/socfpga_plat_def.h
new file mode 100644
index 0000000..9186852
--- /dev/null
+++ b/plat/intel/soc/n5x/include/socfpga_plat_def.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLAT_SOCFPGA_DEF_H
+#define PLAT_SOCFPGA_DEF_H
+
+#include <platform_def.h>
+
+/* Platform Setting */
+#define PLATFORM_MODEL				PLAT_SOCFPGA_N5X
+#define BOOT_SOURCE				BOOT_SOURCE_SDMMC
+
+/* FPGA config helpers */
+#define INTEL_SIP_SMC_FPGA_CONFIG_ADDR		0x400000
+#define INTEL_SIP_SMC_FPGA_CONFIG_SIZE		0x2000000
+
+/* Register Mapping */
+#define SOCFPGA_MMC_REG_BASE			U(0xff808000)
+
+#define SOCFPGA_RSTMGR_REG_BASE			U(0xffd11000)
+#define SOCFPGA_SYSMGR_REG_BASE			U(0xffd12000)
+
+#define SOCFPGA_L4_PER_SCR_REG_BASE			U(0xffd21000)
+#define SOCFPGA_L4_SYS_SCR_REG_BASE			U(0xffd21100)
+#define SOCFPGA_SOC2FPGA_SCR_REG_BASE			U(0xffd21200)
+#define SOCFPGA_LWSOC2FPGA_SCR_REG_BASE			U(0xffd21300)
+
+#endif /* PLAT_SOCFPGA_DEF_H */
diff --git a/plat/intel/soc/n5x/platform.mk b/plat/intel/soc/n5x/platform.mk
new file mode 100644
index 0000000..b72bcc4
--- /dev/null
+++ b/plat/intel/soc/n5x/platform.mk
@@ -0,0 +1,52 @@
+#
+# Copyright (c) 2020-2022, Intel Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES		:=	\
+			-Iplat/intel/soc/n5x/include/			\
+			-Iplat/intel/soc/common/drivers/		\
+			-Iplat/intel/soc/common/include/
+
+# Include GICv2 driver files
+include drivers/arm/gic/v2/gicv2.mk
+DM_GICv2_SOURCES	:=	\
+			${GICV2_SOURCES}                                \
+			plat/common/plat_gicv2.c
+
+
+PLAT_BL_COMMON_SOURCES	:=	\
+			${DM_GICv2_SOURCES}				\
+			drivers/delay_timer/delay_timer.c		\
+			drivers/delay_timer/generic_delay_timer.c  	\
+			drivers/ti/uart/aarch64/16550_console.S		\
+			lib/xlat_tables/aarch64/xlat_tables.c 		\
+			lib/xlat_tables/xlat_tables_common.c 		\
+			plat/intel/soc/common/aarch64/platform_common.c \
+			plat/intel/soc/common/aarch64/plat_helpers.S	\
+			plat/intel/soc/common/socfpga_delay_timer.c     \
+			plat/intel/soc/common/drivers/ccu/ncore_ccu.c
+
+BL2_SOURCES     +=
+
+BL31_SOURCES	+=	\
+		drivers/arm/cci/cci.c					\
+		lib/cpus/aarch64/aem_generic.S				\
+		lib/cpus/aarch64/cortex_a53.S				\
+		plat/common/plat_psci_common.c				\
+		plat/intel/soc/n5x/bl31_plat_setup.c			\
+		plat/intel/soc/common/socfpga_psci.c			\
+		plat/intel/soc/common/socfpga_sip_svc.c			\
+		plat/intel/soc/common/socfpga_topology.c		\
+		plat/intel/soc/common/sip/socfpga_sip_ecc.c             \
+		plat/intel/soc/common/sip/socfpga_sip_fcs.c		\
+		plat/intel/soc/common/soc/socfpga_mailbox.c		\
+		plat/intel/soc/common/soc/socfpga_reset_manager.c
+
+PROGRAMMABLE_RESET_ADDRESS	:= 0
+BL2_AT_EL3			:= 1
+BL2_INV_DCACHE			:= 0
+MULTI_CONSOLE_API		:= 1
+SIMICS_BUILD			:= 0
+USE_COHERENT_MEM		:= 1
diff --git a/plat/intel/soc/stratix10/include/socfpga_plat_def.h b/plat/intel/soc/stratix10/include/socfpga_plat_def.h
index a2bd57b..b84a567 100644
--- a/plat/intel/soc/stratix10/include/socfpga_plat_def.h
+++ b/plat/intel/soc/stratix10/include/socfpga_plat_def.h
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -13,6 +14,10 @@
 #define PLATFORM_MODEL				PLAT_SOCFPGA_STRATIX10
 #define BOOT_SOURCE				BOOT_SOURCE_SDMMC
 
+/* FPGA config helpers */
+#define INTEL_SIP_SMC_FPGA_CONFIG_ADDR		0x400000
+#define INTEL_SIP_SMC_FPGA_CONFIG_SIZE		0x1000000
+
 /* Register Mapping */
 #define SOCFPGA_MMC_REG_BASE                    0xff808000
 
diff --git a/plat/intel/soc/stratix10/platform.mk b/plat/intel/soc/stratix10/platform.mk
index 8bbd010..d9d88d4 100644
--- a/plat/intel/soc/stratix10/platform.mk
+++ b/plat/intel/soc/stratix10/platform.mk
@@ -1,6 +1,6 @@
 #
-# Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
-# Copyright (c) 2019-2020, Intel Corporation. All rights reserved.
+# Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -63,9 +63,12 @@
 		plat/intel/soc/common/socfpga_psci.c			\
 		plat/intel/soc/common/socfpga_sip_svc.c			\
 		plat/intel/soc/common/socfpga_topology.c		\
+		plat/intel/soc/common/sip/socfpga_sip_ecc.c		\
+		plat/intel/soc/common/sip/socfpga_sip_fcs.c		\
 		plat/intel/soc/common/soc/socfpga_mailbox.c		\
 		plat/intel/soc/common/soc/socfpga_reset_manager.c
 
 PROGRAMMABLE_RESET_ADDRESS	:= 0
 BL2_AT_EL3			:= 1
+SIMICS_BUILD			:= 0
 USE_COHERENT_MEM		:= 1
diff --git a/plat/marvell/armada/a3k/common/cm3_system_reset.c b/plat/marvell/armada/a3k/common/cm3_system_reset.c
index 548ff51..f105d59 100644
--- a/plat/marvell/armada/a3k/common/cm3_system_reset.c
+++ b/plat/marvell/armada/a3k/common/cm3_system_reset.c
@@ -58,5 +58,5 @@
 	}
 
 	/* If we reach here, the command is not implemented. */
-	ERROR("System reset command not implemented in WTMI firmware!\n");
+	WARN("System reset command not implemented in WTMI firmware!\n");
 }
diff --git a/plat/mediatek/mt8186/drivers/dfd/plat_dfd.c b/plat/mediatek/mt8186/drivers/dfd/plat_dfd.c
new file mode 100644
index 0000000..ade0837
--- /dev/null
+++ b/plat/mediatek/mt8186/drivers/dfd/plat_dfd.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2022, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <mtk_sip_svc.h>
+#include <plat_dfd.h>
+
+static bool dfd_enabled;
+static uint64_t dfd_base_addr;
+static uint64_t dfd_chain_length;
+static uint64_t dfd_cache_dump;
+
+static void dfd_setup(uint64_t base_addr, uint64_t chain_length,
+		      uint64_t cache_dump)
+{
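+	/* Program the dump buffer base address, expressed in 16MB units. */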
+	mmio_write_32(MCUSYS_DFD_MAP, base_addr >> 24);
+	mmio_write_32(WDT_DEBUG_CTL, WDT_DEBUG_CTL_VAL_0);
+
+	sync_writel(DFD_INTERNAL_CTL, (BIT(0) | BIT(2)));
+
+	mmio_setbits_32(DFD_INTERNAL_CTL, BIT(13));
+	mmio_setbits_32(DFD_INTERNAL_CTL, BIT(3));
+	mmio_setbits_32(DFD_INTERNAL_CTL, (BIT(19) | BIT(20)));
+	mmio_write_32(DFD_INTERNAL_PWR_ON, (BIT(0) | BIT(1) | BIT(3)));
+	mmio_write_32(DFD_CHAIN_LENGTH0, chain_length);
+	mmio_write_32(DFD_INTERNAL_SHIFT_CLK_RATIO, 0);
+	mmio_write_32(DFD_INTERNAL_TEST_SO_0, DFD_INTERNAL_TEST_SO_0_VAL);
+	mmio_write_32(DFD_INTERNAL_NUM_OF_TEST_SO_GROUP, 1);
+
+	mmio_write_32(DFD_TEST_SI_0, DFD_TEST_SI_0_VAL);
+	mmio_write_32(DFD_TEST_SI_1, DFD_TEST_SI_1_VAL);
+
+	sync_writel(DFD_V30_CTL, 1);
+
+	mmio_write_32(DFD_V30_BASE_ADDR, (base_addr & 0xFFF00000));
+
+	/* setup global variables for suspend and resume */
+	dfd_enabled = true;
+	dfd_base_addr = base_addr;
+	dfd_chain_length = chain_length;
+	dfd_cache_dump = cache_dump;
+
+	if ((cache_dump & DFD_CACHE_DUMP_ENABLE) != 0UL) {
+		mmio_write_32(WDT_DEBUG_CTL, WDT_DEBUG_CTL_VAL_1);
+		sync_writel(DFD_V35_ENALBE, 1);
+		sync_writel(DFD_V35_TAP_NUMBER, DFD_V35_TAP_NUMBER_VAL);
+		sync_writel(DFD_V35_TAP_EN, DFD_V35_TAP_EN_VAL);
+		sync_writel(DFD_V35_SEQ0_0, DFD_V35_SEQ0_0_VAL);
+
+		if (cache_dump & DFD_PARITY_ERR_TRIGGER) {
+			sync_writel(DFD_HW_TRIGGER_MASK, DFD_HW_TRIGGER_MASK_VAL);
+			mmio_setbits_32(DFD_INTERNAL_CTL, BIT(4));
+		}
+	}
+	dsbsy();
+}
+
+void dfd_resume(void)
+{
+	if (dfd_enabled == true) {
+		dfd_setup(dfd_base_addr, dfd_chain_length, dfd_cache_dump);
+	}
+}
+
+uint64_t dfd_smc_dispatcher(uint64_t arg0, uint64_t arg1,
+			    uint64_t arg2, uint64_t arg3)
+{
+	uint64_t ret = 0L;
+
+	switch (arg0) {
+	case PLAT_MTK_DFD_SETUP_MAGIC:
+		INFO("[%s] DFD setup call from kernel\n", __func__);
+		dfd_setup(arg1, arg2, arg3);
+		break;
+	case PLAT_MTK_DFD_READ_MAGIC:
+		/* Only allow access up to DFD register base + 0x200. */
+		if (arg1 <= 0x200) {
+			ret = mmio_read_32(MISC1_CFG_BASE + arg1);
+		}
+		break;
+	case PLAT_MTK_DFD_WRITE_MAGIC:
+		/* Only allow access up to DFD register base + 0x200. */
+		if (arg1 <= 0x200) {
+			sync_writel(MISC1_CFG_BASE + arg1, arg2);
+		}
+		break;
+	default:
+		ret = MTK_SIP_E_INVALID_PARAM;
+		break;
+	}
+
+	return ret;
+}
diff --git a/plat/mediatek/mt8186/drivers/dfd/plat_dfd.h b/plat/mediatek/mt8186/drivers/dfd/plat_dfd.h
new file mode 100644
index 0000000..1901ec9
--- /dev/null
+++ b/plat/mediatek/mt8186/drivers/dfd/plat_dfd.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2022, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLAT_DFD_H
+#define PLAT_DFD_H
+
+#include <arch_helpers.h>
+#include <lib/mmio.h>
+#include <platform_def.h>
+
+#define sync_writel(addr, val)	do { mmio_write_32((addr), (val)); \
+				dsbsy(); \
+				} while (0)
+
+#define PLAT_MTK_DFD_SETUP_MAGIC		(0x99716150)
+#define PLAT_MTK_DFD_READ_MAGIC			(0x99716151)
+#define PLAT_MTK_DFD_WRITE_MAGIC		(0x99716152)
+
+#define MCU_BIU_BASE				(MCUCFG_BASE)
+#define MISC1_CFG_BASE				(MCU_BIU_BASE + 0xA040)
+
+#define DFD_INTERNAL_CTL			(MISC1_CFG_BASE + 0x00)
+#define DFD_INTERNAL_PWR_ON			(MISC1_CFG_BASE + 0x08)
+#define DFD_CHAIN_LENGTH0			(MISC1_CFG_BASE + 0x0C)
+#define DFD_INTERNAL_SHIFT_CLK_RATIO		(MISC1_CFG_BASE + 0x10)
+#define DFD_INTERNAL_TEST_SO_0			(MISC1_CFG_BASE + 0x28)
+#define DFD_INTERNAL_NUM_OF_TEST_SO_GROUP	(MISC1_CFG_BASE + 0x30)
+#define DFD_V30_CTL				(MISC1_CFG_BASE + 0x48)
+#define DFD_V30_BASE_ADDR			(MISC1_CFG_BASE + 0x4C)
+#define DFD_TEST_SI_0				(MISC1_CFG_BASE + 0x58)
+#define DFD_TEST_SI_1				(MISC1_CFG_BASE + 0x5C)
+#define DFD_HW_TRIGGER_MASK			(MISC1_CFG_BASE + 0xBC)
+
+#define DFD_V35_ENALBE				(MCU_BIU_BASE + 0xA0A8)
+#define DFD_V35_TAP_NUMBER			(MCU_BIU_BASE + 0xA0AC)
+#define DFD_V35_TAP_EN				(MCU_BIU_BASE + 0xA0B0)
+#define DFD_V35_SEQ0_0				(MCU_BIU_BASE + 0xA0C0)
+#define DFD_V35_SEQ0_1				(MCU_BIU_BASE + 0xA0C4)
+
+#define DFD_CACHE_DUMP_ENABLE			(1U)
+#define DFD_PARITY_ERR_TRIGGER			(2U)
+
+#define MCUSYS_DFD_MAP				(0x10001390)
+#define WDT_DEBUG_CTL				(0x10007048)
+
+#define WDT_DEBUG_CTL_VAL_0			(0x950603A0)
+#define DFD_INTERNAL_TEST_SO_0_VAL		(0x3B)
+#define DFD_TEST_SI_0_VAL			(0x108)
+#define DFD_TEST_SI_1_VAL			(0x20200000)
+
+#define WDT_DEBUG_CTL_VAL_1			(0x95063E80)
+#define DFD_V35_TAP_NUMBER_VAL			(0xA)
+#define DFD_V35_TAP_EN_VAL			(0x3FF)
+#define DFD_V35_SEQ0_0_VAL			(0x63668820)
+#define DFD_HW_TRIGGER_MASK_VAL			(0xC)
+
+void dfd_resume(void);
+uint64_t dfd_smc_dispatcher(uint64_t arg0, uint64_t arg1,
+			    uint64_t arg2, uint64_t arg3);
+
+#endif /* PLAT_DFD_H */
diff --git a/plat/mediatek/mt8186/include/plat_sip_calls.h b/plat/mediatek/mt8186/include/plat_sip_calls.h
index 598a5b8..9e3726b 100644
--- a/plat/mediatek/mt8186/include/plat_sip_calls.h
+++ b/plat/mediatek/mt8186/include/plat_sip_calls.h
@@ -10,6 +10,10 @@
 /*******************************************************************************
  * Plat SiP function constants
  ******************************************************************************/
-#define MTK_PLAT_SIP_NUM_CALLS    0
+#define MTK_PLAT_SIP_NUM_CALLS		(2)
+
+/* DFD */
+#define MTK_SIP_KERNEL_DFD_AARCH32	(0x82000205)
+#define MTK_SIP_KERNEL_DFD_AARCH64	(0xC2000205)
 
 #endif /* PLAT_SIP_CALLS_H */
diff --git a/plat/mediatek/mt8186/plat_pm.c b/plat/mediatek/mt8186/plat_pm.c
index 6bc6b9d..e125c99 100644
--- a/plat/mediatek/mt8186/plat_pm.c
+++ b/plat/mediatek/mt8186/plat_pm.c
@@ -13,6 +13,7 @@
 #include <mt_gic_v3.h>
 #include <mtspmc.h>
 #include <plat/common/platform.h>
+#include <plat_dfd.h>
 #include <plat_mtk_lpm.h>
 #include <plat_params.h>
 #include <plat_pm.h>
@@ -164,6 +165,8 @@
 	mt_gic_distif_restore();
 	gic_sgi_restore_all();
 
+	dfd_resume();
+
 	(void)plat_mt_pm_invoke(plat_mt_pm->pwr_mcusys_on_finished, cpu, state);
 }
 
diff --git a/plat/mediatek/mt8186/plat_sip_calls.c b/plat/mediatek/mt8186/plat_sip_calls.c
index 87ba786..cb66218 100644
--- a/plat/mediatek/mt8186/plat_sip_calls.c
+++ b/plat/mediatek/mt8186/plat_sip_calls.c
@@ -8,6 +8,7 @@
 #include <common/runtime_svc.h>
 #include <mt_spm_vcorefs.h>
 #include <mtk_sip_svc.h>
+#include <plat_dfd.h>
 #include "plat_sip_calls.h"
 
 uintptr_t mediatek_plat_sip_handler(uint32_t smc_fid,
@@ -27,6 +28,11 @@
 		ret = spm_vcorefs_args(x1, x2, x3, (uint64_t *)&x4);
 		SMC_RET2(handle, ret, x4);
 		break;
+	case MTK_SIP_KERNEL_DFD_AARCH32:
+	case MTK_SIP_KERNEL_DFD_AARCH64:
+		ret = dfd_smc_dispatcher(x1, x2, x3, x4);
+		SMC_RET1(handle, ret);
+		break;
 	default:
 		ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
 		break;
diff --git a/plat/mediatek/mt8186/platform.mk b/plat/mediatek/mt8186/platform.mk
index 6108a05..b6d9ca8 100644
--- a/plat/mediatek/mt8186/platform.mk
+++ b/plat/mediatek/mt8186/platform.mk
@@ -15,6 +15,7 @@
 		 -I${MTK_PLAT}/common/lpm/                        \
                  -I${MTK_PLAT_SOC}/drivers/spm/                   \
                  -I${MTK_PLAT_SOC}/drivers/dcm/                   \
+                 -I${MTK_PLAT_SOC}/drivers/dfd/                   \
                  -I${MTK_PLAT_SOC}/drivers/emi_mpu/               \
                  -I${MTK_PLAT_SOC}/drivers/gpio/               \
                  -I${MTK_PLAT_SOC}/drivers/mcdi/                  \
@@ -58,6 +59,7 @@
                 ${MTK_PLAT_SOC}/bl31_plat_setup.c                     \
                 ${MTK_PLAT_SOC}/drivers/dcm/mtk_dcm.c                 \
                 ${MTK_PLAT_SOC}/drivers/dcm/mtk_dcm_utils.c           \
+                ${MTK_PLAT_SOC}/drivers/dfd/plat_dfd.c                \
                 ${MTK_PLAT_SOC}/drivers/emi_mpu/emi_mpu.c             \
                 ${MTK_PLAT_SOC}/drivers/gpio/mtgpio.c                 \
                 ${MTK_PLAT_SOC}/drivers/mcdi/mt_cpu_pm.c              \
diff --git a/plat/st/common/bl2_stm32_io_storage.c b/plat/st/common/bl2_stm32_io_storage.c
index 2d68a50..4391195 100644
--- a/plat/st/common/bl2_stm32_io_storage.c
+++ b/plat/st/common/bl2_stm32_io_storage.c
@@ -379,19 +379,21 @@
 		stm32_sdmmc2_mmc_get_device_size();
 
 #if STM32MP_EMMC_BOOT
-	magic = get_boot_part_ssbl_header();
+	if (mmc_dev_type == MMC_IS_EMMC) {
+		magic = get_boot_part_ssbl_header();
 
-	if (magic == BOOT_API_IMAGE_HEADER_MAGIC_NB) {
-		VERBOSE("%s, header found, jump to emmc load\n", __func__);
-		idx = IMG_IDX_BL33;
-		part = &stm32image_dev_info_spec.part_info[idx];
-		part->part_offset = PLAT_EMMC_BOOT_SSBL_OFFSET;
-		part->bkp_offset = 0U;
-		mmc_device_spec.use_boot_part = true;
+		if (magic == BOOT_API_IMAGE_HEADER_MAGIC_NB) {
+			VERBOSE("%s, header found, jump to emmc load\n", __func__);
+			idx = IMG_IDX_BL33;
+			part = &stm32image_dev_info_spec.part_info[idx];
+			part->part_offset = PLAT_EMMC_BOOT_SSBL_OFFSET;
+			part->bkp_offset = 0U;
+			mmc_device_spec.use_boot_part = true;
 
-		goto emmc_boot;
-	} else {
-		WARN("%s: Can't find STM32 header on a boot partition\n", __func__);
+			goto emmc_boot;
+		} else {
+			WARN("%s: Can't find STM32 header on a boot partition\n", __func__);
+		}
 	}
 #endif
 
diff --git a/services/std_svc/rmmd/rmmd_main.c b/services/std_svc/rmmd/rmmd_main.c
index c4ea706..28d0b01 100644
--- a/services/std_svc/rmmd/rmmd_main.c
+++ b/services/std_svc/rmmd/rmmd_main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -325,30 +325,6 @@
 /* Subscribe to PSCI CPU on to initialize RMM on secondary */
 SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);
 
-static int gtsi_transition_granule(uint64_t pa,
-					unsigned int src_sec_state,
-					unsigned int target_pas)
-{
-	int ret;
-
-	ret = gpt_transition_pas(pa, PAGE_SIZE_4KB, src_sec_state, target_pas);
-
-	/* Convert TF-A error codes into GTSI error codes */
-	if (ret == -EINVAL) {
-		ERROR("[GTSI] Transition failed: invalid %s\n", "address");
-		ERROR("       PA: 0x%" PRIx64 ", SRC: %d, PAS: %d\n", pa,
-		      src_sec_state, target_pas);
-		ret = GRAN_TRANS_RET_BAD_ADDR;
-	} else if (ret == -EPERM) {
-		ERROR("[GTSI] Transition failed: invalid %s\n", "caller/PAS");
-		ERROR("       PA: 0x%" PRIx64 ", SRC: %d, PAS: %d\n", pa,
-		      src_sec_state, target_pas);
-		ret = GRAN_TRANS_RET_BAD_PAS;
-	}
-
-	return ret;
-}
-
 /*******************************************************************************
  * This function handles all SMCs in the range reserved for GTF.
  ******************************************************************************/
@@ -357,6 +333,7 @@
 				void *handle, uint64_t flags)
 {
 	uint32_t src_sec_state;
+	int ret;
 
 	/* Determine which security state this SMC originated from */
 	src_sec_state = caller_sec_state(flags);
@@ -368,13 +345,27 @@
 
 	switch (smc_fid) {
 	case SMC_ASC_MARK_REALM:
-		SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM,
-								GPT_GPI_REALM));
+		ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
+		break;
 	case SMC_ASC_MARK_NONSECURE:
-		SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM,
-								GPT_GPI_NS));
+		ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
+		break;
 	default:
 		WARN("RMM: Unsupported GTF call 0x%08x\n", smc_fid);
 		SMC_RET1(handle, SMC_UNK);
 	}
+
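+	/* Convert TF-A error codes into GTSI error codes */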
+	if (ret == -EINVAL) {
+		ERROR("[GTSI] Transition failed: invalid %s\n", "address");
+		ERROR("       PA: 0x%" PRIx64 ", SRC: %d, PAS: %d\n", x1,
+		      SMC_FROM_REALM, smc_fid);
+		ret = GRAN_TRANS_RET_BAD_ADDR;
+	} else if (ret == -EPERM) {
+		ERROR("[GTSI] Transition failed: invalid %s\n", "caller/PAS");
+		ERROR("       PA: 0x%" PRIx64 ", SRC: %d, PAS: %d\n", x1,
+		      SMC_FROM_REALM, smc_fid);
+		ret = GRAN_TRANS_RET_BAD_PAS;
+	}
+
+	SMC_RET1(handle, ret);
 }