Merge pull request #879 from Summer-ARM/sq/mt-support

ARM platforms: Add support for MT bit in MPIDR
diff --git a/Makefile b/Makefile
index 9f900db..83650a8 100644
--- a/Makefile
+++ b/Makefile
@@ -37,6 +37,11 @@
 # Default goal is build all images
 .DEFAULT_GOAL			:= all
 
+# Avoid any implicit propagation of command line variable definitions to
+# sub-Makefiles, like CFLAGS that we reserved for the firmware images'
+# usage. Other command line options like "-s" are still propagated as usual.
+MAKEOVERRIDES =
+
 MAKE_HELPERS_DIRECTORY := make_helpers/
 include ${MAKE_HELPERS_DIRECTORY}build_macros.mk
 include ${MAKE_HELPERS_DIRECTORY}build_env.mk
@@ -166,6 +171,7 @@
 				common/${ARCH}/debug.S			\
 				lib/${ARCH}/cache_helpers.S		\
 				lib/${ARCH}/misc_helpers.S		\
+				plat/common/${ARCH}/plat_common.c	\
 				plat/common/${ARCH}/platform_helpers.S	\
 				${STDLIB_SRCS}
 
@@ -186,6 +192,7 @@
 				-Iinclude/lib/el3_runtime/${ARCH}	\
 				-Iinclude/lib/pmf			\
 				-Iinclude/lib/psci			\
+				-Iinclude/lib/xlat_tables		\
 				-Iinclude/plat/common			\
 				-Iinclude/services			\
 				${PLAT_INCLUDES}			\
@@ -300,6 +307,11 @@
     endif
 endif
 
+# When building for systems with hardware-assisted coherency, there's no need to
+# use USE_COHERENT_MEM. Require that USE_COHERENT_MEM must be set to 0 too.
+ifeq ($(HW_ASSISTED_COHERENCY)-$(USE_COHERENT_MEM),1-1)
+$(error USE_COHERENT_MEM cannot be enabled with HW_ASSISTED_COHERENCY)
+endif
 
 ################################################################################
 # Process platform overrideable behaviour
@@ -380,6 +392,7 @@
 $(eval $(call assert_boolean,ENABLE_RUNTIME_INSTRUMENTATION))
 $(eval $(call assert_boolean,ERROR_DEPRECATED))
 $(eval $(call assert_boolean,GENERATE_COT))
+$(eval $(call assert_boolean,HW_ASSISTED_COHERENCY))
 $(eval $(call assert_boolean,LOAD_IMAGE_V2))
 $(eval $(call assert_boolean,NS_TIMER_SWITCH))
 $(eval $(call assert_boolean,PL011_GENERIC_UART))
@@ -414,6 +427,7 @@
 $(eval $(call add_define,ENABLE_PSCI_STAT))
 $(eval $(call add_define,ENABLE_RUNTIME_INSTRUMENTATION))
 $(eval $(call add_define,ERROR_DEPRECATED))
+$(eval $(call add_define,HW_ASSISTED_COHERENCY))
 $(eval $(call add_define,LOAD_IMAGE_V2))
 $(eval $(call add_define,LOG_LEVEL))
 $(eval $(call add_define,NS_TIMER_SWITCH))
diff --git a/bl1/bl1.mk b/bl1/bl1.mk
index 45ad01e..3f3bedb 100644
--- a/bl1/bl1.mk
+++ b/bl1/bl1.mk
@@ -36,8 +36,8 @@
 				lib/cpus/${ARCH}/cpu_helpers.S		\
 				lib/cpus/errata_report.c		\
 				lib/el3_runtime/${ARCH}/context_mgmt.c	\
-				plat/common/plat_bl1_common.c
-
+				plat/common/plat_bl1_common.c		\
+				plat/common/${ARCH}/platform_up_stack.S
 
 ifeq (${ARCH},aarch64)
 BL1_SOURCES		+=	lib/el3_runtime/aarch64/context.S
diff --git a/bl2/bl2.mk b/bl2/bl2.mk
index f823ef4..27673b3 100644
--- a/bl2/bl2.mk
+++ b/bl2/bl2.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
@@ -31,7 +31,8 @@
 BL2_SOURCES		+=	bl2/bl2_main.c				\
 				bl2/${ARCH}/bl2_entrypoint.S		\
 				bl2/${ARCH}/bl2_arch_setup.c		\
-				lib/locks/exclusive/${ARCH}/spinlock.S
+				lib/locks/exclusive/${ARCH}/spinlock.S	\
+				plat/common/${ARCH}/platform_up_stack.S
 
 ifeq (${ARCH},aarch64)
 BL2_SOURCES		+=	common/aarch64/early_exceptions.S
diff --git a/bl2u/bl2u.mk b/bl2u/bl2u.mk
index aa9de54..ef70bb2 100644
--- a/bl2u/bl2u.mk
+++ b/bl2u/bl2u.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
@@ -30,6 +30,7 @@
 
 BL2U_SOURCES		+=	bl2u/bl2u_main.c			\
 				bl2u/aarch64/bl2u_entrypoint.S		\
-				common/aarch64/early_exceptions.S
+				common/aarch64/early_exceptions.S	\
+				plat/common/aarch64/platform_up_stack.S
 
 BL2U_LINKERFILE		:=	bl2u/bl2u.ld.S
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index d14a68d..6238329 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -33,7 +33,7 @@
 #include <el3_common_macros.S>
 #include <pmf_asm_macros.S>
 #include <runtime_instr.h>
-#include <xlat_tables.h>
+#include <xlat_tables_defs.h>
 
 	.globl	bl31_entrypoint
 	.globl	bl31_warm_entrypoint
@@ -180,24 +180,29 @@
 		_init_c_runtime=0				\
 		_exception_vectors=runtime_exceptions
 
-	/* --------------------------------------------
-	 * Enable the MMU with the DCache disabled. It
-	 * is safe to use stacks allocated in normal
-	 * memory as a result. All memory accesses are
-	 * marked nGnRnE when the MMU is disabled. So
-	 * all the stack writes will make it to memory.
-	 * All memory accesses are marked Non-cacheable
-	 * when the MMU is enabled but D$ is disabled.
-	 * So used stack memory is guaranteed to be
-	 * visible immediately after the MMU is enabled
-	 * Enabling the DCache at the same time as the
-	 * MMU can lead to speculatively fetched and
-	 * possibly stale stack memory being read from
-	 * other caches. This can lead to coherency
-	 * issues.
-	 * --------------------------------------------
+	/*
+	 * We're about to enable MMU and participate in PSCI state coordination.
+	 *
+	 * The PSCI implementation invokes platform routines that enable CPUs to
+	 * participate in coherency. On a system where CPUs are not
+	 * cache-coherent out of reset, having caches enabled until such time
+	 * might lead to coherency issues (resulting from stale data getting
+	 * speculatively fetched, among others). Therefore we keep data caches
+	 * disabled while enabling the MMU, thereby forcing data accesses to
+	 * have non-cacheable, nGnRnE attributes (these will always be coherent
+	 * with main memory).
+	 *
+	 * On systems with hardware-assisted coherency, where CPUs are expected
+	 * to be cache-coherent out of reset without needing explicit software
+	 * intervention, PSCI need not invoke platform routines to enter
+	 * coherency (as CPUs already are); and there's no reason to have caches
+	 * disabled either.
 	 */
+#if HW_ASSISTED_COHERENCY
+	mov	x0, #0
+#else
 	mov	x0, #DISABLE_DCACHE
+#endif
 	bl	bl31_plat_enable_mmu
 
 	bl	psci_warmboot_entrypoint
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 4de511b..c89ebc0 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
@@ -37,6 +37,7 @@
 				bl31/aarch64/crash_reporting.S			\
 				bl31/bl31_context_mgmt.c			\
 				common/runtime_svc.c				\
+				plat/common/aarch64/platform_mp_stack.S		\
 				services/std_svc/std_svc_setup.c		\
 				${PSCI_LIB_SOURCES}
 
diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S
index e2ab923..c7f60b5 100644
--- a/bl32/sp_min/aarch32/entrypoint.S
+++ b/bl32/sp_min/aarch32/entrypoint.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -36,7 +36,7 @@
 #include <runtime_svc.h>
 #include <smcc_helpers.h>
 #include <smcc_macros.S>
-#include <xlat_tables.h>
+#include <xlat_tables_defs.h>
 
 	.globl	sp_min_vector_table
 	.globl	sp_min_entrypoint
@@ -231,24 +231,27 @@
 		_init_c_runtime=0				\
 		_exception_vectors=sp_min_vector_table
 
-	/* --------------------------------------------
-	 * Enable the MMU with the DCache disabled. It
-	 * is safe to use stacks allocated in normal
-	 * memory as a result. All memory accesses are
-	 * marked nGnRnE when the MMU is disabled. So
-	 * all the stack writes will make it to memory.
-	 * All memory accesses are marked Non-cacheable
-	 * when the MMU is enabled but D$ is disabled.
-	 * So used stack memory is guaranteed to be
-	 * visible immediately after the MMU is enabled
-	 * Enabling the DCache at the same time as the
-	 * MMU can lead to speculatively fetched and
-	 * possibly stale stack memory being read from
-	 * other caches. This can lead to coherency
-	 * issues.
-	 * --------------------------------------------
+	/*
+	 * We're about to enable MMU and participate in PSCI state coordination.
+	 *
+	 * The PSCI implementation invokes platform routines that enable CPUs to
+	 * participate in coherency. On a system where CPUs are not
+	 * cache-coherent out of reset, having caches enabled until such time
+	 * might lead to coherency issues (resulting from stale data getting
+	 * speculatively fetched, among others). Therefore we keep data caches
+	 * disabled while enabling the MMU, thereby forcing data accesses to
+	 * have non-cacheable, nGnRnE attributes (these will always be coherent
+	 * with main memory).
+	 *
+	 * On systems where CPUs are cache-coherent out of reset, however, PSCI
+	 * need not invoke platform routines to enter coherency (as CPUs already
+	 * are), and there's no reason to have caches disabled either.
 	 */
+#if HW_ASSISTED_COHERENCY
+	mov	r0, #0
+#else
 	mov	r0, #DISABLE_DCACHE
+#endif
 	bl	bl32_plat_enable_mmu
 
 	bl	sp_min_warm_boot
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
index bdb882a..182f314 100644
--- a/bl32/tsp/aarch64/tsp_entrypoint.S
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -31,7 +31,7 @@
 #include <arch.h>
 #include <asm_macros.S>
 #include <tsp.h>
-#include <xlat_tables.h>
+#include <xlat_tables_defs.h>
 #include "../tsp_private.h"
 
 
diff --git a/common/bl_common.c b/common/bl_common.c
index 1d66530..58d8e86 100644
--- a/common/bl_common.c
+++ b/common/bl_common.c
@@ -39,7 +39,7 @@
 #include <platform.h>
 #include <string.h>
 #include <utils.h>
-#include <xlat_tables.h>
+#include <xlat_tables_defs.h>
 
 uintptr_t page_align(uintptr_t value, unsigned dir)
 {
diff --git a/docs/cpu-specific-build-macros.md b/docs/cpu-specific-build-macros.md
index a743487..eb23bcd 100644
--- a/docs/cpu-specific-build-macros.md
+++ b/docs/cpu-specific-build-macros.md
@@ -58,13 +58,23 @@
 
 *   `ERRATA_A53_836870`: This applies errata 836870 workaround to Cortex-A53
      CPU. This needs to be enabled only for revision <= r0p3 of the CPU. From
-     r0p4 and onwards, this errata is enabled by default.
+     r0p4 and onwards, this errata is enabled by default in hardware.
+
+*   `ERRATA_A53_855873`: This applies errata 855873 workaround to Cortex-A53
+     CPUs. Though the erratum is present in every revision of the CPU,
+     this workaround is only applied to CPUs from r0p3 onwards, which feature
+     a chicken bit in CPUACTLR_EL1 to enable a hardware workaround.
+     Earlier revisions of the CPU have other errata which require the same
+     workaround in software, so they should be covered anyway.
 
 For Cortex-A57, following errata build flags are defined :
 
 *   `ERRATA_A57_806969`: This applies errata 806969 workaround to Cortex-A57
      CPU. This needs to be enabled only for revision r0p0 of the CPU.
 
+*   `ERRATA_A57_813419`: This applies errata 813419 workaround to Cortex-A57
+     CPU. This needs to be enabled only for revision r0p0 of the CPU.
+
 *   `ERRATA_A57_813420`: This applies errata 813420 workaround to Cortex-A57
      CPU. This needs to be enabled only for revision r0p0 of the CPU.
 
diff --git a/docs/firmware-design.md b/docs/firmware-design.md
index 523fa55..45e56e6 100644
--- a/docs/firmware-design.md
+++ b/docs/firmware-design.md
@@ -1752,7 +1752,7 @@
     `name`: The name of the ToC. This is currently used to validate the header.
     `serial_number`: A non-zero number provided by the creation tool
     `flags`: Flags associated with this data.
-        Bits 0-13: Reserved
+        Bits 0-31: Reserved
         Bits 32-47: Platform defined
         Bits 48-63: Reserved
 
diff --git a/docs/plat/nvidia-tegra.md b/docs/plat/nvidia-tegra.md
index f82085b..b45fec6 100644
--- a/docs/plat/nvidia-tegra.md
+++ b/docs/plat/nvidia-tegra.md
@@ -84,3 +84,10 @@
 parameter to be used during the 'SYSTEM SUSPEND' call. The state-id field
 is implementation defined on Tegra SoCs and is preferably defined by
 tegra_def.h.
+
+Tegra configs
+=============
+
+* 'tegra_enable_l2_ecc_parity_prot': This flag enables the L2 ECC and Parity
+   Protection bit, for ARM Cortex-A57 CPUs, during CPU boot. This flag will
+   be enabled by Tegra SoCs during 'Cluster power up' or 'System Suspend' exit.
diff --git a/docs/porting-guide.md b/docs/porting-guide.md
index a5e5966..65518ff 100644
--- a/docs/porting-guide.md
+++ b/docs/porting-guide.md
@@ -420,14 +420,23 @@
     TSP's interrupt handling code.
 
 If the platform port uses the translation table library code, the following
-constant must also be defined:
+constants must also be defined:
+
+*   **#define : PLAT_XLAT_TABLES_DYNAMIC**
+
+    Optional flag that can be set per-image to enable the dynamic allocation of
+    regions even when the MMU is enabled. If not defined, only static
+    functionality will be available, if defined and set to 1 it will also
+    include the dynamic functionality.
 
 *   **#define : MAX_XLAT_TABLES**
 
     Defines the maximum number of translation tables that are allocated by the
     translation table library code. To minimize the amount of runtime memory
     used, choose the smallest value needed to map the required virtual addresses
-    for each BL stage.
+    for each BL stage. If `PLAT_XLAT_TABLES_DYNAMIC` flag is enabled for a BL
+    image, `MAX_XLAT_TABLES` must be defined to accommodate the dynamic regions
+    as well.
 
 *   **#define : MAX_MMAP_REGIONS**
 
@@ -438,7 +447,9 @@
     that should be mapped. Then, the translation table library will create the
     corresponding tables and descriptors at runtime. To minimize the amount of
     runtime memory used, choose the smallest value needed to register the
-    required regions for each BL stage.
+    required regions for each BL stage. If `PLAT_XLAT_TABLES_DYNAMIC` flag is
+    enabled for a BL image, `MAX_MMAP_REGIONS` must be defined to accommodate
+    the dynamic regions as well.
 
 *   **#define : ADDR_SPACE_SIZE**
 
diff --git a/docs/psci-lib-integration-guide.md b/docs/psci-lib-integration-guide.md
index f290966..d81b328 100644
--- a/docs/psci-lib-integration-guide.md
+++ b/docs/psci-lib-integration-guide.md
@@ -176,7 +176,9 @@
    * The page tables must be setup and the MMU enabled
    * The C runtime environment must be setup and stack initialized
    * The Data cache must be enabled prior to invoking any of the PSCI library
-     interfaces except for `psci_warmboot_entrypoint()`.
+     interfaces except for `psci_warmboot_entrypoint()`. For
+     `psci_warmboot_entrypoint()`, if the build option `HW_ASSISTED_COHERENCY`
+     is enabled, however, data caches are expected to be enabled.
 
 Further requirements for each interface can be found in the interface
 description.
@@ -270,11 +272,11 @@
     Return   : void
 
 This function performs the warm boot initialization/restoration as mandated by
-[PSCI spec]. For AArch32, on wakeup from power down the CPU resets to secure
-SVC mode and the EL3 Runtime Software must perform the prerequisite
-initializations mentioned at top of this section. This function must be called
-with Data cache disabled but with MMU initialized and enabled. The major
-actions performed by this function are:
+[PSCI spec]. For AArch32, on wakeup from power down the CPU resets to secure SVC
+mode and the EL3 Runtime Software must perform the prerequisite initializations
+mentioned at top of this section. This function must be called with Data cache
+disabled (unless build option `HW_ASSISTED_COHERENCY` is enabled) but with MMU
+initialized and enabled. The major actions performed by this function are:
 
   * Invalidates the stack and enables the data cache.
   * Initializes architecture and PSCI state coordination.
diff --git a/docs/user-guide.md b/docs/user-guide.md
index 9a2562c..2770b2c 100644
--- a/docs/user-guide.md
+++ b/docs/user-guide.md
@@ -334,11 +334,20 @@
 *   `HANDLE_EA_EL3_FIRST`: When defined External Aborts and SError Interrupts
     will be always trapped in EL3 i.e. in BL31 at runtime.
 
+*   `HW_ASSISTED_COHERENCY`: On most ARM systems to-date, platform-specific
+    software operations are required for CPUs to enter and exit coherency.
+    However, there exists newer systems where CPUs' entry to and exit from
+    coherency is managed in hardware. Such systems require software to only
+    initiate the operations, and the rest is managed in hardware, minimizing
+    active software management. In such systems, this boolean option enables ARM
+    Trusted Firmware to carry out build and run-time optimizations during boot
+    and power management operations. This option defaults to 0.
+
 *   `LOAD_IMAGE_V2`: Boolean option to enable support for new version (v2) of
     image loading, which provides more flexibility and scalability around what
     images are loaded and executed during boot. Default is 0.
-    Note: `TRUSTED_BOARD_BOOT` is currently not supported when `LOAD_IMAGE_V2`
-    is enabled.
+    Note: `TRUSTED_BOARD_BOOT` is currently only supported for AArch64 when
+    `LOAD_IMAGE_V2` is enabled.
 
 *   `LOG_LEVEL`: Chooses the log level, which controls the amount of console log
     output compiled into the build. This should be one of the following:
diff --git a/drivers/arm/ccn/ccn.c b/drivers/arm/ccn/ccn.c
index d739c6b..ca06182 100644
--- a/drivers/arm/ccn/ccn.c
+++ b/drivers/arm/ccn/ccn.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -38,7 +38,7 @@
 #include "ccn_private.h"
 
 static const ccn_desc_t *ccn_plat_desc;
-#ifdef IMAGE_BL31
+#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
 DEFINE_BAKERY_LOCK(ccn_lock);
 #endif
 
@@ -285,7 +285,7 @@
 	assert(ccn_plat_desc);
 	assert(ccn_plat_desc->periphbase);
 
-#ifdef IMAGE_BL31
+#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
 	bakery_lock_get(&ccn_lock);
 #endif
 	start_region_id = region_id;
@@ -305,7 +305,7 @@
 						   rn_id_map);
 	}
 
-#ifdef IMAGE_BL31
+#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
 	bakery_lock_release(&ccn_lock);
 #endif
 }
diff --git a/drivers/arm/gic/v2/gicv2_main.c b/drivers/arm/gic/v2/gicv2_main.c
index 305a8b0..fcc4b8b 100644
--- a/drivers/arm/gic/v2/gicv2_main.c
+++ b/drivers/arm/gic/v2/gicv2_main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -158,6 +158,17 @@
 
 	driver_data = plat_driver_data;
 
+	/*
+	 * The GIC driver data is initialized by the primary CPU with caches
+	 * enabled. When the secondary CPU boots up, it initializes the
+	 * GICC/GICR interface with the caches disabled. Hence flush the
+	 * driver_data to ensure coherency. This is not required if the
+	 * platform has HW_ASSISTED_COHERENCY enabled.
+	 */
+#if !HW_ASSISTED_COHERENCY
+	flush_dcache_range((uintptr_t) &driver_data, sizeof(driver_data));
+	flush_dcache_range((uintptr_t) driver_data, sizeof(*driver_data));
+#endif
 	INFO("ARM GICv2 driver initialized\n");
 }
 
diff --git a/drivers/arm/gic/v3/gicv3_main.c b/drivers/arm/gic/v3/gicv3_main.c
index 5abaa1c..2efab4c 100644
--- a/drivers/arm/gic/v3/gicv3_main.c
+++ b/drivers/arm/gic/v3/gicv3_main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -116,6 +116,18 @@
 
 	driver_data = plat_driver_data;
 
+	/*
+	 * The GIC driver data is initialized by the primary CPU with caches
+	 * enabled. When the secondary CPU boots up, it initializes the
+	 * GICC/GICR interface with the caches disabled. Hence flush the
+	 * driver_data to ensure coherency. This is not required if the
+	 * platform has HW_ASSISTED_COHERENCY enabled.
+	 */
+#if !HW_ASSISTED_COHERENCY
+	flush_dcache_range((uintptr_t) &driver_data, sizeof(driver_data));
+	flush_dcache_range((uintptr_t) driver_data, sizeof(*driver_data));
+#endif
+
 	INFO("GICv3 %s legacy support detected."
 			" ARM GICV3 driver initialized in EL3\n",
 			gicv2_compat ? "with" : "without");
diff --git a/drivers/io/io_memmap.c b/drivers/io/io_memmap.c
index 5104fb1..a97df6b 100644
--- a/drivers/io/io_memmap.c
+++ b/drivers/io/io_memmap.c
@@ -125,8 +125,6 @@
 	int result = -ENOMEM;
 	const io_block_spec_t *block_spec = (io_block_spec_t *)spec;
 
-	assert(block_spec->length >= 0);
-
 	/* Since we need to track open state for seek() we only allow one open
 	 * spec at a time. When we have dynamic memory we can malloc and set
 	 * entity->info.
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
index 14212d5..234ceeb 100644
--- a/include/lib/aarch32/arch.h
+++ b/include/lib/aarch32/arch.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -333,6 +333,15 @@
 #define PMCR_N_BITS		(PMCR_N_MASK << PMCR_N_SHIFT)
 
 /*******************************************************************************
+ * Definitions of register offsets, fields and macros for CPU system
+ * instructions.
+ ******************************************************************************/
+
+#define TLBI_ADDR_SHIFT		0
+#define TLBI_ADDR_MASK		0xFFFFF000
+#define TLBI_ADDR(x)		(((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
+
+/*******************************************************************************
  * Definitions of register offsets and fields in the CNTCTLBase Frame of the
  * system level implementation of the Generic Timer.
  ******************************************************************************/
@@ -379,6 +388,8 @@
 #define TLBIALLIS	p15, 0, c8, c3, 0
 #define TLBIMVA		p15, 0, c8, c7, 1
 #define TLBIMVAA	p15, 0, c8, c7, 3
+#define TLBIMVAAIS	p15, 0, c8, c3, 3
+#define BPIALLIS	p15, 0, c7, c1, 6
 #define HSCTLR		p15, 4, c1, c0, 0
 #define HCR		p15, 4, c1, c1, 0
 #define HCPTR		p15, 4, c1, c1, 2
diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h
index 3a82a7b..a7d33d8 100644
--- a/include/lib/aarch32/arch_helpers.h
+++ b/include/lib/aarch32/arch_helpers.h
@@ -131,6 +131,13 @@
 	__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
 }
 
+#define _DEFINE_BPIOP_FUNC(_op, coproc, opc1, CRn, CRm, opc2)		\
+static inline void bpi##_op(void)					\
+{									\
+	u_register_t v = 0;						\
+	__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
 #define _DEFINE_TLBIOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2)	\
 static inline void tlbi##_op(u_register_t v)				\
 {									\
@@ -145,6 +152,10 @@
 #define DEFINE_TLBIOP_PARAM_FUNC(_op, ...)				\
 	_DEFINE_TLBIOP_PARAM_FUNC(_op, __VA_ARGS__)
 
+/* Define function for simple BPI operation */
+#define DEFINE_BPIOP_FUNC(_op, ...)					\
+	_DEFINE_BPIOP_FUNC(_op, __VA_ARGS__)
+
 /**********************************************************************
  * Macros to create inline functions for DC operations
  *********************************************************************/
@@ -199,6 +210,7 @@
 DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
 DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
 DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
 DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
 DEFINE_SYSOP_FUNC(isb)
 
@@ -263,6 +275,12 @@
 DEFINE_TLBIOP_FUNC(allis, TLBIALLIS)
 DEFINE_TLBIOP_PARAM_FUNC(mva, TLBIMVA)
 DEFINE_TLBIOP_PARAM_FUNC(mvaa, TLBIMVAA)
+DEFINE_TLBIOP_PARAM_FUNC(mvaais, TLBIMVAAIS)
+
+/*
+ * BPI operation prototypes.
+ */
+DEFINE_BPIOP_FUNC(allis, BPIALLIS)
 
 /*
  * DC operation prototypes
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index a854e96..a2c736c 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -31,6 +31,7 @@
 #ifndef __ARCH_H__
 #define __ARCH_H__
 
+#include <utils.h>
 
 /*******************************************************************************
  * MIDR bit definitions
@@ -419,6 +420,15 @@
 #define EC_BITS(x)			(x >> ESR_EC_SHIFT) & ESR_EC_MASK
 
 /*******************************************************************************
+ * Definitions of register offsets, fields and macros for CPU system
+ * instructions.
+ ******************************************************************************/
+
+#define TLBI_ADDR_SHIFT		12
+#define TLBI_ADDR_MASK		ULL(0x00000FFFFFFFFFFF)
+#define TLBI_ADDR(x)		(((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
+
+/*******************************************************************************
  * Definitions of register offsets and fields in the CNTCTLBase Frame of the
  * system level implementation of the Generic Timer.
  ******************************************************************************/
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index d70c9ae..4f71105 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -116,14 +116,58 @@
 /*******************************************************************************
  * TLB maintenance accessor prototypes
  ******************************************************************************/
+
+#if ERRATA_A57_813419
+/*
+ * Define function for TLBI instruction with type specifier that implements
+ * the workaround for errata 813419 of Cortex-A57.
+ */
+#define DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(_type)\
+static inline void tlbi ## _type(void)			\
+{							\
+	__asm__("tlbi " #_type "\n"			\
+		"dsb ish\n"				\
+		"tlbi " #_type);			\
+}
+
+/*
+ * Define function for TLBI instruction with register parameter that implements
+ * the workaround for errata 813419 of Cortex-A57.
+ */
+#define DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(_type)	\
+static inline void tlbi ## _type(uint64_t v)			\
+{								\
+	__asm__("tlbi " #_type ", %0\n"				\
+		"dsb ish\n"					\
+		"tlbi " #_type ", %0" : : "r" (v));		\
+}
+#endif /* ERRATA_A57_813419 */
+
 DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1)
 DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1is)
 DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2)
 DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2is)
+#if ERRATA_A57_813419
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(alle3)
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(alle3is)
+#else
 DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3)
 DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3is)
+#endif
 DEFINE_SYSOP_TYPE_FUNC(tlbi, vmalle1)
 
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaae1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaale1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae2is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale2is)
+#if ERRATA_A57_813419
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(vae3is)
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(vale3is)
+#else
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae3is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale3is)
+#endif
+
 /*******************************************************************************
  * Cache maintenance accessor prototypes
  ******************************************************************************/
@@ -181,6 +225,7 @@
 DEFINE_SYSOP_TYPE_FUNC(dmb, st)
 DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
 DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
 DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
 DEFINE_SYSOP_FUNC(isb)
 
diff --git a/include/lib/aarch64/smcc_helpers.h b/include/lib/aarch64/smcc_helpers.h
index 6e63383..7fae7d8 100644
--- a/include/lib/aarch64/smcc_helpers.h
+++ b/include/lib/aarch64/smcc_helpers.h
@@ -56,6 +56,22 @@
 	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3));	\
 	SMC_RET3(_h, (_x0), (_x1), (_x2));			\
 }
+#define SMC_RET5(_h, _x0, _x1, _x2, _x3, _x4)	{		\
+	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X4, (_x4));	\
+	SMC_RET4(_h, (_x0), (_x1), (_x2), (_x3));		\
+}
+#define SMC_RET6(_h, _x0, _x1, _x2, _x3, _x4, _x5)	{	\
+	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X5, (_x5));	\
+	SMC_RET5(_h, (_x0), (_x1), (_x2), (_x3), (_x4));	\
+}
+#define SMC_RET7(_h, _x0, _x1, _x2, _x3, _x4, _x5, _x6)	{	\
+	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X6, (_x6));	\
+	SMC_RET6(_h, (_x0), (_x1), (_x2), (_x3), (_x4), (_x5));	\
+}
+#define SMC_RET8(_h, _x0, _x1, _x2, _x3, _x4, _x5, _x6, _x7) {	\
+	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X7, (_x7));	\
+	SMC_RET7(_h, (_x0), (_x1), (_x2), (_x3), (_x4), (_x5), (_x6));	\
+}
 
 /*
  * Convenience macros to access general purpose registers using handle provided
diff --git a/include/lib/cpus/aarch64/cortex_a53.h b/include/lib/cpus/aarch64/cortex_a53.h
index 6976b80..484eb63 100644
--- a/include/lib/cpus/aarch64/cortex_a53.h
+++ b/include/lib/cpus/aarch64/cortex_a53.h
@@ -67,6 +67,7 @@
 #define CPUACTLR_EL1			S3_1_C15_C2_0	/* Instruction def. */
 
 #define CPUACTLR_DTAH			(1 << 24)
+#define CPUACTLR_ENDCCASCI		(1 << 44)
 
 /*******************************************************************************
  * L2 Auxiliary Control register specific definitions.
diff --git a/include/lib/cpus/aarch64/cortex_a57.h b/include/lib/cpus/aarch64/cortex_a57.h
index c5a218b..9229a56 100644
--- a/include/lib/cpus/aarch64/cortex_a57.h
+++ b/include/lib/cpus/aarch64/cortex_a57.h
@@ -87,6 +87,8 @@
 #define L2_DATA_RAM_LATENCY_3_CYCLES	0x2
 #define L2_TAG_RAM_LATENCY_3_CYCLES	0x2
 
+#define L2_ECC_PARITY_PROTECTION_BIT	(1 << 21)
+
 /*******************************************************************************
  * L2 Extended Control register specific definitions.
  ******************************************************************************/
diff --git a/include/lib/stdlib/string.h b/include/lib/stdlib/string.h
index 902d9c1..56677b2 100644
--- a/include/lib/stdlib/string.h
+++ b/include/lib/stdlib/string.h
@@ -52,6 +52,7 @@
 void	*memchr(const void *, int, size_t) __pure;
 int	 memcmp(const void *, const void *, size_t) __pure;
 void	*memcpy(void * __restrict, const void * __restrict, size_t);
+void	*memcpy16(void * __restrict, const void * __restrict, size_t);
 void	*memmove(void *, const void *, size_t);
 void	*memset(void *, int, size_t);
 
diff --git a/include/lib/xlat_tables.h b/include/lib/xlat_tables.h
deleted file mode 100644
index f447618..0000000
--- a/include/lib/xlat_tables.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __XLAT_TABLES_H__
-#define __XLAT_TABLES_H__
-
-/* Miscellaneous MMU related constants */
-#define NUM_2MB_IN_GB		(1 << 9)
-#define NUM_4K_IN_2MB		(1 << 9)
-#define NUM_GB_IN_4GB		(1 << 2)
-
-#define TWO_MB_SHIFT		21
-#define ONE_GB_SHIFT		30
-#define FOUR_KB_SHIFT		12
-
-#define ONE_GB_INDEX(x)		((x) >> ONE_GB_SHIFT)
-#define TWO_MB_INDEX(x)		((x) >> TWO_MB_SHIFT)
-#define FOUR_KB_INDEX(x)	((x) >> FOUR_KB_SHIFT)
-
-#define INVALID_DESC		0x0
-#define BLOCK_DESC		0x1 /* Table levels 0-2 */
-#define TABLE_DESC		0x3 /* Table levels 0-2 */
-#define PAGE_DESC		0x3 /* Table level 3 */
-
-#define FIRST_LEVEL_DESC_N	ONE_GB_SHIFT
-#define SECOND_LEVEL_DESC_N	TWO_MB_SHIFT
-#define THIRD_LEVEL_DESC_N	FOUR_KB_SHIFT
-
-#define XN			(1ull << 2)
-#define PXN			(1ull << 1)
-#define CONT_HINT		(1ull << 0)
-
-#define UPPER_ATTRS(x)		(x & 0x7) << 52
-#define NON_GLOBAL		(1 << 9)
-#define ACCESS_FLAG		(1 << 8)
-#define NSH			(0x0 << 6)
-#define OSH			(0x2 << 6)
-#define ISH			(0x3 << 6)
-
-#define PAGE_SIZE_SHIFT		FOUR_KB_SHIFT
-#define PAGE_SIZE		(1 << PAGE_SIZE_SHIFT)
-#define PAGE_SIZE_MASK		(PAGE_SIZE - 1)
-#define IS_PAGE_ALIGNED(addr)	(((addr) & PAGE_SIZE_MASK) == 0)
-
-#define XLAT_ENTRY_SIZE_SHIFT	3 /* Each MMU table entry is 8 bytes (1 << 3) */
-#define XLAT_ENTRY_SIZE		(1 << XLAT_ENTRY_SIZE_SHIFT)
-
-#define XLAT_TABLE_SIZE_SHIFT	PAGE_SIZE_SHIFT
-#define XLAT_TABLE_SIZE		(1 << XLAT_TABLE_SIZE_SHIFT)
-
-#ifdef AARCH32
-#define XLAT_TABLE_LEVEL_MIN	1
-#else
-#define XLAT_TABLE_LEVEL_MIN	0
-#endif /* AARCH32 */
-
-#define XLAT_TABLE_LEVEL_MAX	3
-
-/* Values for number of entries in each MMU translation table */
-#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
-#define XLAT_TABLE_ENTRIES	(1 << XLAT_TABLE_ENTRIES_SHIFT)
-#define XLAT_TABLE_ENTRIES_MASK	(XLAT_TABLE_ENTRIES - 1)
-
-/* Values to convert a memory address to an index into a translation table */
-#define L3_XLAT_ADDRESS_SHIFT	PAGE_SIZE_SHIFT
-#define L2_XLAT_ADDRESS_SHIFT	(L3_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
-#define L1_XLAT_ADDRESS_SHIFT	(L2_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
-#define L0_XLAT_ADDRESS_SHIFT	(L1_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
-#define XLAT_ADDR_SHIFT(level)	(PAGE_SIZE_SHIFT + \
-		  ((XLAT_TABLE_LEVEL_MAX - (level)) * XLAT_TABLE_ENTRIES_SHIFT))
-
-#define XLAT_BLOCK_SIZE(level)	((u_register_t)1 << XLAT_ADDR_SHIFT(level))
-#define XLAT_BLOCK_MASK(level)	(XLAT_BLOCK_SIZE(level) - 1)
-
-/*
- * AP[1] bit is ignored by hardware and is
- * treated as if it is One in EL2/EL3
- */
-#define AP_RO			(0x1 << 5)
-#define AP_RW			(0x0 << 5)
-
-#define NS				(0x1 << 3)
-#define ATTR_NON_CACHEABLE_INDEX	0x2
-#define ATTR_DEVICE_INDEX		0x1
-#define ATTR_IWBWA_OWBWA_NTR_INDEX	0x0
-#define LOWER_ATTRS(x)			(((x) & 0xfff) << 2)
-#define ATTR_NON_CACHEABLE		(0x44)
-#define ATTR_DEVICE			(0x4)
-#define ATTR_IWBWA_OWBWA_NTR		(0xff)
-#define MAIR_ATTR_SET(attr, index)	(attr << (index << 3))
-
-/*
- * Flags to override default values used to program system registers while
- * enabling the MMU.
- */
-#define DISABLE_DCACHE		(1 << 0)
-
-#ifndef __ASSEMBLY__
-#include <stddef.h>
-#include <stdint.h>
-
-/* Helper macro to define entries for mmap_region_t. It creates
- * identity mappings for each region.
- */
-#define MAP_REGION_FLAT(adr, sz, attr) MAP_REGION(adr, adr, sz, attr)
-
-/* Helper macro to define entries for mmap_region_t. It allows to
- * re-map address mappings from 'pa' to 'va' for each region.
- */
-#define MAP_REGION(pa, va, sz, attr) {(pa), (va), (sz), (attr)}
-
-/*
- * Shifts and masks to access fields of an mmap_attr_t
- */
-#define MT_TYPE_MASK	0x7
-#define MT_TYPE(_attr)	((_attr) & MT_TYPE_MASK)
-/* Access permissions (RO/RW) */
-#define MT_PERM_SHIFT	3
-/* Security state (SECURE/NS) */
-#define MT_SEC_SHIFT	4
-/* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */
-#define MT_EXECUTE_SHIFT	5
-
-/*
- * Memory mapping attributes
- */
-typedef enum  {
-	/*
-	 * Memory types supported.
-	 * These are organised so that, going down the list, the memory types
-	 * are getting weaker; conversely going up the list the memory types are
-	 * getting stronger.
-	 */
-	MT_DEVICE,
-	MT_NON_CACHEABLE,
-	MT_MEMORY,
-	/* Values up to 7 are reserved to add new memory types in the future */
-
-	MT_RO		= 0 << MT_PERM_SHIFT,
-	MT_RW		= 1 << MT_PERM_SHIFT,
-
-	MT_SECURE	= 0 << MT_SEC_SHIFT,
-	MT_NS		= 1 << MT_SEC_SHIFT,
-
-	/*
-	 * Access permissions for instruction execution are only relevant for
-	 * normal read-only memory, i.e. MT_MEMORY | MT_RO. They are ignored
-	 * (and potentially overridden) otherwise:
-	 *  - Device memory is always marked as execute-never.
-	 *  - Read-write normal memory is always marked as execute-never.
-	 */
-	MT_EXECUTE		= 0 << MT_EXECUTE_SHIFT,
-	MT_EXECUTE_NEVER	= 1 << MT_EXECUTE_SHIFT,
-} mmap_attr_t;
-
-#define MT_CODE		(MT_MEMORY | MT_RO | MT_EXECUTE)
-#define MT_RO_DATA	(MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
-
-/*
- * Structure for specifying a single region of memory.
- */
-typedef struct mmap_region {
-	unsigned long long	base_pa;
-	uintptr_t		base_va;
-	size_t			size;
-	mmap_attr_t		attr;
-} mmap_region_t;
-
-/* Generic translation table APIs */
-void init_xlat_tables(void);
-void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
-				size_t size, unsigned int attr);
-void mmap_add(const mmap_region_t *mm);
-
-#ifdef AARCH32
-/* AArch32 specific translation table API */
-void enable_mmu_secure(uint32_t flags);
-#else
-/* AArch64 specific translation table APIs */
-void enable_mmu_el1(unsigned int flags);
-void enable_mmu_el3(unsigned int flags);
-#endif /* AARCH32 */
-
-#endif /*__ASSEMBLY__*/
-#endif /* __XLAT_TABLES_H__ */
diff --git a/include/lib/xlat_tables/xlat_mmu_helpers.h b/include/lib/xlat_tables/xlat_mmu_helpers.h
new file mode 100644
index 0000000..260ef3e
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_mmu_helpers.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XLAT_MMU_HELPERS_H__
+#define __XLAT_MMU_HELPERS_H__
+
+#ifdef AARCH32
+/* AArch32 specific translation table API */
+void enable_mmu_secure(uint32_t flags);
+#else
+/* AArch64 specific translation table APIs */
+void enable_mmu_el1(unsigned int flags);
+void enable_mmu_el3(unsigned int flags);
+#endif /* AARCH32 */
+
+#endif /* __XLAT_MMU_HELPERS_H__ */
diff --git a/include/lib/xlat_tables/xlat_tables.h b/include/lib/xlat_tables/xlat_tables.h
new file mode 100644
index 0000000..4e85503
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XLAT_TABLES_H__
+#define __XLAT_TABLES_H__
+
+#include <xlat_tables_defs.h>
+
+#ifndef __ASSEMBLY__
+#include <stddef.h>
+#include <stdint.h>
+#include <xlat_mmu_helpers.h>
+
+/* Helper macro to define entries for mmap_region_t. It creates
+ * identity mappings for each region.
+ */
+#define MAP_REGION_FLAT(adr, sz, attr) MAP_REGION(adr, adr, sz, attr)
+
+/* Helper macro to define entries for mmap_region_t. It allows
+ * re-mapping addresses from 'pa' to 'va' for each region.
+ */
+#define MAP_REGION(pa, va, sz, attr) {(pa), (va), (sz), (attr)}
+
+/*
+ * Shifts and masks to access fields of an mmap_attr_t
+ */
+#define MT_TYPE_MASK	0x7
+#define MT_TYPE(_attr)	((_attr) & MT_TYPE_MASK)
+/* Access permissions (RO/RW) */
+#define MT_PERM_SHIFT	3
+/* Security state (SECURE/NS) */
+#define MT_SEC_SHIFT	4
+/* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */
+#define MT_EXECUTE_SHIFT	5
+
+/*
+ * Memory mapping attributes
+ */
+typedef enum  {
+	/*
+	 * Memory types supported.
+	 * These are organised so that, going down the list, the memory types
+	 * are getting weaker; conversely going up the list the memory types are
+	 * getting stronger.
+	 */
+	MT_DEVICE,
+	MT_NON_CACHEABLE,
+	MT_MEMORY,
+	/* Values up to 7 are reserved to add new memory types in the future */
+
+	MT_RO		= 0 << MT_PERM_SHIFT,
+	MT_RW		= 1 << MT_PERM_SHIFT,
+
+	MT_SECURE	= 0 << MT_SEC_SHIFT,
+	MT_NS		= 1 << MT_SEC_SHIFT,
+
+	/*
+	 * Access permissions for instruction execution are only relevant for
+	 * normal read-only memory, i.e. MT_MEMORY | MT_RO. They are ignored
+	 * (and potentially overridden) otherwise:
+	 *  - Device memory is always marked as execute-never.
+	 *  - Read-write normal memory is always marked as execute-never.
+	 */
+	MT_EXECUTE		= 0 << MT_EXECUTE_SHIFT,
+	MT_EXECUTE_NEVER	= 1 << MT_EXECUTE_SHIFT,
+} mmap_attr_t;
+
+#define MT_CODE		(MT_MEMORY | MT_RO | MT_EXECUTE)
+#define MT_RO_DATA	(MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
+
+/*
+ * Structure for specifying a single region of memory.
+ */
+typedef struct mmap_region {
+	unsigned long long	base_pa;
+	uintptr_t		base_va;
+	size_t			size;
+	mmap_attr_t		attr;
+} mmap_region_t;
+
+/* Generic translation table APIs */
+void init_xlat_tables(void);
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
+				size_t size, unsigned int attr);
+void mmap_add(const mmap_region_t *mm);
+
+#endif /*__ASSEMBLY__*/
+#endif /* __XLAT_TABLES_H__ */
diff --git a/include/lib/xlat_tables/xlat_tables_defs.h b/include/lib/xlat_tables/xlat_tables_defs.h
new file mode 100644
index 0000000..1f4ae6d
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_defs.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XLAT_TABLES_DEFS_H__
+#define __XLAT_TABLES_DEFS_H__
+
+#include <utils.h>
+
+/* Miscellaneous MMU related constants */
+#define NUM_2MB_IN_GB		(1 << 9)
+#define NUM_4K_IN_2MB		(1 << 9)
+#define NUM_GB_IN_4GB		(1 << 2)
+
+#define TWO_MB_SHIFT		21
+#define ONE_GB_SHIFT		30
+#define FOUR_KB_SHIFT		12
+
+#define ONE_GB_INDEX(x)		((x) >> ONE_GB_SHIFT)
+#define TWO_MB_INDEX(x)		((x) >> TWO_MB_SHIFT)
+#define FOUR_KB_INDEX(x)	((x) >> FOUR_KB_SHIFT)
+
+#define INVALID_DESC		0x0
+#define BLOCK_DESC		0x1 /* Table levels 0-2 */
+#define TABLE_DESC		0x3 /* Table levels 0-2 */
+#define PAGE_DESC		0x3 /* Table level 3 */
+#define DESC_MASK		0x3
+
+#define FIRST_LEVEL_DESC_N	ONE_GB_SHIFT
+#define SECOND_LEVEL_DESC_N	TWO_MB_SHIFT
+#define THIRD_LEVEL_DESC_N	FOUR_KB_SHIFT
+
+#define XN			(ULL(1) << 2)
+#define PXN			(ULL(1) << 1)
+#define CONT_HINT		(ULL(1) << 0)
+#define UPPER_ATTRS(x)		(((x) & ULL(0x7)) << 52)
+
+#define NON_GLOBAL		(1 << 9)
+#define ACCESS_FLAG		(1 << 8)
+#define NSH			(0x0 << 6)
+#define OSH			(0x2 << 6)
+#define ISH			(0x3 << 6)
+
+#define TABLE_ADDR_MASK		ULL(0x0000FFFFFFFFF000)
+
+#define PAGE_SIZE_SHIFT		FOUR_KB_SHIFT /* 4, 16 or 64 KB */
+#define PAGE_SIZE		(1 << PAGE_SIZE_SHIFT)
+#define PAGE_SIZE_MASK		(PAGE_SIZE - 1)
+#define IS_PAGE_ALIGNED(addr)	(((addr) & PAGE_SIZE_MASK) == 0)
+
+#define XLAT_ENTRY_SIZE_SHIFT	3 /* Each MMU table entry is 8 bytes (1 << 3) */
+#define XLAT_ENTRY_SIZE		(1 << XLAT_ENTRY_SIZE_SHIFT)
+
+#define XLAT_TABLE_SIZE_SHIFT	PAGE_SIZE_SHIFT /* Size of one complete table */
+#define XLAT_TABLE_SIZE		(1 << XLAT_TABLE_SIZE_SHIFT)
+
+#ifdef AARCH32
+#define XLAT_TABLE_LEVEL_MIN	1
+#else
+#define XLAT_TABLE_LEVEL_MIN	0
+#endif /* AARCH32 */
+
+#define XLAT_TABLE_LEVEL_MAX	3
+
+/* Values for number of entries in each MMU translation table */
+#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
+#define XLAT_TABLE_ENTRIES	(1 << XLAT_TABLE_ENTRIES_SHIFT)
+#define XLAT_TABLE_ENTRIES_MASK	(XLAT_TABLE_ENTRIES - 1)
+
+/* Values to convert a memory address to an index into a translation table */
+#define L3_XLAT_ADDRESS_SHIFT	PAGE_SIZE_SHIFT
+#define L2_XLAT_ADDRESS_SHIFT	(L3_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define L1_XLAT_ADDRESS_SHIFT	(L2_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define L0_XLAT_ADDRESS_SHIFT	(L1_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define XLAT_ADDR_SHIFT(level)	(PAGE_SIZE_SHIFT + \
+		  ((XLAT_TABLE_LEVEL_MAX - (level)) * XLAT_TABLE_ENTRIES_SHIFT))
+
+#define XLAT_BLOCK_SIZE(level)	((u_register_t)1 << XLAT_ADDR_SHIFT(level))
+/* Mask to get the bits used to index inside a block of a certain level */
+#define XLAT_BLOCK_MASK(level)	(XLAT_BLOCK_SIZE(level) - 1)
+/* Mask to get the address bits common to a block of a certain table level */
+#define XLAT_ADDR_MASK(level)	(~XLAT_BLOCK_MASK(level))
+
+/*
+ * AP[1] bit is ignored by hardware and is
+ * treated as if it is One in EL2/EL3
+ */
+#define AP_RO				(0x1 << 5)
+#define AP_RW				(0x0 << 5)
+
+#define NS				(0x1 << 3)
+#define ATTR_NON_CACHEABLE_INDEX	0x2
+#define ATTR_DEVICE_INDEX		0x1
+#define ATTR_IWBWA_OWBWA_NTR_INDEX	0x0
+#define LOWER_ATTRS(x)			(((x) & 0xfff) << 2)
+/* Normal Memory, Outer Write-Through non-transient, Inner Non-cacheable */
+#define ATTR_NON_CACHEABLE		(0x44)
+/* Device-nGnRE */
+#define ATTR_DEVICE			(0x4)
+/* Normal Memory, Outer Write-Back non-transient, Inner Write-Back non-transient */
+#define ATTR_IWBWA_OWBWA_NTR		(0xff)
+#define MAIR_ATTR_SET(attr, index)	((attr) << ((index) << 3))
+#define ATTR_INDEX_MASK			0x3
+#define ATTR_INDEX_GET(attr)		(((attr) >> 2) & ATTR_INDEX_MASK)
+
+/*
+ * Flags to override default values used to program system registers while
+ * enabling the MMU.
+ */
+#define DISABLE_DCACHE			(1 << 0)
+
+#endif /* __XLAT_TABLES_DEFS_H__ */
diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h
new file mode 100644
index 0000000..16b857c
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XLAT_TABLES_V2_H__
+#define __XLAT_TABLES_V2_H__
+
+#include <xlat_tables_defs.h>
+
+#ifndef __ASSEMBLY__
+#include <stddef.h>
+#include <stdint.h>
+#include <xlat_mmu_helpers.h>
+
+/* Helper macro to define entries for mmap_region_t. It creates
+ * identity mappings for each region.
+ */
+#define MAP_REGION_FLAT(adr, sz, attr) MAP_REGION(adr, adr, sz, attr)
+
+/* Helper macro to define entries for mmap_region_t. It allows
+ * re-mapping addresses from 'pa' to 'va' for each region.
+ */
+#define MAP_REGION(pa, va, sz, attr) {(pa), (va), (sz), (attr)}
+
+/*
+ * Shifts and masks to access fields of an mmap_attr_t
+ */
+#define MT_TYPE_MASK		0x7
+#define MT_TYPE(_attr)		((_attr) & MT_TYPE_MASK)
+/* Access permissions (RO/RW) */
+#define MT_PERM_SHIFT		3
+/* Security state (SECURE/NS) */
+#define MT_SEC_SHIFT		4
+/* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */
+#define MT_EXECUTE_SHIFT	5
+/* All other bits are reserved */
+
+/*
+ * Memory mapping attributes
+ */
+typedef enum  {
+	/*
+	 * Memory types supported.
+	 * These are organised so that, going down the list, the memory types
+	 * are getting weaker; conversely going up the list the memory types are
+	 * getting stronger.
+	 */
+	MT_DEVICE,
+	MT_NON_CACHEABLE,
+	MT_MEMORY,
+	/* Values up to 7 are reserved to add new memory types in the future */
+
+	MT_RO		= 0 << MT_PERM_SHIFT,
+	MT_RW		= 1 << MT_PERM_SHIFT,
+
+	MT_SECURE	= 0 << MT_SEC_SHIFT,
+	MT_NS		= 1 << MT_SEC_SHIFT,
+
+	/*
+	 * Access permissions for instruction execution are only relevant for
+	 * normal read-only memory, i.e. MT_MEMORY | MT_RO. They are ignored
+	 * (and potentially overridden) otherwise:
+	 *  - Device memory is always marked as execute-never.
+	 *  - Read-write normal memory is always marked as execute-never.
+	 */
+	MT_EXECUTE		= 0 << MT_EXECUTE_SHIFT,
+	MT_EXECUTE_NEVER	= 1 << MT_EXECUTE_SHIFT,
+} mmap_attr_t;
+
+#define MT_CODE		(MT_MEMORY | MT_RO | MT_EXECUTE)
+#define MT_RO_DATA	(MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
+
+/*
+ * Structure for specifying a single region of memory.
+ */
+typedef struct mmap_region {
+	unsigned long long	base_pa;
+	uintptr_t		base_va;
+	size_t			size;
+	mmap_attr_t		attr;
+} mmap_region_t;
+
+/* Generic translation table APIs */
+void init_xlat_tables(void);
+
+/*
+ * Add a region with defined base PA and base VA. This type of region can only
+ * be added before initializing the MMU and cannot be removed later.
+ */
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
+				size_t size, unsigned int attr);
+
+/*
+ * Add a region with defined base PA and base VA. This type of region can be
+ * added and removed even if the MMU is enabled.
+ *
+ * Returns:
+ *        0: Success.
+ *   EINVAL: Invalid values were used as arguments.
+ *   ERANGE: Memory limits were surpassed.
+ *   ENOMEM: Not enough space in the mmap array or not enough free xlat tables.
+ *    EPERM: It overlaps another region in an invalid way.
+ */
+int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
+				size_t size, unsigned int attr);
+
+/*
+ * Add an array of static regions with defined base PA and base VA. This type
+ * of region can only be added before initializing the MMU and cannot be
+ * removed later.
+ */
+void mmap_add(const mmap_region_t *mm);
+
+/*
+ * Remove a region with the specified base VA and size. Only dynamic regions can
+ * be removed, and they can be removed even if the MMU is enabled.
+ *
+ * Returns:
+ *        0: Success.
+ *   EINVAL: The specified region wasn't found.
+ *    EPERM: Trying to remove a static region.
+ */
+int mmap_remove_dynamic_region(uintptr_t base_va, size_t size);
+
+#endif /*__ASSEMBLY__*/
+#endif /* __XLAT_TABLES_V2_H__ */
diff --git a/include/plat/arm/board/common/board_arm_def.h b/include/plat/arm/board/common/board_arm_def.h
index dee868f..f715055 100644
--- a/include/plat/arm/board/common/board_arm_def.h
+++ b/include/plat/arm/board/common/board_arm_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -72,6 +72,9 @@
  * Provide relatively optimised values for the runtime images (BL31 and BL32).
  * Optimisation is less important for the other, transient boot images so a
  * common, maximum value is used across these images.
+ *
+ * They are also used for the dynamically mapped regions in the images that
+ * enable dynamic memory mapping.
  */
 #if defined(IMAGE_BL31) || defined(IMAGE_BL32)
 # define PLAT_ARM_MMAP_ENTRIES		6
diff --git a/include/plat/arm/board/common/v2m_def.h b/include/plat/arm/board/common/v2m_def.h
index 7cee4e8..aaa96f3 100644
--- a/include/plat/arm/board/common/v2m_def.h
+++ b/include/plat/arm/board/common/v2m_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -30,7 +30,7 @@
 #ifndef __V2M_DEF_H__
 #define __V2M_DEF_H__
 
-#include <xlat_tables.h>
+#include <xlat_tables_v2.h>
 
 
 /* V2M motherboard system registers & offsets */
diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h
index 4d26444..8ce718a 100644
--- a/include/plat/arm/common/arm_def.h
+++ b/include/plat/arm/common/arm_def.h
@@ -34,7 +34,7 @@
 #include <common_def.h>
 #include <platform_def.h>
 #include <tbbr_img_def.h>
-#include <xlat_tables.h>
+#include <xlat_tables_defs.h>
 
 
 /******************************************************************************
diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h
index ccdfd41..b24af78 100644
--- a/include/plat/arm/common/plat_arm.h
+++ b/include/plat/arm/common/plat_arm.h
@@ -35,7 +35,7 @@
 #include <cpu_data.h>
 #include <stdint.h>
 #include <utils.h>
-#include <xlat_tables.h>
+#include <xlat_tables_v2.h>
 
 /*******************************************************************************
  * Forward declarations
@@ -64,7 +64,7 @@
 #endif
 );
 
-#ifdef IMAGE_BL31
+#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
 /*
  * Use this macro to instantiate lock before it is used in below
  * arm_lock_xxx() macros
@@ -88,7 +88,7 @@
 #define arm_lock_get()
 #define arm_lock_release()
 
-#endif /* IMAGE_BL31 */
+#endif /* defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32)) */
 
 #if ARM_RECOM_STATE_ID_ENC
 /*
diff --git a/include/plat/arm/soc/common/soc_css_def.h b/include/plat/arm/soc/common/soc_css_def.h
index 316f8f9..e83144e 100644
--- a/include/plat/arm/soc/common/soc_css_def.h
+++ b/include/plat/arm/soc/common/soc_css_def.h
@@ -32,7 +32,6 @@
 #define __SOC_CSS_DEF_H__
 
 #include <common_def.h>
-#include <xlat_tables.h>
 
 
 /*
diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S
index 1dd8a86..a36666b 100644
--- a/lib/cpus/aarch64/cortex_a53.S
+++ b/lib/cpus/aarch64/cortex_a53.S
@@ -129,6 +129,39 @@
 	b	cpu_rev_var_ls
 endfunc check_errata_disable_non_temporal_hint
 
+	/* --------------------------------------------------
+	 * Errata Workaround for Cortex A53 Errata #855873.
+	 *
+	 * This applies only to revisions >= r0p3 of Cortex A53.
+	 * Earlier revisions of the core are affected as well, but don't
+	 * have the chicken bit in the CPUACTLR register. It is expected that
+	 * the rich OS takes care of that, especially as the workaround is
+	 * shared with other errata in those revisions of the CPU.
+	 * Inputs:
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x17
+	 * --------------------------------------------------
+	 */
+func errata_a53_855873_wa
+	/*
+	 * Compare x0 against revision r0p3 and higher
+	 */
+        mov     x17, x30
+        bl      check_errata_855873
+        cbz     x0, 1f
+
+	mrs	x1, CPUACTLR_EL1
+	orr	x1, x1, #CPUACTLR_ENDCCASCI
+	msr	CPUACTLR_EL1, x1
+1:
+	ret	x17
+endfunc errata_a53_855873_wa
+
+func check_errata_855873
+	mov	x1, #0x03
+	b	cpu_rev_var_hs
+endfunc check_errata_855873
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A53.
 	 * Shall clobber: x0-x19
@@ -150,6 +183,11 @@
 	bl	a53_disable_non_temporal_hint
 #endif
 
+#if ERRATA_A53_855873
+	mov	x0, x18
+	bl	errata_a53_855873_wa
+#endif
+
 	/* ---------------------------------------------
 	 * Enable the SMP bit.
 	 * ---------------------------------------------
@@ -238,6 +276,7 @@
 	 */
 	report_errata ERRATA_A53_826319, cortex_a53, 826319
 	report_errata ERRATA_A53_836870, cortex_a53, disable_non_temporal_hint
+	report_errata ERRATA_A53_855873, cortex_a53, 855873
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -255,13 +294,15 @@
 	 */
 .section .rodata.cortex_a53_regs, "aS"
 cortex_a53_regs:  /* The ascii list of register names to be reported */
-	.asciz	"cpuectlr_el1", "cpumerrsr_el1", "l2merrsr_el1", ""
+	.asciz	"cpuectlr_el1", "cpumerrsr_el1", "l2merrsr_el1", \
+		"cpuactlr_el1", ""
 
 func cortex_a53_cpu_reg_dump
 	adr	x6, cortex_a53_regs
 	mrs	x8, CPUECTLR_EL1
 	mrs	x9, CPUMERRSR_EL1
 	mrs	x10, L2MERRSR_EL1
+	mrs	x11, CPUACTLR_EL1
 	ret
 endfunc cortex_a53_cpu_reg_dump
 
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index ffdc930..a29e849 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -115,6 +115,21 @@
 endfunc check_errata_806969
 
 	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #813419.
+	 * This applies only to revision r0p0 of Cortex A57.
+	 * ---------------------------------------------------
+	 */
+func check_errata_813419
+	/*
+	 * Even though this is only needed for revision r0p0, it
+	 * is always applied due to limitations of the current
+	 * errata framework.
+	 */
+	mov	x0, #ERRATA_APPLIES
+	ret
+endfunc check_errata_813419
+
+	/* ---------------------------------------------------
 	 * Errata Workaround for Cortex A57 Errata #813420.
 	 * This applies only to revision r0p0 of Cortex A57.
 	 * Inputs:
@@ -482,6 +497,7 @@
 	 * checking functions of each errata.
 	 */
 	report_errata ERRATA_A57_806969, cortex_a57, 806969
+	report_errata ERRATA_A57_813419, cortex_a57, 813419
 	report_errata ERRATA_A57_813420, cortex_a57, 813420
 	report_errata A57_DISABLE_NON_TEMPORAL_HINT, cortex_a57, \
 		disable_ldnp_overread
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 6a3669d..47cb6a2 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -234,6 +234,20 @@
 	ret
 endfunc cpu_rev_var_ls
 
+/*
+ * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
+ * application purposes. If the revision-variant is higher than or equal to the
+ * given value, the erratum applies; otherwise it does not.
+ */
+	.globl	cpu_rev_var_hs
+func cpu_rev_var_hs
+	mov	x2, #ERRATA_APPLIES
+	mov	x3, #ERRATA_NOT_APPLIES
+	cmp	x0, x1
+	csel	x0, x2, x3, hs
+	ret
+endfunc cpu_rev_var_hs
+
 #if REPORT_ERRATA
 /*
  * void print_errata_status(void);
diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S
index 3e238a1..fcfae8f 100644
--- a/lib/cpus/aarch64/denver.S
+++ b/lib/cpus/aarch64/denver.S
@@ -59,7 +59,6 @@
 	mov	x1, #1
 	lsl	x1, x1, x0
 	msr	s3_0_c15_c0_2, x1
-	isb
 	ret
 endfunc denver_enable_dco
 
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 0659bff..132ab6f 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -63,13 +63,23 @@
 
 # Flag to apply erratum 836870 workaround during reset. This erratum applies
 # only to revision <= r0p3 of the Cortex A53 cpu. From r0p4 and onwards, this
-# erratum workaround is enabled by default.
+# erratum workaround is enabled by default in hardware.
 ERRATA_A53_836870	?=0
 
+# Flag to apply erratum 855873 workaround during reset. This erratum applies
+# to all revisions of the Cortex A53 CPU, but this firmware workaround only
+# works for revisions r0p3 and higher. Earlier revisions are taken care
+# of by the rich OS.
+ERRATA_A53_855873	?=0
+
 # Flag to apply erratum 806969 workaround during reset. This erratum applies
 # only to revision r0p0 of the Cortex A57 cpu.
 ERRATA_A57_806969	?=0
 
+# Flag to apply erratum 813419 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A57 cpu.
+ERRATA_A57_813419	?=0
+
 # Flag to apply erratum 813420  workaround during reset. This erratum applies
 # only to revision r0p0 of the Cortex A57 cpu.
 ERRATA_A57_813420	?=0
@@ -102,10 +112,18 @@
 $(eval $(call assert_boolean,ERRATA_A53_836870))
 $(eval $(call add_define,ERRATA_A53_836870))
 
+# Process ERRATA_A53_855873 flag
+$(eval $(call assert_boolean,ERRATA_A53_855873))
+$(eval $(call add_define,ERRATA_A53_855873))
+
 # Process ERRATA_A57_806969 flag
 $(eval $(call assert_boolean,ERRATA_A57_806969))
 $(eval $(call add_define,ERRATA_A57_806969))
 
+# Process ERRATA_A57_813419 flag
+$(eval $(call assert_boolean,ERRATA_A57_813419))
+$(eval $(call add_define,ERRATA_A57_813419))
+
 # Process ERRATA_A57_813420 flag
 $(eval $(call assert_boolean,ERRATA_A57_813420))
 $(eval $(call add_define,ERRATA_A57_813420))
diff --git a/lib/locks/bakery/bakery_lock_normal.c b/lib/locks/bakery/bakery_lock_normal.c
index 5a2fb07..a3a6c00 100644
--- a/lib/locks/bakery/bakery_lock_normal.c
+++ b/lib/locks/bakery/bakery_lock_normal.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -178,8 +178,11 @@
 	unsigned int their_bakery_data;
 
 	me = plat_my_core_pos();
-
+#ifdef AARCH32
+	is_cached = read_sctlr() & SCTLR_C_BIT;
+#else
 	is_cached = read_sctlr_el3() & SCTLR_C_BIT;
+#endif
 
 	/* Get a ticket */
 	my_ticket = bakery_get_ticket(lock, me, is_cached);
@@ -231,7 +234,11 @@
 void bakery_lock_release(bakery_lock_t *lock)
 {
 	bakery_info_t *my_bakery_info;
+#ifdef AARCH32
+	unsigned int is_cached = read_sctlr() & SCTLR_C_BIT;
+#else
 	unsigned int is_cached = read_sctlr_el3() & SCTLR_C_BIT;
+#endif
 
 	my_bakery_info = get_bakery_info(plat_my_core_pos(), lock);
 
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index 9fdce49..1be37c0 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -79,7 +79,8 @@
 #endif
 ;
 
-DEFINE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
+/* Lock for PSCI state coordination */
+DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
 
 cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
 
@@ -247,6 +248,50 @@
 	return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
 }
 
+/*
+ * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
+ * memory.
+ *
+ * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory,
+ * it's accessed by both cached and non-cached participants. To serve the common
+ * minimum, perform a cache flush before read and after write so that non-cached
+ * participants operate on latest data in main memory.
+ *
+ * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
+ * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
+ * In both cases, no cache operations are required.
+ */
+
+/*
+ * Retrieve local state of non-CPU power domain node from a non-cached CPU,
+ * after any required cache maintenance operation.
+ */
+static plat_local_state_t get_non_cpu_pd_node_local_state(
+		unsigned int parent_idx)
+{
+	/* Flush only when no participant is guaranteed cache-coherent */
+#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY)
+	flush_dcache_range(
+			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
+			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+	return psci_non_cpu_pd_nodes[parent_idx].local_state;
+}
+
+/*
+ * Update local state of non-CPU power domain node from a cached CPU; perform
+ * any required cache maintenance operation afterwards.
+ */
+static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
+		plat_local_state_t state)
+{
+	psci_non_cpu_pd_nodes[parent_idx].local_state = state;
+	/* Flush only when no participant is guaranteed cache-coherent */
+#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY)
+	flush_dcache_range(
+			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
+			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+}
+
 /******************************************************************************
  * Helper function to return the current local power state of each power domain
  * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
@@ -264,18 +309,7 @@
 
 	/* Copy the local power state from node to state_info */
 	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
-#if !USE_COHERENT_MEM
-		/*
-		 * If using normal memory for psci_non_cpu_pd_nodes, we need
-		 * to flush before reading the local power state as another
-		 * cpu in the same power domain could have updated it and this
-		 * code runs before caches are enabled.
-		 */
-		flush_dcache_range(
-				(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
-				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
-#endif
-		pd_state[lvl] =	psci_non_cpu_pd_nodes[parent_idx].local_state;
+		pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
 	}
 
@@ -299,21 +333,16 @@
 	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
 
 	/*
-	 * Need to flush as local_state will be accessed with Data Cache
+	 * Need to flush as local_state might be accessed with Data Cache
 	 * disabled during power on
 	 */
-	flush_cpu_data(psci_svc_cpu_data.local_state);
+	psci_flush_cpu_data(psci_svc_cpu_data.local_state);
 
 	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
 
 	/* Copy the local_state from state_info */
 	for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
-		psci_non_cpu_pd_nodes[parent_idx].local_state =	pd_state[lvl];
-#if !USE_COHERENT_MEM
-		flush_dcache_range(
-				(uintptr_t)&psci_non_cpu_pd_nodes[parent_idx],
-				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
-#endif
+		set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
 	}
 }
@@ -347,13 +376,8 @@
 
 	/* Reset the local_state to RUN for the non cpu power domains. */
 	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
-		psci_non_cpu_pd_nodes[parent_idx].local_state =
-				PSCI_LOCAL_STATE_RUN;
-#if !USE_COHERENT_MEM
-		flush_dcache_range(
-				(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
-				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
-#endif
+		set_non_cpu_pd_node_local_state(parent_idx,
+				PSCI_LOCAL_STATE_RUN);
 		psci_set_req_local_pwr_state(lvl,
 					     cpu_idx,
 					     PSCI_LOCAL_STATE_RUN);
@@ -364,7 +388,7 @@
 	psci_set_aff_info_state(AFF_STATE_ON);
 
 	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
-	flush_cpu_data(psci_svc_cpu_data);
+	psci_flush_cpu_data(psci_svc_cpu_data);
 }
 
 /******************************************************************************
@@ -969,3 +993,33 @@
 }
 
 #endif
+
+/*******************************************************************************
+ * Initiate power down sequence, by calling power down operations registered for
+ * this CPU.
+ ******************************************************************************/
+void psci_do_pwrdown_sequence(unsigned int power_level)
+{
+#if HW_ASSISTED_COHERENCY
+	/*
+	 * With hardware-assisted coherency, the CPU drivers only initiate the
+	 * power down sequence, without performing cache-maintenance operations
+	 * in software. Data caches and MMU remain enabled both before and after
+	 * this call.
+	 */
+	prepare_cpu_pwr_dwn(power_level);
+#else
+	/*
+	 * Without hardware-assisted coherency, the CPU drivers disable data
+	 * caches and MMU, then perform cache-maintenance operations in
+	 * software.
+	 *
+	 * We ought to call prepare_cpu_pwr_dwn() to initiate power down
+	 * sequence. We currently have data caches and MMU enabled, but the
+	 * function will return with data caches and MMU disabled. We must
+	 * ensure that the stack memory is flushed out to memory before we start
+	 * popping from it again.
+	 */
+	psci_do_pwrdown_cache_maintenance(power_level);
+#endif
+}
diff --git a/lib/psci/psci_off.c b/lib/psci/psci_off.c
index 394aaa3..4ba7865 100644
--- a/lib/psci/psci_off.c
+++ b/lib/psci/psci_off.c
@@ -119,10 +119,9 @@
 #endif
 
 	/*
-	 * Arch. management. Perform the necessary steps to flush all
-	 * cpu caches.
+	 * Arch. management. Initiate power down sequence.
 	 */
-	psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info));
+	psci_do_pwrdown_sequence(psci_find_max_off_lvl(&state_info));
 
 #if ENABLE_RUNTIME_INSTRUMENTATION
 	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
@@ -154,17 +153,17 @@
 	 */
 	if (rc == PSCI_E_SUCCESS) {
 		/*
-		 * Set the affinity info state to OFF. This writes directly to
-		 * main memory as caches are disabled, so cache maintenance is
+		 * Set the affinity info state to OFF. When caches are disabled,
+		 * this writes directly to main memory, so cache maintenance is
 		 * required to ensure that later cached reads of aff_info_state
-		 * return AFF_STATE_OFF.  A dsbish() ensures ordering of the
+		 * return AFF_STATE_OFF. A dsbish() ensures ordering of the
 		 * update to the affinity info state prior to cache line
 		 * invalidation.
 		 */
-		flush_cpu_data(psci_svc_cpu_data.aff_info_state);
+		psci_flush_cpu_data(psci_svc_cpu_data.aff_info_state);
 		psci_set_aff_info_state(AFF_STATE_OFF);
-		dsbish();
-		inv_cpu_data(psci_svc_cpu_data.aff_info_state);
+		psci_dsbish();
+		psci_inv_cpu_data(psci_svc_cpu_data.aff_info_state);
 
 #if ENABLE_RUNTIME_INSTRUMENTATION
 
diff --git a/lib/psci/psci_on.c b/lib/psci/psci_on.c
index f4bb797..675ed66 100644
--- a/lib/psci/psci_on.c
+++ b/lib/psci/psci_on.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -165,10 +165,12 @@
 	 */
 	psci_plat_pm_ops->pwr_domain_on_finish(state_info);
 
+#if !HW_ASSISTED_COHERENCY
 	/*
 	 * Arch. management: Enable data cache and manage stack memory
 	 */
 	psci_do_pwrup_cache_maintenance();
+#endif
 
 	/*
 	 * All the platform specific actions for turning this cpu
diff --git a/lib/psci/psci_private.h b/lib/psci/psci_private.h
index ca8291e..a27e215 100644
--- a/lib/psci/psci_private.h
+++ b/lib/psci/psci_private.h
@@ -38,17 +38,60 @@
 #include <psci.h>
 #include <spinlock.h>
 
+#if HW_ASSISTED_COHERENCY
+
 /*
- * The following helper macros abstract the interface to the Bakery
- * Lock API.
+ * On systems with hardware-assisted coherency, make PSCI cache operations NOP,
+ * as PSCI participants are cache-coherent, and there's no need for explicit
+ * cache maintenance operations or barriers to coordinate their state.
  */
-#define psci_lock_init(non_cpu_pd_node, idx)			\
-	((non_cpu_pd_node)[(idx)].lock_index = (idx))
+#define psci_flush_dcache_range(addr, size)
+#define psci_flush_cpu_data(member)
+#define psci_inv_cpu_data(member)
+
+#define psci_dsbish()
+
+/*
+ * On systems where participant CPUs are cache-coherent, we can use spinlocks
+ * instead of bakery locks.
+ */
+#define DEFINE_PSCI_LOCK(_name)		spinlock_t _name
+#define DECLARE_PSCI_LOCK(_name)	extern DEFINE_PSCI_LOCK(_name)
+
+#define psci_lock_get(non_cpu_pd_node)				\
+	spin_lock(&psci_locks[(non_cpu_pd_node)->lock_index])
+#define psci_lock_release(non_cpu_pd_node)			\
+	spin_unlock(&psci_locks[(non_cpu_pd_node)->lock_index])
+
+#else
+
+/*
+ * If not all PSCI participants are cache-coherent, perform cache maintenance
+ * and issue barriers wherever required to coordinate state.
+ */
+#define psci_flush_dcache_range(addr, size)	flush_dcache_range(addr, size)
+#define psci_flush_cpu_data(member)		flush_cpu_data(member)
+#define psci_inv_cpu_data(member)		inv_cpu_data(member)
+
+#define psci_dsbish()				dsbish()
+
+/*
+ * Use bakery locks for state coordination as not all PSCI participants are
+ * cache coherent.
+ */
+#define DEFINE_PSCI_LOCK(_name)		DEFINE_BAKERY_LOCK(_name)
+#define DECLARE_PSCI_LOCK(_name)	DECLARE_BAKERY_LOCK(_name)
+
 #define psci_lock_get(non_cpu_pd_node)				\
 	bakery_lock_get(&psci_locks[(non_cpu_pd_node)->lock_index])
 #define psci_lock_release(non_cpu_pd_node)			\
 	bakery_lock_release(&psci_locks[(non_cpu_pd_node)->lock_index])
 
+#endif
+
+#define psci_lock_init(non_cpu_pd_node, idx)			\
+	((non_cpu_pd_node)[(idx)].lock_index = (idx))
+
 /*
  * The PSCI capability which are provided by the generic code but does not
  * depend on the platform or spd capabilities.
@@ -166,8 +209,8 @@
 extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
 extern unsigned int psci_caps;
 
-/* One bakery lock is required for each non-cpu power domain */
-DECLARE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
+/* One lock is required per non-CPU power domain node */
+DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
 
 /*******************************************************************************
  * SPD's power management hooks registered with PSCI
@@ -204,6 +247,14 @@
 void psci_print_power_domain_map(void);
 unsigned int psci_is_last_on_cpu(void);
 int psci_spd_migrate_info(u_register_t *mpidr);
+void psci_do_pwrdown_sequence(unsigned int power_level);
+
+/*
+ * CPU power down is directly called only when HW_ASSISTED_COHERENCY is
+ * available. Otherwise, this needs post-call stack maintenance, which is
+ * handled in assembly.
+ */
+void prepare_cpu_pwr_dwn(unsigned int power_level);
 
 /* Private exported functions from psci_on.c */
 int psci_cpu_on_start(u_register_t target_cpu,
diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c
index 7327b92..323dc62 100644
--- a/lib/psci/psci_setup.c
+++ b/lib/psci/psci_setup.c
@@ -86,7 +86,7 @@
 		/* Set the power state to OFF state */
 		svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
 
-		flush_dcache_range((uintptr_t)svc_cpu_data,
+		psci_flush_dcache_range((uintptr_t)svc_cpu_data,
 						 sizeof(*svc_cpu_data));
 
 		cm_set_context_by_index(node_idx,
@@ -242,9 +242,9 @@
 
 	/*
 	 * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
-	 * during warm boot before data cache is enabled.
+	 * during warm boot, possibly before data cache is enabled.
 	 */
-	flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
+	psci_flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
 					sizeof(psci_plat_pm_ops));
 
 	/* Initialize the psci capability */
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
index 302116b..08c8fd6 100644
--- a/lib/psci/psci_suspend.c
+++ b/lib/psci/psci_suspend.c
@@ -91,10 +91,10 @@
 	psci_set_suspend_pwrlvl(end_pwrlvl);
 
 	/*
-	 * Flush the target power level as it will be accessed on power up with
+	 * Flush the target power level as it might be accessed on power up with
 	 * Data cache disabled.
 	 */
-	flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);
+	psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);
 
 	/*
 	 * Call the cpu suspend handler registered by the Secure Payload
@@ -121,13 +121,11 @@
 #endif
 
 	/*
-	 * Arch. management. Perform the necessary steps to flush all
-	 * cpu caches. Currently we assume that the power level correspond
-	 * the cache level.
+	 * Arch. management. Initiate power down sequence.
 	 * TODO : Introduce a mechanism to query the cache level to flush
 	 * and the cpu-ops power down to perform from the platform.
 	 */
-	psci_do_pwrdown_cache_maintenance(max_off_lvl);
+	psci_do_pwrdown_sequence(max_off_lvl);
 
 #if ENABLE_RUNTIME_INSTRUMENTATION
 	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
@@ -304,12 +302,10 @@
 	 */
 	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
 
-	/*
-	 * Arch. management: Enable the data cache, manage stack memory and
-	 * restore the stashed EL3 architectural context from the 'cpu_context'
-	 * structure for this cpu.
-	 */
+#if !HW_ASSISTED_COHERENCY
+	/* Arch. management: Enable the data cache, stack memory maintenance. */
 	psci_do_pwrup_cache_maintenance();
+#endif
 
 	/* Re-init the cntfrq_el0 register */
 	counter_freq = plat_get_syscnt_freq2();
diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c
index a168636..af12b9f 100644
--- a/lib/xlat_tables/aarch64/xlat_tables.c
+++ b/lib/xlat_tables/aarch64/xlat_tables.c
@@ -208,7 +208,7 @@
 		/* into memory, the TLB invalidation is complete, */	\
 		/* and translation register writes are committed */	\
 		/* before enabling the MMU */				\
-		dsb();							\
+		dsbish();						\
 		isb();							\
 									\
 		sctlr = read_sctlr_el##_el();				\
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
new file mode 100644
index 0000000..7de9030
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cassert.h>
+#include <platform_def.h>
+#include <utils.h>
+#include <xlat_tables_v2.h>
+#include "../xlat_tables_private.h"
+
+#if DEBUG
+static unsigned long long xlat_arch_get_max_supported_pa(void)
+{
+	/* Physical address space size for long descriptor format. */
+	return (1ull << 40) - 1ull;
+}
+#endif /* DEBUG */
+
+int is_mmu_enabled(void)
+{
+	return (read_sctlr() & SCTLR_M_BIT) != 0;
+}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+void xlat_arch_tlbi_va(uintptr_t va)
+{
+	/*
+	 * Ensure the translation table write has drained into memory before
+	 * invalidating the TLB entry.
+	 */
+	dsbishst();
+
+	tlbimvaais(TLBI_ADDR(va));
+}
+
+void xlat_arch_tlbi_va_sync(void)
+{
+	/* Invalidate all entries from branch predictors. */
+	bpiallis();
+
+	/*
+	 * A TLB maintenance instruction can complete at any time after
+	 * it is issued, but is only guaranteed to be complete after the
+	 * execution of DSB by the PE that executed the TLB maintenance
+	 * instruction. After the TLB invalidate instruction is
+	 * complete, no new memory accesses using the invalidated TLB
+	 * entries will be observed by any observer of the system
+	 * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
+	 * "Ordering and completion of TLB maintenance instructions".
+	 */
+	dsbish();
+
+	/*
+	 * The effects of a completed TLB maintenance instruction are
+	 * only guaranteed to be visible on the PE that executed the
+	 * instruction after the execution of an ISB instruction by the
+	 * PE that executed the TLB maintenance instruction.
+	 */
+	isb();
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+void init_xlat_tables_arch(unsigned long long max_pa)
+{
+	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
+	       xlat_arch_get_max_supported_pa());
+}
+
+/*******************************************************************************
+ * Function for enabling the MMU in Secure PL1, assuming that the
+ * page-tables have already been created.
+ ******************************************************************************/
+void enable_mmu_internal_secure(unsigned int flags, uint64_t *base_table)
+
+{
+	u_register_t mair0, ttbcr, sctlr;
+	uint64_t ttbr0;
+
+	assert(IS_IN_SECURE());
+	assert((read_sctlr() & SCTLR_M_BIT) == 0);
+
+	/* Invalidate TLBs at the current exception level */
+	tlbiall();
+
+	/* Set attributes in the right indices of the MAIR */
+	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
+			ATTR_IWBWA_OWBWA_NTR_INDEX);
+	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
+			ATTR_NON_CACHEABLE_INDEX);
+	write_mair0(mair0);
+
+	/*
+	 * Set TTBCR bits as well. Set TTBR0 table properties as Inner
+	 * & outer WBWA & shareable. Disable TTBR1.
+	 */
+	ttbcr = TTBCR_EAE_BIT |
+		TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
+		TTBCR_RGN0_INNER_WBA |
+		(32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE));
+	ttbcr |= TTBCR_EPD1_BIT;
+	write_ttbcr(ttbcr);
+
+	/* Set TTBR0 bits as well */
+	ttbr0 = (uint64_t)(uintptr_t) base_table;
+	write64_ttbr0(ttbr0);
+	write64_ttbr1(0);
+
+	/*
+	 * Ensure all translation table writes have drained
+	 * into memory, the TLB invalidation is complete,
+	 * and translation register writes are committed
+	 * before enabling the MMU
+	 */
+	dsb();
+	isb();
+
+	sctlr = read_sctlr();
+	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
+
+	if (flags & DISABLE_DCACHE)
+		sctlr &= ~SCTLR_C_BIT;
+	else
+		sctlr |= SCTLR_C_BIT;
+
+	write_sctlr(sctlr);
+
+	/* Ensure the MMU enable takes effect immediately */
+	isb();
+}
+
+void enable_mmu_arch(unsigned int flags, uint64_t *base_table)
+{
+	enable_mmu_internal_secure(flags, base_table);
+}
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.h b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.h
new file mode 100644
index 0000000..070877b
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XLAT_TABLES_ARCH_H__
+#define __XLAT_TABLES_ARCH_H__
+
+#include <arch.h>
+#include <platform_def.h>
+#include <xlat_tables_defs.h>
+
+/*
+ * In AArch32 state, the MMU only supports 4KB page granularity, which means
+ * that the first translation table level is either 1 or 2. Both of them are
+ * allowed to have block and table descriptors. See section G4.5.6 of the
+ * ARMv8-A Architecture Reference Manual (DDI 0487A.k) for more information.
+ *
+ * The define below specifies the first table level that allows block
+ * descriptors.
+ */
+
+#define MIN_LVL_BLOCK_DESC 1
+
+/*
+ * Each platform can define the size of the virtual address space, which is
+ * defined in PLAT_VIRT_ADDR_SPACE_SIZE. TTBCR.TxSZ is calculated as 32 minus
+ * the width of said address space. The value of TTBCR.TxSZ must be in the
+ * range 0 to 7 [1], which means that the virtual address space width must be
+ * in the range 32 to 25 bits.
+ *
+ * Here we calculate the initial lookup level from the value of
+ * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 1 supports virtual
+ * address spaces of widths 32 to 31 bits, and level 2 from 30 to 25. Wider or
+ * narrower address spaces are not supported. As a result, level 3 cannot be
+ * used as initial lookup level with 4 KB granularity [1].
+ *
+ * For example, for a 31-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
+ * 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table
+ * G4-5 in the ARM ARM, the initial lookup level for an address space like that
+ * is 1.
+ *
+ * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information:
+ * [1] Section G4.6.5
+ */
+
+#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (32 - TTBCR_TxSZ_MIN))
+
+# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."
+
+#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
+
+# define XLAT_TABLE_LEVEL_BASE	1
+# define NUM_BASE_LEVEL_ENTRIES	\
+		(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
+
+#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (32 - TTBCR_TxSZ_MAX))
+
+# define XLAT_TABLE_LEVEL_BASE	2
+# define NUM_BASE_LEVEL_ENTRIES	\
+		(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
+
+#else
+
+# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."
+
+#endif
+
+#endif /* __XLAT_TABLES_ARCH_H__ */
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
new file mode 100644
index 0000000..235fa44
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <cassert.h>
+#include <common_def.h>
+#include <platform_def.h>
+#include <sys/types.h>
+#include <utils.h>
+#include <xlat_tables_v2.h>
+#include "../xlat_tables_private.h"
+
+#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
+# define IMAGE_EL	3
+#else
+# define IMAGE_EL	1
+#endif
+
+static unsigned long long tcr_ps_bits;
+
+static unsigned long long calc_physical_addr_size_bits(
+					unsigned long long max_addr)
+{
+	/* Physical address can't exceed 48 bits */
+	assert((max_addr & ADDR_MASK_48_TO_63) == 0);
+
+	/* 48 bits address */
+	if (max_addr & ADDR_MASK_44_TO_47)
+		return TCR_PS_BITS_256TB;
+
+	/* 44 bits address */
+	if (max_addr & ADDR_MASK_42_TO_43)
+		return TCR_PS_BITS_16TB;
+
+	/* 42 bits address */
+	if (max_addr & ADDR_MASK_40_TO_41)
+		return TCR_PS_BITS_4TB;
+
+	/* 40 bits address */
+	if (max_addr & ADDR_MASK_36_TO_39)
+		return TCR_PS_BITS_1TB;
+
+	/* 36 bits address */
+	if (max_addr & ADDR_MASK_32_TO_35)
+		return TCR_PS_BITS_64GB;
+
+	return TCR_PS_BITS_4GB;
+}
+
+#if DEBUG
+/* Physical Address ranges supported in the AArch64 Memory Model */
+static const unsigned int pa_range_bits_arr[] = {
+	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
+	PARANGE_0101
+};
+
+unsigned long long xlat_arch_get_max_supported_pa(void)
+{
+	u_register_t pa_range = read_id_aa64mmfr0_el1() &
+						ID_AA64MMFR0_EL1_PARANGE_MASK;
+
+	/* All other values are reserved */
+	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));
+
+	return (1ull << pa_range_bits_arr[pa_range]) - 1ull;
+}
+#endif /* DEBUG */
+
+int is_mmu_enabled(void)
+{
+#if IMAGE_EL == 1
+	assert(IS_IN_EL(1));
+	return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
+#elif IMAGE_EL == 3
+	assert(IS_IN_EL(3));
+	return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
+#endif
+}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+void xlat_arch_tlbi_va(uintptr_t va)
+{
+	/*
+	 * Ensure the translation table write has drained into memory before
+	 * invalidating the TLB entry.
+	 */
+	dsbishst();
+
+#if IMAGE_EL == 1
+	assert(IS_IN_EL(1));
+	tlbivaae1is(TLBI_ADDR(va));
+#elif IMAGE_EL == 3
+	assert(IS_IN_EL(3));
+	tlbivae3is(TLBI_ADDR(va));
+#endif
+}
+
+void xlat_arch_tlbi_va_sync(void)
+{
+	/*
+	 * A TLB maintenance instruction can complete at any time after
+	 * it is issued, but is only guaranteed to be complete after the
+	 * execution of DSB by the PE that executed the TLB maintenance
+	 * instruction. After the TLB invalidate instruction is
+	 * complete, no new memory accesses using the invalidated TLB
+	 * entries will be observed by any observer of the system
+	 * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
+	 * "Ordering and completion of TLB maintenance instructions".
+	 */
+	dsbish();
+
+	/*
+	 * The effects of a completed TLB maintenance instruction are
+	 * only guaranteed to be visible on the PE that executed the
+	 * instruction after the execution of an ISB instruction by the
+	 * PE that executed the TLB maintenance instruction.
+	 */
+	isb();
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+void init_xlat_tables_arch(unsigned long long max_pa)
+{
+	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
+	       xlat_arch_get_max_supported_pa());
+
+	/*
+	 * If dynamic allocation of new regions is enabled the code can't make
+	 * assumptions about the max physical address because it could change
+	 * after adding new regions. If this functionality is disabled it is
+	 * safer to restrict the max physical address as much as possible.
+	 */
+#ifdef PLAT_XLAT_TABLES_DYNAMIC
+	tcr_ps_bits = calc_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);
+#else
+	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
+#endif
+}
+
+/*******************************************************************************
+ * Macro generating the code for the function enabling the MMU in the given
+ * exception level, assuming that the pagetables have already been created.
+ *
+ *   _el:		Exception level at which the function will run
+ *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
+ *			be OR'ed with the default TCR value.
+ *   _tlbi_fct:		Function to invalidate the TLBs at the current
+ *			exception level
+ ******************************************************************************/
+#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
+	void enable_mmu_internal_el##_el(unsigned int flags,		\
+					 uint64_t *base_table)		\
+	{								\
+		uint64_t mair, tcr, ttbr;				\
+		uint32_t sctlr;						\
+									\
+		assert(IS_IN_EL(_el));					\
+		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
+									\
+		/* Invalidate TLBs at the current exception level */	\
+		_tlbi_fct();						\
+									\
+		/* Set attributes in the right indices of the MAIR */	\
+		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
+		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
+				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
+		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
+				ATTR_NON_CACHEABLE_INDEX);		\
+		write_mair_el##_el(mair);				\
+									\
+		/* Set TCR bits as well. */				\
+		/* Inner & outer WBWA & shareable. */			\
+		/* Set T0SZ to (64 - width of virtual address space) */	\
+		tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |	\
+			TCR_RGN_INNER_WBA |				\
+			(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
+		tcr |= _tcr_extra;					\
+		write_tcr_el##_el(tcr);					\
+									\
+		/* Set TTBR bits as well */				\
+		ttbr = (uint64_t) base_table;				\
+		write_ttbr0_el##_el(ttbr);				\
+									\
+		/* Ensure all translation table writes have drained */	\
+		/* into memory, the TLB invalidation is complete, */	\
+		/* and translation register writes are committed */	\
+		/* before enabling the MMU */				\
+		dsbish();						\
+		isb();							\
+									\
+		sctlr = read_sctlr_el##_el();				\
+		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
+									\
+		if (flags & DISABLE_DCACHE)				\
+			sctlr &= ~SCTLR_C_BIT;				\
+		else							\
+			sctlr |= SCTLR_C_BIT;				\
+									\
+		write_sctlr_el##_el(sctlr);				\
+									\
+		/* Ensure the MMU enable takes effect immediately */	\
+		isb();							\
+	}
+
+/* Define EL1 and EL3 variants of the function enabling the MMU */
+#if IMAGE_EL == 1
+DEFINE_ENABLE_MMU_EL(1,
+		(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
+		tlbivmalle1)
+#elif IMAGE_EL == 3
+DEFINE_ENABLE_MMU_EL(3,
+		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
+		tlbialle3)
+#endif
+
+void enable_mmu_arch(unsigned int flags, uint64_t *base_table)
+{
+#if IMAGE_EL == 1
+	assert(IS_IN_EL(1));
+	enable_mmu_internal_el1(flags, base_table);
+#elif IMAGE_EL == 3
+	assert(IS_IN_EL(3));
+	enable_mmu_internal_el3(flags, base_table);
+#endif
+}
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.h b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.h
new file mode 100644
index 0000000..3336b62
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XLAT_TABLES_ARCH_H__
+#define __XLAT_TABLES_ARCH_H__
+
+#include <arch.h>
+#include <platform_def.h>
+#include <xlat_tables_defs.h>
+
+/*
+ * In AArch64 state, the MMU may support 4 KB, 16 KB and 64 KB page
+ * granularity. For 4KB granularity, a level 0 table descriptor doesn't support
+ * block translation. For 16KB, the same thing happens to levels 0 and 1. For
+ * 64KB, same for level 1. See section D4.3.1 of the ARMv8-A Architecture
+ * Reference Manual (DDI 0487A.k) for more information.
+ *
+ * The define below specifies the first table level that allows block
+ * descriptors.
+ */
+
+#if PAGE_SIZE == (4*1024) /* 4KB */
+# define MIN_LVL_BLOCK_DESC 1
+#else /* 16KB or 64KB */
+# define MIN_LVL_BLOCK_DESC 2
+#endif
+
+/*
+ * Each platform can define the size of the virtual address space, which is
+ * defined in PLAT_VIRT_ADDR_SPACE_SIZE. TCR.TxSZ is calculated as 64 minus the
+ * width of said address space. The value of TCR.TxSZ must be in the range 16
+ * to 39 [1], which means that the virtual address space width must be in the
+ * range 48 to 25 bits.
+ *
+ * Here we calculate the initial lookup level from the value of
+ * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 0 supports virtual
+ * address spaces of widths 48 to 40 bits, level 1 from 39 to 31, and level 2
+ * from 30 to 25. Wider or narrower address spaces are not supported. As a
+ * result, level 3 cannot be used as initial lookup level with 4 KB
+ * granularity. [2]
+ *
+ * For example, for a 35-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
+ * 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
+ * D4-11 in the ARM ARM, the initial lookup level for an address space like
+ * that is 1.
+ *
+ * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information:
+ * [1] Page 1730: 'Input address size', 'For all translation stages'.
+ * [2] Section D4.2.5
+ */
+
+#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (64 - TCR_TxSZ_MIN))
+
+# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."
+
+#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << L0_XLAT_ADDRESS_SHIFT)
+
+# define XLAT_TABLE_LEVEL_BASE	0
+# define NUM_BASE_LEVEL_ENTRIES	\
+		(PLAT_VIRT_ADDR_SPACE_SIZE >> L0_XLAT_ADDRESS_SHIFT)
+
+#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
+
+# define XLAT_TABLE_LEVEL_BASE	1
+# define NUM_BASE_LEVEL_ENTRIES	\
+		(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
+
+#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (64 - TCR_TxSZ_MAX))
+
+# define XLAT_TABLE_LEVEL_BASE	2
+# define NUM_BASE_LEVEL_ENTRIES	\
+		(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
+
+#else
+
+# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."
+
+#endif
+
+#endif /* __XLAT_TABLES_ARCH_H__ */
diff --git a/lib/xlat_tables_v2/xlat_tables.mk b/lib/xlat_tables_v2/xlat_tables.mk
new file mode 100644
index 0000000..3d4b2a0
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables.mk
@@ -0,0 +1,34 @@
+#
+# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+XLAT_TABLES_LIB_SRCS	:=	$(addprefix lib/xlat_tables_v2/,	\
+				${ARCH}/xlat_tables_arch.c		\
+				xlat_tables_common.c			\
+				xlat_tables_internal.c)
diff --git a/lib/xlat_tables_v2/xlat_tables_common.c b/lib/xlat_tables_v2/xlat_tables_common.c
new file mode 100644
index 0000000..b4691a2
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_common.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cassert.h>
+#include <common_def.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform_def.h>
+#include <string.h>
+#include <types.h>
+#include <utils.h>
+#include <xlat_tables_v2.h>
+#ifdef AARCH32
+# include "aarch32/xlat_tables_arch.h"
+#else
+# include "aarch64/xlat_tables_arch.h"
+#endif
+#include "xlat_tables_private.h"
+
+/*
+ * Private variables used by the TF
+ */
+static mmap_region_t tf_mmap[MAX_MMAP_REGIONS + 1];
+
+static uint64_t tf_xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
+			__aligned(XLAT_TABLE_SIZE) __section("xlat_table");
+
+static uint64_t tf_base_xlat_table[NUM_BASE_LEVEL_ENTRIES]
+		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
+
+static mmap_region_t tf_mmap[MAX_MMAP_REGIONS + 1];
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+static int xlat_tables_mapped_regions[MAX_XLAT_TABLES];
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+xlat_ctx_t tf_xlat_ctx = {
+
+	.pa_max_address = PLAT_PHY_ADDR_SPACE_SIZE - 1,
+	.va_max_address = PLAT_VIRT_ADDR_SPACE_SIZE - 1,
+
+	.mmap = tf_mmap,
+	.mmap_num = MAX_MMAP_REGIONS,
+
+	.tables = tf_xlat_tables,
+	.tables_num = MAX_XLAT_TABLES,
+#if PLAT_XLAT_TABLES_DYNAMIC
+	.tables_mapped_regions = xlat_tables_mapped_regions,
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+	.base_table = tf_base_xlat_table,
+	.base_table_entries = NUM_BASE_LEVEL_ENTRIES,
+
+	.max_pa = 0,
+	.max_va = 0,
+
+	.next_table = 0,
+
+	.base_level = XLAT_TABLE_LEVEL_BASE,
+
+	.initialized = 0
+};
+
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
+			size_t size, unsigned int attr)
+{
+	mmap_region_t mm = {
+		.base_va = base_va,
+		.base_pa = base_pa,
+		.size = size,
+		.attr = attr,
+	};
+	mmap_add_region_ctx(&tf_xlat_ctx, (mmap_region_t *)&mm);
+}
+
+void mmap_add(const mmap_region_t *mm)
+{
+	while (mm->size) {
+		mmap_add_region_ctx(&tf_xlat_ctx, (mmap_region_t *)mm);
+		mm++;
+	}
+}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+int mmap_add_dynamic_region(unsigned long long base_pa,
+			    uintptr_t base_va, size_t size, unsigned int attr)
+{
+	mmap_region_t mm = {
+		.base_va = base_va,
+		.base_pa = base_pa,
+		.size = size,
+		.attr = attr,
+	};
+	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
+}
+
+int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
+{
+	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx, base_va, size);
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+void init_xlat_tables(void)
+{
+	assert(!is_mmu_enabled());
+	assert(!tf_xlat_ctx.initialized);
+	print_mmap(tf_xlat_ctx.mmap);
+	init_xlation_table(&tf_xlat_ctx);
+	xlat_tables_print(&tf_xlat_ctx);
+
+	assert(tf_xlat_ctx.max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
+	assert(tf_xlat_ctx.max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
+
+	init_xlat_tables_arch(tf_xlat_ctx.max_pa);
+}
+
+#ifdef AARCH32
+
+void enable_mmu_secure(unsigned int flags)
+{
+	enable_mmu_arch(flags, tf_xlat_ctx.base_table);
+}
+
+#else
+
+void enable_mmu_el1(unsigned int flags)
+{
+	enable_mmu_arch(flags, tf_xlat_ctx.base_table);
+}
+
+void enable_mmu_el3(unsigned int flags)
+{
+	enable_mmu_arch(flags, tf_xlat_ctx.base_table);
+}
+
+#endif /* AARCH32 */
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
new file mode 100644
index 0000000..2f03306
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -0,0 +1,1102 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cassert.h>
+#include <common_def.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform_def.h>
+#include <string.h>
+#include <types.h>
+#include <utils.h>
+#include <xlat_tables_v2.h>
+#ifdef AARCH32
+# include "aarch32/xlat_tables_arch.h"
+#else
+# include "aarch64/xlat_tables_arch.h"
+#endif
+#include "xlat_tables_private.h"
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+/*
+ * The following functions assume that they will be called using subtables only.
+ * The base table can't be unmapped, so it is not needed to do any special
+ * handling for it.
+ */
+
+/*
+ * Returns the index of the array corresponding to the specified translation
+ * table.
+ */
+static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
+{
+	for (int i = 0; i < ctx->tables_num; i++)
+		if (ctx->tables[i] == table)
+			return i;
+
+	/*
+	 * Maybe we were asked to get the index of the base level table, which
+	 * should never happen.
+	 */
+	assert(0);
+
+	return -1;
+}
+
+/* Returns a pointer to an empty translation table. */
+static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
+{
+	for (int i = 0; i < ctx->tables_num; i++)
+		if (ctx->tables_mapped_regions[i] == 0)
+			return ctx->tables[i];
+
+	return NULL;
+}
+
+/* Increments region count for a given table. */
+static void xlat_table_inc_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
+{
+	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]++;
+}
+
+/* Decrements region count for a given table. */
+static void xlat_table_dec_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
+{
+	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
+}
+
+/* Returns 0 if the specified table isn't empty, otherwise 1. */
+static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
+{
+	return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
+}
+
+#else /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/* Returns a pointer to the first empty translation table. */
+static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
+{
+	assert(ctx->next_table < ctx->tables_num);
+
+	return ctx->tables[ctx->next_table++];
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/* Returns a block/page table descriptor for the given level and attributes. */
+static uint64_t xlat_desc(unsigned int attr, unsigned long long addr_pa,
+			  int level)
+{
+	uint64_t desc;
+	int mem_type;
+
+	/* Make sure that the granularity is fine enough to map this address. */
+	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);
+
+	desc = addr_pa;
+	/*
+	 * There are different translation table descriptors for level 3 and the
+	 * rest.
+	 */
+	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
+	/*
+	 * Always set the access flag, as TF doesn't manage access flag faults.
+	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
+	 * memory region attributes.
+	 */
+	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
+	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+	desc |= LOWER_ATTRS(ACCESS_FLAG);
+
+	/*
+	 * Deduce shareability domain and executability of the memory region
+	 * from the memory type of the attributes (MT_TYPE).
+	 *
+	 * Data accesses to device memory and non-cacheable normal memory are
+	 * coherent for all observers in the system, and correspondingly are
+	 * always treated as being Outer Shareable. Therefore, for these 2 types
+	 * of memory, it is not strictly needed to set the shareability field
+	 * in the translation tables.
+	 */
+	mem_type = MT_TYPE(attr);
+	if (mem_type == MT_DEVICE) {
+		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
+		/*
+		 * Always map device memory as execute-never.
+		 * This is to avoid the possibility of a speculative instruction
+		 * fetch, which could be an issue if this memory region
+		 * corresponds to a read-sensitive peripheral.
+		 */
+		desc |= UPPER_ATTRS(XN);
+	} else { /* Normal memory */
+		/*
+		 * Always map read-write normal memory as execute-never.
+		 * (Trusted Firmware doesn't self-modify its code, therefore
+		 * R/W memory is reserved for data storage, which must not be
+		 * executable.)
+		 * Note that setting the XN bit here is for consistency only.
+		 * The enable_mmu_elx() function sets the SCTLR_EL3.WXN bit,
+		 * which makes any writable memory region to be treated as
+		 * execute-never, regardless of the value of the XN bit in the
+		 * translation table.
+		 *
+		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
+		 * attribute to figure out the value of the XN bit.
+		 */
+		if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER))
+			desc |= UPPER_ATTRS(XN);
+
+		if (mem_type == MT_MEMORY) {
+			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
+		} else {
+			assert(mem_type == MT_NON_CACHEABLE);
+			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
+		}
+	}
+
+	return desc;
+}
+
+/*
+ * Enumeration of actions that can be made when mapping table entries depending
+ * on the previous value in that entry and information about the region being
+ * mapped.
+ */
+typedef enum {
+
+	/* Do nothing */
+	ACTION_NONE,
+
+	/* Write a block (or page, if in level 3) entry. */
+	ACTION_WRITE_BLOCK_ENTRY,
+
+	/*
+	 * Create a new table and write a table entry pointing to it. Recurse
+	 * into it for further processing.
+	 */
+	ACTION_CREATE_NEW_TABLE,
+
+	/*
+	 * There is a table descriptor in this entry, read it and recurse into
+	 * that table for further processing.
+	 */
+	ACTION_RECURSE_INTO_TABLE,
+
+} action_t;
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+/*
+ * Recursive function that writes to the translation tables and unmaps the
+ * specified region.
+ */
+static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
+				     const uintptr_t table_base_va,
+				     uint64_t *const table_base,
+				     const int table_entries,
+				     const int level)
+{
+	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
+
+	uint64_t *subtable;
+	uint64_t desc;
+
+	uintptr_t table_idx_va;
+	uintptr_t table_idx_end_va; /* End VA of this entry */
+
+	uintptr_t region_end_va = mm->base_va + mm->size - 1;
+
+	int table_idx;
+
+	if (mm->base_va > table_base_va) {
+		/* Find the first index of the table affected by the region. */
+		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
+
+		table_idx = (table_idx_va - table_base_va) >>
+			    XLAT_ADDR_SHIFT(level);
+
+		assert(table_idx < table_entries);
+	} else {
+		/* Start from the beginning of the table. */
+		table_idx_va = table_base_va;
+		table_idx = 0;
+	}
+
+	while (table_idx < table_entries) {
+
+		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1;
+
+		desc = table_base[table_idx];
+		uint64_t desc_type = desc & DESC_MASK;
+
+		action_t action = ACTION_NONE;
+
+		if ((mm->base_va <= table_idx_va) &&
+		    (region_end_va >= table_idx_end_va)) {
+
+			/* Region covers all block */
+
+			if (level == 3) {
+				/*
+				 * Last level, only page descriptors allowed,
+				 * erase it.
+				 */
+				assert(desc_type == PAGE_DESC);
+
+				action = ACTION_WRITE_BLOCK_ENTRY;
+			} else {
+				/*
+				 * Other levels can have table descriptors. If
+				 * so, recurse into it and erase descriptors
+				 * inside it as needed. If there is a block
+				 * descriptor, just erase it. If an invalid
+				 * descriptor is found, this table isn't
+				 * actually mapped, which shouldn't happen.
+				 */
+				if (desc_type == TABLE_DESC) {
+					action = ACTION_RECURSE_INTO_TABLE;
+				} else {
+					assert(desc_type == BLOCK_DESC);
+					action = ACTION_WRITE_BLOCK_ENTRY;
+				}
+			}
+
+		} else if ((mm->base_va <= table_idx_end_va) ||
+			   (region_end_va >= table_idx_va)) {
+
+			/*
+			 * Region partially covers block.
+			 *
+			 * It can't happen in level 3.
+			 *
+			 * There must be a table descriptor here, if not there
+			 * was a problem when mapping the region.
+			 */
+
+			assert(level < 3);
+
+			assert(desc_type == TABLE_DESC);
+
+			action = ACTION_RECURSE_INTO_TABLE;
+		}
+
+		if (action == ACTION_WRITE_BLOCK_ENTRY) {
+
+			table_base[table_idx] = INVALID_DESC;
+			xlat_arch_tlbi_va(table_idx_va);
+
+		} else if (action == ACTION_RECURSE_INTO_TABLE) {
+
+			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+
+			/* Recurse to write into subtable */
+			xlat_tables_unmap_region(ctx, mm, table_idx_va,
+						 subtable, XLAT_TABLE_ENTRIES,
+						 level + 1);
+
+			/*
+			 * If the subtable is now empty, remove its reference.
+			 */
+			if (xlat_table_is_empty(ctx, subtable)) {
+				table_base[table_idx] = INVALID_DESC;
+				xlat_arch_tlbi_va(table_idx_va);
+			}
+
+		} else {
+			assert(action == ACTION_NONE);
+		}
+
+		table_idx++;
+		table_idx_va += XLAT_BLOCK_SIZE(level);
+
+		/* If reached the end of the region, exit */
+		if (region_end_va <= table_idx_va)
+			break;
+	}
+
+	if (level > ctx->base_level)
+		xlat_table_dec_regions_count(ctx, table_base);
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * From the given arguments, it decides which action to take when mapping the
+ * specified region.
+ */
+static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
+		const int desc_type, const unsigned long long dest_pa,
+		const uintptr_t table_entry_base_va, const int level)
+{
+	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+	uintptr_t table_entry_end_va =
+			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1;
+
+	/*
+	 * The descriptor types allowed depend on the current table level.
+	 */
+
+	if ((mm->base_va <= table_entry_base_va) &&
+	    (mm_end_va >= table_entry_end_va)) {
+
+		/*
+		 * Table entry is covered by region
+		 * --------------------------------
+		 *
+		 * This means that this table entry can describe the whole
+		 * translation with this granularity in principle.
+		 */
+
+		if (level == 3) {
+			/*
+			 * Last level, only page descriptors are allowed.
+			 */
+			if (desc_type == PAGE_DESC) {
+				/*
+				 * There's another region mapped here, don't
+				 * overwrite.
+				 */
+				return ACTION_NONE;
+			} else {
+				assert(desc_type == INVALID_DESC);
+				return ACTION_WRITE_BLOCK_ENTRY;
+			}
+
+		} else {
+
+			/*
+			 * Other levels. Table descriptors are allowed. Block
+			 * descriptors too, but they have some limitations.
+			 */
+
+			if (desc_type == TABLE_DESC) {
+				/* There's already a table, recurse into it. */
+				return ACTION_RECURSE_INTO_TABLE;
+
+			} else if (desc_type == INVALID_DESC) {
+				/*
+				 * There's nothing mapped here, create a new
+				 * entry.
+				 *
+				 * Check if the destination granularity allows
+				 * us to use a block descriptor or we need a
+				 * finer table for it.
+				 *
+				 * Also, check if the current level allows block
+				 * descriptors. If not, create a table instead.
+				 */
+				if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
+				    (level < MIN_LVL_BLOCK_DESC))
+					return ACTION_CREATE_NEW_TABLE;
+				else
+					return ACTION_WRITE_BLOCK_ENTRY;
+
+			} else {
+				/*
+				 * There's another region mapped here, don't
+				 * overwrite.
+				 */
+				assert(desc_type == BLOCK_DESC);
+
+				return ACTION_NONE;
+			}
+		}
+
+	} else if ((mm->base_va <= table_entry_end_va) ||
+		   (mm_end_va >= table_entry_base_va)) {
+
+		/*
+		 * Region partially covers table entry
+		 * -----------------------------------
+		 *
+		 * This means that this table entry can't describe the whole
+		 * translation, a finer table is needed.
+		 *
+		 * There cannot be partial block overlaps in level 3. If that
+		 * happens, some of the preliminary checks when adding the
+		 * mmap region failed to detect that PA and VA must at least be
+		 * aligned to PAGE_SIZE.
+		 */
+		assert(level < 3);
+
+		if (desc_type == INVALID_DESC) {
+			/*
+			 * The block is not fully covered by the region. Create
+			 * a new table, recurse into it and try to map the
+			 * region with finer granularity.
+			 */
+			return ACTION_CREATE_NEW_TABLE;
+
+		} else {
+			assert(desc_type == TABLE_DESC);
+			/*
+			 * The block is not fully covered by the region, but
+			 * there is already a table here. Recurse into it and
+			 * try to map with finer granularity.
+			 *
+			 * PAGE_DESC for level 3 has the same value as
+			 * TABLE_DESC, but this code can't run on a level 3
+			 * table because there can't be overlaps in level 3.
+			 */
+			return ACTION_RECURSE_INTO_TABLE;
+		}
+	}
+
+	/*
+	 * This table entry is outside of the region specified in the arguments,
+	 * don't write anything to it.
+	 */
+	return ACTION_NONE;
+}
+
+/*
+ * Recursive function that writes to the translation tables and maps the
+ * specified region. On success, it returns the VA of the last byte that was
+ * successfully mapped. On error, it returns the VA of the next entry that
+ * should have been mapped.
+ */
+static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
+				   const uintptr_t table_base_va,
+				   uint64_t *const table_base,
+				   const int table_entries,
+				   const int level)
+{
+	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
+
+	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+
+	uintptr_t table_idx_va;
+	unsigned long long table_idx_pa;
+
+	uint64_t *subtable;
+	uint64_t desc;
+
+	int table_idx;
+
+	if (mm->base_va > table_base_va) {
+		/* Find the first index of the table affected by the region. */
+		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
+
+		table_idx = (table_idx_va - table_base_va) >>
+			    XLAT_ADDR_SHIFT(level);
+
+		assert(table_idx < table_entries);
+	} else {
+		/* Start from the beginning of the table. */
+		table_idx_va = table_base_va;
+		table_idx = 0;
+	}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+	if (level > ctx->base_level)
+		xlat_table_inc_regions_count(ctx, table_base);
+#endif
+
+	while (table_idx < table_entries) {
+
+		desc = table_base[table_idx];
+
+		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
+
+		action_t action = xlat_tables_map_region_action(mm,
+			desc & DESC_MASK, table_idx_pa, table_idx_va, level);
+
+		if (action == ACTION_WRITE_BLOCK_ENTRY) {
+
+			table_base[table_idx] =
+				xlat_desc(mm->attr, table_idx_pa, level);
+
+		} else if (action == ACTION_CREATE_NEW_TABLE) {
+
+			subtable = xlat_table_get_empty(ctx);
+			if (subtable == NULL) {
+				/* Not enough free tables to map this region */
+				return table_idx_va;
+			}
+
+			/* Point to new subtable from this one. */
+			table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
+
+			/* Recurse to write into subtable */
+			uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+					       subtable, XLAT_TABLE_ENTRIES,
+					       level + 1);
+			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
+				return end_va;
+
+		} else if (action == ACTION_RECURSE_INTO_TABLE) {
+
+			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+			/* Recurse to write into subtable */
+			uintptr_t end_va =  xlat_tables_map_region(ctx, mm, table_idx_va,
+					       subtable, XLAT_TABLE_ENTRIES,
+					       level + 1);
+			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
+				return end_va;
+
+		} else {
+
+			assert(action == ACTION_NONE);
+
+		}
+
+		table_idx++;
+		table_idx_va += XLAT_BLOCK_SIZE(level);
+
+		/* If reached the end of the region, exit */
+		if (mm_end_va <= table_idx_va)
+			break;
+	}
+
+	return table_idx_va - 1;
+}
+
+void print_mmap(mmap_region_t *const mmap)
+{
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+	tf_printf("mmap:\n");
+	mmap_region_t *mm = mmap;
+
+	while (mm->size) {
+		tf_printf(" VA:%p  PA:0x%llx  size:0x%zx  attr:0x%x\n",
+				(void *)mm->base_va, mm->base_pa,
+				mm->size, mm->attr);
+		++mm;
+	};
+	tf_printf("\n");
+#endif
+}
+
+/*
+ * Function that verifies that a region can be mapped.
+ * Returns:
+ *        0: Success, the mapping is allowed.
+ *   EINVAL: Invalid values were used as arguments.
+ *   ERANGE: The memory limits were surpassed.
+ *   ENOMEM: There is not enough memory in the mmap array.
+ *    EPERM: Region overlaps another one in an invalid way.
+ */
+static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
+				 uintptr_t base_va, size_t size,
+				 unsigned int attr)
+{
+	mmap_region_t *mm = ctx->mmap;
+	unsigned long long end_pa = base_pa + size - 1;
+	uintptr_t end_va = base_va + size - 1;
+
+	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
+			!IS_PAGE_ALIGNED(size))
+		return -EINVAL;
+
+	/* Check for overflows */
+	if ((base_pa > end_pa) || (base_va > end_va))
+		return -ERANGE;
+
+	if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
+		return -ERANGE;
+
+	if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
+		return -ERANGE;
+
+	/* Check that there is space in the mmap array */
+	if (ctx->mmap[ctx->mmap_num - 1].size != 0)
+		return -ENOMEM;
+
+	/* Check for PAs and VAs overlaps with all other regions */
+	for (mm = ctx->mmap; mm->size; ++mm) {
+
+		uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+
+		/*
+		 * Check if one of the regions is completely inside the other
+		 * one.
+		 */
+		int fully_overlapped_va =
+			((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
+			((mm->base_va >= base_va) && (mm_end_va <= end_va));
+
+		/*
+		 * Full VA overlaps are only allowed if both regions are
+		 * identity mapped (zero offset) or have the same VA to PA
+		 * offset. Also, make sure that it's not the exact same area.
+		 * This can only be done with static regions.
+		 */
+		if (fully_overlapped_va) {
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+			if ((attr & MT_DYNAMIC) || (mm->attr & MT_DYNAMIC))
+				return -EPERM;
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+			if ((mm->base_va - mm->base_pa) != (base_va - base_pa))
+				return -EPERM;
+
+			if ((base_va == mm->base_va) && (size == mm->size))
+				return -EPERM;
+
+		} else {
+			/*
+			 * If the regions do not have fully overlapping VAs,
+			 * then they must have fully separated VAs and PAs.
+			 * Partial overlaps are not allowed
+			 */
+
+			unsigned long long mm_end_pa =
+						     mm->base_pa + mm->size - 1;
+
+			int separated_pa =
+				(end_pa < mm->base_pa) || (base_pa > mm_end_pa);
+			int separated_va =
+				(end_va < mm->base_va) || (base_va > mm_end_va);
+
+			if (!(separated_va && separated_pa))
+				return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+	mmap_region_t *mm_cursor = ctx->mmap;
+	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
+	unsigned long long end_pa = mm->base_pa + mm->size - 1;
+	uintptr_t end_va = mm->base_va + mm->size - 1;
+	int ret;
+
+	/* Ignore empty regions */
+	if (!mm->size)
+		return;
+
+	/* Static regions must be added before initializing the xlat tables. */
+	assert(!ctx->initialized);
+
+	ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size,
+				    mm->attr);
+	if (ret != 0) {
+		ERROR("mmap_add_region_check() failed. error %d\n", ret);
+		assert(0);
+		return;
+	}
+
+	/*
+	 * Find correct place in mmap to insert new region.
+	 *
+	 * 1 - Lower region VA end first.
+	 * 2 - Smaller region size first.
+	 *
+	 * VA  0                                   0xFF
+	 *
+	 * 1st |------|
+	 * 2nd |------------|
+	 * 3rd                 |------|
+	 * 4th                            |---|
+	 * 5th                                   |---|
+	 * 6th                            |----------|
+	 * 7th |-------------------------------------|
+	 *
+	 * This is required for overlapping regions only. It simplifies adding
+	 * regions with the loop in xlat_tables_init_internal because the outer
+	 * ones won't overwrite block or page descriptors of regions added
+	 * previously.
+	 *
+	 * Overlapping is only allowed for static regions.
+	 */
+
+	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
+	       && mm_cursor->size)
+		++mm_cursor;
+
+	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
+	       && (mm_cursor->size < mm->size))
+		++mm_cursor;
+
+	/* Make room for new region by moving other regions up by one place */
+	memmove(mm_cursor + 1, mm_cursor,
+		(uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+	/*
+	 * Check we haven't lost the empty sentinel from the end of the array.
+	 * This shouldn't happen as we have checked in mmap_add_region_check
+	 * that there is free space.
+	 */
+	assert(mm_last->size == 0);
+
+	mm_cursor->base_pa = mm->base_pa;
+	mm_cursor->base_va = mm->base_va;
+	mm_cursor->size = mm->size;
+	mm_cursor->attr = mm->attr;
+
+	if (end_pa > ctx->max_pa)
+		ctx->max_pa = end_pa;
+	if (end_va > ctx->max_va)
+		ctx->max_va = end_va;
+}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+	mmap_region_t *mm_cursor = ctx->mmap;
+	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
+	unsigned long long end_pa = mm->base_pa + mm->size - 1;
+	uintptr_t end_va = mm->base_va + mm->size - 1;
+	int ret;
+
+	/* Nothing to do */
+	if (!mm->size)
+		return 0;
+
+	ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size, mm->attr | MT_DYNAMIC);
+	if (ret != 0)
+		return ret;
+
+	/*
+	 * Find the adequate entry in the mmap array in the same way done for
+	 * static regions in mmap_add_region_ctx().
+	 */
+
+	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va && mm_cursor->size)
+		++mm_cursor;
+
+	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va) && (mm_cursor->size < mm->size))
+		++mm_cursor;
+
+	/* Make room for new region by moving other regions up by one place */
+	memmove(mm_cursor + 1, mm_cursor, (uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+	/*
+	 * Check we haven't lost the empty sentinel from the end of the array.
+	 * This shouldn't happen as we have checked in mmap_add_region_check
+	 * that there is free space.
+	 */
+	assert(mm_last->size == 0);
+
+	mm_cursor->base_pa = mm->base_pa;
+	mm_cursor->base_va = mm->base_va;
+	mm_cursor->size = mm->size;
+	mm_cursor->attr = mm->attr | MT_DYNAMIC;
+
+	/*
+	 * Update the translation tables if the xlat tables are initialized. If
+	 * not, this region will be mapped when they are initialized.
+	 */
+	if (ctx->initialized) {
+		uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor, 0, ctx->base_table,
+				ctx->base_table_entries, ctx->base_level);
+
+		/* Failed to map, remove mmap entry, unmap and return error. */
+		if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
+			memmove(mm_cursor, mm_cursor + 1, (uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+			/*
+			 * Check if the mapping function actually managed to map
+			 * anything. If not, just return now.
+			 */
+			if (mm_cursor->base_va >= end_va)
+				return -ENOMEM;
+
+			/*
+			 * Something went wrong after mapping some table entries,
+			 * undo every change done up to this point.
+			 */
+			mmap_region_t unmap_mm = {
+					.base_pa = 0,
+					.base_va = mm->base_va,
+					.size = end_va - mm->base_va,
+					.attr = 0
+			};
+			xlat_tables_unmap_region(ctx, &unmap_mm, 0, ctx->base_table,
+							ctx->base_table_entries, ctx->base_level);
+
+			return -ENOMEM;
+		}
+
+		/*
+		 * Make sure that all entries are written to the memory. There
+		 * is no need to invalidate entries when mapping dynamic regions
+		 * because new table/block/page descriptors only replace old
+		 * invalid descriptors, that aren't TLB cached.
+		 */
+		dsbishst();
+	}
+
+	if (end_pa > ctx->max_pa)
+		ctx->max_pa = end_pa;
+	if (end_va > ctx->max_va)
+		ctx->max_va = end_va;
+
+	return 0;
+}
+
+/*
+ * Removes the region with given base Virtual Address and size from the given
+ * context.
+ *
+ * Returns:
+ *        0: Success.
+ *   EINVAL: Invalid values were used as arguments (region not found).
+ *    EPERM: Tried to remove a static region.
+ */
+int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
+				   size_t size)
+{
+	mmap_region_t *mm = ctx->mmap;
+	mmap_region_t *mm_last = mm + ctx->mmap_num;
+	int update_max_va_needed = 0;
+	int update_max_pa_needed = 0;
+
+	/* Check sanity of mmap array. */
+	assert(mm[ctx->mmap_num].size == 0);
+
+	while (mm->size) {
+		if ((mm->base_va == base_va) && (mm->size == size))
+			break;
+		++mm;
+	}
+
+	/* Check that the region was found */
+	if (mm->size == 0)
+		return -EINVAL;
+
+	/* If the region is static it can't be removed */
+	if (!(mm->attr & MT_DYNAMIC))
+		return -EPERM;
+
+	/* Check if this region is using the top VAs or PAs. */
+	if ((mm->base_va + mm->size - 1) == ctx->max_va)
+		update_max_va_needed = 1;
+	if ((mm->base_pa + mm->size - 1) == ctx->max_pa)
+		update_max_pa_needed = 1;
+
+	/* Update the translation tables if needed */
+	if (ctx->initialized) {
+		xlat_tables_unmap_region(ctx, mm, 0, ctx->base_table,
+					 ctx->base_table_entries,
+					 ctx->base_level);
+		xlat_arch_tlbi_va_sync();
+	}
+
+	/* Remove this region by moving the rest down by one place. */
+	memmove(mm, mm + 1, (uintptr_t)mm_last - (uintptr_t)mm);
+
+	/* Check if we need to update the max VAs and PAs */
+	if (update_max_va_needed) {
+		ctx->max_va = 0;
+		mm = ctx->mmap;
+		while (mm->size) {
+			if ((mm->base_va + mm->size - 1) > ctx->max_va)
+				ctx->max_va = mm->base_va + mm->size - 1;
+			++mm;
+		}
+	}
+
+	if (update_max_pa_needed) {
+		ctx->max_pa = 0;
+		mm = ctx->mmap;
+		while (mm->size) {
+			if ((mm->base_pa + mm->size - 1) > ctx->max_pa)
+				ctx->max_pa = mm->base_pa + mm->size - 1;
+			++mm;
+		}
+	}
+
+	return 0;
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+
+/* Print the attributes of the specified block descriptor. */
+static void xlat_desc_print(uint64_t desc)
+{
+	int mem_type_index = ATTR_INDEX_GET(desc);
+
+	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
+		tf_printf("MEM");
+	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
+		tf_printf("NC");
+	} else {
+		assert(mem_type_index == ATTR_DEVICE_INDEX);
+		tf_printf("DEV");
+	}
+
+	tf_printf(LOWER_ATTRS(AP_RO) & desc ? "-RO" : "-RW");
+	tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
+	tf_printf(UPPER_ATTRS(XN) & desc ? "-XN" : "-EXEC");
+}
+
+static const char * const level_spacers[] = {
+	"[LV0] ",
+	"  [LV1] ",
+	"    [LV2] ",
+	"      [LV3] "
+};
+
+static const char *invalid_descriptors_ommited =
+		"%s(%d invalid descriptors omitted)\n";
+
+/*
+ * Recursive function that reads the translation tables passed as an argument
+ * and prints their status.
+ */
+static void xlat_tables_print_internal(const uintptr_t table_base_va,
+		uint64_t *const table_base, const int table_entries,
+		const int level)
+{
+	assert(level <= XLAT_TABLE_LEVEL_MAX);
+
+	uint64_t desc;
+	uintptr_t table_idx_va = table_base_va;
+	int table_idx = 0;
+
+	size_t level_size = XLAT_BLOCK_SIZE(level);
+
+	/*
+	 * Keep track of how many invalid descriptors are counted in a row.
+	 * Whenever multiple invalid descriptors are found, only the first one
+	 * is printed, and a line is added to inform about how many descriptors
+	 * have been omitted.
+	 */
+	int invalid_row_count = 0;
+
+	while (table_idx < table_entries) {
+
+		desc = table_base[table_idx];
+
+		if ((desc & DESC_MASK) == INVALID_DESC) {
+
+			if (invalid_row_count == 0) {
+				tf_printf("%sVA:%p size:0x%zx\n",
+					  level_spacers[level],
+					  (void *)table_idx_va, level_size);
+			}
+			invalid_row_count++;
+
+		} else {
+
+			if (invalid_row_count > 1) {
+				tf_printf(invalid_descriptors_ommited,
+					  level_spacers[level],
+					  invalid_row_count - 1);
+			}
+			invalid_row_count = 0;
+
+			/*
+			 * Check if this is a table or a block. Tables are only
+			 * allowed in levels other than 3, but DESC_PAGE has the
+			 * same value as DESC_TABLE, so we need to check.
+			 */
+			if (((desc & DESC_MASK) == TABLE_DESC) &&
+					(level < XLAT_TABLE_LEVEL_MAX)) {
+				/*
+				 * Do not print any PA for a table descriptor,
+				 * as it doesn't directly map physical memory
+				 * but instead points to the next translation
+				 * table in the translation table walk.
+				 */
+				tf_printf("%sVA:%p size:0x%zx\n",
+					  level_spacers[level],
+					  (void *)table_idx_va, level_size);
+
+				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
+
+				xlat_tables_print_internal(table_idx_va,
+					(uint64_t *)addr_inner,
+					XLAT_TABLE_ENTRIES, level+1);
+			} else {
+				tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
+					  level_spacers[level],
+					  (void *)table_idx_va,
+					  (unsigned long long)(desc & TABLE_ADDR_MASK),
+					  level_size);
+				xlat_desc_print(desc);
+				tf_printf("\n");
+			}
+		}
+
+		table_idx++;
+		table_idx_va += level_size;
+	}
+
+	if (invalid_row_count > 1) {
+		tf_printf(invalid_descriptors_ommited,
+			  level_spacers[level], invalid_row_count - 1);
+	}
+}
+
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+void xlat_tables_print(xlat_ctx_t *ctx)
+{
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+	xlat_tables_print_internal(0, ctx->base_table, ctx->base_table_entries,
+				   ctx->base_level);
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+}
+
+void init_xlation_table(xlat_ctx_t *ctx)
+{
+	mmap_region_t *mm = ctx->mmap;
+
+	/* All tables must be zeroed before mapping any region. */
+
+	for (int i = 0; i < ctx->base_table_entries; i++)
+		ctx->base_table[i] = INVALID_DESC;
+
+	for (int j = 0; j < ctx->tables_num; j++) {
+#if PLAT_XLAT_TABLES_DYNAMIC
+		ctx->tables_mapped_regions[j] = 0;
+#endif
+		for (int i = 0; i < XLAT_TABLE_ENTRIES; i++)
+			ctx->tables[j][i] = INVALID_DESC;
+	}
+
+	while (mm->size) {
+		uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0, ctx->base_table,
+				ctx->base_table_entries, ctx->base_level);
+
+		if (end_va != mm->base_va + mm->size - 1) {
+			ERROR("Not enough memory to map region:\n"
+			      " VA:%p  PA:0x%llx  size:0x%zx  attr:0x%x\n",
+			      (void *)mm->base_va, mm->base_pa, mm->size, mm->attr);
+			panic();
+		}
+
+		mm++;
+	}
+
+	ctx->initialized = 1;
+}
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
new file mode 100644
index 0000000..048c4a8
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XLAT_TABLES_PRIVATE_H__
+#define __XLAT_TABLES_PRIVATE_H__
+
+#include <cassert.h>
+#include <platform_def.h>
+#include <utils.h>
+
+/*
+ * If the platform hasn't defined a physical and a virtual address space size
+ * default to ADDR_SPACE_SIZE.
+ */
+#if ERROR_DEPRECATED
+# ifdef ADDR_SPACE_SIZE
+#  error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
+# endif
+#elif defined(ADDR_SPACE_SIZE)
+# ifndef PLAT_PHY_ADDR_SPACE_SIZE
+#  define PLAT_PHY_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
+# endif
+# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
+#  define PLAT_VIRT_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
+# endif
+#endif
+
+/* The virtual and physical address space sizes must be powers of two. */
+CASSERT(IS_POWER_OF_TWO(PLAT_VIRT_ADDR_SPACE_SIZE),
+	assert_valid_virt_addr_space_size);
+CASSERT(IS_POWER_OF_TWO(PLAT_PHY_ADDR_SPACE_SIZE),
+	assert_valid_phy_addr_space_size);
+
+/* Struct that holds all information about the translation tables. */
+typedef struct {
+
+	/*
+	 * Max allowed Virtual and Physical Addresses.
+	 */
+	unsigned long long pa_max_address;
+	uintptr_t va_max_address;
+
+	/*
+	 * Array of all memory regions stored in order of ascending end address
+	 * and ascending size to simplify the code that allows overlapping
+	 * regions. The list is terminated by the first entry with size == 0.
+	 */
+	mmap_region_t *mmap; /* mmap_num + 1 elements */
+	int mmap_num;
+
+	/*
+	 * Array of finer-grain translation tables.
+	 * For example, if the initial lookup level is 1 then this array would
+	 * contain both level-2 and level-3 entries.
+	 */
+	uint64_t (*tables)[XLAT_TABLE_ENTRIES];
+	int tables_num;
+	/*
+	 * Keep track of how many regions are mapped in each table. The base
+	 * table can't be unmapped so it isn't needed to keep track of it.
+	 */
+#if PLAT_XLAT_TABLES_DYNAMIC
+	int *tables_mapped_regions;
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+	int next_table;
+
+	/*
+	 * Base translation table. It doesn't need to have the same amount of
+	 * entries as the ones used for other levels.
+	 */
+	uint64_t *base_table;
+	int base_table_entries;
+
+	unsigned long long max_pa;
+	uintptr_t max_va;
+
+	/* Level of the base translation table. */
+	int base_level;
+
+	/* Set to 1 when the translation tables are initialized. */
+	int initialized;
+
+} xlat_ctx_t;
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+/*
+ * Shifts and masks to access fields of an mmap_attr_t
+ */
+/* Dynamic or static */
+#define MT_DYN_SHIFT		30 /* 31 would cause undefined behaviours */
+
+/*
+ * Memory mapping private attributes
+ *
+ * Private attributes not exposed in the mmap_attr_t enum.
+ */
+typedef enum  {
+	/*
+	 * Regions mapped before the MMU can't be unmapped dynamically (they are
+	 * static) and regions mapped with MMU enabled can be unmapped. This
+	 * behaviour can't be overridden.
+	 *
+	 * Static regions can overlap each other, dynamic regions can't.
+	 */
+	MT_STATIC	= 0 << MT_DYN_SHIFT,
+	MT_DYNAMIC	= 1 << MT_DYN_SHIFT
+} mmap_priv_attr_t;
+
+/*
+ * Function used to invalidate all levels of the translation walk for a given
+ * virtual address. It must be called for every translation table entry that is
+ * modified.
+ */
+void xlat_arch_tlbi_va(uintptr_t va);
+
+/*
+ * This function has to be called at the end of any code that uses the function
+ * xlat_arch_tlbi_va().
+ */
+void xlat_arch_tlbi_va_sync(void);
+
+/* Add a dynamic region to the specified context. */
+int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/* Remove a dynamic region from the specified context. */
+int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
+			size_t size);
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/* Print VA, PA, size and attributes of all regions in the mmap array. */
+void print_mmap(mmap_region_t *const mmap);
+
+/*
+ * Print the current state of the translation tables by reading them from
+ * memory.
+ */
+void xlat_tables_print(xlat_ctx_t *ctx);
+
+/*
+ * Initialize the translation tables by mapping all regions added to the
+ * specified context.
+ */
+void init_xlation_table(xlat_ctx_t *ctx);
+
+/* Add a static region to the specified context. */
+void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/*
+ * Architecture-specific initialization code.
+ */
+
+/* Execute architecture-specific translation table initialization code. */
+void init_xlat_tables_arch(unsigned long long max_pa);
+
+/* Enable MMU and configure it to use the specified translation tables. */
+void enable_mmu_arch(unsigned int flags, uint64_t *base_table);
+
+/* Return 1 if the MMU of this Exception Level is enabled, 0 otherwise. */
+int is_mmu_enabled(void);
+
+#endif /* __XLAT_TABLES_PRIVATE_H__ */
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index b47ea46..de506be 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -105,6 +105,10 @@
 # For Chain of Trust
 GENERATE_COT			:= 0
 
+# Whether system coherency is managed in hardware, without explicit software
+# operations.
+HW_ASSISTED_COHERENCY		:= 0
+
 # Flag to enable new version of image loading
 LOAD_IMAGE_V2			:= 0
 
diff --git a/plat/arm/board/juno/platform.mk b/plat/arm/board/juno/platform.mk
index c1cfffc..7571582 100644
--- a/plat/arm/board/juno/platform.mk
+++ b/plat/arm/board/juno/platform.mk
@@ -67,8 +67,10 @@
 				${JUNO_INTERCONNECT_SOURCES}		\
 				${JUNO_SECURITY_SOURCES}
 
-# Enable workarounds for selected Cortex-A57 erratas.
+# Enable workarounds for selected Cortex-A53 and A57 errata.
+ERRATA_A53_855873		:=	1
 ERRATA_A57_806969		:=	0
+ERRATA_A57_813419		:=	1
 ERRATA_A57_813420		:=	1
 
 # Enable option to skip L1 data cache flush during the Cortex-A57 cluster
diff --git a/plat/arm/common/arm_bl1_setup.c b/plat/arm/common/arm_bl1_setup.c
index 81a87c4..8c1fde4 100644
--- a/plat/arm/common/arm_bl1_setup.c
+++ b/plat/arm/common/arm_bl1_setup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -36,7 +36,7 @@
 #include <plat_arm.h>
 #include <sp805.h>
 #include <utils.h>
-#include <xlat_tables.h>
+#include <xlat_tables_v2.h>
 #include "../../../bl1/bl1_private.h"
 
 /* Weak definitions may be overridden in specific ARM standard platform */
diff --git a/plat/arm/common/arm_common.c b/plat/arm/common/arm_common.c
index c53723d..aade221 100644
--- a/plat/arm/common/arm_common.c
+++ b/plat/arm/common/arm_common.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -34,7 +34,7 @@
 #include <mmio.h>
 #include <plat_arm.h>
 #include <platform_def.h>
-#include <xlat_tables.h>
+#include <xlat_tables_v2.h>
 
 extern const mmap_region_t plat_arm_mmap[];
 
diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk
index 204ae4c..891e2fb 100644
--- a/plat/arm/common/arm_common.mk
+++ b/plat/arm/common/arm_common.mk
@@ -113,19 +113,18 @@
 PLAT_INCLUDES		+=	-Iinclude/plat/arm/common/aarch64
 endif
 
-PLAT_BL_COMMON_SOURCES	+=	lib/xlat_tables/xlat_tables_common.c		\
-				lib/xlat_tables/${ARCH}/xlat_tables.c		\
+include lib/xlat_tables_v2/xlat_tables.mk
+
+PLAT_BL_COMMON_SOURCES	+=	${XLAT_TABLES_LIB_SRCS}				\
 				plat/arm/common/${ARCH}/arm_helpers.S		\
-				plat/arm/common/arm_common.c			\
-				plat/common/${ARCH}/plat_common.c
+				plat/arm/common/arm_common.c
 
 BL1_SOURCES		+=	drivers/arm/sp805/sp805.c			\
 				drivers/io/io_fip.c				\
 				drivers/io/io_memmap.c				\
 				drivers/io/io_storage.c				\
 				plat/arm/common/arm_bl1_setup.c			\
-				plat/arm/common/arm_io_storage.c		\
-				plat/common/${ARCH}/platform_up_stack.S
+				plat/arm/common/arm_io_storage.c
 ifdef EL3_PAYLOAD_BASE
 # Need the arm_program_trusted_mailbox() function to release secondary CPUs from
 # their holding pen
@@ -136,21 +135,18 @@
 				drivers/io/io_memmap.c				\
 				drivers/io/io_storage.c				\
 				plat/arm/common/arm_bl2_setup.c			\
-				plat/arm/common/arm_io_storage.c		\
-				plat/common/${ARCH}/platform_up_stack.S
+				plat/arm/common/arm_io_storage.c
 ifeq (${LOAD_IMAGE_V2},1)
 BL2_SOURCES		+=	plat/arm/common/${ARCH}/arm_bl2_mem_params_desc.c\
 				plat/arm/common/arm_image_load.c		\
 				common/desc_image_load.c
 endif
 
-BL2U_SOURCES		+=	plat/arm/common/arm_bl2u_setup.c		\
-				plat/common/aarch64/platform_up_stack.S
+BL2U_SOURCES		+=	plat/arm/common/arm_bl2u_setup.c
 
 BL31_SOURCES		+=	plat/arm/common/arm_bl31_setup.c		\
 				plat/arm/common/arm_pm.c			\
 				plat/arm/common/arm_topology.c			\
-				plat/common/aarch64/platform_mp_stack.S		\
 				plat/common/plat_psci_common.c
 
 ifeq (${ENABLE_PMF}, 1)
diff --git a/plat/common/aarch32/plat_common.c b/plat/common/aarch32/plat_common.c
index a5b9535..29bafa8 100644
--- a/plat/common/aarch32/plat_common.c
+++ b/plat/common/aarch32/plat_common.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -29,7 +29,7 @@
  */
 
 #include <platform.h>
-#include <xlat_tables.h>
+#include <xlat_mmu_helpers.h>
 
 /*
  * The following platform setup functions are weakly defined. They
diff --git a/plat/common/aarch32/platform_mp_stack.S b/plat/common/aarch32/platform_mp_stack.S
index a015436..0266e83 100644
--- a/plat/common/aarch32/platform_mp_stack.S
+++ b/plat/common/aarch32/platform_mp_stack.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -32,8 +32,8 @@
 #include <asm_macros.S>
 #include <platform_def.h>
 
-	.globl	plat_get_my_stack
-	.globl	plat_set_my_stack
+	.weak	plat_get_my_stack
+	.weak	plat_set_my_stack
 
 	/* -----------------------------------------------------
 	 * uintptr_t plat_get_my_stack (u_register_t mpidr)
diff --git a/plat/common/aarch32/platform_up_stack.S b/plat/common/aarch32/platform_up_stack.S
index 8275aec..e36d063 100644
--- a/plat/common/aarch32/platform_up_stack.S
+++ b/plat/common/aarch32/platform_up_stack.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -32,8 +32,8 @@
 #include <asm_macros.S>
 #include <platform_def.h>
 
-	.globl	plat_get_my_stack
-	.globl	plat_set_my_stack
+	.weak	plat_get_my_stack
+	.weak	plat_set_my_stack
 
 	/* -----------------------------------------------------
 	 * unsigned long plat_get_my_stack ()
diff --git a/plat/common/aarch64/plat_common.c b/plat/common/aarch64/plat_common.c
index 4322341..0cf27ca 100644
--- a/plat/common/aarch64/plat_common.c
+++ b/plat/common/aarch64/plat_common.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -30,7 +30,7 @@
 #include <assert.h>
 #include <console.h>
 #include <platform.h>
-#include <xlat_tables.h>
+#include <xlat_mmu_helpers.h>
 
 /*
  * The following platform setup functions are weakly defined. They
diff --git a/plat/common/aarch64/platform_up_stack.S b/plat/common/aarch64/platform_up_stack.S
index 5b82630..93489e9 100644
--- a/plat/common/aarch64/platform_up_stack.S
+++ b/plat/common/aarch64/platform_up_stack.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -34,10 +34,10 @@
 
 
 	.local	platform_normal_stacks
-	.globl	plat_set_my_stack
-	.globl	plat_get_my_stack
-	.globl	platform_set_stack
-	.globl	platform_get_stack
+	.weak	plat_set_my_stack
+	.weak	plat_get_my_stack
+	.weak	platform_set_stack
+	.weak	platform_get_stack
 
 	/* -----------------------------------------------------
 	 * uintptr_t plat_get_my_stack ()
diff --git a/plat/mediatek/mt6795/platform.mk b/plat/mediatek/mt6795/platform.mk
index e105dfe..9851442 100644
--- a/plat/mediatek/mt6795/platform.mk
+++ b/plat/mediatek/mt6795/platform.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
@@ -51,7 +51,6 @@
 				${OEMS_INCLUDES}
 
 PLAT_BL_COMMON_SOURCES	:=	lib/aarch64/xlat_tables.c			\
-				plat/common/aarch64/plat_common.c		\
 				plat/common/plat_gic.c
 
 BL31_SOURCES		+=	drivers/arm/cci/cci.c				\
@@ -63,7 +62,6 @@
 				drivers/console/console.S			\
 				drivers/delay_timer/delay_timer.c		\
 				lib/cpus/aarch64/cortex_a53.S			\
-				plat/common/aarch64/platform_mp_stack.S		\
 				${MTK_PLAT_SOC}/bl31_plat_setup.c		\
 				${MTK_PLAT_SOC}/plat_mt_gic.c			\
 				${MTK_PLAT}/common/mtk_sip_svc.c		\
diff --git a/plat/mediatek/mt8173/platform.mk b/plat/mediatek/mt8173/platform.mk
index e59125a..ad27749 100644
--- a/plat/mediatek/mt8173/platform.mk
+++ b/plat/mediatek/mt8173/platform.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
@@ -45,7 +45,6 @@
 
 PLAT_BL_COMMON_SOURCES	:=	lib/xlat_tables/xlat_tables_common.c		\
 				lib/xlat_tables/aarch64/xlat_tables.c		\
-				plat/common/aarch64/plat_common.c		\
 				plat/arm/common/arm_gicv2.c			\
 				plat/common/plat_gicv2.c
 
@@ -60,7 +59,6 @@
 				lib/cpus/aarch64/cortex_a53.S			\
 				lib/cpus/aarch64/cortex_a57.S			\
 				lib/cpus/aarch64/cortex_a72.S			\
-				plat/common/aarch64/platform_mp_stack.S		\
 				${MTK_PLAT}/common/drivers/uart/8250_console.S	\
 				${MTK_PLAT}/common/mtk_plat_common.c		\
 				${MTK_PLAT}/common/mtk_sip_svc.c		\
@@ -90,6 +88,7 @@
 # Enable workarounds for selected Cortex-A53 erratas.
 ERRATA_A53_826319	:=	1
 ERRATA_A53_836870	:=	1
+ERRATA_A53_855873	:=	1
 
 # indicate the reset vector address can be programmed
 PROGRAMMABLE_RESET_ADDRESS	:=	1
diff --git a/plat/nvidia/tegra/common/aarch64/tegra_helpers.S b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S
index 6851b15..70a7f3a 100644
--- a/plat/nvidia/tegra/common/aarch64/tegra_helpers.S
+++ b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S
@@ -33,6 +33,7 @@
 #include <cpu_macros.S>
 #include <cortex_a57.h>
 #include <cortex_a53.h>
+#include <platform_def.h>
 #include <tegra_def.h>
 
 #define MIDR_PN_CORTEX_A57		0xD07
@@ -67,6 +68,7 @@
 	.globl	ns_image_entrypoint
 	.globl	tegra_bl31_phys_base
 	.globl	tegra_console_base
+	.globl	tegra_enable_l2_ecc_parity_prot
 
 	/* ---------------------
 	 * Common CPU init code
@@ -75,8 +77,8 @@
 .macro	cpu_init_common
 
 	/* ------------------------------------------------
-	 * We enable procesor retention and L2/CPUECTLR NS
-	 * access for A57 CPUs only.
+	 * We enable processor retention, L2/CPUECTLR NS
+	 * access and ECC/Parity protection for A57 CPUs
 	 * ------------------------------------------------
 	 */
 	mrs	x0, midr_el1
@@ -89,7 +91,7 @@
 	/* ---------------------------
 	 * Enable processor retention
 	 * ---------------------------
-	*/
+	 */
 	mrs	x0, L2ECTLR_EL1
 	mov	x1, #RETENTION_ENTRY_TICKS_512 << L2ECTLR_RET_CTRL_SHIFT
 	bic	x0, x0, #L2ECTLR_RET_CTRL_MASK
@@ -107,12 +109,26 @@
 	/* -------------------------------------------------------
 	 * Enable L2 and CPU ECTLR RW access from non-secure world
 	 * -------------------------------------------------------
-	*/
+	 */
 	mov	x0, #ACTLR_EL3_ENABLE_ALL_ACCESS
 	msr	actlr_el3, x0
 	msr	actlr_el2, x0
 	isb
 
+	/* -------------------------------------------------------
+	 * Enable L2 ECC and Parity Protection
+	 * -------------------------------------------------------
+	 */
+	adr	x0, tegra_enable_l2_ecc_parity_prot
+	ldr	x0, [x0]
+	cbz	x0, 1f
+	mrs	x0, L2CTLR_EL1
+	and	x1, x0, #L2_ECC_PARITY_PROTECTION_BIT
+	cbnz	x1, 1f
+	orr	x0, x0, #L2_ECC_PARITY_PROTECTION_BIT
+	msr	L2CTLR_EL1, x0
+	isb
+
 	/* --------------------------------
 	 * Enable the cycle count register
 	 * --------------------------------
@@ -254,6 +270,47 @@
 	 */
 func plat_reset_handler
 
+	/* ----------------------------------------------------
+	 * Verify if we are running from BL31_BASE address
+	 * ----------------------------------------------------
+	 */
+	adr	x18, bl31_entrypoint
+	mov	x17, #BL31_BASE
+	cmp	x18, x17
+	b.eq	1f
+
+	/* ----------------------------------------------------
+	 * Copy the entire BL31 code to BL31_BASE if we are not
+	 * running from it already
+	 * ----------------------------------------------------
+	 */
+	mov	x0, x17
+	mov	x1, x18
+	mov	x2, #BL31_SIZE
+_loop16:
+	cmp	x2, #16
+	b.lt	_loop1
+	ldp	x3, x4, [x1], #16
+	stp	x3, x4, [x0], #16
+	sub	x2, x2, #16
+	b	_loop16
+	/* copy byte per byte */
+_loop1:
+	cbz	x2, _end
+	ldrb	w3, [x1], #1
+	strb	w3, [x0], #1
+	subs	x2, x2, #1
+	b.ne	_loop1
+
+	/* ----------------------------------------------------
+	 * Jump to BL31_BASE and start execution again
+	 * ----------------------------------------------------
+	 */
+_end:	mov	x0, x20
+	mov	x1, x21
+	br	x17
+1:
+
 	/* -----------------------------------
 	 * derive and save the phys_base addr
 	 * -----------------------------------
@@ -412,3 +469,10 @@
 	 */
 tegra_console_base:
 	.quad	0
+
+	/* --------------------------------------------------
+	 * Enable L2 ECC and Parity Protection
+	 * --------------------------------------------------
+	 */
+tegra_enable_l2_ecc_parity_prot:
+	.quad	0
diff --git a/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c
new file mode 100644
index 0000000..f020204
--- /dev/null
+++ b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <mce.h>
+#include <memctrl.h>
+#include <memctrl_v2.h>
+#include <mmio.h>
+#include <smmu.h>
+#include <string.h>
+#include <tegra_def.h>
+#include <xlat_tables.h>
+
+#define TEGRA_GPU_RESET_REG_OFFSET	0x30
+#define  GPU_RESET_BIT			(1 << 0)
+
+/* Video Memory base and size (live values) */
+static uint64_t video_mem_base;
+static uint64_t video_mem_size;
+
+/* array to hold stream_id override config register offsets */
+const static uint32_t streamid_overrides[] = {
+	MC_STREAMID_OVERRIDE_CFG_PTCR,
+	MC_STREAMID_OVERRIDE_CFG_AFIR,
+	MC_STREAMID_OVERRIDE_CFG_HDAR,
+	MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR,
+	MC_STREAMID_OVERRIDE_CFG_NVENCSRD,
+	MC_STREAMID_OVERRIDE_CFG_SATAR,
+	MC_STREAMID_OVERRIDE_CFG_MPCORER,
+	MC_STREAMID_OVERRIDE_CFG_NVENCSWR,
+	MC_STREAMID_OVERRIDE_CFG_AFIW,
+	MC_STREAMID_OVERRIDE_CFG_SATAW,
+	MC_STREAMID_OVERRIDE_CFG_MPCOREW,
+	MC_STREAMID_OVERRIDE_CFG_SATAW,
+	MC_STREAMID_OVERRIDE_CFG_HDAW,
+	MC_STREAMID_OVERRIDE_CFG_ISPRA,
+	MC_STREAMID_OVERRIDE_CFG_ISPWA,
+	MC_STREAMID_OVERRIDE_CFG_ISPWB,
+	MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTR,
+	MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTW,
+	MC_STREAMID_OVERRIDE_CFG_XUSB_DEVR,
+	MC_STREAMID_OVERRIDE_CFG_XUSB_DEVW,
+	MC_STREAMID_OVERRIDE_CFG_TSECSRD,
+	MC_STREAMID_OVERRIDE_CFG_TSECSWR,
+	MC_STREAMID_OVERRIDE_CFG_GPUSRD,
+	MC_STREAMID_OVERRIDE_CFG_GPUSWR,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCRA,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCRAA,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCR,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCRAB,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCWA,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCWAA,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCW,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCWAB,
+	MC_STREAMID_OVERRIDE_CFG_VICSRD,
+	MC_STREAMID_OVERRIDE_CFG_VICSWR,
+	MC_STREAMID_OVERRIDE_CFG_VIW,
+	MC_STREAMID_OVERRIDE_CFG_NVDECSRD,
+	MC_STREAMID_OVERRIDE_CFG_NVDECSWR,
+	MC_STREAMID_OVERRIDE_CFG_APER,
+	MC_STREAMID_OVERRIDE_CFG_APEW,
+	MC_STREAMID_OVERRIDE_CFG_NVJPGSRD,
+	MC_STREAMID_OVERRIDE_CFG_NVJPGSWR,
+	MC_STREAMID_OVERRIDE_CFG_SESRD,
+	MC_STREAMID_OVERRIDE_CFG_SESWR,
+	MC_STREAMID_OVERRIDE_CFG_ETRR,
+	MC_STREAMID_OVERRIDE_CFG_ETRW,
+	MC_STREAMID_OVERRIDE_CFG_TSECSRDB,
+	MC_STREAMID_OVERRIDE_CFG_TSECSWRB,
+	MC_STREAMID_OVERRIDE_CFG_GPUSRD2,
+	MC_STREAMID_OVERRIDE_CFG_GPUSWR2,
+	MC_STREAMID_OVERRIDE_CFG_AXISR,
+	MC_STREAMID_OVERRIDE_CFG_AXISW,
+	MC_STREAMID_OVERRIDE_CFG_EQOSR,
+	MC_STREAMID_OVERRIDE_CFG_EQOSW,
+	MC_STREAMID_OVERRIDE_CFG_UFSHCR,
+	MC_STREAMID_OVERRIDE_CFG_UFSHCW,
+	MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR,
+	MC_STREAMID_OVERRIDE_CFG_BPMPR,
+	MC_STREAMID_OVERRIDE_CFG_BPMPW,
+	MC_STREAMID_OVERRIDE_CFG_BPMPDMAR,
+	MC_STREAMID_OVERRIDE_CFG_BPMPDMAW,
+	MC_STREAMID_OVERRIDE_CFG_AONR,
+	MC_STREAMID_OVERRIDE_CFG_AONW,
+	MC_STREAMID_OVERRIDE_CFG_AONDMAR,
+	MC_STREAMID_OVERRIDE_CFG_AONDMAW,
+	MC_STREAMID_OVERRIDE_CFG_SCER,
+	MC_STREAMID_OVERRIDE_CFG_SCEW,
+	MC_STREAMID_OVERRIDE_CFG_SCEDMAR,
+	MC_STREAMID_OVERRIDE_CFG_SCEDMAW,
+	MC_STREAMID_OVERRIDE_CFG_APEDMAR,
+	MC_STREAMID_OVERRIDE_CFG_APEDMAW,
+	MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR1,
+	MC_STREAMID_OVERRIDE_CFG_VICSRD1,
+	MC_STREAMID_OVERRIDE_CFG_NVDECSRD1
+};
+
+/* array to hold the security configs for stream IDs */
+const static mc_streamid_security_cfg_t sec_cfgs[] = {
+	mc_make_sec_cfg(SCEW, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AFIR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVDISPLAYR1, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(XUSB_DEVR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(VICSRD1, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVENCSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(TSECSRDB, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AXISW, SECURE, NO_OVERRIDE, DISABLE),
+	mc_make_sec_cfg(SDMMCWAB, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AONDMAW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(GPUSWR2, SECURE, NO_OVERRIDE, DISABLE),
+	mc_make_sec_cfg(SATAW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(UFSHCW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AFIW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SDMMCR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SCEDMAW, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(UFSHCR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SDMMCWAA, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(APEDMAW, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SESWR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(MPCORER, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(PTCR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(BPMPW, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(ETRW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(GPUSRD, SECURE, NO_OVERRIDE, DISABLE),
+	mc_make_sec_cfg(VICSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SCEDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(HDAW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(ISPWA, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(EQOSW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(XUSB_HOSTW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(TSECSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SDMMCRAA, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(APER, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(VIW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(APEW, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AXISR, SECURE, NO_OVERRIDE, DISABLE),
+	mc_make_sec_cfg(SDMMCW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(BPMPDMAW, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(ISPRA, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVDECSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(XUSB_DEVW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVDECSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(MPCOREW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVDISPLAYR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(BPMPDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVJPGSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVDECSRD1, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(TSECSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVJPGSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SDMMCWA, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SCER, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(XUSB_HOSTR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(VICSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AONDMAR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AONW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SDMMCRA, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(HOST1XDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(EQOSR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SATAR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(BPMPR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(HDAR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SDMMCRAB, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(ETRR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AONR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(APEDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SESRD, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVENCSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(GPUSWR, SECURE, NO_OVERRIDE, DISABLE),
+	mc_make_sec_cfg(TSECSWRB, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(ISPWB, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(GPUSRD2, SECURE, NO_OVERRIDE, DISABLE),
+};
+
+const static mc_txn_override_cfg_t mc_override_cfgs[] = {
+	mc_make_txn_override_cfg(BPMPW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(EQOSW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(NVJPGSWR, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SDMMCWAA, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(MPCOREW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SCEDMAW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SDMMCW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(AXISW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(TSECSWR, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(GPUSWR, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(XUSB_HOSTW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(TSECSWRB, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(GPUSWR2, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(AONDMAW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(AONW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SESWR, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(BPMPDMAW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SDMMCWA, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(HDAW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(NVDECSWR, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(UFSHCW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SATAW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(ETRW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(VICSWR, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(NVENCSWR, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SDMMCWAB, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(ISPWB, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(APEW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(XUSB_DEVW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(AFIW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SCEW, CGID_TAG_ADR),
+};
+
+/*
+ * Init Memory controller during boot.
+ */
+void tegra_memctrl_setup(void)
+{
+	uint32_t val;
+	uint32_t num_overrides = sizeof(streamid_overrides) / sizeof(uint32_t);
+	uint32_t num_sec_cfgs = sizeof(sec_cfgs) / sizeof(mc_streamid_security_cfg_t);
+	uint32_t num_txn_overrides = sizeof(mc_override_cfgs) / sizeof(mc_txn_override_cfg_t);
+	uint32_t tegra_rev;
+	int i;
+
+	INFO("Tegra Memory Controller (v2)\n");
+
+	/* Program the SMMU pagesize */
+	tegra_smmu_init();
+
+	/* Program all the Stream ID overrides */
+	for (i = 0; i < num_overrides; i++)
+		tegra_mc_streamid_write_32(streamid_overrides[i],
+			MC_STREAM_ID_MAX);
+
+	/* Program the security config settings for all Stream IDs */
+	for (i = 0; i < num_sec_cfgs; i++) {
+		val = sec_cfgs[i].override_enable << 16 |
+		      sec_cfgs[i].override_client_inputs << 8 |
+		      sec_cfgs[i].override_client_ns_flag << 0;
+		tegra_mc_streamid_write_32(sec_cfgs[i].offset, val);
+	}
+
+	/*
+	 * All requests at boot time, and certain requests during
+	 * normal run time, are physically addressed and must bypass
+	 * the SMMU. The client hub logic implements a hardware bypass
+	 * path around the Translation Buffer Units (TBU). During
+	 * boot-time, the SMMU_BYPASS_CTRL register (which defaults to
+	 * TBU_BYPASS mode) will be used to steer all requests around
+	 * the uninitialized TBUs. During normal operation, this register
+	 * is locked into TBU_BYPASS_SID config, which routes requests
+	 * with special StreamID 0x7f on the bypass path and all others
+	 * through the selected TBU. This is done to disable SMMU Bypass
+	 * mode, as it could be used to circumvent SMMU security checks.
+	 */
+	tegra_mc_write_32(MC_SMMU_BYPASS_CONFIG,
+		MC_SMMU_BYPASS_CONFIG_SETTINGS);
+
+	/*
+	 * Set the MC_TXN_OVERRIDE registers for write clients.
+	 */
+	tegra_rev = (mmio_read_32(TEGRA_MISC_BASE + HARDWARE_REVISION_OFFSET) &
+			HARDWARE_MINOR_REVISION_MASK) >> HARDWARE_MINOR_REVISION_SHIFT;
+
+	if (tegra_rev == HARDWARE_REVISION_A01) {
+
+		/* GPU and NVENC settings for rev. A01 */
+		val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR);
+		val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR,
+			val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);
+
+		val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2);
+		val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2,
+			val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);
+
+		val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR);
+		val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR,
+			val | MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID);
+
+	} else {
+
+		/* settings for rev. A02 */
+		for (i = 0; i < num_txn_overrides; i++) {
+			val = tegra_mc_read_32(mc_override_cfgs[i].offset);
+			val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+			tegra_mc_write_32(mc_override_cfgs[i].offset,
+				val | mc_override_cfgs[i].cgid_tag);
+		}
+
+	}
+}
+
+/*
+ * Restore Memory Controller settings after "System Suspend"
+ */
+void tegra_memctrl_restore_settings(void)
+{
+	/* video memory carveout region */
+	if (video_mem_base) {
+		tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO,
+				  (uint32_t)video_mem_base);
+		tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
+				  (uint32_t)(video_mem_base >> 32));
+		tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, video_mem_size);
+
+		/*
+		 * MCE propagates the VideoMem configuration values across the
+		 * CCPLEX.
+		 */
+		mce_update_gsc_videomem();
+	}
+}
+
+/*
+ * Secure the BL31 DRAM aperture.
+ *
+ * phys_base = physical base of TZDRAM aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+	/*
+	 * Setup the Memory controller to allow only secure accesses to
+	 * the TZDRAM carveout
+	 */
+	INFO("Configuring TrustZone DRAM Memory Carveout\n");
+
+	tegra_mc_write_32(MC_SECURITY_CFG0_0, (uint32_t)phys_base);
+	tegra_mc_write_32(MC_SECURITY_CFG3_0, (uint32_t)(phys_base >> 32));
+	tegra_mc_write_32(MC_SECURITY_CFG1_0, size_in_bytes >> 20);
+
+	/*
+	 * MCE propagates the security configuration values across the
+	 * CCPLEX.
+	 */
+	mce_update_gsc_tzdram();
+}
+
+/*
+ * Secure the BL31 TZRAM aperture.
+ *
+ * phys_base = physical base of TZRAM aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+	uint64_t tzram_end = phys_base + size_in_bytes - 1;
+	uint32_t val;
+
+	/*
+	 * Check if the TZRAM is locked already.
+	 */
+	if (tegra_mc_read_32(MC_TZRAM_REG_CTRL) == DISABLE_TZRAM_ACCESS)
+		return;
+
+	/*
+	 * Setup the Memory controller to allow only secure accesses to
+	 * the TZRAM carveout
+	 */
+	INFO("Configuring TrustZone RAM (SysRAM) Memory Carveout\n");
+
+	/* Program the base and end values */
+	tegra_mc_write_32(MC_TZRAM_BASE, (uint32_t)phys_base);
+	tegra_mc_write_32(MC_TZRAM_END, (uint32_t)tzram_end);
+
+	/* Extract the high address bits from the base/end values */
+	val = (uint32_t)(phys_base >> 32) & TZRAM_ADDR_HI_BITS_MASK;
+	val |= (((uint32_t)(tzram_end >> 32) & TZRAM_ADDR_HI_BITS_MASK) <<
+		TZRAM_END_HI_BITS_SHIFT);
+	tegra_mc_write_32(MC_TZRAM_HI_ADDR_BITS, val);
+
+	/* Disable further writes to the TZRAM setup registers */
+	tegra_mc_write_32(MC_TZRAM_REG_CTRL, DISABLE_TZRAM_ACCESS);
+
+	/*
+	 * MCE propagates the security configuration values across the
+	 * CCPLEX.
+	 */
+	mce_update_gsc_tzram();
+}
+
+/*
+ * Program the Video Memory carveout region
+ *
+ * phys_base = physical base of aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+	uint32_t regval;
+
+	/*
+	 * The GPU is the user of the Video Memory region. In order to
+	 * transition to the new memory region smoothly, we program the
+	 * new base/size ONLY if the GPU is in reset mode.
+	 */
+	regval = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_GPU_RESET_REG_OFFSET);
+	if ((regval & GPU_RESET_BIT) == 0) {
+		ERROR("GPU not in reset! Video Memory setup failed\n");
+		return;
+	}
+
+	/*
+	 * Setup the Memory controller to restrict CPU accesses to the Video
+	 * Memory region
+	 */
+	INFO("Configuring Video Memory Carveout\n");
+
+	tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO, (uint32_t)phys_base);
+	tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
+			  (uint32_t)(phys_base >> 32));
+	tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, size_in_bytes);
+
+	/* store new values */
+	video_mem_base = phys_base;
+	video_mem_size = size_in_bytes >> 20;
+
+	/*
+	 * MCE propagates the VideoMem configuration values across the
+	 * CCPLEX.
+	 */
+	mce_update_gsc_videomem();
+}
diff --git a/plat/nvidia/tegra/common/tegra_bl31_setup.c b/plat/nvidia/tegra/common/tegra_bl31_setup.c
index 72da4b3..9e7e576 100644
--- a/plat/nvidia/tegra/common/tegra_bl31_setup.c
+++ b/plat/nvidia/tegra/common/tegra_bl31_setup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -44,15 +44,22 @@
 #include <platform.h>
 #include <platform_def.h>
 #include <stddef.h>
+#include <string.h>
 #include <tegra_def.h>
 #include <tegra_private.h>
 
+extern void zeromem16(void *mem, unsigned int length);
+
 /*******************************************************************************
  * Declarations of linker defined symbols which will help us find the layout
  * of trusted SRAM
  ******************************************************************************/
-extern unsigned long __RO_START__;
-extern unsigned long __RO_END__;
+extern unsigned long __TEXT_START__;
+extern unsigned long __TEXT_END__;
+extern unsigned long __RW_START__;
+extern unsigned long __RW_END__;
+extern unsigned long __RODATA_START__;
+extern unsigned long __RODATA_END__;
 extern unsigned long __BL31_END__;
 
 extern uint64_t tegra_bl31_phys_base;
@@ -65,8 +72,10 @@
  * script to ensure that __RO_START__, __RO_END__ & __BL31_END__ linker symbols
  * refer to page-aligned addresses.
  */
-#define BL31_RO_BASE (unsigned long)(&__RO_START__)
-#define BL31_RO_LIMIT (unsigned long)(&__RO_END__)
+#define BL31_RW_START (unsigned long)(&__RW_START__)
+#define BL31_RW_END (unsigned long)(&__RW_END__)
+#define BL31_RODATA_BASE (unsigned long)(&__RODATA_START__)
+#define BL31_RODATA_END (unsigned long)(&__RODATA_END__)
 #define BL31_END (unsigned long)(&__BL31_END__)
 
 static entry_point_info_t bl33_image_ep_info, bl32_image_ep_info;
@@ -80,6 +89,29 @@
 extern uint64_t ns_image_entrypoint;
 
 /*******************************************************************************
+ * The following platform setup functions are weakly defined. They
+ * provide typical implementations that will be overridden by a SoC.
+ ******************************************************************************/
+#pragma weak plat_early_platform_setup
+#pragma weak plat_get_bl31_params
+#pragma weak plat_get_bl31_plat_params
+
+void plat_early_platform_setup(void)
+{
+	; /* do nothing */
+}
+
+bl31_params_t *plat_get_bl31_params(void)
+{
+	return NULL;
+}
+
+plat_params_from_bl2_t *plat_get_bl31_plat_params(void)
+{
+	return NULL;
+}
+
+/*******************************************************************************
  * Return a pointer to the 'entry_point_info' structure of the next image for
  * security state specified. BL33 corresponds to the non-secure image type
  * while BL32 corresponds to the secure image type.
@@ -89,7 +121,8 @@
 	if (type == NON_SECURE)
 		return &bl33_image_ep_info;
 
-	if (type == SECURE)
+	/* return BL32 entry point info if it is valid */
+	if (type == SECURE && bl32_image_ep_info.pc)
 		return &bl32_image_ep_info;
 
 	return NULL;
@@ -116,11 +149,25 @@
 #if DEBUG
 	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
 #endif
+	image_info_t bl32_img_info = { {0} };
+	uint64_t tzdram_start, tzdram_end, bl32_start, bl32_end;
+
+	/*
+	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
+	 * there's no argument to relay from a previous bootloader. Platforms
+	 * might use custom ways to get arguments, so provide handlers which
+	 * they can override.
+	 */
+	if (from_bl2 == NULL)
+		from_bl2 = plat_get_bl31_params();
+	if (plat_params == NULL)
+		plat_params = plat_get_bl31_plat_params();
 
 	/*
 	 * Copy BL3-3, BL3-2 entry point information.
 	 * They are stored in Secure RAM, in BL2's address space.
 	 */
+	assert(from_bl2);
 	assert(from_bl2->bl33_ep_info);
 	bl33_image_ep_info = *from_bl2->bl33_ep_info;
 
@@ -136,21 +183,74 @@
 	plat_bl31_params_from_bl2.uart_id = plat_params->uart_id;
 
 	/*
+	 * It is very important that we run either from TZDRAM or TZSRAM base.
+	 * Add an explicit check here.
+	 */
+	if ((plat_bl31_params_from_bl2.tzdram_base != BL31_BASE) &&
+	    (TEGRA_TZRAM_BASE != BL31_BASE))
+		panic();
+
+	/*
 	 * Get the base address of the UART controller to be used for the
 	 * console
 	 */
-	assert(plat_params->uart_id);
 	tegra_console_base = plat_get_console_from_id(plat_params->uart_id);
 
-	/*
-	 * Configure the UART port to be used as the console
-	 */
-	assert(tegra_console_base);
-	console_init(tegra_console_base, TEGRA_BOOT_UART_CLK_IN_HZ,
-		TEGRA_CONSOLE_BAUDRATE);
+	if (tegra_console_base != (uint64_t)0) {
+		/*
+		 * Configure the UART port to be used as the console
+		 */
+		console_init(tegra_console_base, TEGRA_BOOT_UART_CLK_IN_HZ,
+			TEGRA_CONSOLE_BAUDRATE);
 
-	/* Initialise crash console */
-	plat_crash_console_init();
+		/* Initialise crash console */
+		plat_crash_console_init();
+	}
+
+	/*
+	 * Do initial security configuration to allow DRAM/device access.
+	 */
+	tegra_memctrl_tzdram_setup(plat_bl31_params_from_bl2.tzdram_base,
+			plat_bl31_params_from_bl2.tzdram_size);
+
+	/*
+	 * The previous bootloader might not have placed the BL32 image
+	 * inside the TZDRAM. We check the BL32 image info to find out
+	 * the base/PC values and relocate the image if necessary.
+	 */
+	if (from_bl2->bl32_image_info) {
+
+		bl32_img_info = *from_bl2->bl32_image_info;
+
+		/* Relocate BL32 if it resides outside of the TZDRAM */
+		tzdram_start = plat_bl31_params_from_bl2.tzdram_base;
+		tzdram_end = plat_bl31_params_from_bl2.tzdram_base +
+				plat_bl31_params_from_bl2.tzdram_size;
+		bl32_start = bl32_img_info.image_base;
+		bl32_end = bl32_img_info.image_base + bl32_img_info.image_size;
+
+		assert(tzdram_end > tzdram_start);
+		assert(bl32_end > bl32_start);
+		assert(bl32_image_ep_info.pc > tzdram_start);
+		assert(bl32_image_ep_info.pc < tzdram_end);
+
+		/* relocate BL32 */
+		if (bl32_start >= tzdram_end || bl32_end <= tzdram_start) {
+
+			INFO("Relocate BL32 to TZDRAM\n");
+
+			memcpy16((void *)(uintptr_t)bl32_image_ep_info.pc,
+				 (void *)(uintptr_t)bl32_start,
+				 bl32_img_info.image_size);
+
+			/* clean up non-secure intermediate buffer */
+			zeromem16((void *)(uintptr_t)bl32_start,
+				bl32_img_info.image_size);
+		}
+	}
+
+	/* Early platform setup for Tegra SoCs */
+	plat_early_platform_setup();
 
 	INFO("BL3-1: Boot CPU: %s Processor [%lx]\n", (impl == DENVER_IMPL) ?
 		"Denver" : "ARM", read_mpidr());
@@ -163,6 +263,9 @@
 {
 	uint32_t tmp_reg;
 
+	/* Initialize the gic cpu and distributor interfaces */
+	plat_gic_setup();
+
 	/*
 	 * Initialize delay timer
 	 */
@@ -179,12 +282,6 @@
 	tegra_memctrl_setup();
 
 	/*
-	 * Do initial security configuration to allow DRAM/device access.
-	 */
-	tegra_memctrl_tzdram_setup(plat_bl31_params_from_bl2.tzdram_base,
-			plat_bl31_params_from_bl2.tzdram_size);
-
-	/*
 	 * Set up the TZRAM memory aperture to allow only secure world
 	 * access
 	 */
@@ -194,9 +291,6 @@
 	tmp_reg = SCR_RES1_BITS | SCR_RW_BIT;
 	write_scr(tmp_reg);
 
-	/* Initialize the gic cpu and distributor interfaces */
-	tegra_gic_setup();
-
 	INFO("BL3-1: Tegra platform setup complete\n");
 }
 
@@ -205,9 +299,7 @@
  ******************************************************************************/
 void bl31_plat_runtime_setup(void)
 {
-	/* Initialize the runtime console */
-	console_init(tegra_console_base, TEGRA_BOOT_UART_CLK_IN_HZ,
-		TEGRA_CONSOLE_BAUDRATE);
+	; /* do nothing */
 }
 
 /*******************************************************************************
@@ -216,11 +308,12 @@
  ******************************************************************************/
 void bl31_plat_arch_setup(void)
 {
-	unsigned long bl31_base_pa = tegra_bl31_phys_base;
-	unsigned long total_base = bl31_base_pa;
-	unsigned long total_size = BL32_BASE - BL31_RO_BASE;
-	unsigned long ro_start = bl31_base_pa;
-	unsigned long ro_size = BL31_RO_LIMIT - BL31_RO_BASE;
+	unsigned long rw_start = BL31_RW_START;
+	unsigned long rw_size = BL31_RW_END - BL31_RW_START;
+	unsigned long rodata_start = BL31_RODATA_BASE;
+	unsigned long rodata_size = BL31_RODATA_END - BL31_RODATA_BASE;
+	unsigned long code_base = (unsigned long)(&__TEXT_START__);
+	unsigned long code_size = (unsigned long)(&__TEXT_END__) - code_base;
 	const mmap_region_t *plat_mmio_map = NULL;
 #if USE_COHERENT_MEM
 	unsigned long coh_start, coh_size;
@@ -228,12 +321,15 @@
 	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
 
 	/* add memory regions */
-	mmap_add_region(total_base, total_base,
-			total_size,
+	mmap_add_region(rw_start, rw_start,
+			rw_size,
 			MT_MEMORY | MT_RW | MT_SECURE);
-	mmap_add_region(ro_start, ro_start,
-			ro_size,
-			MT_MEMORY | MT_RO | MT_SECURE);
+	mmap_add_region(rodata_start, rodata_start,
+			rodata_size,
+			MT_RO_DATA | MT_SECURE);
+	mmap_add_region(code_base, code_base,
+			code_size,
+			MT_CODE | MT_SECURE);
 
 	/* map TZDRAM used by BL31 as coherent memory */
 	if (TEGRA_TZRAM_BASE == tegra_bl31_phys_base) {
diff --git a/plat/nvidia/tegra/common/tegra_common.mk b/plat/nvidia/tegra/common/tegra_common.mk
index c9e9255..d6bd2ea 100644
--- a/plat/nvidia/tegra/common/tegra_common.mk
+++ b/plat/nvidia/tegra/common/tegra_common.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
@@ -36,28 +36,28 @@
 
 USE_COHERENT_MEM	:=	0
 
+SEPARATE_CODE_AND_RODATA :=	1
+
 PLAT_INCLUDES		:=	-Iplat/nvidia/tegra/include/drivers \
 				-Iplat/nvidia/tegra/include \
 				-Iplat/nvidia/tegra/include/${TARGET_SOC}
 
 PLAT_BL_COMMON_SOURCES	:=	lib/xlat_tables/xlat_tables_common.c		\
-				lib/xlat_tables/aarch64/xlat_tables.c		\
-				plat/common/aarch64/plat_common.c
+				lib/xlat_tables/aarch64/xlat_tables.c
 
 COMMON_DIR		:=	plat/nvidia/tegra/common
 
 BL31_SOURCES		+=	drivers/arm/gic/gic_v2.c			\
-				drivers/arm/gic/gic_v3.c			\
 				drivers/console/aarch64/console.S		\
 				drivers/delay_timer/delay_timer.c		\
 				drivers/ti/uart/aarch64/16550_console.S		\
-				plat/common/aarch64/platform_mp_stack.S		\
-				plat/common/plat_psci_common.c			\
 				${COMMON_DIR}/aarch64/tegra_helpers.S		\
 				${COMMON_DIR}/drivers/pmc/pmc.c			\
 				${COMMON_DIR}/tegra_bl31_setup.c		\
 				${COMMON_DIR}/tegra_delay_timer.c		\
+				${COMMON_DIR}/tegra_fiq_glue.c			\
 				${COMMON_DIR}/tegra_gic.c			\
+				${COMMON_DIR}/tegra_platform.c			\
 				${COMMON_DIR}/tegra_pm.c			\
 				${COMMON_DIR}/tegra_sip_calls.c			\
 				${COMMON_DIR}/tegra_topology.c
diff --git a/plat/nvidia/tegra/common/tegra_fiq_glue.c b/plat/nvidia/tegra/common/tegra_fiq_glue.c
new file mode 100644
index 0000000..7fcc114
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_fiq_glue.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bakery_lock.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <denver.h>
+#include <gic_v2.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+DEFINE_BAKERY_LOCK(tegra_fiq_lock);
+
+/*******************************************************************************
+ * Static variables
+ ******************************************************************************/
+static uint64_t ns_fiq_handler_addr;
+static unsigned int fiq_handler_active;
+static pcpu_fiq_state_t fiq_state[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * Handler for FIQ interrupts
+ ******************************************************************************/
+static uint64_t tegra_fiq_interrupt_handler(uint32_t id,
+					  uint32_t flags,
+					  void *handle,
+					  void *cookie)
+{
+	cpu_context_t *ctx = cm_get_context(NON_SECURE);
+	el3_state_t *el3state_ctx = get_el3state_ctx(ctx);
+	int cpu = plat_my_core_pos();
+	uint32_t irq;
+
+	bakery_lock_get(&tegra_fiq_lock);
+
+	/*
+	 * The FIQ was generated when the execution was in the non-secure
+	 * world. Save the context registers to start with.
+	 */
+	cm_el1_sysregs_context_save(NON_SECURE);
+
+	/*
+	 * Save elr_el3 and spsr_el3 from the saved context, and overwrite
+	 * the context with the NS fiq_handler_addr and SPSR value.
+	 */
+	fiq_state[cpu].elr_el3 = read_ctx_reg(el3state_ctx, CTX_ELR_EL3);
+	fiq_state[cpu].spsr_el3 = read_ctx_reg(el3state_ctx, CTX_SPSR_EL3);
+
+	/*
+	 * Set the new ELR to continue execution in the NS world using the
+	 * FIQ handler registered earlier.
+	 */
+	assert(ns_fiq_handler_addr);
+	write_ctx_reg(el3state_ctx, CTX_ELR_EL3, ns_fiq_handler_addr);
+
+	/*
+	 * Mark this interrupt as complete to avoid a FIQ storm.
+	 */
+	irq = plat_ic_acknowledge_interrupt();
+	if (irq < 1022)
+		plat_ic_end_of_interrupt(irq);
+
+	bakery_lock_release(&tegra_fiq_lock);
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Setup handler for FIQ interrupts
+ ******************************************************************************/
+void tegra_fiq_handler_setup(void)
+{
+	uint64_t flags;
+	int rc;
+
+	/* return if already registered */
+	if (fiq_handler_active)
+		return;
+
+	/*
+	 * Register an interrupt handler for FIQ interrupts generated for
+	 * NS interrupt sources
+	 */
+	flags = 0;
+	set_interrupt_rm_flag(flags, NON_SECURE);
+	rc = register_interrupt_type_handler(INTR_TYPE_EL3,
+				tegra_fiq_interrupt_handler,
+				flags);
+	if (rc)
+		panic();
+
+	/* handler is now active */
+	fiq_handler_active = 1;
+}
+
+/*******************************************************************************
+ * Store the NS world's entrypoint for FIQ interrupts (validation of the
+ ******************************************************************************/
+void tegra_fiq_set_ns_entrypoint(uint64_t entrypoint)
+{
+	ns_fiq_handler_addr = entrypoint;
+}
+
+/*******************************************************************************
+ * Handler to return the NS EL1/EL0 CPU context
+ ******************************************************************************/
+int tegra_fiq_get_intr_context(void)
+{
+	cpu_context_t *ctx = cm_get_context(NON_SECURE);
+	gp_regs_t *gpregs_ctx = get_gpregs_ctx(ctx);
+	el1_sys_regs_t *el1state_ctx = get_sysregs_ctx(ctx);
+	int cpu = plat_my_core_pos();
+	uint64_t val;
+
+	/*
+	 * We store the ELR_EL3, SPSR_EL3, SP_EL0 and SP_EL1 registers so
+	 * that el3_exit() sends these values back to the NS world.
+	 */
+	write_ctx_reg(gpregs_ctx, CTX_GPREG_X0, fiq_state[cpu].elr_el3);
+	write_ctx_reg(gpregs_ctx, CTX_GPREG_X1, fiq_state[cpu].spsr_el3);
+
+	val = read_ctx_reg(gpregs_ctx, CTX_GPREG_SP_EL0);
+	write_ctx_reg(gpregs_ctx, CTX_GPREG_X2, val);
+
+	val = read_ctx_reg(el1state_ctx, CTX_SP_EL1);
+	write_ctx_reg(gpregs_ctx, CTX_GPREG_X3, val);
+
+	return 0;
+}
diff --git a/plat/nvidia/tegra/common/tegra_gic.c b/plat/nvidia/tegra/common/tegra_gic.c
index ee12975..6864f8b 100644
--- a/plat/nvidia/tegra/common/tegra_gic.c
+++ b/plat/nvidia/tegra/common/tegra_gic.c
@@ -47,6 +47,9 @@
 	(GIC_HIGHEST_NS_PRIORITY << 16) | \
 	(GIC_HIGHEST_NS_PRIORITY << 24))
 
+static const irq_sec_cfg_t *g_irq_sec_ptr;
+static unsigned int g_num_irqs;
+
 /*******************************************************************************
  * Place the cpu interface in a state where it can never make a cpu exit wfi as
 * a result of an asserted interrupt. This is critical for powering down a cpu
@@ -110,7 +113,9 @@
  ******************************************************************************/
 static void tegra_gic_distif_setup(unsigned int gicd_base)
 {
-	unsigned int index, num_ints;
+	unsigned int index, num_ints, irq_num;
+	uint8_t target_cpus;
+	uint32_t val;
 
 	/*
 	 * Mark out non-secure interrupts. Calculate number of
@@ -128,6 +133,39 @@
 				GICD_IPRIORITYR_DEF_VAL);
 	}
 
+	/* Configure SPI secure interrupts now */
+	if (g_irq_sec_ptr) {
+
+		for (index = 0; index < g_num_irqs; index++) {
+			irq_num = (g_irq_sec_ptr + index)->irq;
+			target_cpus = (g_irq_sec_ptr + index)->target_cpus;
+
+			if (irq_num >= MIN_SPI_ID) {
+
+				/* Configure as a secure interrupt */
+				gicd_clr_igroupr(gicd_base, irq_num);
+
+				/* Configure SPI priority */
+				mmio_write_8(gicd_base + GICD_IPRIORITYR +
+					irq_num,
+					GIC_HIGHEST_SEC_PRIORITY &
+					GIC_PRI_MASK);
+
+				/* Configure as level triggered */
+				val = gicd_read_icfgr(gicd_base, irq_num);
+				val |= (3 << ((irq_num & 0xF) << 1));
+				gicd_write_icfgr(gicd_base, irq_num, val);
+
+				/* Route SPI to the target CPUs */
+				gicd_set_itargetsr(gicd_base, irq_num,
+					target_cpus);
+
+				/* Enable this interrupt */
+				gicd_set_isenabler(gicd_base, irq_num);
+			}
+		}
+	}
+
 	/*
 	 * Configure the SGI and PPI. This is done in a separated function
 	 * because each CPU is responsible for initializing its own private
@@ -139,8 +177,11 @@
 	gicd_write_ctlr(gicd_base, ENABLE_GRP0 | ENABLE_GRP1);
 }
 
-void tegra_gic_setup(void)
+void tegra_gic_setup(const irq_sec_cfg_t *irq_sec_ptr, unsigned int num_irqs)
 {
+	g_irq_sec_ptr = irq_sec_ptr;
+	g_num_irqs = num_irqs;
+
 	tegra_gic_cpuif_setup(TEGRA_GICC_BASE);
 	tegra_gic_distif_setup(TEGRA_GICD_BASE);
 }
@@ -185,12 +226,17 @@
 uint32_t tegra_gic_get_pending_interrupt_type(void)
 {
 	uint32_t id;
+	unsigned int index;
 
 	id = gicc_read_hppir(TEGRA_GICC_BASE) & INT_ID_MASK;
 
-	/* Assume that all secure interrupts are S-EL1 interrupts */
-	if (id < 1022)
-		return INTR_TYPE_S_EL1;
+	/* get the interrupt type */
+	if (id < 1022) {
+		for (index = 0; index < g_num_irqs; index++) {
+			if (id == (g_irq_sec_ptr + index)->irq)
+				return (g_irq_sec_ptr + index)->type;
+		}
+	}
 
 	if (id == GIC_SPURIOUS_INTERRUPT)
 		return INTR_TYPE_INVAL;
@@ -248,14 +294,19 @@
 uint32_t tegra_gic_get_interrupt_type(uint32_t id)
 {
 	uint32_t group;
+	unsigned int index;
 
 	group = gicd_get_igroupr(TEGRA_GICD_BASE, id);
 
-	/* Assume that all secure interrupts are S-EL1 interrupts */
-	if (group == GRP0)
-		return INTR_TYPE_S_EL1;
-	else
-		return INTR_TYPE_NS;
+	/* get the interrupt type */
+	if (group == GRP0) {
+		for (index = 0; index < g_num_irqs; index++) {
+			if (id == (g_irq_sec_ptr + index)->irq)
+				return (g_irq_sec_ptr + index)->type;
+		}
+	}
+
+	return INTR_TYPE_NS;
 }
 
 #else
diff --git a/plat/nvidia/tegra/common/tegra_platform.c b/plat/nvidia/tegra/common/tegra_platform.c
new file mode 100644
index 0000000..0724b18
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_platform.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <mmio.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+/*******************************************************************************
+ * Tegra platforms
+ ******************************************************************************/
+typedef enum tegra_platform {
+	TEGRA_PLATFORM_SILICON = 0,
+	TEGRA_PLATFORM_QT,
+	TEGRA_PLATFORM_FPGA,
+	TEGRA_PLATFORM_EMULATION,
+	TEGRA_PLATFORM_MAX,
+} tegra_platform_t;
+
+/*******************************************************************************
+ * Tegra macros defining all the SoC minor versions
+ ******************************************************************************/
+#define TEGRA_MINOR_QT			0
+#define TEGRA_MINOR_FPGA		1
+#define TEGRA_MINOR_EMULATION_MIN	2
+#define TEGRA_MINOR_EMULATION_MAX	10
+
+/*******************************************************************************
+ * Tegra major, minor version helper macros
+ ******************************************************************************/
+#define MAJOR_VERSION_SHIFT		0x4
+#define MAJOR_VERSION_MASK		0xF
+#define MINOR_VERSION_SHIFT		0x10
+#define MINOR_VERSION_MASK		0xF
+#define CHIP_ID_SHIFT			8
+#define CHIP_ID_MASK			0xFF
+
+/*******************************************************************************
+ * Tegra chip ID values
+ ******************************************************************************/
+typedef enum tegra_chipid {
+	TEGRA_CHIPID_TEGRA13 = 0x13,
+	TEGRA_CHIPID_TEGRA21 = 0x21,
+} tegra_chipid_t;
+
+/*
+ * Read the chip ID value
+ */
+static uint32_t tegra_get_chipid(void)
+{
+	return mmio_read_32(TEGRA_MISC_BASE + HARDWARE_REVISION_OFFSET);
+}
+
+/*
+ * Read the chip's major version from chip ID value
+ */
+uint32_t tegra_get_chipid_major(void)
+{
+	return (tegra_get_chipid() >> MAJOR_VERSION_SHIFT) & MAJOR_VERSION_MASK;
+}
+
+/*
+ * Read the chip's minor version from the chip ID value
+ */
+uint32_t tegra_get_chipid_minor(void)
+{
+	return (tegra_get_chipid() >> MINOR_VERSION_SHIFT) & MINOR_VERSION_MASK;
+}
+
+uint8_t tegra_chipid_is_t132(void)
+{
+	uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
+
+	return (chip_id == TEGRA_CHIPID_TEGRA13);
+}
+
+uint8_t tegra_chipid_is_t210(void)
+{
+	uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
+
+	return (chip_id == TEGRA_CHIPID_TEGRA21);
+}
+
+/*
+ * Read the chip ID value and derive the platform
+ */
+static tegra_platform_t tegra_get_platform(void)
+{
+	uint32_t major = tegra_get_chipid_major();
+	uint32_t minor = tegra_get_chipid_minor();
+
+	/* Actual silicon platforms have a non-zero major version */
+	if (major > 0)
+		return TEGRA_PLATFORM_SILICON;
+
+	/*
+	 * The minor version number is used by simulation platforms
+	 */
+
+	/*
+	 * Cadence's QuickTurn is a Solaris-based chip
+	 * emulation system
+	 */
+	if (minor == TEGRA_MINOR_QT)
+		return TEGRA_PLATFORM_QT;
+
+	/*
+	 * FPGAs are used during early software/hardware development
+	 */
+	if (minor == TEGRA_MINOR_FPGA)
+		return TEGRA_PLATFORM_FPGA;
+
+	/* Minor version reserved for other emulation platforms */
+	if ((minor > TEGRA_MINOR_FPGA) && (minor <= TEGRA_MINOR_EMULATION_MAX))
+		return TEGRA_PLATFORM_EMULATION;
+
+	/* unsupported platform */
+	return TEGRA_PLATFORM_MAX;
+}
+
+uint8_t tegra_platform_is_silicon(void)
+{
+	return (tegra_get_platform() == TEGRA_PLATFORM_SILICON);
+}
+
+uint8_t tegra_platform_is_qt(void)
+{
+	return (tegra_get_platform() == TEGRA_PLATFORM_QT);
+}
+
+uint8_t tegra_platform_is_fpga(void)
+{
+	return (tegra_get_platform() == TEGRA_PLATFORM_FPGA);
+}
+
+uint8_t tegra_platform_is_emulation(void)
+{
+	return (tegra_get_platform() == TEGRA_PLATFORM_EMULATION);
+}
diff --git a/plat/nvidia/tegra/common/tegra_pm.c b/plat/nvidia/tegra/common/tegra_pm.c
index f5ef3e7..5376d52 100644
--- a/plat/nvidia/tegra/common/tegra_pm.c
+++ b/plat/nvidia/tegra/common/tegra_pm.c
@@ -33,6 +33,7 @@
 #include <bl_common.h>
 #include <context.h>
 #include <context_mgmt.h>
+#include <console.h>
 #include <debug.h>
 #include <memctrl.h>
 #include <mmio.h>
@@ -45,6 +46,7 @@
 
 extern uint64_t tegra_bl31_phys_base;
 extern uint64_t tegra_sec_entry_point;
+extern uint64_t tegra_console_base;
 
 /*
  * The following platform setup functions are weakly defined. They
@@ -57,6 +59,7 @@
 #pragma weak tegra_soc_pwr_domain_power_down_wfi
 #pragma weak tegra_soc_prepare_system_reset
 #pragma weak tegra_soc_prepare_system_off
+#pragma weak tegra_soc_get_target_pwr_state
 
 int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
 {
@@ -94,6 +97,23 @@
 	panic();
 }
 
+plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
+					     const plat_local_state_t *states,
+					     unsigned int ncpu)
+{
+	plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
+
+	assert(ncpu);
+
+	do {
+		temp = *states++;
+		if ((temp < target))
+			target = temp;
+	} while (--ncpu);
+
+	return target;
+}
+
 /*******************************************************************************
  * This handler is called by the PSCI implementation during the `SYSTEM_SUSPEND`
  * call to get the `power_state` parameter. This allows the platform to encode
@@ -102,12 +122,9 @@
 ******************************************************************************/
 void tegra_get_sys_suspend_power_state(psci_power_state_t *req_state)
 {
-	/* lower affinities use PLAT_MAX_OFF_STATE */
-	for (int i = MPIDR_AFFLVL0; i < PLAT_MAX_PWR_LVL; i++)
-		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
-
-	/* max affinity uses system suspend state id */
-	req_state->pwr_domain_state[PLAT_MAX_PWR_LVL] = PSTATE_ID_SOC_POWERDN;
+	/* all affinities use system suspend state id */
+	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
+		req_state->pwr_domain_state[i] = PSTATE_ID_SOC_POWERDN;
 }
 
 /*******************************************************************************
@@ -149,6 +166,11 @@
 {
 	tegra_soc_pwr_domain_suspend(target_state);
 
+	/* Disable console if we are entering deep sleep. */
+	if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] ==
+			PSTATE_ID_SOC_POWERDN)
+		console_uninit();
+
 	/* disable GICC */
 	tegra_gic_cpuif_deactivate();
 }
@@ -183,7 +205,7 @@
 	/*
 	 * Initialize the GIC cpu and distributor interfaces
 	 */
-	tegra_gic_setup();
+	plat_gic_setup();
 
 	/*
 	 * Check if we are exiting from deep sleep.
@@ -191,6 +213,12 @@
 	if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] ==
 			PSTATE_ID_SOC_POWERDN) {
 
+		/* Initialize the runtime console */
+		if (tegra_console_base != (uint64_t)0) {
+			console_init(tegra_console_base, TEGRA_BOOT_UART_CLK_IN_HZ,
+				TEGRA_CONSOLE_BAUDRATE);
+		}
+
 		/*
 		 * Restore Memory Controller settings as it loses state
 		 * during system suspend.
@@ -203,6 +231,12 @@
 		plat_params = bl31_get_plat_params();
 		tegra_memctrl_tzdram_setup(plat_params->tzdram_base,
 			plat_params->tzdram_size);
+
+		/*
+		 * Set up the TZRAM memory aperture to allow only secure world
+		 * access
+		 */
+		tegra_memctrl_tzram_setup(TEGRA_TZRAM_BASE, TEGRA_TZRAM_SIZE);
 	}
 
 	/*
@@ -318,3 +352,14 @@
 
 	return 0;
 }
+
+/*******************************************************************************
+ * Platform handler to calculate the proper target power level at the
+ * specified affinity level
+ ******************************************************************************/
+plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
+					     const plat_local_state_t *states,
+					     unsigned int ncpu)
+{
+	return tegra_soc_get_target_pwr_state(lvl, states, ncpu);
+}
diff --git a/plat/nvidia/tegra/common/tegra_sip_calls.c b/plat/nvidia/tegra/common/tegra_sip_calls.c
index 3bcd441..4dd4353 100644
--- a/plat/nvidia/tegra/common/tegra_sip_calls.c
+++ b/plat/nvidia/tegra/common/tegra_sip_calls.c
@@ -42,6 +42,8 @@
  * Common Tegra SiP SMCs
  ******************************************************************************/
 #define TEGRA_SIP_NEW_VIDEOMEM_REGION		0x82000003
+#define TEGRA_SIP_FIQ_NS_ENTRYPOINT		0x82000005
+#define TEGRA_SIP_FIQ_NS_GET_CONTEXT		0x82000006
 
 /*******************************************************************************
  * SoC specific SiP handler
@@ -60,7 +62,7 @@
 }
 
 /*******************************************************************************
- * This function is responsible for handling all SiP calls from the NS world
+ * This function is responsible for handling all SiP calls
  ******************************************************************************/
 uint64_t tegra_sip_handler(uint32_t smc_fid,
 			   uint64_t x1,
@@ -71,14 +73,8 @@
 			   void *handle,
 			   uint64_t flags)
 {
-	uint32_t ns;
 	int err;
 
-	/* Determine which security state this SMC originated from */
-	ns = is_caller_non_secure(flags);
-	if (!ns)
-		SMC_RET1(handle, SMC_UNK);
-
 	/* Check if this is a SoC specific SiP */
 	err = plat_sip_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
 	if (err == 0)
@@ -89,7 +85,6 @@
 	case TEGRA_SIP_NEW_VIDEOMEM_REGION:
 
 		/* clean up the high bits */
-		x1 = (uint32_t)x1;
 		x2 = (uint32_t)x2;
 
 		/*
@@ -114,6 +109,41 @@
 		SMC_RET1(handle, 0);
 		break;
 
+	/*
+	 * The NS world registers the address of its handler to be
+	 * used for processing the FIQ. This is normally used by the
+	 * NS FIQ debugger driver to detect system hangs by programming
+	 * a watchdog timer to fire a FIQ interrupt.
+	 */
+	case TEGRA_SIP_FIQ_NS_ENTRYPOINT:
+
+		if (!x1)
+			SMC_RET1(handle, SMC_UNK);
+
+		/*
+		 * TODO: Check if x1 contains a valid DRAM address
+		 */
+
+		/* store the NS world's entrypoint */
+		tegra_fiq_set_ns_entrypoint(x1);
+
+		SMC_RET1(handle, 0);
+		break;
+
+	/*
+	 * The NS world's FIQ handler issues this SMC to get the NS EL1/EL0
+	 * CPU context when the FIQ interrupt was triggered. This allows the
+	 * NS world to understand the CPU state when the watchdog interrupt
+	 * triggered.
+	 */
+	case TEGRA_SIP_FIQ_NS_GET_CONTEXT:
+
+		/* retrieve context registers when FIQ triggered */
+		tegra_fiq_get_intr_context();
+
+		SMC_RET0(handle);
+		break;
+
 	default:
 		ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
 		break;
diff --git a/plat/nvidia/tegra/include/drivers/memctrl_v2.h b/plat/nvidia/tegra/include/drivers/memctrl_v2.h
new file mode 100644
index 0000000..9623e25
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/memctrl_v2.h
@@ -0,0 +1,391 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEMCTRLV2_H__
+#define __MEMCTRLV2_H__
+
+#include <mmio.h>
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * StreamID to indicate no SMMU translations (requests to be steered on the
+ * SMMU bypass path)
+ ******************************************************************************/
+#define MC_STREAM_ID_MAX			0x7F
+
+/*******************************************************************************
+ * Stream ID Override Config registers
+ ******************************************************************************/
+#define MC_STREAMID_OVERRIDE_CFG_PTCR		0x0
+#define MC_STREAMID_OVERRIDE_CFG_AFIR		0x70
+#define MC_STREAMID_OVERRIDE_CFG_HDAR		0xA8
+#define MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR	0xB0
+#define MC_STREAMID_OVERRIDE_CFG_NVENCSRD	0xE0
+#define MC_STREAMID_OVERRIDE_CFG_SATAR		0xF8
+#define MC_STREAMID_OVERRIDE_CFG_MPCORER	0x138
+#define MC_STREAMID_OVERRIDE_CFG_NVENCSWR	0x158
+#define MC_STREAMID_OVERRIDE_CFG_AFIW		0x188
+#define MC_STREAMID_OVERRIDE_CFG_SATAW		0x1E8
+#define MC_STREAMID_OVERRIDE_CFG_MPCOREW	0x1C8
+/* MC_STREAMID_OVERRIDE_CFG_SATAW (0x1E8) is already defined above */
+#define MC_STREAMID_OVERRIDE_CFG_HDAW		0x1A8
+#define MC_STREAMID_OVERRIDE_CFG_ISPRA		0x220
+#define MC_STREAMID_OVERRIDE_CFG_ISPWA		0x230
+#define MC_STREAMID_OVERRIDE_CFG_ISPWB		0x238
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTR	0x250
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTW	0x258
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVR	0x260
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVW	0x268
+#define MC_STREAMID_OVERRIDE_CFG_TSECSRD	0x2A0
+#define MC_STREAMID_OVERRIDE_CFG_TSECSWR	0x2A8
+#define MC_STREAMID_OVERRIDE_CFG_GPUSRD		0x2C0
+#define MC_STREAMID_OVERRIDE_CFG_GPUSWR		0x2C8
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCRA	0x300
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAA	0x308
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCR		0x310
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAB	0x318
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCWA	0x320
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAA	0x328
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCW		0x330
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAB	0x338
+#define MC_STREAMID_OVERRIDE_CFG_VICSRD		0x360
+#define MC_STREAMID_OVERRIDE_CFG_VICSWR		0x368
+#define MC_STREAMID_OVERRIDE_CFG_VIW		0x390
+#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD	0x3C0
+#define MC_STREAMID_OVERRIDE_CFG_NVDECSWR	0x3C8
+#define MC_STREAMID_OVERRIDE_CFG_APER		0x3D0
+#define MC_STREAMID_OVERRIDE_CFG_APEW		0x3D8
+#define MC_STREAMID_OVERRIDE_CFG_NVJPGSRD	0x3F0
+#define MC_STREAMID_OVERRIDE_CFG_NVJPGSWR	0x3F8
+#define MC_STREAMID_OVERRIDE_CFG_SESRD		0x400
+#define MC_STREAMID_OVERRIDE_CFG_SESWR		0x408
+#define MC_STREAMID_OVERRIDE_CFG_ETRR		0x420
+#define MC_STREAMID_OVERRIDE_CFG_ETRW		0x428
+#define MC_STREAMID_OVERRIDE_CFG_TSECSRDB	0x430
+#define MC_STREAMID_OVERRIDE_CFG_TSECSWRB	0x438
+#define MC_STREAMID_OVERRIDE_CFG_GPUSRD2	0x440
+#define MC_STREAMID_OVERRIDE_CFG_GPUSWR2	0x448
+#define MC_STREAMID_OVERRIDE_CFG_AXISR		0x460
+#define MC_STREAMID_OVERRIDE_CFG_AXISW		0x468
+#define MC_STREAMID_OVERRIDE_CFG_EQOSR		0x470
+#define MC_STREAMID_OVERRIDE_CFG_EQOSW		0x478
+#define MC_STREAMID_OVERRIDE_CFG_UFSHCR		0x480
+#define MC_STREAMID_OVERRIDE_CFG_UFSHCW		0x488
+#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR	0x490
+#define MC_STREAMID_OVERRIDE_CFG_BPMPR		0x498
+#define MC_STREAMID_OVERRIDE_CFG_BPMPW		0x4A0
+#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAR	0x4A8
+#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAW	0x4B0
+#define MC_STREAMID_OVERRIDE_CFG_AONR		0x4B8
+#define MC_STREAMID_OVERRIDE_CFG_AONW		0x4C0
+#define MC_STREAMID_OVERRIDE_CFG_AONDMAR	0x4C8
+#define MC_STREAMID_OVERRIDE_CFG_AONDMAW	0x4D0
+#define MC_STREAMID_OVERRIDE_CFG_SCER		0x4D8
+#define MC_STREAMID_OVERRIDE_CFG_SCEW		0x4E0
+#define MC_STREAMID_OVERRIDE_CFG_SCEDMAR	0x4E8
+#define MC_STREAMID_OVERRIDE_CFG_SCEDMAW	0x4F0
+#define MC_STREAMID_OVERRIDE_CFG_APEDMAR	0x4F8
+#define MC_STREAMID_OVERRIDE_CFG_APEDMAW	0x500
+#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR1	0x508
+#define MC_STREAMID_OVERRIDE_CFG_VICSRD1	0x510
+#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD1	0x518
+
+/*******************************************************************************
+ * Stream ID Security Config registers
+ ******************************************************************************/
+#define MC_STREAMID_SECURITY_CFG_PTCR		0x4
+#define MC_STREAMID_SECURITY_CFG_AFIR		0x74
+#define MC_STREAMID_SECURITY_CFG_HDAR		0xAC
+#define MC_STREAMID_SECURITY_CFG_HOST1XDMAR	0xB4
+#define MC_STREAMID_SECURITY_CFG_NVENCSRD	0xE4
+#define MC_STREAMID_SECURITY_CFG_SATAR		0xFC
+#define MC_STREAMID_SECURITY_CFG_HDAW		0x1AC
+#define MC_STREAMID_SECURITY_CFG_MPCORER	0x13C
+#define MC_STREAMID_SECURITY_CFG_NVENCSWR	0x15C
+#define MC_STREAMID_SECURITY_CFG_AFIW		0x18C
+#define MC_STREAMID_SECURITY_CFG_MPCOREW	0x1CC
+#define MC_STREAMID_SECURITY_CFG_SATAW		0x1EC
+#define MC_STREAMID_SECURITY_CFG_ISPRA		0x224
+#define MC_STREAMID_SECURITY_CFG_ISPWA		0x234
+#define MC_STREAMID_SECURITY_CFG_ISPWB		0x23C
+#define MC_STREAMID_SECURITY_CFG_XUSB_HOSTR	0x254
+#define MC_STREAMID_SECURITY_CFG_XUSB_HOSTW	0x25C
+#define MC_STREAMID_SECURITY_CFG_XUSB_DEVR	0x264
+#define MC_STREAMID_SECURITY_CFG_XUSB_DEVW	0x26C
+#define MC_STREAMID_SECURITY_CFG_TSECSRD	0x2A4
+#define MC_STREAMID_SECURITY_CFG_TSECSWR	0x2AC
+#define MC_STREAMID_SECURITY_CFG_GPUSRD		0x2C4
+#define MC_STREAMID_SECURITY_CFG_GPUSWR		0x2CC
+#define MC_STREAMID_SECURITY_CFG_SDMMCRA	0x304
+#define MC_STREAMID_SECURITY_CFG_SDMMCRAA	0x30C
+#define MC_STREAMID_SECURITY_CFG_SDMMCR		0x314
+#define MC_STREAMID_SECURITY_CFG_SDMMCRAB	0x31C
+#define MC_STREAMID_SECURITY_CFG_SDMMCWA	0x324
+#define MC_STREAMID_SECURITY_CFG_SDMMCWAA	0x32C
+#define MC_STREAMID_SECURITY_CFG_SDMMCW		0x334
+#define MC_STREAMID_SECURITY_CFG_SDMMCWAB	0x33C
+#define MC_STREAMID_SECURITY_CFG_VICSRD		0x364
+#define MC_STREAMID_SECURITY_CFG_VICSWR		0x36C
+#define MC_STREAMID_SECURITY_CFG_VIW		0x394
+#define MC_STREAMID_SECURITY_CFG_NVDECSRD	0x3C4
+#define MC_STREAMID_SECURITY_CFG_NVDECSWR	0x3CC
+#define MC_STREAMID_SECURITY_CFG_APER		0x3D4
+#define MC_STREAMID_SECURITY_CFG_APEW		0x3DC
+#define MC_STREAMID_SECURITY_CFG_NVJPGSRD	0x3F4
+#define MC_STREAMID_SECURITY_CFG_NVJPGSWR	0x3FC
+#define MC_STREAMID_SECURITY_CFG_SESRD		0x404
+#define MC_STREAMID_SECURITY_CFG_SESWR		0x40C
+#define MC_STREAMID_SECURITY_CFG_ETRR		0x424
+#define MC_STREAMID_SECURITY_CFG_ETRW		0x42C
+#define MC_STREAMID_SECURITY_CFG_TSECSRDB	0x434
+#define MC_STREAMID_SECURITY_CFG_TSECSWRB	0x43C
+#define MC_STREAMID_SECURITY_CFG_GPUSRD2	0x444
+#define MC_STREAMID_SECURITY_CFG_GPUSWR2	0x44C
+#define MC_STREAMID_SECURITY_CFG_AXISR		0x464
+#define MC_STREAMID_SECURITY_CFG_AXISW		0x46C
+#define MC_STREAMID_SECURITY_CFG_EQOSR		0x474
+#define MC_STREAMID_SECURITY_CFG_EQOSW		0x47C
+#define MC_STREAMID_SECURITY_CFG_UFSHCR		0x484
+#define MC_STREAMID_SECURITY_CFG_UFSHCW		0x48C
+#define MC_STREAMID_SECURITY_CFG_NVDISPLAYR	0x494
+#define MC_STREAMID_SECURITY_CFG_BPMPR		0x49C
+#define MC_STREAMID_SECURITY_CFG_BPMPW		0x4A4
+#define MC_STREAMID_SECURITY_CFG_BPMPDMAR	0x4AC
+#define MC_STREAMID_SECURITY_CFG_BPMPDMAW	0x4B4
+#define MC_STREAMID_SECURITY_CFG_AONR		0x4BC
+#define MC_STREAMID_SECURITY_CFG_AONW		0x4C4
+#define MC_STREAMID_SECURITY_CFG_AONDMAR	0x4CC
+#define MC_STREAMID_SECURITY_CFG_AONDMAW	0x4D4
+#define MC_STREAMID_SECURITY_CFG_SCER		0x4DC
+#define MC_STREAMID_SECURITY_CFG_SCEW		0x4E4
+#define MC_STREAMID_SECURITY_CFG_SCEDMAR	0x4EC
+#define MC_STREAMID_SECURITY_CFG_SCEDMAW	0x4F4
+#define MC_STREAMID_SECURITY_CFG_APEDMAR	0x4FC
+#define MC_STREAMID_SECURITY_CFG_APEDMAW	0x504
+#define MC_STREAMID_SECURITY_CFG_NVDISPLAYR1	0x50C
+#define MC_STREAMID_SECURITY_CFG_VICSRD1	0x514
+#define MC_STREAMID_SECURITY_CFG_NVDECSRD1	0x51C
+
+/*******************************************************************************
+ * Memory Controller SMMU Bypass config register
+ ******************************************************************************/
+#define MC_SMMU_BYPASS_CONFIG			0x1820
+#define MC_SMMU_BYPASS_CTRL_MASK		0x3
+#define MC_SMMU_BYPASS_CTRL_SHIFT		0
+#define MC_SMMU_CTRL_TBU_BYPASS_ALL		(0 << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_CTRL_TBU_RSVD			(1 << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID	(2 << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_CTRL_TBU_BYPASS_NONE		(3 << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT	(1U << 31)
+#define MC_SMMU_BYPASS_CONFIG_SETTINGS		(MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT | \
+						 MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID)
+
+/*******************************************************************************
+ * Memory Controller transaction override config registers
+ ******************************************************************************/
+#define MC_TXN_OVERRIDE_CONFIG_HDAR		0x10a8
+#define MC_TXN_OVERRIDE_CONFIG_BPMPW		0x14a0
+#define MC_TXN_OVERRIDE_CONFIG_PTCR		0x1000
+#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR	0x1490
+#define MC_TXN_OVERRIDE_CONFIG_EQOSW		0x1478
+#define MC_TXN_OVERRIDE_CONFIG_NVJPGSWR		0x13f8
+#define MC_TXN_OVERRIDE_CONFIG_ISPRA		0x1220
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAA		0x1328
+#define MC_TXN_OVERRIDE_CONFIG_VICSRD		0x1360
+#define MC_TXN_OVERRIDE_CONFIG_MPCOREW		0x11c8
+#define MC_TXN_OVERRIDE_CONFIG_GPUSRD		0x12c0
+#define MC_TXN_OVERRIDE_CONFIG_AXISR		0x1460
+#define MC_TXN_OVERRIDE_CONFIG_SCEDMAW		0x14f0
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCW		0x1330
+#define MC_TXN_OVERRIDE_CONFIG_EQOSR		0x1470
+#define MC_TXN_OVERRIDE_CONFIG_APEDMAR		0x14f8
+#define MC_TXN_OVERRIDE_CONFIG_NVENCSRD		0x10e0
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAB		0x1318
+#define MC_TXN_OVERRIDE_CONFIG_VICSRD1		0x1510
+#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAR		0x14a8
+#define MC_TXN_OVERRIDE_CONFIG_VIW		0x1390
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAA		0x1308
+#define MC_TXN_OVERRIDE_CONFIG_AXISW		0x1468
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVR	0x1260
+#define MC_TXN_OVERRIDE_CONFIG_UFSHCR		0x1480
+#define MC_TXN_OVERRIDE_CONFIG_TSECSWR		0x12a8
+#define MC_TXN_OVERRIDE_CONFIG_GPUSWR		0x12c8
+#define MC_TXN_OVERRIDE_CONFIG_SATAR		0x10f8
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTW	0x1258
+#define MC_TXN_OVERRIDE_CONFIG_TSECSWRB		0x1438
+#define MC_TXN_OVERRIDE_CONFIG_GPUSRD2		0x1440
+#define MC_TXN_OVERRIDE_CONFIG_SCEDMAR		0x14e8
+#define MC_TXN_OVERRIDE_CONFIG_GPUSWR2		0x1448
+#define MC_TXN_OVERRIDE_CONFIG_AONDMAW		0x14d0
+#define MC_TXN_OVERRIDE_CONFIG_APEDMAW		0x1500
+#define MC_TXN_OVERRIDE_CONFIG_AONW		0x14c0
+#define MC_TXN_OVERRIDE_CONFIG_HOST1XDMAR	0x10b0
+#define MC_TXN_OVERRIDE_CONFIG_ETRR		0x1420
+#define MC_TXN_OVERRIDE_CONFIG_SESWR		0x1408
+#define MC_TXN_OVERRIDE_CONFIG_NVJPGSRD		0x13f0
+#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD		0x13c0
+#define MC_TXN_OVERRIDE_CONFIG_TSECSRDB		0x1430
+#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAW		0x14b0
+#define MC_TXN_OVERRIDE_CONFIG_APER		0x13d0
+#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD1	0x1518
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTR	0x1250
+#define MC_TXN_OVERRIDE_CONFIG_ISPWA		0x1230
+#define MC_TXN_OVERRIDE_CONFIG_SESRD		0x1400
+#define MC_TXN_OVERRIDE_CONFIG_SCER		0x14d8
+#define MC_TXN_OVERRIDE_CONFIG_AONR		0x14b8
+#define MC_TXN_OVERRIDE_CONFIG_MPCORER		0x1138
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCWA		0x1320
+#define MC_TXN_OVERRIDE_CONFIG_HDAW		0x11a8
+#define MC_TXN_OVERRIDE_CONFIG_NVDECSWR		0x13c8
+#define MC_TXN_OVERRIDE_CONFIG_UFSHCW		0x1488
+#define MC_TXN_OVERRIDE_CONFIG_AONDMAR		0x14c8
+#define MC_TXN_OVERRIDE_CONFIG_SATAW		0x11e8
+#define MC_TXN_OVERRIDE_CONFIG_ETRW		0x1428
+#define MC_TXN_OVERRIDE_CONFIG_VICSWR		0x1368
+#define MC_TXN_OVERRIDE_CONFIG_NVENCSWR		0x1158
+#define MC_TXN_OVERRIDE_CONFIG_AFIR		0x1070
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAB		0x1338
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCRA		0x1300
+#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR1	0x1508
+#define MC_TXN_OVERRIDE_CONFIG_ISPWB		0x1238
+#define MC_TXN_OVERRIDE_CONFIG_BPMPR		0x1498
+#define MC_TXN_OVERRIDE_CONFIG_APEW		0x13d8
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCR		0x1310
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVW	0x1268
+#define MC_TXN_OVERRIDE_CONFIG_TSECSRD		0x12a0
+#define MC_TXN_OVERRIDE_CONFIG_AFIW		0x1188
+#define MC_TXN_OVERRIDE_CONFIG_SCEW		0x14e0
+
+/*******************************************************************************
+ * Non-SO_DEV transactions override values for CGID_TAG bitfield for the
+ * MC_TXN_OVERRIDE_CONFIG_{module} registers
+ ******************************************************************************/
+#define MC_TXN_OVERRIDE_CGID_TAG_DEFAULT	0
+#define MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID	1
+#define MC_TXN_OVERRIDE_CGID_TAG_ZERO		2
+#define MC_TXN_OVERRIDE_CGID_TAG_ADR		3
+#define MC_TXN_OVERRIDE_CGID_TAG_MASK		3
+
+/*******************************************************************************
+ * Structure to hold the transaction override settings to use to override
+ * client inputs
+ ******************************************************************************/
+typedef struct mc_txn_override_cfg {
+	uint32_t offset;
+	uint8_t cgid_tag;
+} mc_txn_override_cfg_t;
+
+#define mc_make_txn_override_cfg(off, val) \
+	{ \
+		.offset = MC_TXN_OVERRIDE_CONFIG_ ## off, \
+		.cgid_tag = MC_TXN_OVERRIDE_ ## val \
+	}
+
+/*******************************************************************************
+ * Structure to hold the Stream ID to use to override client inputs
+ ******************************************************************************/
+typedef struct mc_streamid_override_cfg {
+	uint32_t offset;
+	uint8_t stream_id;
+} mc_streamid_override_cfg_t;
+
+/*******************************************************************************
+ * Structure to hold the Stream ID Security Configuration settings
+ ******************************************************************************/
+typedef struct mc_streamid_security_cfg {
+	char *name;
+	uint32_t offset;
+	int override_enable;
+	int override_client_inputs;
+	int override_client_ns_flag;
+} mc_streamid_security_cfg_t;
+
+#define OVERRIDE_DISABLE			1
+#define OVERRIDE_ENABLE				0
+#define CLIENT_FLAG_SECURE			0
+#define CLIENT_FLAG_NON_SECURE			1
+#define CLIENT_INPUTS_OVERRIDE			1
+#define CLIENT_INPUTS_NO_OVERRIDE		0
+
+#define mc_make_sec_cfg(off, ns, ovrrd, access) \
+		{ \
+			.name = # off, \
+			.offset = MC_STREAMID_SECURITY_CFG_ ## off, \
+			.override_client_ns_flag = CLIENT_FLAG_ ## ns, \
+			.override_client_inputs = CLIENT_INPUTS_ ## ovrrd, \
+			.override_enable = OVERRIDE_ ## access \
+		}
+
+/*******************************************************************************
+ * TZDRAM carveout configuration registers
+ ******************************************************************************/
+#define MC_SECURITY_CFG0_0			0x70
+#define MC_SECURITY_CFG1_0			0x74
+#define MC_SECURITY_CFG3_0			0x9BC
+
+/*******************************************************************************
+ * Video Memory carveout configuration registers
+ ******************************************************************************/
+#define MC_VIDEO_PROTECT_BASE_HI		0x978
+#define MC_VIDEO_PROTECT_BASE_LO		0x648
+#define MC_VIDEO_PROTECT_SIZE_MB		0x64c
+
+/*******************************************************************************
+ * TZRAM carveout configuration registers
+ ******************************************************************************/
+#define MC_TZRAM_BASE				0x1850
+#define MC_TZRAM_END				0x1854
+#define MC_TZRAM_HI_ADDR_BITS			0x1588
+ #define TZRAM_ADDR_HI_BITS_MASK		0x3
+ #define TZRAM_END_HI_BITS_SHIFT		8
+#define MC_TZRAM_REG_CTRL			0x185c
+ #define DISABLE_TZRAM_ACCESS			1
+
+static inline uint32_t tegra_mc_read_32(uint32_t off)
+{
+	return mmio_read_32(TEGRA_MC_BASE + off);
+}
+
+static inline void tegra_mc_write_32(uint32_t off, uint32_t val)
+{
+	mmio_write_32(TEGRA_MC_BASE + off, val);
+}
+
+static inline uint32_t tegra_mc_streamid_read_32(uint32_t off)
+{
+	return mmio_read_32(TEGRA_MC_STREAMID_BASE + off);
+}
+
+static inline void tegra_mc_streamid_write_32(uint32_t off, uint32_t val)
+{
+	mmio_write_32(TEGRA_MC_STREAMID_BASE + off, val);
+}
+
+#endif /* __MEMCTRLV2_H__ */
diff --git a/plat/nvidia/tegra/include/drivers/smmu.h b/plat/nvidia/tegra/include/drivers/smmu.h
new file mode 100644
index 0000000..bb08a55
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/smmu.h
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SMMU_H
+#define __SMMU_H
+
+#include <mmio.h>
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * SMMU Register constants
+ ******************************************************************************/
+#define SMMU_CBn_SCTLR				(0x0)
+#define SMMU_CBn_SCTLR_STAGE2			(0x0)
+#define SMMU_CBn_ACTLR				(0x4)
+#define SMMU_CBn_RESUME				(0x8)
+#define SMMU_CBn_TCR2				(0x10)
+#define SMMU_CBn_TTBR0_LO			(0x20)
+#define SMMU_CBn_TTBR0_HI			(0x24)
+#define SMMU_CBn_TTBR1_LO			(0x28)
+#define SMMU_CBn_TTBR1_HI			(0x2c)
+#define SMMU_CBn_TCR_LPAE			(0x30)
+#define SMMU_CBn_TCR				(0x30)
+#define SMMU_CBn_TCR_EAE_1			(0x30)
+/*      SMMU_CBn_TCR				(0x30) -- duplicate of the define above */
+#define SMMU_CBn_CONTEXTIDR			(0x34)
+#define SMMU_CBn_CONTEXTIDR_EAE_1		(0x34)
+#define SMMU_CBn_PRRR_MAIR0			(0x38)
+#define SMMU_CBn_NMRR_MAIR1			(0x3c)
+#define SMMU_CBn_SMMU_CBn_PAR			(0x50)
+#define SMMU_CBn_SMMU_CBn_PAR0			(0x50)
+#define SMMU_CBn_SMMU_CBn_PAR1			(0x54)
+/*      SMMU_CBn_SMMU_CBn_PAR0_Fault		(0x50) */
+/*      SMMU_CBn_SMMU_CBn_PAR0_Fault		(0x54) */
+#define SMMU_CBn_FSR				(0x58)
+#define SMMU_CBn_FSRRESTORE			(0x5c)
+#define SMMU_CBn_FAR_LO				(0x60)
+#define SMMU_CBn_FAR_HI				(0x64)
+#define SMMU_CBn_FSYNR0				(0x68)
+#define SMMU_CBn_IPAFAR_LO			(0x70)
+#define SMMU_CBn_IPAFAR_HI			(0x74)
+#define SMMU_CBn_TLBIVA_LO			(0x600)
+#define SMMU_CBn_TLBIVA_HI			(0x604)
+#define SMMU_CBn_TLBIVA_AARCH_32		(0x600)
+#define SMMU_CBn_TLBIVAA_LO			(0x608)
+#define SMMU_CBn_TLBIVAA_HI			(0x60c)
+#define SMMU_CBn_TLBIVAA_AARCH_32		(0x608)
+#define SMMU_CBn_TLBIASID			(0x610)
+#define SMMU_CBn_TLBIALL			(0x618)
+#define SMMU_CBn_TLBIVAL_LO			(0x620)
+#define SMMU_CBn_TLBIVAL_HI			(0x624)
+#define SMMU_CBn_TLBIVAL_AARCH_32		(0x618)
+#define SMMU_CBn_TLBIVAAL_LO			(0x628)
+#define SMMU_CBn_TLBIVAAL_HI			(0x62c)
+#define SMMU_CBn_TLBIVAAL_AARCH_32		(0x628)
+#define SMMU_CBn_TLBIIPAS2_LO			(0x630)
+#define SMMU_CBn_TLBIIPAS2_HI			(0x634)
+#define SMMU_CBn_TLBIIPAS2L_LO			(0x638)
+#define SMMU_CBn_TLBIIPAS2L_HI			(0x63c)
+#define SMMU_CBn_TLBSYNC			(0x7f0)
+#define SMMU_CBn_TLBSTATUS			(0x7f4)
+#define SMMU_CBn_ATSR				(0x800)
+#define SMMU_CBn_PMEVCNTR0			(0xe00)
+#define SMMU_CBn_PMEVCNTR1			(0xe04)
+#define SMMU_CBn_PMEVCNTR2			(0xe08)
+#define SMMU_CBn_PMEVCNTR3			(0xe0c)
+#define SMMU_CBn_PMEVTYPER0			(0xe80)
+#define SMMU_CBn_PMEVTYPER1			(0xe84)
+#define SMMU_CBn_PMEVTYPER2			(0xe88)
+#define SMMU_CBn_PMEVTYPER3			(0xe8c)
+#define SMMU_CBn_PMCFGR				(0xf00)
+#define SMMU_CBn_PMCR				(0xf04)
+#define SMMU_CBn_PMCEID				(0xf20)
+#define SMMU_CBn_PMCNTENSE			(0xf40)
+#define SMMU_CBn_PMCNTENCLR			(0xf44)
+#define SMMU_CBn_PMCNTENSET			(0xf48)
+#define SMMU_CBn_PMINTENCLR			(0xf4c)
+#define SMMU_CBn_PMOVSCLR			(0xf50)
+#define SMMU_CBn_PMOVSSET			(0xf58)
+#define SMMU_CBn_PMAUTHSTATUS			(0xfb8)
+#define SMMU_GNSR0_CR0				(0x0)
+#define SMMU_GNSR0_CR2				(0x8)
+#define SMMU_GNSR0_ACR				(0x10)
+#define SMMU_GNSR0_IDR0				(0x20)
+#define SMMU_GNSR0_IDR1				(0x24)
+#define SMMU_GNSR0_IDR2				(0x28)
+#define SMMU_GNSR0_IDR7				(0x3c)
+#define SMMU_GNSR0_GFAR_LO			(0x40)
+#define SMMU_GNSR0_GFAR_HI			(0x44)
+#define SMMU_GNSR0_GFSR				(0x48)
+#define SMMU_GNSR0_GFSRRESTORE			(0x4c)
+#define SMMU_GNSR0_GFSYNR0			(0x50)
+#define SMMU_GNSR0_GFSYNR1			(0x54)
+#define SMMU_GNSR0_GFSYNR1_v2			(0x54)
+#define SMMU_GNSR0_TLBIVMID			(0x64)
+#define SMMU_GNSR0_TLBIALLNSNH			(0x68)
+#define SMMU_GNSR0_TLBIALLH			(0x6c)
+#define SMMU_GNSR0_TLBGSYNC			(0x70)
+#define SMMU_GNSR0_TLBGSTATUS			(0x74)
+#define SMMU_GNSR0_TLBIVAH_LO			(0x78)
+#define SMMU_GNSR0_TLBIVALH64_LO		(0xb0)
+#define SMMU_GNSR0_TLBIVALH64_HI		(0xb4)
+#define SMMU_GNSR0_TLBIVMIDS1			(0xb8)
+#define SMMU_GNSR0_TLBIVAH64_LO			(0xc0)
+#define SMMU_GNSR0_TLBIVAH64_HI			(0xc4)
+#define SMMU_GNSR0_SMR0				(0x800)
+#define SMMU_GNSR0_SMRn				(0x800)
+#define SMMU_GNSR0_SMR1				(0x804)
+#define SMMU_GNSR0_SMR2				(0x808)
+#define SMMU_GNSR0_SMR3				(0x80c)
+#define SMMU_GNSR0_SMR4				(0x810)
+#define SMMU_GNSR0_SMR5				(0x814)
+#define SMMU_GNSR0_SMR6				(0x818)
+#define SMMU_GNSR0_SMR7				(0x81c)
+#define SMMU_GNSR0_SMR8				(0x820)
+#define SMMU_GNSR0_SMR9				(0x824)
+#define SMMU_GNSR0_SMR10			(0x828)
+#define SMMU_GNSR0_SMR11			(0x82c)
+#define SMMU_GNSR0_SMR12			(0x830)
+#define SMMU_GNSR0_SMR13			(0x834)
+#define SMMU_GNSR0_SMR14			(0x838)
+#define SMMU_GNSR0_SMR15			(0x83c)
+#define SMMU_GNSR0_SMR16			(0x840)
+#define SMMU_GNSR0_SMR17			(0x844)
+#define SMMU_GNSR0_SMR18			(0x848)
+#define SMMU_GNSR0_SMR19			(0x84c)
+#define SMMU_GNSR0_SMR20			(0x850)
+#define SMMU_GNSR0_SMR21			(0x854)
+#define SMMU_GNSR0_SMR22			(0x858)
+#define SMMU_GNSR0_SMR23			(0x85c)
+#define SMMU_GNSR0_SMR24			(0x860)
+#define SMMU_GNSR0_SMR25			(0x864)
+#define SMMU_GNSR0_SMR26			(0x868)
+#define SMMU_GNSR0_SMR27			(0x86c)
+#define SMMU_GNSR0_SMR28			(0x870)
+#define SMMU_GNSR0_SMR29			(0x874)
+#define SMMU_GNSR0_SMR30			(0x878)
+#define SMMU_GNSR0_SMR31			(0x87c)
+#define SMMU_GNSR0_SMR32			(0x880)
+#define SMMU_GNSR0_SMR33			(0x884)
+#define SMMU_GNSR0_SMR34			(0x888)
+#define SMMU_GNSR0_SMR35			(0x88c)
+#define SMMU_GNSR0_SMR36			(0x890)
+#define SMMU_GNSR0_SMR37			(0x894)
+#define SMMU_GNSR0_SMR38			(0x898)
+#define SMMU_GNSR0_SMR39			(0x89c)
+#define SMMU_GNSR0_SMR40			(0x8a0)
+#define SMMU_GNSR0_SMR41			(0x8a4)
+#define SMMU_GNSR0_SMR42			(0x8a8)
+#define SMMU_GNSR0_SMR43			(0x8ac)
+#define SMMU_GNSR0_SMR44			(0x8b0)
+#define SMMU_GNSR0_SMR45			(0x8b4)
+#define SMMU_GNSR0_SMR46			(0x8b8)
+#define SMMU_GNSR0_SMR47			(0x8bc)
+#define SMMU_GNSR0_SMR48			(0x8c0)
+#define SMMU_GNSR0_SMR49			(0x8c4)
+#define SMMU_GNSR0_SMR50			(0x8c8)
+#define SMMU_GNSR0_SMR51			(0x8cc)
+#define SMMU_GNSR0_SMR52			(0x8d0)
+#define SMMU_GNSR0_SMR53			(0x8d4)
+#define SMMU_GNSR0_SMR54			(0x8d8)
+#define SMMU_GNSR0_SMR55			(0x8dc)
+#define SMMU_GNSR0_SMR56			(0x8e0)
+#define SMMU_GNSR0_SMR57			(0x8e4)
+#define SMMU_GNSR0_SMR58			(0x8e8)
+#define SMMU_GNSR0_SMR59			(0x8ec)
+#define SMMU_GNSR0_SMR60			(0x8f0)
+#define SMMU_GNSR0_SMR61			(0x8f4)
+#define SMMU_GNSR0_SMR62			(0x8f8)
+#define SMMU_GNSR0_SMR63			(0x8fc)
+#define SMMU_GNSR0_SMR64			(0x900)
+#define SMMU_GNSR0_SMR65			(0x904)
+#define SMMU_GNSR0_SMR66			(0x908)
+#define SMMU_GNSR0_SMR67			(0x90c)
+#define SMMU_GNSR0_SMR68			(0x910)
+#define SMMU_GNSR0_SMR69			(0x914)
+#define SMMU_GNSR0_SMR70			(0x918)
+#define SMMU_GNSR0_SMR71			(0x91c)
+#define SMMU_GNSR0_SMR72			(0x920)
+#define SMMU_GNSR0_SMR73			(0x924)
+#define SMMU_GNSR0_SMR74			(0x928)
+#define SMMU_GNSR0_SMR75			(0x92c)
+#define SMMU_GNSR0_SMR76			(0x930)
+#define SMMU_GNSR0_SMR77			(0x934)
+#define SMMU_GNSR0_SMR78			(0x938)
+#define SMMU_GNSR0_SMR79			(0x93c)
+#define SMMU_GNSR0_SMR80			(0x940)
+#define SMMU_GNSR0_SMR81			(0x944)
+#define SMMU_GNSR0_SMR82			(0x948)
+#define SMMU_GNSR0_SMR83			(0x94c)
+#define SMMU_GNSR0_SMR84			(0x950)
+#define SMMU_GNSR0_SMR85			(0x954)
+#define SMMU_GNSR0_SMR86			(0x958)
+#define SMMU_GNSR0_SMR87			(0x95c)
+#define SMMU_GNSR0_SMR88			(0x960)
+#define SMMU_GNSR0_SMR89			(0x964)
+#define SMMU_GNSR0_SMR90			(0x968)
+#define SMMU_GNSR0_SMR91			(0x96c)
+#define SMMU_GNSR0_SMR92			(0x970)
+#define SMMU_GNSR0_SMR93			(0x974)
+#define SMMU_GNSR0_SMR94			(0x978)
+#define SMMU_GNSR0_SMR95			(0x97c)
+#define SMMU_GNSR0_SMR96			(0x980)
+#define SMMU_GNSR0_SMR97			(0x984)
+#define SMMU_GNSR0_SMR98			(0x988)
+#define SMMU_GNSR0_SMR99			(0x98c)
+#define SMMU_GNSR0_SMR100			(0x990)
+#define SMMU_GNSR0_SMR101			(0x994)
+#define SMMU_GNSR0_SMR102			(0x998)
+#define SMMU_GNSR0_SMR103			(0x99c)
+#define SMMU_GNSR0_SMR104			(0x9a0)
+#define SMMU_GNSR0_SMR105			(0x9a4)
+#define SMMU_GNSR0_SMR106			(0x9a8)
+#define SMMU_GNSR0_SMR107			(0x9ac)
+#define SMMU_GNSR0_SMR108			(0x9b0)
+#define SMMU_GNSR0_SMR109			(0x9b4)
+#define SMMU_GNSR0_SMR110			(0x9b8)
+#define SMMU_GNSR0_SMR111			(0x9bc)
+#define SMMU_GNSR0_SMR112			(0x9c0)
+#define SMMU_GNSR0_SMR113			(0x9c4)
+#define SMMU_GNSR0_SMR114			(0x9c8)
+#define SMMU_GNSR0_SMR115			(0x9cc)
+#define SMMU_GNSR0_SMR116			(0x9d0)
+#define SMMU_GNSR0_SMR117			(0x9d4)
+#define SMMU_GNSR0_SMR118			(0x9d8)
+#define SMMU_GNSR0_SMR119			(0x9dc)
+#define SMMU_GNSR0_SMR120			(0x9e0)
+#define SMMU_GNSR0_SMR121			(0x9e4)
+#define SMMU_GNSR0_SMR122			(0x9e8)
+#define SMMU_GNSR0_SMR123			(0x9ec)
+#define SMMU_GNSR0_SMR124			(0x9f0)
+#define SMMU_GNSR0_SMR125			(0x9f4)
+#define SMMU_GNSR0_SMR126			(0x9f8)
+#define SMMU_GNSR0_SMR127			(0x9fc)
+#define SMMU_GNSR0_S2CR0			(0xc00)
+#define SMMU_GNSR0_S2CRn			(0xc00)
+/*      SMMU_GNSR0_S2CRn			(0xc00) -- duplicate of the define above */
+#define SMMU_GNSR0_S2CR1			(0xc04)
+#define SMMU_GNSR0_S2CR2			(0xc08)
+#define SMMU_GNSR0_S2CR3			(0xc0c)
+#define SMMU_GNSR0_S2CR4			(0xc10)
+#define SMMU_GNSR0_S2CR5			(0xc14)
+#define SMMU_GNSR0_S2CR6			(0xc18)
+#define SMMU_GNSR0_S2CR7			(0xc1c)
+#define SMMU_GNSR0_S2CR8			(0xc20)
+#define SMMU_GNSR0_S2CR9			(0xc24)
+#define SMMU_GNSR0_S2CR10			(0xc28)
+#define SMMU_GNSR0_S2CR11			(0xc2c)
+#define SMMU_GNSR0_S2CR12			(0xc30)
+#define SMMU_GNSR0_S2CR13			(0xc34)
+#define SMMU_GNSR0_S2CR14			(0xc38)
+#define SMMU_GNSR0_S2CR15			(0xc3c)
+#define SMMU_GNSR0_S2CR16			(0xc40)
+#define SMMU_GNSR0_S2CR17			(0xc44)
+#define SMMU_GNSR0_S2CR18			(0xc48)
+#define SMMU_GNSR0_S2CR19			(0xc4c)
+#define SMMU_GNSR0_S2CR20			(0xc50)
+#define SMMU_GNSR0_S2CR21			(0xc54)
+#define SMMU_GNSR0_S2CR22			(0xc58)
+#define SMMU_GNSR0_S2CR23			(0xc5c)
+#define SMMU_GNSR0_S2CR24			(0xc60)
+#define SMMU_GNSR0_S2CR25			(0xc64)
+#define SMMU_GNSR0_S2CR26			(0xc68)
+#define SMMU_GNSR0_S2CR27			(0xc6c)
+#define SMMU_GNSR0_S2CR28			(0xc70)
+#define SMMU_GNSR0_S2CR29			(0xc74)
+#define SMMU_GNSR0_S2CR30			(0xc78)
+#define SMMU_GNSR0_S2CR31			(0xc7c)
+#define SMMU_GNSR0_S2CR32			(0xc80)
+#define SMMU_GNSR0_S2CR33			(0xc84)
+#define SMMU_GNSR0_S2CR34			(0xc88)
+#define SMMU_GNSR0_S2CR35			(0xc8c)
+#define SMMU_GNSR0_S2CR36			(0xc90)
+#define SMMU_GNSR0_S2CR37			(0xc94)
+#define SMMU_GNSR0_S2CR38			(0xc98)
+#define SMMU_GNSR0_S2CR39			(0xc9c)
+#define SMMU_GNSR0_S2CR40			(0xca0)
+#define SMMU_GNSR0_S2CR41			(0xca4)
+#define SMMU_GNSR0_S2CR42			(0xca8)
+#define SMMU_GNSR0_S2CR43			(0xcac)
+#define SMMU_GNSR0_S2CR44			(0xcb0)
+#define SMMU_GNSR0_S2CR45			(0xcb4)
+#define SMMU_GNSR0_S2CR46			(0xcb8)
+#define SMMU_GNSR0_S2CR47			(0xcbc)
+#define SMMU_GNSR0_S2CR48			(0xcc0)
+#define SMMU_GNSR0_S2CR49			(0xcc4)
+#define SMMU_GNSR0_S2CR50			(0xcc8)
+#define SMMU_GNSR0_S2CR51			(0xccc)
+#define SMMU_GNSR0_S2CR52			(0xcd0)
+#define SMMU_GNSR0_S2CR53			(0xcd4)
+#define SMMU_GNSR0_S2CR54			(0xcd8)
+#define SMMU_GNSR0_S2CR55			(0xcdc)
+#define SMMU_GNSR0_S2CR56			(0xce0)
+#define SMMU_GNSR0_S2CR57			(0xce4)
+#define SMMU_GNSR0_S2CR58			(0xce8)
+#define SMMU_GNSR0_S2CR59			(0xcec)
+#define SMMU_GNSR0_S2CR60			(0xcf0)
+#define SMMU_GNSR0_S2CR61			(0xcf4)
+#define SMMU_GNSR0_S2CR62			(0xcf8)
+#define SMMU_GNSR0_S2CR63			(0xcfc)
+#define SMMU_GNSR0_S2CR64			(0xd00)
+#define SMMU_GNSR0_S2CR65			(0xd04)
+#define SMMU_GNSR0_S2CR66			(0xd08)
+#define SMMU_GNSR0_S2CR67			(0xd0c)
+#define SMMU_GNSR0_S2CR68			(0xd10)
+#define SMMU_GNSR0_S2CR69			(0xd14)
+#define SMMU_GNSR0_S2CR70			(0xd18)
+#define SMMU_GNSR0_S2CR71			(0xd1c)
+#define SMMU_GNSR0_S2CR72			(0xd20)
+#define SMMU_GNSR0_S2CR73			(0xd24)
+#define SMMU_GNSR0_S2CR74			(0xd28)
+#define SMMU_GNSR0_S2CR75			(0xd2c)
+#define SMMU_GNSR0_S2CR76			(0xd30)
+#define SMMU_GNSR0_S2CR77			(0xd34)
+#define SMMU_GNSR0_S2CR78			(0xd38)
+#define SMMU_GNSR0_S2CR79			(0xd3c)
+#define SMMU_GNSR0_S2CR80			(0xd40)
+#define SMMU_GNSR0_S2CR81			(0xd44)
+#define SMMU_GNSR0_S2CR82			(0xd48)
+#define SMMU_GNSR0_S2CR83			(0xd4c)
+#define SMMU_GNSR0_S2CR84			(0xd50)
+#define SMMU_GNSR0_S2CR85			(0xd54)
+#define SMMU_GNSR0_S2CR86			(0xd58)
+#define SMMU_GNSR0_S2CR87			(0xd5c)
+#define SMMU_GNSR0_S2CR88			(0xd60)
+#define SMMU_GNSR0_S2CR89			(0xd64)
+#define SMMU_GNSR0_S2CR90			(0xd68)
+#define SMMU_GNSR0_S2CR91			(0xd6c)
+#define SMMU_GNSR0_S2CR92			(0xd70)
+#define SMMU_GNSR0_S2CR93			(0xd74)
+#define SMMU_GNSR0_S2CR94			(0xd78)
+#define SMMU_GNSR0_S2CR95			(0xd7c)
+#define SMMU_GNSR0_S2CR96			(0xd80)
+#define SMMU_GNSR0_S2CR97			(0xd84)
+#define SMMU_GNSR0_S2CR98			(0xd88)
+#define SMMU_GNSR0_S2CR99			(0xd8c)
+#define SMMU_GNSR0_S2CR100			(0xd90)
+#define SMMU_GNSR0_S2CR101			(0xd94)
+#define SMMU_GNSR0_S2CR102			(0xd98)
+#define SMMU_GNSR0_S2CR103			(0xd9c)
+#define SMMU_GNSR0_S2CR104			(0xda0)
+#define SMMU_GNSR0_S2CR105			(0xda4)
+#define SMMU_GNSR0_S2CR106			(0xda8)
+#define SMMU_GNSR0_S2CR107			(0xdac)
+#define SMMU_GNSR0_S2CR108			(0xdb0)
+#define SMMU_GNSR0_S2CR109			(0xdb4)
+#define SMMU_GNSR0_S2CR110			(0xdb8)
+#define SMMU_GNSR0_S2CR111			(0xdbc)
+#define SMMU_GNSR0_S2CR112			(0xdc0)
+#define SMMU_GNSR0_S2CR113			(0xdc4)
+#define SMMU_GNSR0_S2CR114			(0xdc8)
+#define SMMU_GNSR0_S2CR115			(0xdcc)
+#define SMMU_GNSR0_S2CR116			(0xdd0)
+#define SMMU_GNSR0_S2CR117			(0xdd4)
+#define SMMU_GNSR0_S2CR118			(0xdd8)
+#define SMMU_GNSR0_S2CR119			(0xddc)
+#define SMMU_GNSR0_S2CR120			(0xde0)
+#define SMMU_GNSR0_S2CR121			(0xde4)
+#define SMMU_GNSR0_S2CR122			(0xde8)
+#define SMMU_GNSR0_S2CR123			(0xdec)
+#define SMMU_GNSR0_S2CR124			(0xdf0)
+#define SMMU_GNSR0_S2CR125			(0xdf4)
+#define SMMU_GNSR0_S2CR126			(0xdf8)
+#define SMMU_GNSR0_S2CR127			(0xdfc)
+#define SMMU_GNSR0_PIDR0			(0xfe0)
+#define SMMU_GNSR0_PIDR1			(0xfe4)
+#define SMMU_GNSR0_PIDR2			(0xfe8)
+#define SMMU_GNSR0_PIDR3			(0xfec)
+#define SMMU_GNSR0_PIDR4			(0xfd0)
+#define SMMU_GNSR0_PIDR5			(0xfd4)
+#define SMMU_GNSR0_PIDR6			(0xfd8)
+#define SMMU_GNSR0_PIDR7			(0xfdc)
+#define SMMU_GNSR0_CIDR0			(0xff0)
+#define SMMU_GNSR0_CIDR1			(0xff4)
+#define SMMU_GNSR0_CIDR2			(0xff8)
+#define SMMU_GNSR0_CIDR3			(0xffc)
+#define SMMU_GNSR1_CBAR0			(0x0)
+#define SMMU_GNSR1_CBARn			(0x0)
+#define SMMU_GNSR1_CBFRSYNRA0			(0x400)
+#define SMMU_GNSR1_CBA2R0			(0x800)
+#define SMMU_GNSR1_CBAR1			(0x4)
+#define SMMU_GNSR1_CBFRSYNRA1			(0x404)
+#define SMMU_GNSR1_CBA2R1			(0x804)
+#define SMMU_GNSR1_CBAR2			(0x8)
+#define SMMU_GNSR1_CBFRSYNRA2			(0x408)
+#define SMMU_GNSR1_CBA2R2			(0x808)
+#define SMMU_GNSR1_CBAR3			(0xc)
+#define SMMU_GNSR1_CBFRSYNRA3			(0x40c)
+#define SMMU_GNSR1_CBA2R3			(0x80c)
+#define SMMU_GNSR1_CBAR4			(0x10)
+#define SMMU_GNSR1_CBFRSYNRA4			(0x410)
+#define SMMU_GNSR1_CBA2R4			(0x810)
+#define SMMU_GNSR1_CBAR5			(0x14)
+#define SMMU_GNSR1_CBFRSYNRA5			(0x414)
+#define SMMU_GNSR1_CBA2R5			(0x814)
+#define SMMU_GNSR1_CBAR6			(0x18)
+#define SMMU_GNSR1_CBFRSYNRA6			(0x418)
+#define SMMU_GNSR1_CBA2R6			(0x818)
+#define SMMU_GNSR1_CBAR7			(0x1c)
+#define SMMU_GNSR1_CBFRSYNRA7			(0x41c)
+#define SMMU_GNSR1_CBA2R7			(0x81c)
+#define SMMU_GNSR1_CBAR8			(0x20)
+#define SMMU_GNSR1_CBFRSYNRA8			(0x420)
+#define SMMU_GNSR1_CBA2R8			(0x820)
+#define SMMU_GNSR1_CBAR9			(0x24)
+#define SMMU_GNSR1_CBFRSYNRA9			(0x424)
+#define SMMU_GNSR1_CBA2R9			(0x824)
+#define SMMU_GNSR1_CBAR10			(0x28)
+#define SMMU_GNSR1_CBFRSYNRA10			(0x428)
+#define SMMU_GNSR1_CBA2R10			(0x828)
+#define SMMU_GNSR1_CBAR11			(0x2c)
+#define SMMU_GNSR1_CBFRSYNRA11			(0x42c)
+#define SMMU_GNSR1_CBA2R11			(0x82c)
+#define SMMU_GNSR1_CBAR12			(0x30)
+#define SMMU_GNSR1_CBFRSYNRA12			(0x430)
+#define SMMU_GNSR1_CBA2R12			(0x830)
+#define SMMU_GNSR1_CBAR13			(0x34)
+#define SMMU_GNSR1_CBFRSYNRA13			(0x434)
+#define SMMU_GNSR1_CBA2R13			(0x834)
+#define SMMU_GNSR1_CBAR14			(0x38)
+#define SMMU_GNSR1_CBFRSYNRA14			(0x438)
+#define SMMU_GNSR1_CBA2R14			(0x838)
+#define SMMU_GNSR1_CBAR15			(0x3c)
+#define SMMU_GNSR1_CBFRSYNRA15			(0x43c)
+#define SMMU_GNSR1_CBA2R15			(0x83c)
+#define SMMU_GNSR1_CBAR16			(0x40)
+#define SMMU_GNSR1_CBFRSYNRA16			(0x440)
+#define SMMU_GNSR1_CBA2R16			(0x840)
+#define SMMU_GNSR1_CBAR17			(0x44)
+#define SMMU_GNSR1_CBFRSYNRA17			(0x444)
+#define SMMU_GNSR1_CBA2R17			(0x844)
+#define SMMU_GNSR1_CBAR18			(0x48)
+#define SMMU_GNSR1_CBFRSYNRA18			(0x448)
+#define SMMU_GNSR1_CBA2R18			(0x848)
+#define SMMU_GNSR1_CBAR19			(0x4c)
+#define SMMU_GNSR1_CBFRSYNRA19			(0x44c)
+#define SMMU_GNSR1_CBA2R19			(0x84c)
+#define SMMU_GNSR1_CBAR20			(0x50)
+#define SMMU_GNSR1_CBFRSYNRA20			(0x450)
+#define SMMU_GNSR1_CBA2R20			(0x850)
+#define SMMU_GNSR1_CBAR21			(0x54)
+#define SMMU_GNSR1_CBFRSYNRA21			(0x454)
+#define SMMU_GNSR1_CBA2R21			(0x854)
+#define SMMU_GNSR1_CBAR22			(0x58)
+#define SMMU_GNSR1_CBFRSYNRA22			(0x458)
+#define SMMU_GNSR1_CBA2R22			(0x858)
+#define SMMU_GNSR1_CBAR23			(0x5c)
+#define SMMU_GNSR1_CBFRSYNRA23			(0x45c)
+#define SMMU_GNSR1_CBA2R23			(0x85c)
+#define SMMU_GNSR1_CBAR24			(0x60)
+#define SMMU_GNSR1_CBFRSYNRA24			(0x460)
+#define SMMU_GNSR1_CBA2R24			(0x860)
+#define SMMU_GNSR1_CBAR25			(0x64)
+#define SMMU_GNSR1_CBFRSYNRA25			(0x464)
+#define SMMU_GNSR1_CBA2R25			(0x864)
+#define SMMU_GNSR1_CBAR26			(0x68)
+#define SMMU_GNSR1_CBFRSYNRA26			(0x468)
+#define SMMU_GNSR1_CBA2R26			(0x868)
+#define SMMU_GNSR1_CBAR27			(0x6c)
+#define SMMU_GNSR1_CBFRSYNRA27			(0x46c)
+#define SMMU_GNSR1_CBA2R27			(0x86c)
+#define SMMU_GNSR1_CBAR28			(0x70)
+#define SMMU_GNSR1_CBFRSYNRA28			(0x470)
+#define SMMU_GNSR1_CBA2R28			(0x870)
+#define SMMU_GNSR1_CBAR29			(0x74)
+#define SMMU_GNSR1_CBFRSYNRA29			(0x474)
+#define SMMU_GNSR1_CBA2R29			(0x874)
+#define SMMU_GNSR1_CBAR30			(0x78)
+#define SMMU_GNSR1_CBFRSYNRA30			(0x478)
+#define SMMU_GNSR1_CBA2R30			(0x878)
+#define SMMU_GNSR1_CBAR31			(0x7c)
+#define SMMU_GNSR1_CBFRSYNRA31			(0x47c)
+#define SMMU_GNSR1_CBA2R31			(0x87c)
+#define SMMU_GNSR1_CBAR32			(0x80)
+#define SMMU_GNSR1_CBFRSYNRA32			(0x480)
+#define SMMU_GNSR1_CBA2R32			(0x880)
+#define SMMU_GNSR1_CBAR33			(0x84)
+#define SMMU_GNSR1_CBFRSYNRA33			(0x484)
+#define SMMU_GNSR1_CBA2R33			(0x884)
+#define SMMU_GNSR1_CBAR34			(0x88)
+#define SMMU_GNSR1_CBFRSYNRA34			(0x488)
+#define SMMU_GNSR1_CBA2R34			(0x888)
+#define SMMU_GNSR1_CBAR35			(0x8c)
+#define SMMU_GNSR1_CBFRSYNRA35			(0x48c)
+#define SMMU_GNSR1_CBA2R35			(0x88c)
+#define SMMU_GNSR1_CBAR36			(0x90)
+#define SMMU_GNSR1_CBFRSYNRA36			(0x490)
+#define SMMU_GNSR1_CBA2R36			(0x890)
+#define SMMU_GNSR1_CBAR37			(0x94)
+#define SMMU_GNSR1_CBFRSYNRA37			(0x494)
+#define SMMU_GNSR1_CBA2R37			(0x894)
+#define SMMU_GNSR1_CBAR38			(0x98)
+#define SMMU_GNSR1_CBFRSYNRA38			(0x498)
+#define SMMU_GNSR1_CBA2R38			(0x898)
+#define SMMU_GNSR1_CBAR39			(0x9c)
+#define SMMU_GNSR1_CBFRSYNRA39			(0x49c)
+#define SMMU_GNSR1_CBA2R39			(0x89c)
+#define SMMU_GNSR1_CBAR40			(0xa0)
+#define SMMU_GNSR1_CBFRSYNRA40			(0x4a0)
+#define SMMU_GNSR1_CBA2R40			(0x8a0)
+#define SMMU_GNSR1_CBAR41			(0xa4)
+#define SMMU_GNSR1_CBFRSYNRA41			(0x4a4)
+#define SMMU_GNSR1_CBA2R41			(0x8a4)
+#define SMMU_GNSR1_CBAR42			(0xa8)
+#define SMMU_GNSR1_CBFRSYNRA42			(0x4a8)
+#define SMMU_GNSR1_CBA2R42			(0x8a8)
+#define SMMU_GNSR1_CBAR43			(0xac)
+#define SMMU_GNSR1_CBFRSYNRA43			(0x4ac)
+#define SMMU_GNSR1_CBA2R43			(0x8ac)
+#define SMMU_GNSR1_CBAR44			(0xb0)
+#define SMMU_GNSR1_CBFRSYNRA44			(0x4b0)
+#define SMMU_GNSR1_CBA2R44			(0x8b0)
+#define SMMU_GNSR1_CBAR45			(0xb4)
+#define SMMU_GNSR1_CBFRSYNRA45			(0x4b4)
+#define SMMU_GNSR1_CBA2R45			(0x8b4)
+#define SMMU_GNSR1_CBAR46			(0xb8)
+#define SMMU_GNSR1_CBFRSYNRA46			(0x4b8)
+#define SMMU_GNSR1_CBA2R46			(0x8b8)
+#define SMMU_GNSR1_CBAR47			(0xbc)
+#define SMMU_GNSR1_CBFRSYNRA47			(0x4bc)
+#define SMMU_GNSR1_CBA2R47			(0x8bc)
+#define SMMU_GNSR1_CBAR48			(0xc0)
+#define SMMU_GNSR1_CBFRSYNRA48			(0x4c0)
+#define SMMU_GNSR1_CBA2R48			(0x8c0)
+#define SMMU_GNSR1_CBAR49			(0xc4)
+#define SMMU_GNSR1_CBFRSYNRA49			(0x4c4)
+#define SMMU_GNSR1_CBA2R49			(0x8c4)
+#define SMMU_GNSR1_CBAR50			(0xc8)
+#define SMMU_GNSR1_CBFRSYNRA50			(0x4c8)
+#define SMMU_GNSR1_CBA2R50			(0x8c8)
+#define SMMU_GNSR1_CBAR51			(0xcc)
+#define SMMU_GNSR1_CBFRSYNRA51			(0x4cc)
+#define SMMU_GNSR1_CBA2R51			(0x8cc)
+#define SMMU_GNSR1_CBAR52			(0xd0)
+#define SMMU_GNSR1_CBFRSYNRA52			(0x4d0)
+#define SMMU_GNSR1_CBA2R52			(0x8d0)
+#define SMMU_GNSR1_CBAR53			(0xd4)
+#define SMMU_GNSR1_CBFRSYNRA53			(0x4d4)
+#define SMMU_GNSR1_CBA2R53			(0x8d4)
+#define SMMU_GNSR1_CBAR54			(0xd8)
+#define SMMU_GNSR1_CBFRSYNRA54			(0x4d8)
+#define SMMU_GNSR1_CBA2R54			(0x8d8)
+#define SMMU_GNSR1_CBAR55			(0xdc)
+#define SMMU_GNSR1_CBFRSYNRA55			(0x4dc)
+#define SMMU_GNSR1_CBA2R55			(0x8dc)
+#define SMMU_GNSR1_CBAR56			(0xe0)
+#define SMMU_GNSR1_CBFRSYNRA56			(0x4e0)
+#define SMMU_GNSR1_CBA2R56			(0x8e0)
+#define SMMU_GNSR1_CBAR57			(0xe4)
+#define SMMU_GNSR1_CBFRSYNRA57			(0x4e4)
+#define SMMU_GNSR1_CBA2R57			(0x8e4)
+#define SMMU_GNSR1_CBAR58			(0xe8)
+#define SMMU_GNSR1_CBFRSYNRA58			(0x4e8)
+#define SMMU_GNSR1_CBA2R58			(0x8e8)
+#define SMMU_GNSR1_CBAR59			(0xec)
+#define SMMU_GNSR1_CBFRSYNRA59			(0x4ec)
+#define SMMU_GNSR1_CBA2R59			(0x8ec)
+#define SMMU_GNSR1_CBAR60			(0xf0)
+#define SMMU_GNSR1_CBFRSYNRA60			(0x4f0)
+#define SMMU_GNSR1_CBA2R60			(0x8f0)
+#define SMMU_GNSR1_CBAR61			(0xf4)
+#define SMMU_GNSR1_CBFRSYNRA61			(0x4f4)
+#define SMMU_GNSR1_CBA2R61			(0x8f4)
+#define SMMU_GNSR1_CBAR62			(0xf8)
+#define SMMU_GNSR1_CBFRSYNRA62			(0x4f8)
+#define SMMU_GNSR1_CBA2R62			(0x8f8)
+#define SMMU_GNSR1_CBAR63			(0xfc)
+#define SMMU_GNSR1_CBFRSYNRA63			(0x4fc)
+#define SMMU_GNSR1_CBA2R63			(0x8fc)
+
+/*******************************************************************************
+ * SMMU Global Secure Aux. Configuration Register
+ ******************************************************************************/
+#define SMMU_GSR0_SECURE_ACR			0x10
+#define SMMU_GSR0_PGSIZE_SHIFT			16
+#define SMMU_GSR0_PGSIZE_4K			(0 << SMMU_GSR0_PGSIZE_SHIFT)
+#define SMMU_GSR0_PGSIZE_64K			(1 << SMMU_GSR0_PGSIZE_SHIFT)
+
+/*******************************************************************************
+ * SMMU configuration constants
+ ******************************************************************************/
+#define ID1_PAGESIZE				(1 << 31)
+#define ID1_NUMPAGENDXB_SHIFT			28
+#define ID1_NUMPAGENDXB_MASK			7
+#define ID1_NUMS2CB_SHIFT			16
+#define ID1_NUMS2CB_MASK			0xff
+#define ID1_NUMCB_SHIFT				0
+#define ID1_NUMCB_MASK				0xff
+#define PGSHIFT					16
+#define CB_SIZE					0x800000
+
+static inline uint32_t tegra_smmu_read_32(uint32_t off)
+{
+	return mmio_read_32(TEGRA_SMMU_BASE + off);
+}
+
+static inline void tegra_smmu_write_32(uint32_t off, uint32_t val)
+{
+	mmio_write_32(TEGRA_SMMU_BASE + off, val);
+}
+
+void tegra_smmu_init(void);
+void tegra_smmu_save_context(void);
+
+#endif /*__SMMU_H */
diff --git a/plat/nvidia/tegra/include/plat_macros.S b/plat/nvidia/tegra/include/plat_macros.S
index 1afe454..7db6930 100644
--- a/plat/nvidia/tegra/include/plat_macros.S
+++ b/plat/nvidia/tegra/include/plat_macros.S
@@ -52,7 +52,7 @@
  */
 .macro plat_crash_print_regs
 	mov_imm	x16, TEGRA_GICC_BASE
-	cbz	x16, 1f
+
 	/* gicc base address is now in x16 */
 	adr	x6, gicc_regs	/* Load the gicc reg list to x6 */
 	/* Load the gicc regs to gp regs used by str_in_crash_buf_print */
@@ -63,6 +63,7 @@
 	bl	str_in_crash_buf_print
 
 	/* Print the GICD_ISPENDR regs */
+	mov_imm	x16, TEGRA_GICD_BASE
 	add	x7, x16, #GICD_ISPENDR
 	adr	x4, gicd_pend_reg
 	bl	asm_print_str
diff --git a/plat/nvidia/tegra/include/platform_def.h b/plat/nvidia/tegra/include/platform_def.h
index ad245e2..4df309d 100644
--- a/plat/nvidia/tegra/include/platform_def.h
+++ b/plat/nvidia/tegra/include/platform_def.h
@@ -77,7 +77,7 @@
 /*******************************************************************************
  * Platform specific page table and MMU setup constants
  ******************************************************************************/
-#define ADDR_SPACE_SIZE			(1ull << 32)
+#define ADDR_SPACE_SIZE			(1ull << 35)
 
 /*******************************************************************************
  * Some data must be aligned on the biggest cache line size in the platform.
diff --git a/plat/nvidia/tegra/include/t132/tegra_def.h b/plat/nvidia/tegra/include/t132/tegra_def.h
index 318f4de..314b700 100644
--- a/plat/nvidia/tegra/include/t132/tegra_def.h
+++ b/plat/nvidia/tegra/include/t132/tegra_def.h
@@ -80,6 +80,12 @@
 #define TEGRA_EVP_BASE			0x6000F000
 
 /*******************************************************************************
+ * Tegra Miscellaneous register constants
+ ******************************************************************************/
+#define TEGRA_MISC_BASE			0x70000000
+#define  HARDWARE_REVISION_OFFSET	0x804
+
+/*******************************************************************************
  * Tegra UART controller base addresses
  ******************************************************************************/
 #define TEGRA_UARTA_BASE		0x70006000
diff --git a/plat/nvidia/tegra/include/t186/tegra_def.h b/plat/nvidia/tegra/include/t186/tegra_def.h
new file mode 100644
index 0000000..f3fbb89
--- /dev/null
+++ b/plat/nvidia/tegra/include/t186/tegra_def.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __TEGRA_DEF_H__
+#define __TEGRA_DEF_H__
+
+#include <platform_def.h>
+
+/*******************************************************************************
+ * These values are used by the PSCI implementation during the `CPU_SUSPEND`
+ * and `SYSTEM_SUSPEND` calls as the `state-id` field in the 'power state'
+ * parameter.
+ ******************************************************************************/
+#define PSTATE_ID_CORE_IDLE		6
+#define PSTATE_ID_CORE_POWERDN		7
+#define PSTATE_ID_SOC_POWERDN		2
+
+/*******************************************************************************
+ * Platform power states (used by PSCI framework)
+ *
+ * - PLAT_MAX_RET_STATE should be less than lowest PSTATE_ID
+ * - PLAT_MAX_OFF_STATE should be greater than the highest PSTATE_ID
+ ******************************************************************************/
+#define PLAT_MAX_RET_STATE		1
+#define PLAT_MAX_OFF_STATE		8
+
+/*******************************************************************************
+ * Implementation defined ACTLR_EL3 bit definitions
+ ******************************************************************************/
+#define ACTLR_EL3_L2ACTLR_BIT		(1 << 6)
+#define ACTLR_EL3_L2ECTLR_BIT		(1 << 5)
+#define ACTLR_EL3_L2CTLR_BIT		(1 << 4)
+#define ACTLR_EL3_CPUECTLR_BIT		(1 << 1)
+#define ACTLR_EL3_CPUACTLR_BIT		(1 << 0)
+#define ACTLR_EL3_ENABLE_ALL_ACCESS	(ACTLR_EL3_L2ACTLR_BIT | \
+					 ACTLR_EL3_L2ECTLR_BIT | \
+					 ACTLR_EL3_L2CTLR_BIT | \
+					 ACTLR_EL3_CPUECTLR_BIT | \
+					 ACTLR_EL3_CPUACTLR_BIT)
+
+/*******************************************************************************
+ * Secure IRQ definitions
+ ******************************************************************************/
+#define TEGRA186_TOP_WDT_IRQ		49
+#define TEGRA186_AON_WDT_IRQ		50
+
+#define TEGRA186_SEC_IRQ_TARGET_MASK	0xF3 /* 4 A57 - 2 Denver */
+
+/*******************************************************************************
+ * Tegra Miscellaneous register constants
+ ******************************************************************************/
+#define TEGRA_MISC_BASE			0x00100000
+#define  HARDWARE_REVISION_OFFSET	0x4
+#define  HARDWARE_MINOR_REVISION_MASK	0xf0000
+#define  HARDWARE_MINOR_REVISION_SHIFT	0x10
+#define  HARDWARE_REVISION_A01		1
+#define  MISCREG_PFCFG			0x200C
+
+/*******************************************************************************
+ * Tegra Memory Controller constants
+ ******************************************************************************/
+#define TEGRA_MC_STREAMID_BASE		0x02C00000
+#define TEGRA_MC_BASE			0x02C10000
+
+/*******************************************************************************
+ * Tegra UART Controller constants
+ ******************************************************************************/
+#define TEGRA_UARTA_BASE		0x03100000
+#define TEGRA_UARTB_BASE		0x03110000
+#define TEGRA_UARTC_BASE		0x0C280000
+#define TEGRA_UARTD_BASE		0x03130000
+#define TEGRA_UARTE_BASE		0x03140000
+#define TEGRA_UARTF_BASE		0x03150000
+#define TEGRA_UARTG_BASE		0x0C290000
+
+/*******************************************************************************
+ * GICv2 & interrupt handling related constants
+ ******************************************************************************/
+#define TEGRA_GICD_BASE			0x03881000
+#define TEGRA_GICC_BASE			0x03882000
+
+/*******************************************************************************
+ * Security Engine related constants
+ ******************************************************************************/
+#define TEGRA_SE0_BASE			0x03AC0000
+#define  SE_MUTEX_WATCHDOG_NS_LIMIT	0x6C
+#define TEGRA_PKA1_BASE			0x03AD0000
+#define  PKA_MUTEX_WATCHDOG_NS_LIMIT	0x8144
+#define TEGRA_RNG1_BASE			0x03AE0000
+#define  RNG_MUTEX_WATCHDOG_NS_LIMIT	0xFE0
+
+/*******************************************************************************
+ * Tegra Clock and Reset Controller constants
+ ******************************************************************************/
+#define TEGRA_CAR_RESET_BASE		0x05000000
+
+/*******************************************************************************
+ * Tegra micro-seconds timer constants
+ ******************************************************************************/
+#define TEGRA_TMRUS_BASE		0x0C2E0000
+
+/*******************************************************************************
+ * Tegra Power Mgmt Controller constants
+ ******************************************************************************/
+#define TEGRA_PMC_BASE			0x0C360000
+
+/*******************************************************************************
+ * Tegra scratch registers constants
+ ******************************************************************************/
+#define TEGRA_SCRATCH_BASE		0x0C390000
+#define  SECURE_SCRATCH_RSV6		0x680
+#define  SECURE_SCRATCH_RSV11_LO	0x6A8
+#define  SECURE_SCRATCH_RSV11_HI	0x6AC
+
+/*******************************************************************************
+ * Tegra Memory Mapped Control Register Access Bus constants
+ ******************************************************************************/
+#define TEGRA_MMCRAB_BASE		0x0E000000
+
+/*******************************************************************************
+ * Tegra SMMU Controller constants
+ ******************************************************************************/
+#define TEGRA_SMMU_BASE			0x12000000
+
+/*******************************************************************************
+ * Tegra TZRAM constants
+ ******************************************************************************/
+#define TEGRA_TZRAM_BASE		0x30000000
+#define TEGRA_TZRAM_SIZE		0x50000
+
+#endif /* __TEGRA_DEF_H__ */
diff --git a/plat/nvidia/tegra/include/t210/tegra_def.h b/plat/nvidia/tegra/include/t210/tegra_def.h
index ce85427..d24377d 100644
--- a/plat/nvidia/tegra/include/t210/tegra_def.h
+++ b/plat/nvidia/tegra/include/t210/tegra_def.h
@@ -105,6 +105,12 @@
 #define TEGRA_EVP_BASE			0x6000F000
 
 /*******************************************************************************
+ * Tegra Miscellaneous register constants
+ ******************************************************************************/
+#define TEGRA_MISC_BASE			0x70000000
+#define  HARDWARE_REVISION_OFFSET	0x804
+
+/*******************************************************************************
  * Tegra UART controller base addresses
  ******************************************************************************/
 #define TEGRA_UARTA_BASE		0x70006000
diff --git a/plat/nvidia/tegra/include/tegra_platform.h b/plat/nvidia/tegra/include/tegra_platform.h
new file mode 100644
index 0000000..a2813a8
--- /dev/null
+++ b/plat/nvidia/tegra/include/tegra_platform.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __TEGRA_PLATFORM_H__
+#define __TEGRA_PLATFORM_H__
+
+#include <sys/cdefs.h>
+
+/*
+ * Tegra chip major/minor version
+ */
+uint32_t tegra_get_chipid_major(void);
+uint32_t tegra_get_chipid_minor(void);
+
+/*
+ * Tegra chip identifiers
+ */
+uint8_t tegra_is_t132(void);
+uint8_t tegra_is_t210(void);
+
+/*
+ * Tegra platform identifiers
+ */
+uint8_t tegra_platform_is_silicon(void);
+uint8_t tegra_platform_is_qt(void);
+uint8_t tegra_platform_is_emulation(void);
+uint8_t tegra_platform_is_fpga(void);
+
+#endif /* __TEGRA_PLATFORM_H__ */
diff --git a/plat/nvidia/tegra/include/tegra_private.h b/plat/nvidia/tegra/include/tegra_private.h
index 75416ec..012bfd7 100644
--- a/plat/nvidia/tegra/include/tegra_private.h
+++ b/plat/nvidia/tegra/include/tegra_private.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -42,6 +42,9 @@
 #define TEGRA_DRAM_BASE		0x80000000
 #define TEGRA_DRAM_END		0x27FFFFFFF
 
+/*******************************************************************************
+ * Struct for parameters received from BL2
+ ******************************************************************************/
 typedef struct plat_params_from_bl2 {
 	/* TZ memory size */
 	uint64_t tzdram_size;
@@ -51,6 +54,26 @@
 	int uart_id;
 } plat_params_from_bl2_t;
 
+/*******************************************************************************
+ * Per-CPU struct describing FIQ state to be stored
+ ******************************************************************************/
+typedef struct pcpu_fiq_state {
+	uint64_t elr_el3;
+	uint64_t spsr_el3;
+} pcpu_fiq_state_t;
+
+/*******************************************************************************
+ * Struct describing per-FIQ configuration settings
+ ******************************************************************************/
+typedef struct irq_sec_cfg {
+	/* IRQ number */
+	unsigned int irq;
+	/* Target CPUs servicing this interrupt */
+	unsigned int target_cpus;
+	/* type = INTR_TYPE_S_EL1 or INTR_TYPE_EL3 */
+	uint32_t type;
+} irq_sec_cfg_t;
+
 /* Declarations for plat_psci_handlers.c */
 int32_t tegra_soc_validate_power_state(unsigned int power_state,
 		psci_power_state_t *req_state);
@@ -58,13 +81,21 @@
 /* Declarations for plat_setup.c */
 const mmap_region_t *plat_get_mmio_map(void);
 uint32_t plat_get_console_from_id(int id);
+void plat_gic_setup(void);
+bl31_params_t *plat_get_bl31_params(void);
+plat_params_from_bl2_t *plat_get_bl31_plat_params(void);
 
 /* Declarations for plat_secondary.c */
 void plat_secondary_setup(void);
 int plat_lock_cpu_vectors(void);
 
+/* Declarations for tegra_fiq_glue.c */
+void tegra_fiq_handler_setup(void);
+int tegra_fiq_get_intr_context(void);
+void tegra_fiq_set_ns_entrypoint(uint64_t entrypoint);
+
 /* Declarations for tegra_gic.c */
-void tegra_gic_setup(void);
+void tegra_gic_setup(const irq_sec_cfg_t *irq_sec_ptr, unsigned int num_irqs);
 void tegra_gic_cpuif_deactivate(void);
 
 /* Declarations for tegra_security.c */
@@ -83,6 +114,7 @@
 /* Declarations for tegra_bl31_setup.c */
 plat_params_from_bl2_t *bl31_get_plat_params(void);
 int bl31_check_ns_address(uint64_t base, uint64_t size_in_bytes);
+void plat_early_platform_setup(void);
 
 /* Declarations for tegra_delay_timer.c */
 void tegra_delay_timer_init(void);
diff --git a/plat/nvidia/tegra/platform.mk b/plat/nvidia/tegra/platform.mk
index 756899c..2eeffca 100644
--- a/plat/nvidia/tegra/platform.mk
+++ b/plat/nvidia/tegra/platform.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
diff --git a/plat/nvidia/tegra/soc/t132/plat_setup.c b/plat/nvidia/tegra/soc/t132/plat_setup.c
index 337a2c5..651bd08 100644
--- a/plat/nvidia/tegra/soc/t132/plat_setup.c
+++ b/plat/nvidia/tegra/soc/t132/plat_setup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -28,8 +28,11 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <xlat_tables.h>
+#include <arch_helpers.h>
+#include <bl_common.h>
 #include <tegra_def.h>
+#include <tegra_private.h>
+#include <xlat_tables.h>
 
 /*******************************************************************************
  * The Tegra power domain tree has a single system level power domain i.e. a
@@ -106,3 +109,11 @@
 
 	return tegra132_uart_addresses[id];
 }
+
+/*******************************************************************************
+ * Initialize the GIC and SGIs
+ ******************************************************************************/
+void plat_gic_setup(void)
+{
+	tegra_gic_setup(NULL, 0);
+}
diff --git a/plat/nvidia/tegra/soc/t186/drivers/include/mce.h b/plat/nvidia/tegra/soc/t186/drivers/include/mce.h
new file mode 100644
index 0000000..7078b8b
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/include/mce.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MCE_H__
+#define __MCE_H__
+
+#include <mmio.h>
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * MCE apertures used by the ARI interface
+ *
+ * Aperture 0 - Cpu0 (ARM Cortex A-57)
+ * Aperture 1 - Cpu1 (ARM Cortex A-57)
+ * Aperture 2 - Cpu2 (ARM Cortex A-57)
+ * Aperture 3 - Cpu3 (ARM Cortex A-57)
+ * Aperture 4 - Cpu4 (Denver15)
+ * Aperture 5 - Cpu5 (Denver15)
+ ******************************************************************************/
+#define MCE_ARI_APERTURE_0_OFFSET	0x0
+#define MCE_ARI_APERTURE_1_OFFSET	0x10000
+#define MCE_ARI_APERTURE_2_OFFSET	0x20000
+#define MCE_ARI_APERTURE_3_OFFSET	0x30000
+#define MCE_ARI_APERTURE_4_OFFSET	0x40000
+#define MCE_ARI_APERTURE_5_OFFSET	0x50000
+#define MCE_ARI_APERTURE_OFFSET_MAX	MCE_ARI_APERTURE_5_OFFSET
+
+/* number of apertures */
+#define MCE_ARI_APERTURES_MAX		6
+
+/* each ARI aperture is 64KB */
+#define MCE_ARI_APERTURE_SIZE		0x10000
+
+/*******************************************************************************
+ * CPU core ids - used by the MCE_ONLINE_CORE ARI
+ ******************************************************************************/
+typedef enum mce_core_id {
+	MCE_CORE_ID_DENVER_15_0,
+	MCE_CORE_ID_DENVER_15_1,
+	/* 2 and 3 are reserved */
+	MCE_CORE_ID_A57_0 = 4,
+	MCE_CORE_ID_A57_1,
+	MCE_CORE_ID_A57_2,
+	MCE_CORE_ID_A57_3,
+	MCE_CORE_ID_MAX
+} mce_core_id_t;
+
+#define MCE_CORE_ID_MASK			0x7
+
+/*******************************************************************************
+ * MCE commands
+ ******************************************************************************/
+typedef enum mce_cmd {
+	MCE_CMD_ENTER_CSTATE = 0,
+	MCE_CMD_UPDATE_CSTATE_INFO,
+	MCE_CMD_UPDATE_CROSSOVER_TIME,
+	MCE_CMD_READ_CSTATE_STATS,
+	MCE_CMD_WRITE_CSTATE_STATS,
+	MCE_CMD_IS_SC7_ALLOWED,
+	MCE_CMD_ONLINE_CORE,
+	MCE_CMD_CC3_CTRL,
+	MCE_CMD_ECHO_DATA,
+	MCE_CMD_READ_VERSIONS,
+	MCE_CMD_ENUM_FEATURES,
+	MCE_CMD_ROC_FLUSH_CACHE_TRBITS,
+	MCE_CMD_ENUM_READ_MCA,
+	MCE_CMD_ENUM_WRITE_MCA,
+	MCE_CMD_ROC_FLUSH_CACHE,
+	MCE_CMD_ROC_CLEAN_CACHE,
+	MCE_CMD_IS_CCX_ALLOWED = 0xFE,
+	MCE_CMD_MAX = 0xFF,
+} mce_cmd_t;
+
+#define MCE_CMD_MASK				0xFF
+
+/*******************************************************************************
+ * Macros to prepare CSTATE info request
+ ******************************************************************************/
+/* Description of the parameters for UPDATE_CSTATE_INFO request */
+#define CLUSTER_CSTATE_MASK			0x7
+#define CLUSTER_CSTATE_SHIFT			0
+#define CLUSTER_CSTATE_UPDATE_BIT		(1 << 7)
+#define CCPLEX_CSTATE_MASK			0x3
+#define CCPLEX_CSTATE_SHIFT			8
+#define CCPLEX_CSTATE_UPDATE_BIT		(1 << 15)
+#define SYSTEM_CSTATE_MASK			0xF
+#define SYSTEM_CSTATE_SHIFT			16
+#define SYSTEM_CSTATE_FORCE_UPDATE_SHIFT	22
+#define SYSTEM_CSTATE_FORCE_UPDATE_BIT		(1 << 22)
+#define SYSTEM_CSTATE_UPDATE_BIT		(1 << 23)
+#define CSTATE_WAKE_MASK_UPDATE_BIT		(1 << 31)
+#define CSTATE_WAKE_MASK_SHIFT			32
+#define CSTATE_WAKE_MASK_CLEAR			0xFFFFFFFF
+
+/*******************************************************************************
+ * Auto-CC3 control macros
+ ******************************************************************************/
+#define MCE_AUTO_CC3_FREQ_MASK			0x1FF
+#define MCE_AUTO_CC3_FREQ_SHIFT			0
+#define MCE_AUTO_CC3_VTG_MASK			0x7F
+#define MCE_AUTO_CC3_VTG_SHIFT			16
+#define MCE_AUTO_CC3_ENABLE_BIT			(1 << 31)
+
+/*******************************************************************************
+ * Macros for the 'IS_SC7_ALLOWED' command
+ ******************************************************************************/
+#define MCE_SC7_ALLOWED_MASK			0x7
+#define MCE_SC7_WAKE_TIME_SHIFT			32
+
+/*******************************************************************************
+ * Macros for 'read/write cstate stats' commands
+ ******************************************************************************/
+#define MCE_CSTATE_STATS_TYPE_SHIFT		32
+#define MCE_CSTATE_WRITE_DATA_LO_MASK		0xF
+
+/*******************************************************************************
+ * Macros for 'update crossover threshold' command
+ ******************************************************************************/
+#define MCE_CROSSOVER_THRESHOLD_TIME_SHIFT	32
+
+/*******************************************************************************
+ * Timeout value used to powerdown a core
+ ******************************************************************************/
+#define MCE_CORE_SLEEP_TIME_INFINITE		0xFFFFFFFF
+
+/*******************************************************************************
+ * MCA command struct
+ ******************************************************************************/
+typedef union mca_cmd {
+	struct command {
+		uint8_t cmd;
+		uint8_t idx;
+		uint8_t subidx;
+	} command;
+	struct input {
+		uint32_t low;
+		uint32_t high;
+	} input;
+	uint64_t data;
+} mca_cmd_t;
+
+/*******************************************************************************
+ * MCA argument struct
+ ******************************************************************************/
+typedef union mca_arg {
+	struct err {
+		uint64_t error:8;
+		uint64_t unused:48;
+		uint64_t finish:8;
+	} err;
+	struct arg {
+		uint32_t low;
+		uint32_t high;
+	} arg;
+	uint64_t data;
+} mca_arg_t;
+
+/*******************************************************************************
+ * Structure populated by arch specific code to export routines which perform
+ * common low level MCE functions
+ ******************************************************************************/
+typedef struct arch_mce_ops {
+	/*
+	 * This ARI request sets up the MCE to start execution on assertion
+	 * of STANDBYWFI, update the core power state and expected wake time,
+	 * then determine the proper power state to enter.
+	 */
+	int (*enter_cstate)(uint32_t ari_base, uint32_t state,
+			    uint32_t wake_time);
+	/*
+	 * This ARI request allows updating of the CLUSTER_CSTATE,
+	 * CCPLEX_CSTATE, and SYSTEM_CSTATE register values.
+	 */
+	int (*update_cstate_info)(uint32_t ari_base,
+				  uint32_t cluster,
+				  uint32_t ccplex,
+				  uint32_t system,
+				  uint8_t sys_state_force,
+				  uint32_t wake_mask,
+				  uint8_t update_wake_mask);
+	/*
+	 * This ARI request allows updating of power state crossover
+	 * threshold times. An index value specifies which crossover
+	 * state is being updated.
+	 */
+	int (*update_crossover_time)(uint32_t ari_base,
+				     uint32_t type,
+				     uint32_t time);
+	/*
+	 * This ARI request allows read access to statistical information
+	 * related to power states.
+	 */
+	uint64_t (*read_cstate_stats)(uint32_t ari_base,
+				     uint32_t state);
+	/*
+	 * This ARI request allows write access to statistical information
+	 * related to power states.
+	 */
+	int (*write_cstate_stats)(uint32_t ari_base,
+				  uint32_t state,
+				  uint32_t stats);
+	/*
+	 * This ARI request allows the CPU to understand the features
+	 * supported by the MCE firmware.
+	 */
+	uint64_t (*call_enum_misc)(uint32_t ari_base, uint32_t cmd,
+				   uint32_t data);
+	/*
+	 * This ARI request allows querying the CCPLEX to determine if
+	 * the CCx state is allowed given a target core C-state and wake
+	 * time. If the CCx state is allowed, the response indicates CCx
+	 * must be entered. If the CCx state is not allowed, the response
+	 * indicates CC6/CC7 can't be entered
+	 */
+	int (*is_ccx_allowed)(uint32_t ari_base, uint32_t state,
+			      uint32_t wake_time);
+	/*
+	 * This ARI request allows querying the CCPLEX to determine if
+	 * the SC7 state is allowed given a target core C-state and wake
+	 * time. If the SC7 state is allowed, all cores but the associated
+	 * core are offlined (WAKE_EVENTS are set to 0) and the response
+	 * indicates SC7 must be entered. If the SC7 state is not allowed,
+	 * the response indicates SC7 can't be entered
+	 */
+	int (*is_sc7_allowed)(uint32_t ari_base, uint32_t state,
+			      uint32_t wake_time);
+	/*
+	 * This ARI request allows a core to bring another offlined core
+	 * back online to the C0 state. Note that a core is offlined by
+	 * entering a C-state where the WAKE_MASK is all 0.
+	 */
+	int (*online_core)(uint32_t ari_base, uint32_t cpuid);
+	/*
+	 * This ARI request allows the CPU to enable/disable Auto-CC3 idle
+	 * state.
+	 */
+	int (*cc3_ctrl)(uint32_t ari_base,
+			uint32_t freq,
+			uint32_t volt,
+			uint8_t enable);
+	/*
+	 * This ARI request allows updating the reset vector register for
+	 * D15 and A57 CPUs.
+	 */
+	int (*update_reset_vector)(uint32_t ari_base,
+				   uint32_t addr_low,
+				   uint32_t addr_high);
+	/*
+	 * This ARI request instructs the ROC to flush A57 data caches in
+	 * order to maintain coherency with the Denver cluster.
+	 */
+	int (*roc_flush_cache)(uint32_t ari_base);
+	/*
+	 * This ARI request instructs the ROC to flush A57 data caches along
+	 * with the caches covering ARM code in order to maintain coherency
+	 * with the Denver cluster.
+	 */
+	int (*roc_flush_cache_trbits)(uint32_t ari_base);
+	/*
+	 * This ARI request instructs the ROC to clean A57 data caches along
+	 * with the caches covering ARM code in order to maintain coherency
+	 * with the Denver cluster.
+	 */
+	int (*roc_clean_cache)(uint32_t ari_base);
+	/*
+	 * This ARI request reads/writes the Machine Check Arch. (MCA)
+	 * registers.
+	 */
+	uint64_t (*read_write_mca)(uint32_t ari_base,
+			      mca_cmd_t cmd,
+			      uint64_t *data);
+	/*
+	 * Some MC GSC (General Security Carveout) register values are
+	 * expected to be changed by TrustZone secure ARM code after boot.
+	 * Since there is no hardware mechanism for the CCPLEX to know
+	 * that an MC GSC register has changed to allow it to update its
+	 * own internal GSC register, there needs to be a mechanism that
+	 * can be used by ARM code to cause the CCPLEX to update its GSC
+	 * register value. This ARI request allows updating the GSC register
+	 * value for a certain carveout in the CCPLEX.
+	 */
+	int (*update_ccplex_gsc)(uint32_t ari_base, uint32_t gsc_idx);
+	/*
+	 * This ARI request instructs the CCPLEX to either shutdown or
+	 * reset the entire system
+	 */
+	void (*enter_ccplex_state)(uint32_t ari_base, uint32_t state_idx);
+} arch_mce_ops_t;
+
+int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
+		uint64_t arg2);
+int mce_update_reset_vector(uint32_t addr_lo, uint32_t addr_hi);
+int mce_update_gsc_videomem(void);
+int mce_update_gsc_tzdram(void);
+int mce_update_gsc_tzram(void);
+__dead2 void mce_enter_ccplex_state(uint32_t state_idx);
+
+/* declarations for ARI/NVG handler functions */
+int ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+	uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+	uint8_t update_wake_mask);
+int ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time);
+uint64_t ari_read_cstate_stats(uint32_t ari_base, uint32_t state);
+int ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats);
+uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data);
+int ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int ari_online_core(uint32_t ari_base, uint32_t core);
+int ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable);
+int ari_reset_vector_update(uint32_t ari_base, uint32_t lo, uint32_t hi);
+int ari_roc_flush_cache_trbits(uint32_t ari_base);
+int ari_roc_flush_cache(uint32_t ari_base);
+int ari_roc_clean_cache(uint32_t ari_base);
+uint64_t ari_read_write_mca(uint32_t ari_base, mca_cmd_t cmd, uint64_t *data);
+int ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx);
+void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx);
+
+int nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+		uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+		uint8_t update_wake_mask);
+int nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time);
+uint64_t nvg_read_cstate_stats(uint32_t ari_base, uint32_t state);
+int nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t val);
+int nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int nvg_online_core(uint32_t ari_base, uint32_t core);
+int nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable);
+
+#endif /* __MCE_H__ */
diff --git a/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h b/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h
new file mode 100644
index 0000000..3e6054b
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef T18X_TEGRA_ARI_H
+#define T18X_TEGRA_ARI_H
+
+/*
+ * ----------------------------------------------------------------------------
+ * t18x_ari.h
+ *
+ * Global ARI definitions.
+ * ----------------------------------------------------------------------------
+ */
+
+enum {
+	TEGRA_ARI_VERSION_MAJOR = 2,
+	TEGRA_ARI_VERSION_MINOR = 19,
+};
+
+typedef enum {
+	/* indexes below get the core lock */
+	TEGRA_ARI_MISC = 0,
+	/* index 1 is deprecated */
+	/* index 2 is deprecated */
+	/* index 3 is deprecated */
+	TEGRA_ARI_ONLINE_CORE = 4,
+
+	/* indexes below need cluster lock */
+	TEGRA_ARI_MISC_CLUSTER = 41,
+	TEGRA_ARI_IS_CCX_ALLOWED = 42,
+	TEGRA_ARI_CC3_CTRL = 43,
+
+	/* indexes below need ccplex lock */
+	TEGRA_ARI_ENTER_CSTATE = 80,
+	TEGRA_ARI_UPDATE_CSTATE_INFO = 81,
+	TEGRA_ARI_IS_SC7_ALLOWED = 82,
+	/* index 83 is deprecated */
+	TEGRA_ARI_PERFMON = 84,
+	TEGRA_ARI_UPDATE_CCPLEX_GSC = 85,
+	/* index 86 is deprecated */
+	/* index 87 is deprecated */
+	TEGRA_ARI_ROC_FLUSH_CACHE_ONLY = 88,
+	TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS = 89,
+	TEGRA_ARI_MISC_CCPLEX = 90,
+	TEGRA_ARI_MCA = 91,
+	TEGRA_ARI_UPDATE_CROSSOVER = 92,
+	TEGRA_ARI_CSTATE_STATS = 93,
+	TEGRA_ARI_WRITE_CSTATE_STATS = 94,
+	TEGRA_ARI_COPY_MISCREG_AA64_RST = 95,
+	TEGRA_ARI_ROC_CLEAN_CACHE_ONLY = 96,
+} tegra_ari_req_id_t;
+
+typedef enum {
+	TEGRA_ARI_MISC_ECHO = 0,
+	TEGRA_ARI_MISC_VERSION = 1,
+	TEGRA_ARI_MISC_FEATURE_LEAF_0 = 2,
+} tegra_ari_misc_index_t;
+
+typedef enum {
+	TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF = 0,
+	TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT = 1,
+	TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL = 2,
+} tegra_ari_misc_ccplex_index_t;
+
+typedef enum {
+	TEGRA_ARI_CORE_C0 = 0,
+	TEGRA_ARI_CORE_C1 = 1,
+	TEGRA_ARI_CORE_C6 = 6,
+	TEGRA_ARI_CORE_C7 = 7,
+	TEGRA_ARI_CORE_WARMRSTREQ = 8,
+} tegra_ari_core_sleep_state_t;
+
+typedef enum {
+	TEGRA_ARI_CLUSTER_CC0 = 0,
+	TEGRA_ARI_CLUSTER_CC1 = 1,
+	TEGRA_ARI_CLUSTER_CC6 = 6,
+	TEGRA_ARI_CLUSTER_CC7 = 7,
+} tegra_ari_cluster_sleep_state_t;
+
+typedef enum {
+	TEGRA_ARI_CCPLEX_CCP0 = 0,
+	TEGRA_ARI_CCPLEX_CCP1 = 1,
+	TEGRA_ARI_CCPLEX_CCP3 = 3,
+} tegra_ari_ccplex_sleep_state_t;
+
+typedef enum {
+	TEGRA_ARI_SYSTEM_SC0 = 0,
+	TEGRA_ARI_SYSTEM_SC1 = 1,
+	TEGRA_ARI_SYSTEM_SC2 = 2,
+	TEGRA_ARI_SYSTEM_SC3 = 3,
+	TEGRA_ARI_SYSTEM_SC4 = 4,
+	TEGRA_ARI_SYSTEM_SC7 = 7,
+	TEGRA_ARI_SYSTEM_SC8 = 8,
+} tegra_ari_system_sleep_state_t;
+
+typedef enum {
+	TEGRA_ARI_CROSSOVER_C1_C6 = 0,
+	TEGRA_ARI_CROSSOVER_CC1_CC6 = 1,
+	TEGRA_ARI_CROSSOVER_CC1_CC7 = 2,
+	TEGRA_ARI_CROSSOVER_CCP1_CCP3 = 3,
+	TEGRA_ARI_CROSSOVER_CCP3_SC2 = 4,
+	TEGRA_ARI_CROSSOVER_CCP3_SC3 = 5,
+	TEGRA_ARI_CROSSOVER_CCP3_SC4 = 6,
+	TEGRA_ARI_CROSSOVER_CCP3_SC7 = 7,
+	TEGRA_ARI_CROSSOVER_CCP3_SC1 = 8,
+} tegra_ari_crossover_index_t;
+
+typedef enum {
+	TEGRA_ARI_CSTATE_STATS_CLEAR = 0,
+	TEGRA_ARI_CSTATE_STATS_SC7_ENTRIES = 1,
+	TEGRA_ARI_CSTATE_STATS_SC4_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_SC3_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_SC2_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_CCP3_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_A57_CC6_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_A57_CC7_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_D15_CC6_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_D15_CC7_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_D15_0_C6_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_D15_1_C6_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_D15_0_C7_ENTRIES = 14,
+	TEGRA_ARI_CSTATE_STATS_D15_1_C7_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_A57_0_C7_ENTRIES = 18,
+	TEGRA_ARI_CSTATE_STATS_A57_1_C7_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_A57_2_C7_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_A57_3_C7_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_0,
+	TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_1,
+	TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_0 = 26,
+	TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_1,
+	TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_2,
+	TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_3,
+} tegra_ari_cstate_stats_index_t;
+
+typedef enum {
+	TEGRA_ARI_GSC_ALL = 0,
+
+	TEGRA_ARI_GSC_BPMP = 6,
+	TEGRA_ARI_GSC_APE = 7,
+	TEGRA_ARI_GSC_SPE = 8,
+	TEGRA_ARI_GSC_SCE = 9,
+	TEGRA_ARI_GSC_APR = 10,
+	TEGRA_ARI_GSC_TZRAM = 11,
+	TEGRA_ARI_GSC_SE = 12,
+
+	TEGRA_ARI_GSC_BPMP_TO_SPE = 16,
+	TEGRA_ARI_GSC_SPE_TO_BPMP = 17,
+	TEGRA_ARI_GSC_CPU_TZ_TO_BPMP = 18,
+	TEGRA_ARI_GSC_BPMP_TO_CPU_TZ = 19,
+	TEGRA_ARI_GSC_CPU_NS_TO_BPMP = 20,
+	TEGRA_ARI_GSC_BPMP_TO_CPU_NS = 21,
+	TEGRA_ARI_GSC_IPC_SE_SPE_SCE_BPMP = 22,
+	TEGRA_ARI_GSC_SC7_RESUME_FW = 23,
+
+	TEGRA_ARI_GSC_TZ_DRAM_IDX = 34,
+	TEGRA_ARI_GSC_VPR_IDX = 35,
+} tegra_ari_gsc_index_t;
+
+/* This macro will produce enums for __name##_LSB, __name##_MSB and __name##_MSK */
+#define TEGRA_ARI_ENUM_MASK_LSB_MSB(__name, __lsb, __msb) __name##_LSB = __lsb, __name##_MSB = __msb
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CLUSTER_CSTATE, 0, 2),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CLUSTER_CSTATE_PRESENT, 7, 7),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CCPLEX_CSTATE, 8, 9),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CCPLEX_CSTATE_PRESENT, 15, 15),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__SYSTEM_CSTATE, 16, 19),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__IGNORE_CROSSOVERS, 22, 22),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__SYSTEM_CSTATE_PRESENT, 23, 23),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__WAKE_MASK_PRESENT, 31, 31),
+} tegra_ari_update_cstate_info_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL__EN, 0, 0),
+} tegra_ari_misc_ccplex_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__IDLE_FREQ, 0, 8),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__IDLE_VOLT, 16, 23),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__ENABLE, 31, 31),
+} tegra_ari_cc3_ctrl_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_MCA_NOP = 0,
+	TEGRA_ARI_MCA_READ_SERR = 1,
+	TEGRA_ARI_MCA_WRITE_SERR = 2,
+	TEGRA_ARI_MCA_CLEAR_SERR = 4,
+	TEGRA_ARI_MCA_REPORT_SERR = 5,
+	TEGRA_ARI_MCA_READ_INTSTS = 6,
+	TEGRA_ARI_MCA_WRITE_INTSTS = 7,
+	TEGRA_ARI_MCA_READ_PREBOOT_SERR = 8,
+} tegra_ari_mca_commands_t;
+
+typedef enum {
+	TEGRA_ARI_MCA_RD_WR_DPMU = 0,
+	TEGRA_ARI_MCA_RD_WR_IOB = 1,
+	TEGRA_ARI_MCA_RD_WR_MCB = 2,
+	TEGRA_ARI_MCA_RD_WR_CCE = 3,
+	TEGRA_ARI_MCA_RD_WR_CQX = 4,
+	TEGRA_ARI_MCA_RD_WR_CTU = 5,
+	TEGRA_ARI_MCA_RD_BANK_INFO = 0x0f,
+	TEGRA_ARI_MCA_RD_BANK_TEMPLATE = 0x10,
+	TEGRA_ARI_MCA_RD_WR_SECURE_ACCESS_REGISTER = 0x11,
+	TEGRA_ARI_MCA_RD_WR_GLOBAL_CONFIG_REGISTER = 0x12,
+} tegra_ari_mca_rd_wr_indexes_t;
+
+typedef enum {
+	TEGRA_ARI_MCA_RD_WR_ASERRX_CTRL = 0,
+	TEGRA_ARI_MCA_RD_WR_ASERRX_STATUS = 1,
+	TEGRA_ARI_MCA_RD_WR_ASERRX_ADDR = 2,
+	TEGRA_ARI_MCA_RD_WR_ASERRX_MISC1 = 3,
+	TEGRA_ARI_MCA_RD_WR_ASERRX_MISC2 = 4,
+} tegra_ari_mca_read_asserx_subindexes_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_SETTING_ENABLES_NS_PERMITTED, 0, 0),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_READING_STATUS_NS_PERMITTED, 1, 1),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_PENDING_MCA_ERRORS_NS_PERMITTED, 2, 2),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_CLEARING_MCA_INTERRUPTS_NS_PERMITTED, 3, 3),
+} tegra_ari_mca_secure_register_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_SERR_ERR_CODE, 0, 15),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_PWM_ERR, 16, 16),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_CRAB_ERR, 17, 17),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_RD_WR_N, 18, 18),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_UCODE_ERR, 19, 19),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_PWM, 20, 23),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_AV, 58, 58),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_MV, 59, 59),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_EN, 60, 60),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_UC, 61, 61),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_OVF, 62, 62),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_VAL, 63, 63),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_ADDR_ADDR, 0, 41),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_ADDR_UCODE_ERRCD, 42, 52),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_PWM_ERR, 0, 0),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_CRAB_ERR, 1, 1),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_UCODE_ERR, 3, 3),
+} tegra_ari_mca_aserr0_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_SERR_ERR_CODE, 0, 15),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MSI_ERR, 16, 16),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_IHI_ERR, 17, 17),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CRI_ERR, 18, 18),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MMCRAB_ERR, 19, 19),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CSI_ERR, 20, 20),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_RD_WR_N, 21, 21),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_REQ_ERRT, 22, 23),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_RESP_ERRT, 24, 25),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_AV, 58, 58),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MV, 59, 59),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_EN, 60, 60),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_UC, 61, 61),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_OVF, 62, 62),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_VAL, 63, 63),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_AXI_ID, 0, 7),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_ID, 8, 27),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_CID, 28, 31),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_CMD, 32, 35),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_MSI_ERR, 0, 0),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_IHI_ERR, 1, 1),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_CRI_ERR, 2, 2),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_MMCRAB_ERR, 3, 3),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_CSI_ERR, 4, 4),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_MISC_ADDR, 0, 41),
+} tegra_ari_mca_aserr1_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_SERR_ERR_CODE, 0, 15),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_MC_ERR, 16, 16),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_SYSRAM_ERR, 17, 17),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_CLIENT_ID, 18, 19),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_AV, 58, 58),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_MV, 59, 59),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_EN, 60, 60),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_UC, 61, 61),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_OVF, 62, 62),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_VAL, 63, 63),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_ID, 0, 17),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_CMD, 18, 21),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_ADDR, 22, 53),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_CTRL_EN_MC_ERR, 0, 0),
+} tegra_ari_mca_aserr2_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_SERR_ERR_CODE, 0, 15),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_TO_ERR, 16, 16),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_STAT_ERR, 17, 17),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_DST_ERR, 18, 18),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_UNC_ERR, 19, 19),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_MH_ERR, 20, 20),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_PERR, 21, 21),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_PSN_ERR, 22, 22),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_AV, 58, 58),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_MV, 59, 59),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_EN, 60, 60),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_UC, 61, 61),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_OVF, 62, 62),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_VAL, 63, 63),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_ADDR_CMD, 0, 5),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_ADDR_ADDR, 6, 47),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_TO, 0, 0),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_DIV4, 1, 1),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_TLIMIT, 2, 11),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_PSN_ERR_CORR_MSK, 12, 25),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_MORE_INFO, 0, 17),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_TO_INFO, 18, 43),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_SRC, 44, 45),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_TID, 46, 52),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_TO_ERR, 0, 0),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_STAT_ERR, 1, 1),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_DST_ERR, 2, 2),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_UNC_ERR, 3, 3),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_MH_ERR, 4, 4),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_PERR, 5, 5),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_PSN_ERR, 6, 19),
+} tegra_ari_mca_aserr3_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_SERR_ERR_CODE, 0, 15),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_SRC_ERR, 16, 16),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_DST_ERR, 17, 17),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_REQ_ERR, 18, 18),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_RSP_ERR, 19, 19),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_AV, 58, 58),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_MV, 59, 59),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_EN, 60, 60),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_UC, 61, 61),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_OVF, 62, 62),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_VAL, 63, 63),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_CTRL_EN_CPE_ERR, 0, 0),
+} tegra_ari_mca_aserr4_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_SERR_ERR_CODE, 0, 15),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_CTUPAR, 16, 16),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_MULTI, 17, 17),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_AV, 58, 58),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_MV, 59, 59),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_EN, 60, 60),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_UC, 61, 61),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_OVF, 62, 62),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_VAL, 63, 63),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_SRC, 0, 7),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_ID, 8, 15),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_DATA, 16, 26),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_CMD, 32, 35),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_ADDR, 36, 45),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_CTRL_EN_CTUPAR, 0, 0),
+} tegra_ari_mca_aserr5_bitmasks_t;
+
+#undef TEGRA_ARI_ENUM_MASK_LSB_MSB
+
+typedef enum {
+	TEGRA_NVG_CHANNEL_PMIC = 0,
+	TEGRA_NVG_CHANNEL_POWER_PERF = 1,
+	TEGRA_NVG_CHANNEL_POWER_MODES = 2,
+	TEGRA_NVG_CHANNEL_WAKE_TIME = 3,
+	TEGRA_NVG_CHANNEL_CSTATE_INFO = 4,
+	TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 = 5,
+	TEGRA_NVG_CHANNEL_CROSSOVER_CC1_CC6 = 6,
+	TEGRA_NVG_CHANNEL_CROSSOVER_CC1_CC7 = 7,
+	TEGRA_NVG_CHANNEL_CROSSOVER_CCP1_CCP3 = 8,
+	TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC2 = 9,
+	TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC3 = 10,
+	TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC4 = 11,
+	TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC7 = 12,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR = 13,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_SC7_ENTRIES = 14,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_SC4_ENTRIES = 15,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_SC3_ENTRIES = 16,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_SC2_ENTRIES = 17,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_CCP3_ENTRIES = 18,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_CC6_ENTRIES = 19,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_CC7_ENTRIES = 20,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_CC6_ENTRIES = 21,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_CC7_ENTRIES = 22,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_0_C6_ENTRIES = 23,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_1_C6_ENTRIES = 24,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_2_C6_ENTRIES = 25, /* Reserved (for Denver15 core 2) */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_3_C6_ENTRIES = 26, /* Reserved (for Denver15 core 3) */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_0_C7_ENTRIES = 27,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_1_C7_ENTRIES = 28,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_2_C7_ENTRIES = 29, /* Reserved (for Denver15 core 2) */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_3_C7_ENTRIES = 30, /* Reserved (for Denver15 core 3) */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_0_C7_ENTRIES = 31,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_1_C7_ENTRIES = 32,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_2_C7_ENTRIES = 33,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_3_C7_ENTRIES = 34,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_0 = 35,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_1 = 36,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_2 = 37, /* Reserved (for Denver15 core 2) */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_3 = 38, /* Reserved (for Denver15 core 3) */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_0 = 39,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_1 = 40,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_2 = 41,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_3 = 42,
+	TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED = 43,
+	TEGRA_NVG_CHANNEL_ONLINE_CORE = 44,
+	TEGRA_NVG_CHANNEL_CC3_CTRL = 45,
+	TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC1 = 46,
+	TEGRA_NVG_CHANNEL_LAST_INDEX,
+} tegra_nvg_channel_id_t;
+
+#endif /* T18X_TEGRA_ARI_H */
+
+
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S b/plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S
new file mode 100644
index 0000000..b6e4b31
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	.globl	nvg_set_request_data
+	.globl	nvg_set_request
+	.globl	nvg_get_result
+
+/* void nvg_set_request_data(uint64_t req, uint64_t data) */
+/*
+ * Issue an NVG request (x0) together with its 64-bit payload (x1) via
+ * the Denver implementation-defined system registers: s3_0_c15_c1_2
+ * carries the request index, s3_0_c15_c1_3 the request data.
+ */
+func nvg_set_request_data
+	msr	s3_0_c15_c1_2, x0	/* NVG request register */
+	msr	s3_0_c15_c1_3, x1	/* NVG data register */
+	ret
+endfunc nvg_set_request_data
+
+/* void nvg_set_request(uint64_t req) */
+/* Issue a data-less NVG request (x0) via the NVG request register. */
+func nvg_set_request
+	msr	s3_0_c15_c1_2, x0	/* NVG request register */
+	ret
+endfunc nvg_set_request
+
+/* uint64_t nvg_get_result(void) */
+/* Return the 64-bit result of the last NVG request from the data register. */
+func nvg_get_result
+	mrs	x0, s3_0_c15_c1_3	/* NVG data register */
+	ret
+endfunc nvg_get_result
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c
new file mode 100644
index 0000000..147a358
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c
@@ -0,0 +1,391 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <denver.h>
+#include <mmio.h>
+#include <mce.h>
+#include <sys/errno.h>
+#include <t18x_ari.h>
+
+/*******************************************************************************
+ * Register offsets for ARI request/results
+ ******************************************************************************/
+#define ARI_REQUEST			0x0
+#define ARI_REQUEST_EVENT_MASK		0x4
+#define ARI_STATUS			0x8
+#define ARI_REQUEST_DATA_LO		0xC
+#define ARI_REQUEST_DATA_HI		0x10
+#define ARI_RESPONSE_DATA_LO		0x14
+#define ARI_RESPONSE_DATA_HI		0x18
+
+/* Status values for the current request */
+#define ARI_REQ_PENDING			1
+#define ARI_REQ_ONGOING			3
+#define ARI_REQUEST_VALID_BIT		(1 << 8)
+#define ARI_EVT_MASK_STANDBYWFI_BIT	(1 << 7)
+
+/*******************************************************************************
+ * ARI helper functions
+ ******************************************************************************/
+/* Read the 32-bit ARI register at offset 'reg' within aperture 'ari_base'. */
+static inline uint32_t ari_read_32(uint32_t ari_base, uint32_t reg)
+{
+	return mmio_read_32(ari_base + reg);
+}
+
+/*
+ * Write 'val' to the ARI register at offset 'reg'. Note the argument
+ * order: the value comes before the register offset.
+ */
+static inline void ari_write_32(uint32_t ari_base, uint32_t val, uint32_t reg)
+{
+	mmio_write_32(ari_base + reg, val);
+}
+
+/* Read back the low 32 bits of the last programmed request data. */
+static inline uint32_t ari_get_request_low(uint32_t ari_base)
+{
+	return ari_read_32(ari_base, ARI_REQUEST_DATA_LO);
+}
+
+/* Read back the high 32 bits of the last programmed request data. */
+static inline uint32_t ari_get_request_high(uint32_t ari_base)
+{
+	return ari_read_32(ari_base, ARI_REQUEST_DATA_HI);
+}
+
+/* Read the low 32 bits of the MCE's response to the last request. */
+static inline uint32_t ari_get_response_low(uint32_t ari_base)
+{
+	return ari_read_32(ari_base, ARI_RESPONSE_DATA_LO);
+}
+
+/* Read the high 32 bits of the MCE's response to the last request. */
+static inline uint32_t ari_get_response_high(uint32_t ari_base)
+{
+	return ari_read_32(ari_base, ARI_RESPONSE_DATA_HI);
+}
+
+/* Zero both response words so a stale result cannot be mistaken for a new one. */
+static inline void ari_clobber_response(uint32_t ari_base)
+{
+	ari_write_32(ari_base, 0, ARI_RESPONSE_DATA_LO);
+	ari_write_32(ari_base, 0, ARI_RESPONSE_DATA_HI);
+}
+
+/*
+ * Program an ARI request and, unless an event trigger is requested,
+ * spin until the MCE marks the request complete.
+ *
+ * evt_mask: event trigger bits (e.g. standby-WFI); when non-zero the
+ *           MCE waits for the event and ARI_STATUS is not polled here.
+ * req:      TEGRA_ARI_* command index (VALID bit is ORed in here).
+ * lo/hi:    64-bit request payload, split across two 32-bit registers.
+ *
+ * Always returns 0. NOTE(review): the status poll has no timeout, so a
+ * wedged MCE would hang the calling CPU in the loop below.
+ */
+static int ari_request_wait(uint32_t ari_base, uint32_t evt_mask, uint32_t req,
+		uint32_t lo, uint32_t hi)
+{
+	int status;
+
+	/* program the request, event_mask, hi and lo registers */
+	ari_write_32(ari_base, lo, ARI_REQUEST_DATA_LO);
+	ari_write_32(ari_base, hi, ARI_REQUEST_DATA_HI);
+	ari_write_32(ari_base, evt_mask, ARI_REQUEST_EVENT_MASK);
+	ari_write_32(ari_base, req | ARI_REQUEST_VALID_BIT, ARI_REQUEST);
+
+	/*
+	 * For commands that have an event trigger, we should bypass
+	 * ARI_STATUS polling, since MCE is waiting for SW to trigger
+	 * the event.
+	 */
+	if (evt_mask)
+		return 0;
+
+	/* NOTE: add timeout check if needed */
+	status = ari_read_32(ari_base, ARI_STATUS);
+	while (status & (ARI_REQ_ONGOING | ARI_REQ_PENDING))
+		status = ari_read_32(ari_base, ARI_STATUS);
+
+	return 0;
+}
+
+/*
+ * Ask the MCE to enter core power state 'state', to be woken after
+ * 'wake_time' TSC ticks. Uses the standby-WFI event trigger, so the
+ * request takes effect when this core executes WFI.
+ *
+ * NOTE(review): the failure path returns positive EINVAL while callers
+ * in mce.c check for ret < 0 — confirm the intended error convention.
+ */
+int ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	/* check for allowed power state */
+	if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
+	    state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
+		ERROR("%s: unknown cstate (%d)\n", __func__, state);
+		return EINVAL;
+	}
+
+	/* Enter the cstate, to be woken up after wake_time (TSC ticks) */
+	return ari_request_wait(ari_base, ARI_EVT_MASK_STANDBYWFI_BIT,
+		TEGRA_ARI_ENTER_CSTATE, state, wake_time);
+}
+
+/*
+ * Update the cluster/CCplex/system cstate limits and, optionally, the
+ * cstate wake mask. A zero cluster/ccplex/system argument leaves that
+ * field untouched; sys_state_force requests a forced system-state
+ * update alongside the new system value.
+ */
+int ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+	uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+	uint8_t update_wake_mask)
+{
+	uint32_t val = 0;
+
+	/* update CLUSTER_CSTATE? */
+	if (cluster)
+		val |= (cluster & CLUSTER_CSTATE_MASK) |
+			CLUSTER_CSTATE_UPDATE_BIT;
+
+	/* update CCPLEX_CSTATE? */
+	if (ccplex)
+		val |= (ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT |
+			CCPLEX_CSTATE_UPDATE_BIT;
+
+	/* update SYSTEM_CSTATE? */
+	if (system)
+		val |= ((system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
+		       ((sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
+			SYSTEM_CSTATE_UPDATE_BIT);
+
+	/* update wake mask value? */
+	if (update_wake_mask)
+		val |= CSTATE_WAKE_MASK_UPDATE_BIT;
+
+	/* set the updated cstate info */
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_UPDATE_CSTATE_INFO, val,
+			wake_mask);
+}
+
+/*
+ * Program the crossover threshold time for the given crossover type.
+ * TEGRA_ARI_CROSSOVER_C1_C6 is rejected along with out-of-range types;
+ * presumably that crossover is fixed/reserved — confirm against the
+ * ARI specification.
+ */
+int ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
+{
+	/* sanity check crossover type */
+	if ((type == TEGRA_ARI_CROSSOVER_C1_C6) ||
+	    (type > TEGRA_ARI_CROSSOVER_CCP3_SC1))
+		return EINVAL;
+
+	/* update crossover threshold time */
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_UPDATE_CROSSOVER,
+			type, time);
+}
+
+/*
+ * Read the cstate statistics counter selected by 'state' (one of the
+ * TEGRA_NVG_CHANNEL_CSTATE_STATS_* indices; 0 is invalid).
+ *
+ * NOTE(review): failures return EINVAL widened to uint64_t, which is
+ * indistinguishable from a genuine stats value equal to EINVAL —
+ * callers cannot reliably detect errors from this interface.
+ */
+uint64_t ari_read_cstate_stats(uint32_t ari_base, uint32_t state)
+{
+	int ret;
+
+	/* sanity check crossover type */
+	if (state == 0)
+		return EINVAL;
+
+	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_CSTATE_STATS, state, 0);
+	if (ret != 0)
+		return EINVAL;
+
+	return (uint64_t)ari_get_response_low(ari_base);
+}
+
+/* Overwrite the cstate statistics counter selected by 'state' with 'stats'. */
+int ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
+{
+	/* write the cstate stats */
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_WRITE_CSTATE_STATS, state,
+			stats);
+}
+
+/*
+ * Issue a TEGRA_ARI_MISC sub-command (ECHO / VERSION / FEATURE_LEAF_0)
+ * and return the full 64-bit response (high word in bits 63:32).
+ * 'data' is only meaningful for the ECHO sub-command and is zeroed for
+ * all others.
+ */
+uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data)
+{
+	uint64_t resp;
+	int ret;
+
+	/* clean the previous response state */
+	ari_clobber_response(ari_base);
+
+	/* ARI_REQUEST_DATA_HI is reserved for commands other than 'ECHO' */
+	if (cmd != TEGRA_ARI_MISC_ECHO)
+		data = 0;
+
+	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_MISC, cmd, data);
+	if (ret)
+		return (uint64_t)ret;
+
+	/* get the command response */
+	resp = ari_get_response_low(ari_base);
+	resp |= ((uint64_t)ari_get_response_high(ari_base) << 32);
+
+	return resp;
+}
+
+/*
+ * Query whether cluster power state CCx entry is allowed from core
+ * state 'state' (only the low 3 bits are used) with the given wake
+ * time. Returns 1 if allowed, 0 if not (or on request failure).
+ */
+int ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	int ret;
+
+	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_IS_CCX_ALLOWED, state & 0x7,
+			wake_time);
+	if (ret) {
+		ERROR("%s: failed (%d)\n", __func__, ret);
+		return 0;
+	}
+
+	/* 1 = CCx allowed, 0 = CCx not allowed */
+	return (ari_get_response_low(ari_base) & 0x1);
+}
+
+/*
+ * Query whether system state SC7 entry is allowed from core power
+ * state 'state' with the given wake time. Returns 1 if allowed, 0 if
+ * not.
+ *
+ * NOTE(review): the bad-state path returns positive EINVAL, which a
+ * caller treating any non-zero value as "allowed" would misread —
+ * confirm the intended error convention.
+ */
+int ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	int ret;
+
+	/* check for allowed power state */
+	if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
+	    state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
+		ERROR("%s: unknown cstate (%d)\n", __func__, state);
+		return EINVAL;
+	}
+
+	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_IS_SC7_ALLOWED, state,
+			wake_time);
+	if (ret) {
+		ERROR("%s: failed (%d)\n", __func__, ret);
+		return 0;
+	}
+
+	/* 1 = SC7 allowed, 0 = SC7 not allowed */
+	return !!ari_get_response_low(ari_base);
+}
+
+/*
+ * Ask the MCE to power on (online) the given logical core. Rejects
+ * out-of-range ids, the caller's own core, and the two non-existent
+ * Denver cores (the Denver cluster has cores 0 and 1 only).
+ */
+int ari_online_core(uint32_t ari_base, uint32_t core)
+{
+	int cpu = read_mpidr() & MPIDR_CPU_MASK;
+	int cluster = (read_mpidr() & MPIDR_CLUSTER_MASK) >>
+			MPIDR_AFFINITY_BITS;
+	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+	/* construct the current CPU # (4 cores per cluster) */
+	cpu |= (cluster << 2);
+
+	/* sanity check target core id */
+	if ((core >= MCE_CORE_ID_MAX) || (cpu == core)) {
+		ERROR("%s: unsupported core id (%d)\n", __func__, core);
+		return EINVAL;
+	}
+
+	/*
+	 * The Denver cluster has 2 CPUs only - 0, 1.
+	 */
+	if (impl == DENVER_IMPL && ((core == 2) || (core == 3))) {
+		ERROR("%s: unknown core id (%d)\n", __func__, core);
+		return EINVAL;
+	}
+
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_ONLINE_CORE, core, 0);
+}
+
+/*
+ * Enable or disable Auto-CC3 with the given frequency and voltage
+ * request values (masked and packed into a single request word below).
+ */
+int ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
+{
+	int val;
+
+	/*
+	 * If the enable bit is cleared, Auto-CC3 will be disabled by setting
+	 * the SW visible voltage/frequency request registers for all non
+	 * floorswept cores valid independent of StandbyWFI and disabling
+	 * the IDLE voltage/frequency request register. If set, Auto-CC3
+	 * will be enabled by setting the ARM SW visible voltage/frequency
+	 * request registers for all non floorswept cores to be enabled by
+	 * StandbyWFI or the equivalent signal, and always keeping the IDLE
+	 * voltage/frequency request register enabled.
+	 */
+	val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |\
+		((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
+		(enable ? MCE_AUTO_CC3_ENABLE_BIT : 0));
+
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_CC3_CTRL, val, 0);
+}
+
+/*
+ * Program the 64-bit AArch64 CPU reset vector (lo/hi words).
+ * Always returns 0; the underlying request result is not checked.
+ */
+int ari_reset_vector_update(uint32_t ari_base, uint32_t lo, uint32_t hi)
+{
+	/*
+	 * Need to program the CPU reset vector one time during cold boot
+	 * and SC7 exit
+	 */
+	ari_request_wait(ari_base, 0, TEGRA_ARI_COPY_MISCREG_AA64_RST, lo, hi);
+
+	return 0;
+}
+
+/* Issue the ROC flush-cache-trbits maintenance operation via ARI. */
+int ari_roc_flush_cache_trbits(uint32_t ari_base)
+{
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS,
+			0, 0);
+}
+
+/* Issue the ROC flush-cache-only maintenance operation via ARI. */
+int ari_roc_flush_cache(uint32_t ari_base)
+{
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_ROC_FLUSH_CACHE_ONLY,
+			0, 0);
+}
+
+/* Issue the ROC clean-cache-only maintenance operation via ARI. */
+int ari_roc_clean_cache(uint32_t ari_base)
+{
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_ROC_CLEAN_CACHE_ONLY,
+			0, 0);
+}
+
+/*
+ * Execute a Machine Check Architecture (MCA) read or write command.
+ * For writes, *data supplies the value; for reads, *data receives it
+ * (when non-NULL). Returns the MCA error code when the command did not
+ * finish, otherwise 0.
+ *
+ * NOTE(review): the command word is written into the ARI *response*
+ * data registers before issuing the request — this appears to be part
+ * of the MCA protocol's register reuse, but confirm against the ARI
+ * specification.
+ */
+uint64_t ari_read_write_mca(uint32_t ari_base, mca_cmd_t cmd, uint64_t *data)
+{
+	mca_arg_t mca_arg;
+	int ret;
+
+	/* Set data (write) */
+	mca_arg.data = data ? *data : 0ull;
+
+	/* Set command */
+	ari_write_32(ari_base, cmd.input.low, ARI_RESPONSE_DATA_LO);
+	ari_write_32(ari_base, cmd.input.high, ARI_RESPONSE_DATA_HI);
+
+	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_MCA, mca_arg.arg.low,
+			mca_arg.arg.high);
+	if (!ret) {
+		mca_arg.arg.low = ari_get_response_low(ari_base);
+		mca_arg.arg.high = ari_get_response_high(ari_base);
+		if (!mca_arg.err.finish)
+			return (uint64_t)mca_arg.err.error;
+
+		if (data) {
+			mca_arg.arg.low = ari_get_request_low(ari_base);
+			mca_arg.arg.high = ari_get_request_high(ari_base);
+			*data = mca_arg.data;
+		}
+	}
+
+	return 0;
+}
+
+/* Ask the MCE to refresh the CCPLEX-internal copy of a GSC carveout. */
+int ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx)
+{
+	/* sanity check GSC ID */
+	if (gsc_idx > TEGRA_ARI_GSC_VPR_IDX)
+		return EINVAL;
+
+	/*
+	 * The MCE code will read the GSC carveout value, corresponding to
+	 * the ID, from the MC registers and update the internal GSC registers
+	 * of the CCPLEX.
+	 */
+	ari_request_wait(ari_base, 0, TEGRA_ARI_UPDATE_CCPLEX_GSC, gsc_idx, 0);
+
+	return 0;
+}
+
+/* Request a system-wide shutdown or reboot; the result is not checked. */
+void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx)
+{
+	/*
+	 * The MCE will shutdown or restart the entire system
+	 */
+	(void)ari_request_wait(ari_base, 0, TEGRA_ARI_MISC_CCPLEX, state_idx, 0);
+}
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c
new file mode 100644
index 0000000..745b6f4
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <denver.h>
+#include <mce.h>
+#include <mmio.h>
+#include <string.h>
+#include <sys/errno.h>
+#include <t18x_ari.h>
+#include <tegra_def.h>
+
+/* NVG functions handlers */
+/*
+ * Handler table for Denver cores: cstate-related operations use the
+ * NVG system-register interface; operations with no NVG equivalent
+ * (reset vector, ROC cache maintenance, MCA, GSC, CCPLEX state) fall
+ * back to the memory-mapped ARI implementations.
+ */
+static arch_mce_ops_t nvg_mce_ops = {
+	.enter_cstate = nvg_enter_cstate,
+	.update_cstate_info = nvg_update_cstate_info,
+	.update_crossover_time = nvg_update_crossover_time,
+	.read_cstate_stats = nvg_read_cstate_stats,
+	.write_cstate_stats = nvg_write_cstate_stats,
+	.call_enum_misc = ari_enumeration_misc,
+	.is_ccx_allowed = nvg_is_ccx_allowed,
+	.is_sc7_allowed = nvg_is_sc7_allowed,
+	.online_core = nvg_online_core,
+	.cc3_ctrl = nvg_cc3_ctrl,
+	.update_reset_vector = ari_reset_vector_update,
+	.roc_flush_cache = ari_roc_flush_cache,
+	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
+	.roc_clean_cache = ari_roc_clean_cache,
+	.read_write_mca = ari_read_write_mca,
+	.update_ccplex_gsc = ari_update_ccplex_gsc,
+	.enter_ccplex_state = ari_enter_ccplex_state
+};
+
+/* ARI functions handlers */
+/* Handler table for A57 cores: every operation uses the memory-mapped ARI. */
+static arch_mce_ops_t ari_mce_ops = {
+	.enter_cstate = ari_enter_cstate,
+	.update_cstate_info = ari_update_cstate_info,
+	.update_crossover_time = ari_update_crossover_time,
+	.read_cstate_stats = ari_read_cstate_stats,
+	.write_cstate_stats = ari_write_cstate_stats,
+	.call_enum_misc = ari_enumeration_misc,
+	.is_ccx_allowed = ari_is_ccx_allowed,
+	.is_sc7_allowed = ari_is_sc7_allowed,
+	.online_core = ari_online_core,
+	.cc3_ctrl = ari_cc3_ctrl,
+	.update_reset_vector = ari_reset_vector_update,
+	.roc_flush_cache = ari_roc_flush_cache,
+	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
+	.roc_clean_cache = ari_roc_clean_cache,
+	.read_write_mca = ari_read_write_mca,
+	.update_ccplex_gsc = ari_update_ccplex_gsc,
+	.enter_ccplex_state = ari_enter_ccplex_state
+};
+
+/* Per-CPU MCE configuration: ARI aperture base plus the handler table to use. */
+typedef struct mce_config {
+	uint32_t ari_base;	/* MMCRAB ARI aperture base address */
+	arch_mce_ops_t *ops;	/* &ari_mce_ops or &nvg_mce_ops */
+} mce_config_t;
+
+/* Table to hold the per-CPU ARI base address and function handlers */
+/* Indexed by logical CPU id: entries 0-3 are A57 cores, 4-5 Denver cores. */
+static mce_config_t mce_cfg_table[MCE_ARI_APERTURES_MAX] = {
+	{
+		/* A57 Core 0 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_0_OFFSET,
+		.ops = &ari_mce_ops,
+	},
+	{
+		/* A57 Core 1 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_1_OFFSET,
+		.ops = &ari_mce_ops,
+	},
+	{
+		/* A57 Core 2 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_2_OFFSET,
+		.ops = &ari_mce_ops,
+	},
+	{
+		/* A57 Core 3 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_3_OFFSET,
+		.ops = &ari_mce_ops,
+	},
+	{
+		/* D15 Core 0 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_4_OFFSET,
+		.ops = &nvg_mce_ops,
+	},
+	{
+		/* D15 Core 1 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_5_OFFSET,
+		.ops = &nvg_mce_ops,
+	}
+};
+
+/* Return the ARI aperture base address for the calling CPU. */
+static uint32_t mce_get_curr_cpu_ari_base(void)
+{
+	uint32_t mpidr = read_mpidr();
+	int cpuid =  mpidr & MPIDR_CPU_MASK;
+	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+	/*
+	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
+	 * ARM CortexA-57 CPUs. Each cluster consists of 4 CPUs and the CPU
+	 * numbers start from 0. In order to get the proper arch_mce_ops_t
+	 * struct, we have to convert the Denver CPU ids to the corresponding
+	 * indices in the mce_ops_table array.
+	 */
+	if (impl == DENVER_IMPL)
+		cpuid |= 0x4;
+
+	return mce_cfg_table[cpuid].ari_base;
+}
+
+/* Return the arch_mce_ops_t handler table for the calling CPU. */
+static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
+{
+	uint32_t mpidr = read_mpidr();
+	int cpuid =  mpidr & MPIDR_CPU_MASK;
+	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+	/*
+	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
+	 * ARM CortexA-57 CPUs. Each cluster consists of 4 CPUs and the CPU
+	 * numbers start from 0. In order to get the proper arch_mce_ops_t
+	 * struct, we have to convert the Denver CPU ids to the corresponding
+	 * indices in the mce_ops_table array.
+	 */
+	if (impl == DENVER_IMPL)
+		cpuid |= 0x4;
+
+	return mce_cfg_table[cpuid].ops;
+}
+
+/*******************************************************************************
+ * Common handler for all MCE commands
+ *
+ * arg0-arg2 arrive in the SMC argument registers; commands that need
+ * more parameters (UPDATE_CSTATE_INFO) fetch them from the saved
+ * non-secure GP register context, and results are returned the same
+ * way. Returns 0 on success, or the underlying handler's error code.
+ ******************************************************************************/
+int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
+			uint64_t arg2)
+{
+	arch_mce_ops_t *ops;
+	uint32_t cpu_ari_base;
+	uint64_t ret64 = 0, arg3, arg4, arg5;
+	int ret = 0;	/* NOTE(review): the ari_* handlers return positive
+			 * EINVAL, which the 'ret < 0' checks below never
+			 * catch — confirm the intended error convention. */
+	mca_cmd_t mca_cmd;
+	cpu_context_t *ctx = cm_get_context(NON_SECURE);
+	gp_regs_t *gp_regs = get_gpregs_ctx(ctx);
+
+	assert(ctx);
+	assert(gp_regs);
+
+	/* get a pointer to the CPU's arch_mce_ops_t struct */
+	ops = mce_get_curr_cpu_ops();
+
+	/* get the CPU's ARI base address */
+	cpu_ari_base = mce_get_curr_cpu_ari_base();
+
+	switch (cmd) {
+	case MCE_CMD_ENTER_CSTATE:
+		ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
+		if (ret < 0)
+			ERROR("%s: enter_cstate failed(%d)\n", __func__, ret);
+
+		break;
+
+	case MCE_CMD_UPDATE_CSTATE_INFO:
+		/*
+		 * get the parameters required for the update cstate info
+		 * command
+		 */
+		arg3 = read_ctx_reg(gp_regs, CTX_GPREG_X4);
+		arg4 = read_ctx_reg(gp_regs, CTX_GPREG_X5);
+		arg5 = read_ctx_reg(gp_regs, CTX_GPREG_X6);
+
+		ret = ops->update_cstate_info(cpu_ari_base, (uint32_t)arg0,
+				(uint32_t)arg1, (uint32_t)arg2, (uint8_t)arg3,
+				(uint32_t)arg4, (uint8_t)arg5);
+		if (ret < 0)
+			ERROR("%s: update_cstate_info failed(%d)\n",
+				__func__, ret);
+
+		/* clear the consumed extra arguments from the context */
+		write_ctx_reg(gp_regs, CTX_GPREG_X4, 0);
+		write_ctx_reg(gp_regs, CTX_GPREG_X5, 0);
+		write_ctx_reg(gp_regs, CTX_GPREG_X6, 0);
+
+		break;
+
+	case MCE_CMD_UPDATE_CROSSOVER_TIME:
+		ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
+		if (ret < 0)
+			ERROR("%s: update_crossover_time failed(%d)\n",
+				__func__, ret);
+
+		break;
+
+	case MCE_CMD_READ_CSTATE_STATS:
+		ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);
+
+		/* update context to return cstate stats value */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
+		write_ctx_reg(gp_regs, CTX_GPREG_X2, ret64);
+
+		break;
+
+	case MCE_CMD_WRITE_CSTATE_STATS:
+		ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
+		if (ret < 0)
+			ERROR("%s: write_cstate_stats failed(%d)\n",
+				__func__, ret);
+
+		break;
+
+	case MCE_CMD_IS_CCX_ALLOWED:
+		ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1);
+		if (ret < 0) {
+			ERROR("%s: is_ccx_allowed failed(%d)\n", __func__, ret);
+			break;
+		}
+
+		/* update context to return CCx status value */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret);
+
+		break;
+
+	case MCE_CMD_IS_SC7_ALLOWED:
+		ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1);
+		if (ret < 0) {
+			ERROR("%s: is_sc7_allowed failed(%d)\n", __func__, ret);
+			break;
+		}
+
+		/* update context to return SC7 status value */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret);
+		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret);
+
+		break;
+
+	case MCE_CMD_ONLINE_CORE:
+		ret = ops->online_core(cpu_ari_base, arg0);
+		if (ret < 0)
+			ERROR("%s: online_core failed(%d)\n", __func__, ret);
+
+		break;
+
+	case MCE_CMD_CC3_CTRL:
+		ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
+		if (ret < 0)
+			ERROR("%s: cc3_ctrl failed(%d)\n", __func__, ret);
+
+		break;
+
+	case MCE_CMD_ECHO_DATA:
+		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_ECHO,
+				arg0);
+
+		/* update context to return if echo'd data matched source */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64 == arg0);
+		write_ctx_reg(gp_regs, CTX_GPREG_X2, ret64 == arg0);
+
+		break;
+
+	case MCE_CMD_READ_VERSIONS:
+		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION,
+			arg0);
+
+		/*
+		 * version = minor(63:32) | major(31:0). Update context
+		 * to return major and minor version number.
+		 */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, (uint32_t)ret64);
+		write_ctx_reg(gp_regs, CTX_GPREG_X2, (uint32_t)(ret64 >> 32));
+
+		break;
+
+	case MCE_CMD_ENUM_FEATURES:
+		/*
+		 * Bug fix: the response was previously assigned to 'ret',
+		 * so the stale 'ret64' (always 0) was written back to the
+		 * caller. Capture the 64-bit response in 'ret64' instead.
+		 */
+		ret64 = ops->call_enum_misc(cpu_ari_base,
+				TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);
+
+		/* update context to return features value */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
+
+		ret = 0;
+
+		break;
+
+	case MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
+		ret = ops->roc_flush_cache_trbits(cpu_ari_base);
+		if (ret < 0)
+			ERROR("%s: flush cache_trbits failed(%d)\n", __func__,
+				ret);
+
+		break;
+
+	case MCE_CMD_ROC_FLUSH_CACHE:
+		ret = ops->roc_flush_cache(cpu_ari_base);
+		if (ret < 0)
+			ERROR("%s: flush cache failed(%d)\n", __func__, ret);
+
+		break;
+
+	case MCE_CMD_ROC_CLEAN_CACHE:
+		ret = ops->roc_clean_cache(cpu_ari_base);
+		if (ret < 0)
+			ERROR("%s: clean cache failed(%d)\n", __func__, ret);
+
+		break;
+
+	case MCE_CMD_ENUM_READ_MCA:
+		memcpy(&mca_cmd, &arg0, sizeof(arg0));
+		ret64 = ops->read_write_mca(cpu_ari_base, mca_cmd, &arg1);
+
+		/* update context to return MCA data/error */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
+		write_ctx_reg(gp_regs, CTX_GPREG_X2, arg1);
+		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret64);
+
+		break;
+
+	case MCE_CMD_ENUM_WRITE_MCA:
+		memcpy(&mca_cmd, &arg0, sizeof(arg0));
+		ret64 = ops->read_write_mca(cpu_ari_base, mca_cmd, &arg1);
+
+		/* update context to return MCA error */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
+		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret64);
+
+		break;
+
+	default:
+		ERROR("unknown MCE command (%d)\n", cmd);
+		return EINVAL;
+	}
+
+	return ret;
+}
+
+/*******************************************************************************
+ * Handler to update the reset vector for CPUs
+ ******************************************************************************/
+int mce_update_reset_vector(uint32_t addr_lo, uint32_t addr_hi)
+{
+	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+	/* program the 64-bit reset vector, split into lo/hi words */
+	ops->update_reset_vector(mce_get_curr_cpu_ari_base(), addr_lo, addr_hi);
+
+	return 0;
+}
+
+/* Ask the MCE to re-read GSC carveout 'gsc_idx' into the CCPLEX; always returns 0. */
+static int mce_update_ccplex_gsc(tegra_ari_gsc_index_t gsc_idx)
+{
+	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+	ops->update_ccplex_gsc(mce_get_curr_cpu_ari_base(), gsc_idx);
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Handler to update carveout values for Video Memory Carveout region
+ ******************************************************************************/
+int mce_update_gsc_videomem(void)
+{
+	/* refresh the VPR (video protection) carveout in the CCPLEX */
+	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_VPR_IDX);
+}
+
+/*******************************************************************************
+ * Handler to update carveout values for TZDRAM aperture
+ ******************************************************************************/
+int mce_update_gsc_tzdram(void)
+{
+	/* refresh the TrustZone DRAM carveout in the CCPLEX */
+	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZ_DRAM_IDX);
+}
+
+/*******************************************************************************
+ * Handler to update carveout values for TZ SysRAM aperture
+ ******************************************************************************/
+int mce_update_gsc_tzram(void)
+{
+	/* NOTE(review): uses TEGRA_ARI_GSC_TZRAM (no _IDX suffix), unlike
+	 * the sibling handlers — confirm the enum name is intentional. */
+	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZRAM);
+}
+
+/*******************************************************************************
+ * Handler to shutdown/reset the entire system
+ ******************************************************************************/
+__dead2 void mce_enter_ccplex_state(uint32_t state_idx)
+{
+	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+	/* sanity check state value */
+	if (state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF &&
+	    state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT)
+		panic();
+
+	ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(), state_idx);
+
+	/* wait till the CCPLEX powers down */
+	for (;;)
+		;
+
+	/* unreachable: the loop above never exits; kept for __dead2 safety */
+	panic();
+}
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
new file mode 100644
index 0000000..25479a2
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <denver.h>
+#include <mmio.h>
+#include <mce.h>
+#include <sys/errno.h>
+#include <t18x_ari.h>
+
+extern void nvg_set_request_data(uint64_t req, uint64_t data);
+extern void nvg_set_request(uint64_t req);
+extern uint64_t nvg_get_result(void);
+
+int nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	/* check for allowed power state */
+	if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
+	    state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
+		ERROR("%s: unknown cstate (%d)\n", __func__, state);
+		return EINVAL;
+	}
+
+	/* time (TSC ticks) until the core is expected to get a wake event */
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_WAKE_TIME, wake_time);
+
+	/* set the core cstate */
+	write_actlr_el1(state);
+
+	return 0;
+}
+
+/*
+ * This request allows updating of CLUSTER_CSTATE, CCPLEX_CSTATE and
+ * SYSTEM_CSTATE values.
+ */
+int nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+		uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+		uint8_t update_wake_mask)
+{
+	uint64_t val = 0;
+
+	/* update CLUSTER_CSTATE? */
+	if (cluster)
+		val |= (cluster & CLUSTER_CSTATE_MASK) |
+			CLUSTER_CSTATE_UPDATE_BIT;
+
+	/* update CCPLEX_CSTATE? */
+	if (ccplex)
+		val |= (ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT |
+			CCPLEX_CSTATE_UPDATE_BIT;
+
+	/* update SYSTEM_CSTATE? */
+	if (system)
+		val |= ((system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
+		       ((sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
+			SYSTEM_CSTATE_UPDATE_BIT);
+
+	/* update wake mask value? */
+	if (update_wake_mask)
+		val |= CSTATE_WAKE_MASK_UPDATE_BIT;
+
+	/* set the wake mask */
+	val &= CSTATE_WAKE_MASK_CLEAR;
+	val |= ((uint64_t)wake_mask << CSTATE_WAKE_MASK_SHIFT);
+
+	/* set the updated cstate info */
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_INFO, val);
+
+	return 0;
+}
+
+int nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
+{
+	/* sanity check crossover type */
+	if (type > TEGRA_ARI_CROSSOVER_CCP3_SC1)
+		return EINVAL;
+
+	/*
+	 * The crossover threshold limit types start from
+	 * TEGRA_CROSSOVER_TYPE_C1_C6 to TEGRA_CROSSOVER_TYPE_CCP3_SC7. The
+	 * command indices for updating the threshold can be generated
+	 * by adding the type to the NVG_SET_THRESHOLD_CROSSOVER_C1_C6
+	 * command index.
+	 */
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 + type,
+		(uint64_t)time);
+
+	return 0;
+}
+
+uint64_t nvg_read_cstate_stats(uint32_t ari_base, uint32_t state)
+{
+	/* sanity check state */
+	if (state == 0)
+		return EINVAL;
+
+	/*
+	 * The cstate types start from NVG_READ_CSTATE_STATS_SC7_ENTRIES
+	 * to NVG_GET_LAST_CSTATE_ENTRY_A57_3. The command indices for
+	 * reading the threshold can be generated by adding the type to
+	 * the NVG_CLEAR_CSTATE_STATS command index.
+	 */
+	nvg_set_request(TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR + state);
+
+	return (int64_t)nvg_get_result();
+}
+
+int nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
+{
+	uint64_t val;
+
+	/*
+	 * The only difference between a CSTATE_STATS_WRITE and
+ * CSTATE_STATS_READ is the usage of bits 63:32 in the request.
+	 * 63:32 are set to '0' for a read, while a write contains the
+	 * actual stats value to be written.
+	 */
+	val = ((uint64_t)stats << MCE_CSTATE_STATS_TYPE_SHIFT) | state;
+
+	/*
+	 * The cstate types start from NVG_READ_CSTATE_STATS_SC7_ENTRIES
+	 * to NVG_GET_LAST_CSTATE_ENTRY_A57_3. The command indices for
+	 * reading the threshold can be generated by adding the type to
+	 * the NVG_CLEAR_CSTATE_STATS command index.
+	 */
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR + state, val);
+
+	return 0;
+}
+
+int nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	/* This does not apply to the Denver cluster */
+	return 0;
+}
+
+int nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	uint64_t val;
+
+	/* check for allowed power state */
+	if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
+	    state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
+		ERROR("%s: unknown cstate (%d)\n", __func__, state);
+		return EINVAL;
+	}
+
+	/*
+	 * Request format -
+	 * 63:32 = wake time
+	 * 31:0 = C-state for this core
+	 */
+	val = ((uint64_t)wake_time << MCE_SC7_WAKE_TIME_SHIFT) |
+			(state & MCE_SC7_ALLOWED_MASK);
+
+	/* issue command to check if SC7 is allowed */
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED, val);
+
+	/* 1 = SC7 allowed, 0 = SC7 not allowed */
+	return !!nvg_get_result();
+}
+
+int nvg_online_core(uint32_t ari_base, uint32_t core)
+{
+	int cpu = read_mpidr() & MPIDR_CPU_MASK;
+	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+	/* sanity check core id */
+	if ((core >= MCE_CORE_ID_MAX) || (cpu == core)) {
+		ERROR("%s: unsupported core id (%d)\n", __func__, core);
+		return EINVAL;
+	}
+
+	/*
+	 * The Denver cluster has 2 CPUs only - 0, 1.
+	 */
+	if (impl == DENVER_IMPL && ((core == 2) || (core == 3))) {
+		ERROR("%s: unknown core id (%d)\n", __func__, core);
+		return EINVAL;
+	}
+
+	/* get a core online */
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_ONLINE_CORE, core & MCE_CORE_ID_MASK);
+
+	return 0;
+}
+
+int nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
+{
+	int val;
+
+	/*
+	 * If the enable bit is cleared, Auto-CC3 will be disabled by setting
+	 * the SW visible voltage/frequency request registers for all non
+	 * floorswept cores valid independent of StandbyWFI and disabling
+	 * the IDLE voltage/frequency request register. If set, Auto-CC3
+	 * will be enabled by setting the ARM SW visible voltage/frequency
+	 * request registers for all non floorswept cores to be enabled by
+	 * StandbyWFI or the equivalent signal, and always keeping the IDLE
+	 * voltage/frequency request register enabled.
+	 */
+	val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |\
+		((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
+		(enable ? MCE_AUTO_CC3_ENABLE_BIT : 0));
+
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_CC3_CTRL, val);
+
+	return 0;
+}
diff --git a/plat/nvidia/tegra/soc/t186/drivers/smmu/smmu.c b/plat/nvidia/tegra/soc/t186/drivers/smmu/smmu.c
new file mode 100644
index 0000000..2940f58
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/smmu/smmu.c
@@ -0,0 +1,466 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <memctrl_v2.h>
+#include <smmu.h>
+
+typedef struct smmu_regs {
+	uint32_t reg;
+	uint32_t val;
+} smmu_regs_t;
+
+#define mc_make_sid_override_cfg(name) \
+	{ \
+		.reg = TEGRA_MC_STREAMID_BASE + MC_STREAMID_OVERRIDE_CFG_ ## name, \
+		.val = 0x00000000, \
+	}
+
+#define mc_make_sid_security_cfg(name) \
+	{ \
+		.reg = TEGRA_MC_STREAMID_BASE + MC_STREAMID_SECURITY_CFG_ ## name, \
+		.val = 0x00000000, \
+	}
+
+#define smmu_make_gnsr0_sec_cfg(name) \
+	{ \
+		.reg = TEGRA_SMMU_BASE + SMMU_GNSR0_ ## name, \
+		.val = 0x00000000, \
+	}
+
+/*
+ * On ARM-SMMU, conditional offset to access secure aliases of non-secure registers
+ * is 0x400. So, add it to register address
+ */
+#define smmu_make_gnsr0_nsec_cfg(name) \
+	{ \
+		.reg = TEGRA_SMMU_BASE + 0x400 + SMMU_GNSR0_ ## name, \
+		.val = 0x00000000, \
+	}
+
+#define smmu_make_gnsr0_smr_cfg(n) \
+	{ \
+		.reg = TEGRA_SMMU_BASE + SMMU_GNSR0_SMR ## n, \
+		.val = 0x00000000, \
+	}
+
+#define smmu_make_gnsr0_s2cr_cfg(n) \
+	{ \
+		.reg = TEGRA_SMMU_BASE + SMMU_GNSR0_S2CR ## n, \
+		.val = 0x00000000, \
+	}
+
+#define smmu_make_gnsr1_cbar_cfg(n) \
+	{ \
+		.reg = TEGRA_SMMU_BASE + (1 << PGSHIFT) + SMMU_GNSR1_CBAR ## n, \
+		.val = 0x00000000, \
+	}
+
+#define smmu_make_gnsr1_cba2r_cfg(n) \
+	{ \
+		.reg = TEGRA_SMMU_BASE + (1 << PGSHIFT) + SMMU_GNSR1_CBA2R ## n, \
+		.val = 0x00000000, \
+	}
+
+#define make_smmu_cb_cfg(name, n) \
+	{ \
+		.reg = TEGRA_SMMU_BASE + (CB_SIZE >> 1) + (n * (1 << PGSHIFT)) \
+			+ SMMU_CBn_ ## name, \
+		.val = 0x00000000, \
+	}
+
+#define smmu_make_smrg_group(n)	\
+	smmu_make_gnsr0_smr_cfg(n),	\
+	smmu_make_gnsr0_s2cr_cfg(n),	\
+	smmu_make_gnsr1_cbar_cfg(n),	\
+	smmu_make_gnsr1_cba2r_cfg(n)	/* don't put "," here. */
+
+#define smmu_make_cb_group(n)		\
+	make_smmu_cb_cfg(SCTLR, n),	\
+	make_smmu_cb_cfg(TCR2, n),	\
+	make_smmu_cb_cfg(TTBR0_LO, n),	\
+	make_smmu_cb_cfg(TTBR0_HI, n),	\
+	make_smmu_cb_cfg(TCR, n),	\
+	make_smmu_cb_cfg(PRRR_MAIR0, n),\
+	make_smmu_cb_cfg(FSR, n),	\
+	make_smmu_cb_cfg(FAR_LO, n),	\
+	make_smmu_cb_cfg(FAR_HI, n),	\
+	make_smmu_cb_cfg(FSYNR0, n)	/* don't put "," here. */
+
+#define smmu_bypass_cfg \
+	{ \
+		.reg = TEGRA_MC_BASE + MC_SMMU_BYPASS_CONFIG, \
+		.val = 0x00000000, \
+	}
+
+#define _START_OF_TABLE_ \
+	{ \
+		.reg = 0xCAFE05C7, \
+		.val = 0x00000000, \
+	}
+
+#define _END_OF_TABLE_ \
+	{ \
+		.reg = 0xFFFFFFFF, \
+		.val = 0xFFFFFFFF, \
+	}
+
+static smmu_regs_t smmu_ctx_regs[] = {
+	_START_OF_TABLE_,
+	mc_make_sid_security_cfg(SCEW),
+	mc_make_sid_security_cfg(AFIR),
+	mc_make_sid_security_cfg(NVDISPLAYR1),
+	mc_make_sid_security_cfg(XUSB_DEVR),
+	mc_make_sid_security_cfg(VICSRD1),
+	mc_make_sid_security_cfg(NVENCSWR),
+	mc_make_sid_security_cfg(TSECSRDB),
+	mc_make_sid_security_cfg(AXISW),
+	mc_make_sid_security_cfg(SDMMCWAB),
+	mc_make_sid_security_cfg(AONDMAW),
+	mc_make_sid_security_cfg(GPUSWR2),
+	mc_make_sid_security_cfg(SATAW),
+	mc_make_sid_security_cfg(UFSHCW),
+	mc_make_sid_security_cfg(AFIW),
+	mc_make_sid_security_cfg(SDMMCR),
+	mc_make_sid_security_cfg(SCEDMAW),
+	mc_make_sid_security_cfg(UFSHCR),
+	mc_make_sid_security_cfg(SDMMCWAA),
+	mc_make_sid_security_cfg(APEDMAW),
+	mc_make_sid_security_cfg(SESWR),
+	mc_make_sid_security_cfg(MPCORER),
+	mc_make_sid_security_cfg(PTCR),
+	mc_make_sid_security_cfg(BPMPW),
+	mc_make_sid_security_cfg(ETRW),
+	mc_make_sid_security_cfg(GPUSRD),
+	mc_make_sid_security_cfg(VICSWR),
+	mc_make_sid_security_cfg(SCEDMAR),
+	mc_make_sid_security_cfg(HDAW),
+	mc_make_sid_security_cfg(ISPWA),
+	mc_make_sid_security_cfg(EQOSW),
+	mc_make_sid_security_cfg(XUSB_HOSTW),
+	mc_make_sid_security_cfg(TSECSWR),
+	mc_make_sid_security_cfg(SDMMCRAA),
+	mc_make_sid_security_cfg(APER),
+	mc_make_sid_security_cfg(VIW),
+	mc_make_sid_security_cfg(APEW),
+	mc_make_sid_security_cfg(AXISR),
+	mc_make_sid_security_cfg(SDMMCW),
+	mc_make_sid_security_cfg(BPMPDMAW),
+	mc_make_sid_security_cfg(ISPRA),
+	mc_make_sid_security_cfg(NVDECSWR),
+	mc_make_sid_security_cfg(XUSB_DEVW),
+	mc_make_sid_security_cfg(NVDECSRD),
+	mc_make_sid_security_cfg(MPCOREW),
+	mc_make_sid_security_cfg(NVDISPLAYR),
+	mc_make_sid_security_cfg(BPMPDMAR),
+	mc_make_sid_security_cfg(NVJPGSWR),
+	mc_make_sid_security_cfg(NVDECSRD1),
+	mc_make_sid_security_cfg(TSECSRD),
+	mc_make_sid_security_cfg(NVJPGSRD),
+	mc_make_sid_security_cfg(SDMMCWA),
+	mc_make_sid_security_cfg(SCER),
+	mc_make_sid_security_cfg(XUSB_HOSTR),
+	mc_make_sid_security_cfg(VICSRD),
+	mc_make_sid_security_cfg(AONDMAR),
+	mc_make_sid_security_cfg(AONW),
+	mc_make_sid_security_cfg(SDMMCRA),
+	mc_make_sid_security_cfg(HOST1XDMAR),
+	mc_make_sid_security_cfg(EQOSR),
+	mc_make_sid_security_cfg(SATAR),
+	mc_make_sid_security_cfg(BPMPR),
+	mc_make_sid_security_cfg(HDAR),
+	mc_make_sid_security_cfg(SDMMCRAB),
+	mc_make_sid_security_cfg(ETRR),
+	mc_make_sid_security_cfg(AONR),
+	mc_make_sid_security_cfg(APEDMAR),
+	mc_make_sid_security_cfg(SESRD),
+	mc_make_sid_security_cfg(NVENCSRD),
+	mc_make_sid_security_cfg(GPUSWR),
+	mc_make_sid_security_cfg(TSECSWRB),
+	mc_make_sid_security_cfg(ISPWB),
+	mc_make_sid_security_cfg(GPUSRD2),
+	mc_make_sid_override_cfg(APER),
+	mc_make_sid_override_cfg(VICSRD),
+	mc_make_sid_override_cfg(NVENCSRD),
+	mc_make_sid_override_cfg(NVJPGSWR),
+	mc_make_sid_override_cfg(AONW),
+	mc_make_sid_override_cfg(BPMPR),
+	mc_make_sid_override_cfg(BPMPW),
+	mc_make_sid_override_cfg(HDAW),
+	mc_make_sid_override_cfg(NVDISPLAYR1),
+	mc_make_sid_override_cfg(APEDMAR),
+	mc_make_sid_override_cfg(AFIR),
+	mc_make_sid_override_cfg(AXISR),
+	mc_make_sid_override_cfg(VICSRD1),
+	mc_make_sid_override_cfg(TSECSRD),
+	mc_make_sid_override_cfg(BPMPDMAW),
+	mc_make_sid_override_cfg(MPCOREW),
+	mc_make_sid_override_cfg(XUSB_HOSTR),
+	mc_make_sid_override_cfg(GPUSWR),
+	mc_make_sid_override_cfg(XUSB_DEVR),
+	mc_make_sid_override_cfg(UFSHCW),
+	mc_make_sid_override_cfg(XUSB_HOSTW),
+	mc_make_sid_override_cfg(SDMMCWAB),
+	mc_make_sid_override_cfg(SATAW),
+	mc_make_sid_override_cfg(SCEDMAR),
+	mc_make_sid_override_cfg(HOST1XDMAR),
+	mc_make_sid_override_cfg(SDMMCWA),
+	mc_make_sid_override_cfg(APEDMAW),
+	mc_make_sid_override_cfg(SESWR),
+	mc_make_sid_override_cfg(AXISW),
+	mc_make_sid_override_cfg(AONDMAW),
+	mc_make_sid_override_cfg(TSECSWRB),
+	mc_make_sid_override_cfg(MPCORER),
+	mc_make_sid_override_cfg(ISPWB),
+	mc_make_sid_override_cfg(AONR),
+	mc_make_sid_override_cfg(BPMPDMAR),
+	mc_make_sid_override_cfg(HDAR),
+	mc_make_sid_override_cfg(SDMMCRA),
+	mc_make_sid_override_cfg(ETRW),
+	mc_make_sid_override_cfg(GPUSWR2),
+	mc_make_sid_override_cfg(EQOSR),
+	mc_make_sid_override_cfg(TSECSWR),
+	mc_make_sid_override_cfg(ETRR),
+	mc_make_sid_override_cfg(NVDECSRD),
+	mc_make_sid_override_cfg(TSECSRDB),
+	mc_make_sid_override_cfg(SDMMCRAA),
+	mc_make_sid_override_cfg(NVDECSRD1),
+	mc_make_sid_override_cfg(SDMMCR),
+	mc_make_sid_override_cfg(NVJPGSRD),
+	mc_make_sid_override_cfg(SCEDMAW),
+	mc_make_sid_override_cfg(SDMMCWAA),
+	mc_make_sid_override_cfg(APEW),
+	mc_make_sid_override_cfg(AONDMAR),
+	mc_make_sid_override_cfg(PTCR),
+	mc_make_sid_override_cfg(SCER),
+	mc_make_sid_override_cfg(ISPRA),
+	mc_make_sid_override_cfg(ISPWA),
+	mc_make_sid_override_cfg(VICSWR),
+	mc_make_sid_override_cfg(SESRD),
+	mc_make_sid_override_cfg(SDMMCW),
+	mc_make_sid_override_cfg(SDMMCRAB),
+	mc_make_sid_override_cfg(EQOSW),
+	mc_make_sid_override_cfg(GPUSRD2),
+	mc_make_sid_override_cfg(SCEW),
+	mc_make_sid_override_cfg(GPUSRD),
+	mc_make_sid_override_cfg(NVDECSWR),
+	mc_make_sid_override_cfg(XUSB_DEVW),
+	mc_make_sid_override_cfg(SATAR),
+	mc_make_sid_override_cfg(NVDISPLAYR),
+	mc_make_sid_override_cfg(VIW),
+	mc_make_sid_override_cfg(UFSHCR),
+	mc_make_sid_override_cfg(NVENCSWR),
+	mc_make_sid_override_cfg(AFIW),
+	smmu_make_gnsr0_nsec_cfg(CR0),
+	smmu_make_gnsr0_sec_cfg(IDR0),
+	smmu_make_gnsr0_sec_cfg(IDR1),
+	smmu_make_gnsr0_sec_cfg(IDR2),
+	smmu_make_gnsr0_nsec_cfg(GFSR),
+	smmu_make_gnsr0_nsec_cfg(GFSYNR0),
+	smmu_make_gnsr0_nsec_cfg(GFSYNR1),
+	smmu_make_gnsr0_nsec_cfg(TLBGSTATUS),
+	smmu_make_gnsr0_nsec_cfg(PIDR2),
+	smmu_make_smrg_group(0),
+	smmu_make_smrg_group(1),
+	smmu_make_smrg_group(2),
+	smmu_make_smrg_group(3),
+	smmu_make_smrg_group(4),
+	smmu_make_smrg_group(5),
+	smmu_make_smrg_group(6),
+	smmu_make_smrg_group(7),
+	smmu_make_smrg_group(8),
+	smmu_make_smrg_group(9),
+	smmu_make_smrg_group(10),
+	smmu_make_smrg_group(11),
+	smmu_make_smrg_group(12),
+	smmu_make_smrg_group(13),
+	smmu_make_smrg_group(14),
+	smmu_make_smrg_group(15),
+	smmu_make_smrg_group(16),
+	smmu_make_smrg_group(17),
+	smmu_make_smrg_group(18),
+	smmu_make_smrg_group(19),
+	smmu_make_smrg_group(20),
+	smmu_make_smrg_group(21),
+	smmu_make_smrg_group(22),
+	smmu_make_smrg_group(23),
+	smmu_make_smrg_group(24),
+	smmu_make_smrg_group(25),
+	smmu_make_smrg_group(26),
+	smmu_make_smrg_group(27),
+	smmu_make_smrg_group(28),
+	smmu_make_smrg_group(29),
+	smmu_make_smrg_group(30),
+	smmu_make_smrg_group(31),
+	smmu_make_smrg_group(32),
+	smmu_make_smrg_group(33),
+	smmu_make_smrg_group(34),
+	smmu_make_smrg_group(35),
+	smmu_make_smrg_group(36),
+	smmu_make_smrg_group(37),
+	smmu_make_smrg_group(38),
+	smmu_make_smrg_group(39),
+	smmu_make_smrg_group(40),
+	smmu_make_smrg_group(41),
+	smmu_make_smrg_group(42),
+	smmu_make_smrg_group(43),
+	smmu_make_smrg_group(44),
+	smmu_make_smrg_group(45),
+	smmu_make_smrg_group(46),
+	smmu_make_smrg_group(47),
+	smmu_make_smrg_group(48),
+	smmu_make_smrg_group(49),
+	smmu_make_smrg_group(50),
+	smmu_make_smrg_group(51),
+	smmu_make_smrg_group(52),
+	smmu_make_smrg_group(53),
+	smmu_make_smrg_group(54),
+	smmu_make_smrg_group(55),
+	smmu_make_smrg_group(56),
+	smmu_make_smrg_group(57),
+	smmu_make_smrg_group(58),
+	smmu_make_smrg_group(59),
+	smmu_make_smrg_group(60),
+	smmu_make_smrg_group(61),
+	smmu_make_smrg_group(62),
+	smmu_make_smrg_group(63),
+	smmu_make_cb_group(0),
+	smmu_make_cb_group(1),
+	smmu_make_cb_group(2),
+	smmu_make_cb_group(3),
+	smmu_make_cb_group(4),
+	smmu_make_cb_group(5),
+	smmu_make_cb_group(6),
+	smmu_make_cb_group(7),
+	smmu_make_cb_group(8),
+	smmu_make_cb_group(9),
+	smmu_make_cb_group(10),
+	smmu_make_cb_group(11),
+	smmu_make_cb_group(12),
+	smmu_make_cb_group(13),
+	smmu_make_cb_group(14),
+	smmu_make_cb_group(15),
+	smmu_make_cb_group(16),
+	smmu_make_cb_group(17),
+	smmu_make_cb_group(18),
+	smmu_make_cb_group(19),
+	smmu_make_cb_group(20),
+	smmu_make_cb_group(21),
+	smmu_make_cb_group(22),
+	smmu_make_cb_group(23),
+	smmu_make_cb_group(24),
+	smmu_make_cb_group(25),
+	smmu_make_cb_group(26),
+	smmu_make_cb_group(27),
+	smmu_make_cb_group(28),
+	smmu_make_cb_group(29),
+	smmu_make_cb_group(30),
+	smmu_make_cb_group(31),
+	smmu_make_cb_group(32),
+	smmu_make_cb_group(33),
+	smmu_make_cb_group(34),
+	smmu_make_cb_group(35),
+	smmu_make_cb_group(36),
+	smmu_make_cb_group(37),
+	smmu_make_cb_group(38),
+	smmu_make_cb_group(39),
+	smmu_make_cb_group(40),
+	smmu_make_cb_group(41),
+	smmu_make_cb_group(42),
+	smmu_make_cb_group(43),
+	smmu_make_cb_group(44),
+	smmu_make_cb_group(45),
+	smmu_make_cb_group(46),
+	smmu_make_cb_group(47),
+	smmu_make_cb_group(48),
+	smmu_make_cb_group(49),
+	smmu_make_cb_group(50),
+	smmu_make_cb_group(51),
+	smmu_make_cb_group(52),
+	smmu_make_cb_group(53),
+	smmu_make_cb_group(54),
+	smmu_make_cb_group(55),
+	smmu_make_cb_group(56),
+	smmu_make_cb_group(57),
+	smmu_make_cb_group(58),
+	smmu_make_cb_group(59),
+	smmu_make_cb_group(60),
+	smmu_make_cb_group(61),
+	smmu_make_cb_group(62),
+	smmu_make_cb_group(63),
+	smmu_bypass_cfg,	/* TBU settings */
+	_END_OF_TABLE_,
+};
+
+/*
+ * Save SMMU settings before "System Suspend"
+ */
+void tegra_smmu_save_context(void)
+{
+	uint32_t i;
+#if DEBUG
+	uint32_t reg_id1, pgshift, cb_size;
+
+	/* sanity check SMMU settings */
+	reg_id1 = mmio_read_32((TEGRA_SMMU_BASE + SMMU_GNSR0_IDR1));
+	pgshift = (reg_id1 & ID1_PAGESIZE) ? 16 : 12;
+	cb_size = (2 << pgshift) * \
+	(1 << (((reg_id1 >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1));
+
+	assert(!((pgshift != PGSHIFT) || (cb_size != CB_SIZE)));
+#endif
+
+	/* index of _END_OF_TABLE_ */
+	smmu_ctx_regs[0].val = ARRAY_SIZE(smmu_ctx_regs) - 1;
+
+	/* save SMMU register values */
+	for (i = 1; i < ARRAY_SIZE(smmu_ctx_regs) - 1; i++)
+		smmu_ctx_regs[i].val = mmio_read_32(smmu_ctx_regs[i].reg);
+
+	/* save the SMMU table address */
+	mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_LO,
+		(uint32_t)(unsigned long)smmu_ctx_regs);
+	mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_HI,
+		(uint32_t)(((unsigned long)smmu_ctx_regs) >> 32));
+}
+
+/*
+ * Init SMMU during boot or "System Suspend" exit
+ */
+void tegra_smmu_init(void)
+{
+	uint32_t val;
+
+	/* Program the SMMU pagesize */
+	val = tegra_smmu_read_32(SMMU_GSR0_SECURE_ACR);
+	val |= SMMU_GSR0_PGSIZE_64K;
+	tegra_smmu_write_32(SMMU_GSR0_SECURE_ACR, val);
+}
diff --git a/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c
new file mode 100644
index 0000000..7e35cc6
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <denver.h>
+#include <mce.h>
+#include <psci.h>
+#include <smmu.h>
+#include <t18x_ari.h>
+#include <tegra_private.h>
+
+extern void prepare_cpu_pwr_dwn(void);
+
+/* state id mask */
+#define TEGRA186_STATE_ID_MASK		0xF
+/* constants to get power state's wake time */
+#define TEGRA186_WAKE_TIME_MASK		0xFFFFFF
+#define TEGRA186_WAKE_TIME_SHIFT	4
+/* context size to save during system suspend */
+#define TEGRA186_SE_CONTEXT_SIZE		3
+
+static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
+static unsigned int wake_time[PLATFORM_CORE_COUNT];
+
+/* System power down state */
+uint32_t tegra186_system_powerdn_state = TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF;
+
+int32_t tegra_soc_validate_power_state(unsigned int power_state,
+					psci_power_state_t *req_state)
+{
+	int state_id = psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
+	int cpu = read_mpidr() & MPIDR_CPU_MASK;
+	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+	if (impl == DENVER_IMPL)
+		cpu |= 0x4;
+
+	wake_time[cpu] = (power_state  >> TEGRA186_WAKE_TIME_SHIFT) &
+			 TEGRA186_WAKE_TIME_MASK;
+
+	/* Sanity check the requested state id */
+	switch (state_id) {
+	case PSTATE_ID_CORE_IDLE:
+	case PSTATE_ID_CORE_POWERDN:
+		/*
+		 * Core powerdown request only for afflvl 0
+		 */
+		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
+
+		break;
+
+	default:
+		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
+		return PSCI_E_INVALID_PARAMS;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	const plat_local_state_t *pwr_domain_state;
+	unsigned int stateid_afflvl0, stateid_afflvl2;
+	int cpu = read_mpidr() & MPIDR_CPU_MASK;
+	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+	cpu_context_t *ctx = cm_get_context(NON_SECURE);
+	gp_regs_t *gp_regs = get_gpregs_ctx(ctx);
+	uint32_t val;
+
+	assert(ctx);
+	assert(gp_regs);
+
+	if (impl == DENVER_IMPL)
+		cpu |= 0x4;
+
+	/* get the state ID */
+	pwr_domain_state = target_state->pwr_domain_state;
+	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
+		TEGRA186_STATE_ID_MASK;
+	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
+		TEGRA186_STATE_ID_MASK;
+
+	if (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) {
+
+		/* Prepare for cpu idle */
+		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
+			TEGRA_ARI_CORE_C6, wake_time[cpu], 0);
+
+	} else if (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN) {
+
+		/* Prepare for cpu powerdn */
+		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
+			TEGRA_ARI_CORE_C7, wake_time[cpu], 0);
+
+	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+
+		/* loop until SC7 is allowed */
+		do {
+			val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
+					TEGRA_ARI_CORE_C7,
+					MCE_CORE_SLEEP_TIME_INFINITE,
+					0);
+		} while (val == 0);
+
+		/* save SE registers */
+		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
+				SE_MUTEX_WATCHDOG_NS_LIMIT);
+		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
+				RNG_MUTEX_WATCHDOG_NS_LIMIT);
+		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
+				PKA_MUTEX_WATCHDOG_NS_LIMIT);
+
+		/* save 'Secure Boot' Processor Feature Config Register */
+		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
+		mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val);
+
+		/* save SMMU context */
+		tegra_smmu_save_context();
+
+		/* Prepare for system suspend */
+		write_ctx_reg(gp_regs, CTX_GPREG_X4, 1);
+		write_ctx_reg(gp_regs, CTX_GPREG_X5, 0);
+		write_ctx_reg(gp_regs, CTX_GPREG_X6, 1);
+		(void)mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO,
+			TEGRA_ARI_CLUSTER_CC7, 0, TEGRA_ARI_SYSTEM_SC7);
+
+		/* Enter system suspend state */
+		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
+			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);
+
+	} else {
+		ERROR("%s: Unknown state id\n", __func__);
+		return PSCI_E_NOT_SUPPORTED;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on(u_register_t mpidr)
+{
+	int target_cpu = mpidr & MPIDR_CPU_MASK;
+	int target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
+			MPIDR_AFFINITY_BITS;
+
+	if (target_cluster > MPIDR_AFFLVL1) {
+		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
+		return PSCI_E_NOT_PRESENT;
+	}
+
+	/* construct the target CPU # */
+	target_cpu |= (target_cluster << 2);
+
+	mce_command_handler(MCE_CMD_ONLINE_CORE, target_cpu, 0, 0);
+
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	int state_id = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
+
+	/*
+	 * Check if we are exiting from deep sleep and restore SE
+	 * context if we are.
+	 */
+	if (state_id == PSTATE_ID_SOC_POWERDN) {
+		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
+			se_regs[0]);
+		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
+			se_regs[1]);
+		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
+			se_regs[2]);
+
+		/* Init SMMU */
+		tegra_smmu_init();
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	cpu_context_t *ctx = cm_get_context(NON_SECURE);
+	gp_regs_t *gp_regs = get_gpregs_ctx(ctx);
+	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+	assert(ctx);
+	assert(gp_regs);
+
+	/* Turn off wake_mask */
+	write_ctx_reg(gp_regs, CTX_GPREG_X4, 0);
+	write_ctx_reg(gp_regs, CTX_GPREG_X5, 0);
+	write_ctx_reg(gp_regs, CTX_GPREG_X6, 1);
+	mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO, TEGRA_ARI_CLUSTER_CC7,
+		0, TEGRA_ARI_SYSTEM_SC7);
+
+	/* Disable Denver's DCO operations */
+	if (impl == DENVER_IMPL)
+		denver_disable_dco();
+
+	/* Turn off CPU */
+	return mce_command_handler(MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
+			MCE_CORE_SLEEP_TIME_INFINITE, 0);
+}
+
+__dead2 void tegra_soc_prepare_system_off(void)
+{
+	cpu_context_t *ctx = cm_get_context(NON_SECURE);
+	gp_regs_t *gp_regs = get_gpregs_ctx(ctx);
+	uint32_t val;
+
+	if (tegra186_system_powerdn_state == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) {
+
+		/* power off the entire system */
+		mce_enter_ccplex_state(tegra186_system_powerdn_state);
+
+	} else if (tegra186_system_powerdn_state == TEGRA_ARI_SYSTEM_SC8) {
+
+		/* loop until other CPUs power down */
+		do {
+			val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
+					TEGRA_ARI_CORE_C7,
+					MCE_CORE_SLEEP_TIME_INFINITE,
+					0);
+		} while (val == 0);
+
+		/* Prepare for quasi power down */
+		write_ctx_reg(gp_regs, CTX_GPREG_X4, 1);
+		write_ctx_reg(gp_regs, CTX_GPREG_X5, 0);
+		write_ctx_reg(gp_regs, CTX_GPREG_X6, 1);
+		(void)mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO,
+			TEGRA_ARI_CLUSTER_CC7, 0, TEGRA_ARI_SYSTEM_SC8);
+
+		/* Enter quasi power down state */
+		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
+			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);
+
+		/* disable GICC */
+		tegra_gic_cpuif_deactivate();
+
+		/* power down core */
+		prepare_cpu_pwr_dwn();
+
+	} else {
+		ERROR("%s: unsupported power down state (%d)\n", __func__,
+			tegra186_system_powerdn_state);
+	}
+
+	wfi();
+
+	/* wait for the system to power down */
+	for (;;) {
+		;
+	}
+}
+
+int tegra_soc_prepare_system_reset(void)
+{
+	mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
+
+	return PSCI_E_SUCCESS;
+}
diff --git a/plat/nvidia/tegra/soc/t186/plat_secondary.c b/plat/nvidia/tegra/soc/t186/plat_secondary.c
new file mode 100644
index 0000000..df80289
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_secondary.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <debug.h>
+#include <mce.h>
+#include <mmio.h>
+#include <tegra_def.h>
+
+#define MISCREG_CPU_RESET_VECTOR	0x2000
+#define MISCREG_AA64_RST_LOW		0x2004
+#define MISCREG_AA64_RST_HIGH		0x2008
+
+#define SCRATCH_SECURE_RSV1_SCRATCH_0	0x658
+#define SCRATCH_SECURE_RSV1_SCRATCH_1	0x65C
+
+#define CPU_RESET_MODE_AA64		1
+
+extern void tegra_secure_entrypoint(void);
+
+/*******************************************************************************
+ * Setup secondary CPU vectors
+ ******************************************************************************/
+void plat_secondary_setup(void)
+{
+	uint32_t addr_low, addr_high;
+	uint64_t reset_addr = (uint64_t)tegra_secure_entrypoint;
+
+	INFO("Setting up secondary CPU boot\n");
+
+	addr_low = (uint32_t)reset_addr | CPU_RESET_MODE_AA64;
+	addr_high = (uint32_t)((reset_addr >> 32) & 0x7ff);
+
+	/* write lower 32 bits first, then the upper 11 bits */
+	mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_LOW, addr_low);
+	mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_HIGH, addr_high);
+
+	/* save reset vector to be used during SYSTEM_SUSPEND exit */
+	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_RSV1_SCRATCH_0,
+			addr_low);
+	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_RSV1_SCRATCH_1,
+			addr_high);
+
+	/* update reset vector address to the CCPLEX */
+	mce_update_reset_vector(addr_low, addr_high);
+}
diff --git a/plat/nvidia/tegra/soc/t186/plat_setup.c b/plat/nvidia/tegra/soc/t186/plat_setup.c
new file mode 100644
index 0000000..d6b8bc3
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_setup.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <denver.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+#include <xlat_tables.h>
+
+/*******************************************************************************
+ * The Tegra power domain tree has a single system level power domain i.e. a
+ * single root node. The first entry in the power domain descriptor specifies
+ * the number of power domains at the highest power level.
+ *******************************************************************************
+ */
+const unsigned char tegra_power_domain_tree_desc[] = {
+	/* No of root nodes */
+	1,
+	/* No of clusters */
+	PLATFORM_CLUSTER_COUNT,
+	/* No of CPU cores - cluster0 */
+	PLATFORM_MAX_CPUS_PER_CLUSTER,
+	/* No of CPU cores - cluster1 */
+	PLATFORM_MAX_CPUS_PER_CLUSTER
+};
+
+/*
+ * Table of regions to map using the MMU.
+ */
+static const mmap_region_t tegra_mmap[] = {
+	MAP_REGION_FLAT(TEGRA_MISC_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_MC_STREAMID_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_MC_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_UARTA_BASE, 0x20000, /* 128KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_GICD_BASE, 0x20000, /* 128KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_SE0_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_PKA1_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_RNG1_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_CAR_RESET_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_PMC_BASE, 0x40000, /* 256KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_SCRATCH_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_MMCRAB_BASE, 0x60000, /* 384KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_SMMU_BASE, 0x1000000, /* 16MB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	{0}
+};
+
+/*******************************************************************************
+ * Set up the pagetables as per the platform memory map & initialize the MMU
+ ******************************************************************************/
+const mmap_region_t *plat_get_mmio_map(void)
+{
+	/* MMIO space */
+	return tegra_mmap;
+}
+
+/*******************************************************************************
+ * Handler to get the System Counter Frequency
+ ******************************************************************************/
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return 31250000;
+}
+
+/*******************************************************************************
+ * Maximum supported UART controllers
+ ******************************************************************************/
+#define TEGRA186_MAX_UART_PORTS		7
+
+/*******************************************************************************
+ * This variable holds the UART port base addresses
+ ******************************************************************************/
+static uint32_t tegra186_uart_addresses[TEGRA186_MAX_UART_PORTS + 1] = {
+	0,	/* undefined - treated as an error case */
+	TEGRA_UARTA_BASE,
+	TEGRA_UARTB_BASE,
+	TEGRA_UARTC_BASE,
+	TEGRA_UARTD_BASE,
+	TEGRA_UARTE_BASE,
+	TEGRA_UARTF_BASE,
+	TEGRA_UARTG_BASE,
+};
+
+/*******************************************************************************
+ * Retrieve the UART controller base to be used as the console
+ ******************************************************************************/
+uint32_t plat_get_console_from_id(int id)
+{
+	if (id > TEGRA186_MAX_UART_PORTS)
+		return 0;
+
+	return tegra186_uart_addresses[id];
+}
+
+/* Secure IRQs for Tegra186 */
+static const irq_sec_cfg_t tegra186_sec_irqs[] = {
+	{
+		TEGRA186_TOP_WDT_IRQ,
+		TEGRA186_SEC_IRQ_TARGET_MASK,
+		INTR_TYPE_EL3,
+	},
+	{
+		TEGRA186_AON_WDT_IRQ,
+		TEGRA186_SEC_IRQ_TARGET_MASK,
+		INTR_TYPE_EL3,
+	},
+};
+
+/*******************************************************************************
+ * Initialize the GIC and SGIs
+ ******************************************************************************/
+void plat_gic_setup(void)
+{
+	tegra_gic_setup(tegra186_sec_irqs,
+		sizeof(tegra186_sec_irqs) / sizeof(tegra186_sec_irqs[0]));
+
+	/*
+	 * Initialize the FIQ handler only if the platform supports any
+	 * FIQ interrupt sources.
+	 */
+	if (sizeof(tegra186_sec_irqs) > 0)
+		tegra_fiq_handler_setup();
+}
diff --git a/plat/nvidia/tegra/soc/t186/plat_sip_calls.c b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c
new file mode 100644
index 0000000..fabab01
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <errno.h>
+#include <mce.h>
+#include <memctrl.h>
+#include <runtime_svc.h>
+#include <t18x_ari.h>
+#include <tegra_private.h>
+
+extern uint32_t tegra186_system_powerdn_state;
+
+/*******************************************************************************
+ * Tegra186 SiP SMCs
+ ******************************************************************************/
+#define TEGRA_SIP_NEW_VIDEOMEM_REGION			0x82000003
+#define TEGRA_SIP_SYSTEM_SHUTDOWN_STATE			0x82FFFE01
+#define TEGRA_SIP_MCE_CMD_ENTER_CSTATE			0x82FFFF00
+#define TEGRA_SIP_MCE_CMD_UPDATE_CSTATE_INFO		0x82FFFF01
+#define TEGRA_SIP_MCE_CMD_UPDATE_CROSSOVER_TIME		0x82FFFF02
+#define TEGRA_SIP_MCE_CMD_READ_CSTATE_STATS		0x82FFFF03
+#define TEGRA_SIP_MCE_CMD_WRITE_CSTATE_STATS		0x82FFFF04
+#define TEGRA_SIP_MCE_CMD_IS_SC7_ALLOWED		0x82FFFF05
+#define TEGRA_SIP_MCE_CMD_ONLINE_CORE			0x82FFFF06
+#define TEGRA_SIP_MCE_CMD_CC3_CTRL			0x82FFFF07
+#define TEGRA_SIP_MCE_CMD_ECHO_DATA			0x82FFFF08
+#define TEGRA_SIP_MCE_CMD_READ_VERSIONS			0x82FFFF09
+#define TEGRA_SIP_MCE_CMD_ENUM_FEATURES			0x82FFFF0A
+#define TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE_TRBITS	0x82FFFF0B
+#define TEGRA_SIP_MCE_CMD_ENUM_READ_MCA			0x82FFFF0C
+#define TEGRA_SIP_MCE_CMD_ENUM_WRITE_MCA		0x82FFFF0D
+#define TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE		0x82FFFF0E
+#define TEGRA_SIP_MCE_CMD_ROC_CLEAN_CACHE		0x82FFFF0F
+
+/*******************************************************************************
+ * This function is responsible for handling all T186 SiP calls
+ ******************************************************************************/
+int plat_sip_handler(uint32_t smc_fid,
+		     uint64_t x1,
+		     uint64_t x2,
+		     uint64_t x3,
+		     uint64_t x4,
+		     void *cookie,
+		     void *handle,
+		     uint64_t flags)
+{
+	int mce_ret;
+
+	switch (smc_fid) {
+
+	/*
+	 * Micro Coded Engine (MCE) commands reside in the 0x82FFFF00 -
+	 * 0x82FFFFFF SiP SMC space
+	 */
+	case TEGRA_SIP_MCE_CMD_ENTER_CSTATE:
+	case TEGRA_SIP_MCE_CMD_UPDATE_CSTATE_INFO:
+	case TEGRA_SIP_MCE_CMD_UPDATE_CROSSOVER_TIME:
+	case TEGRA_SIP_MCE_CMD_READ_CSTATE_STATS:
+	case TEGRA_SIP_MCE_CMD_WRITE_CSTATE_STATS:
+	case TEGRA_SIP_MCE_CMD_IS_SC7_ALLOWED:
+	case TEGRA_SIP_MCE_CMD_CC3_CTRL:
+	case TEGRA_SIP_MCE_CMD_ECHO_DATA:
+	case TEGRA_SIP_MCE_CMD_READ_VERSIONS:
+	case TEGRA_SIP_MCE_CMD_ENUM_FEATURES:
+	case TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
+	case TEGRA_SIP_MCE_CMD_ENUM_READ_MCA:
+	case TEGRA_SIP_MCE_CMD_ENUM_WRITE_MCA:
+	case TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE:
+	case TEGRA_SIP_MCE_CMD_ROC_CLEAN_CACHE:
+
+		/* clean up the high bits */
+		smc_fid &= MCE_CMD_MASK;
+
+		/* execute the command and store the result */
+		mce_ret = mce_command_handler(smc_fid, x1, x2, x3);
+		write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X0, mce_ret);
+
+		return 0;
+
+	case TEGRA_SIP_NEW_VIDEOMEM_REGION:
+		/* clean up the high bits */
+		x1 = (uint32_t)x1;
+		x2 = (uint32_t)x2;
+
+		/*
+		 * Check if Video Memory overlaps TZDRAM (contains bl31/bl32)
+		 * or falls outside of the valid DRAM range
+		 */
+		mce_ret = bl31_check_ns_address(x1, x2);
+		if (mce_ret)
+			return -ENOTSUP;
+
+		/*
+		 * Check if Video Memory is aligned to 1MB.
+		 */
+		if ((x1 & 0xFFFFF) || (x2 & 0xFFFFF)) {
+			ERROR("Unaligned Video Memory base address!\n");
+			return -ENOTSUP;
+		}
+
+		/* new video memory carveout settings */
+		tegra_memctrl_videomem_setup(x1, x2);
+
+		return 0;
+
+	case TEGRA_SIP_SYSTEM_SHUTDOWN_STATE:
+
+		/* clean up the high bits */
+		x1 = (uint32_t)x1;
+
+		/*
+		 * SC8 is a special Tegra186 system state where the CPUs and
+		 * DRAM are powered down but the other subsystems are still
+		 * alive.
+		 */
+		if ((x1 == TEGRA_ARI_SYSTEM_SC8) ||
+		    (x1 == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF)) {
+
+			tegra186_system_powerdn_state = x1;
+			flush_dcache_range(
+				(uintptr_t)&tegra186_system_powerdn_state,
+				sizeof(tegra186_system_powerdn_state));
+
+		} else {
+
+			ERROR("%s: unhandled powerdn state (%d)\n", __func__,
+				(uint32_t)x1);
+			return -ENOTSUP;
+		}
+
+		return 0;
+
+	default:
+		break;
+	}
+
+	return -ENOTSUP;
+}
diff --git a/plat/nvidia/tegra/soc/t186/platform_t186.mk b/plat/nvidia/tegra/soc/t186/platform_t186.mk
new file mode 100644
index 0000000..adc4a9e
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/platform_t186.mk
@@ -0,0 +1,65 @@
+#
+# Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+# platform configs
+ENABLE_NS_L2_CPUECTRL_RW_ACCESS		:= 1
+$(eval $(call add_define,ENABLE_NS_L2_CPUECTRL_RW_ACCESS))
+
+# platform settings
+TZDRAM_BASE				:= 0x30000000
+$(eval $(call add_define,TZDRAM_BASE))
+
+PLATFORM_CLUSTER_COUNT			:= 2
+$(eval $(call add_define,PLATFORM_CLUSTER_COUNT))
+
+PLATFORM_MAX_CPUS_PER_CLUSTER		:= 4
+$(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
+
+MAX_XLAT_TABLES				:= 15
+$(eval $(call add_define,MAX_XLAT_TABLES))
+
+MAX_MMAP_REGIONS			:= 15
+$(eval $(call add_define,MAX_MMAP_REGIONS))
+
+# platform files
+PLAT_INCLUDES		+=	-I${SOC_DIR}/drivers/include
+
+BL31_SOURCES		+=	lib/cpus/aarch64/denver.S		\
+				lib/cpus/aarch64/cortex_a57.S		\
+				${COMMON_DIR}/drivers/memctrl/memctrl_v2.c	\
+				${SOC_DIR}/drivers/mce/mce.c		\
+				${SOC_DIR}/drivers/mce/ari.c		\
+				${SOC_DIR}/drivers/mce/nvg.c		\
+				${SOC_DIR}/drivers/mce/aarch64/nvg_helpers.S \
+				${SOC_DIR}/drivers/smmu/smmu.c		\
+				${SOC_DIR}/plat_psci_handlers.c		\
+				${SOC_DIR}/plat_setup.c			\
+				${SOC_DIR}/plat_secondary.c		\
+				${SOC_DIR}/plat_sip_calls.c
diff --git a/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c
index 95fb93f..05028a1 100644
--- a/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c
+++ b/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c
@@ -76,7 +76,7 @@
 		 * Cluster powerdown/idle request only for afflvl 1
 		 */
 		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;
-		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_OFF_STATE;
+		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
 
 		break;
 
@@ -111,8 +111,10 @@
 
 	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
 
-		assert(stateid_afflvl0 == PLAT_MAX_OFF_STATE);
-		assert(stateid_afflvl1 == PLAT_MAX_OFF_STATE);
+		assert((stateid_afflvl0 == PLAT_MAX_OFF_STATE) ||
+		       (stateid_afflvl0 == PSTATE_ID_SOC_POWERDN));
+		assert((stateid_afflvl1 == PLAT_MAX_OFF_STATE) ||
+		       (stateid_afflvl1 == PSTATE_ID_SOC_POWERDN));
 
 		/* suspend the entire soc */
 		tegra_fc_soc_powerdn(mpidr);
diff --git a/plat/nvidia/tegra/soc/t210/plat_setup.c b/plat/nvidia/tegra/soc/t210/plat_setup.c
index 246faf8..42eefe7 100644
--- a/plat/nvidia/tegra/soc/t210/plat_setup.c
+++ b/plat/nvidia/tegra/soc/t210/plat_setup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -28,8 +28,11 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <arch_helpers.h>
+#include <bl_common.h>
 #include <console.h>
 #include <tegra_def.h>
+#include <tegra_private.h>
 #include <xlat_tables.h>
 
 /*******************************************************************************
@@ -112,3 +115,11 @@
 
 	return tegra210_uart_addresses[id];
 }
+
+/*******************************************************************************
+ * Initialize the GIC and SGIs
+ ******************************************************************************/
+void plat_gic_setup(void)
+{
+	tegra_gic_setup(NULL, 0);
+}
diff --git a/plat/nvidia/tegra/soc/t210/platform_t210.mk b/plat/nvidia/tegra/soc/t210/platform_t210.mk
index 2c908f9..8946869 100644
--- a/plat/nvidia/tegra/soc/t210/platform_t210.mk
+++ b/plat/nvidia/tegra/soc/t210/platform_t210.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
@@ -28,7 +28,7 @@
 # POSSIBILITY OF SUCH DAMAGE.
 #
 
-TZDRAM_BASE				:= 0xFDC00000
+TZDRAM_BASE				:= 0xFF800000
 $(eval $(call add_define,TZDRAM_BASE))
 
 ERRATA_TEGRA_INVALIDATE_BTB_AT_BOOT	:= 1
@@ -54,6 +54,16 @@
 				${SOC_DIR}/plat_setup.c			\
 				${SOC_DIR}/plat_secondary.c
 
-# Enable workarounds for selected Cortex-A53 erratas.
-ERRATA_A53_826319	:=	1
+# Enable workarounds for selected Cortex-A57 errata.
+A57_DISABLE_NON_TEMPORAL_HINT	:=	1
+ERRATA_A57_826974		:=	1
+ERRATA_A57_826977		:=	1
+ERRATA_A57_828024		:=	1
+ERRATA_A57_829520		:=	1
+ERRATA_A57_833471		:=	1
 
+# Enable workarounds for selected Cortex-A53 errata.
+A53_DISABLE_NON_TEMPORAL_HINT	:=	1
+ERRATA_A53_826319		:=	1
+ERRATA_A53_836870		:=	1
+ERRATA_A53_855873		:=	1
diff --git a/plat/qemu/platform.mk b/plat/qemu/platform.mk
index aa08bd3..63720f4 100644
--- a/plat/qemu/platform.mk
+++ b/plat/qemu/platform.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
@@ -51,7 +51,6 @@
 				lib/cpus/aarch64/aem_generic.S		\
 				lib/cpus/aarch64/cortex_a53.S		\
 				lib/cpus/aarch64/cortex_a57.S		\
-				plat/common/aarch64/platform_mp_stack.S	\
 				plat/qemu/aarch64/plat_helpers.S	\
 				plat/qemu/qemu_bl1_setup.c
 
@@ -59,7 +58,6 @@
 				drivers/io/io_storage.c			\
 				drivers/io/io_fip.c			\
 				drivers/io/io_memmap.c			\
-				plat/common/aarch64/platform_mp_stack.S	\
 				lib/semihosting/semihosting.c		\
 				lib/semihosting/aarch64/semihosting_call.S\
 				plat/qemu/qemu_io_storage.c		\
@@ -74,9 +72,7 @@
 				drivers/arm/gic/v2/gicv2_helpers.c	\
 				drivers/arm/gic/v2/gicv2_main.c		\
 				drivers/arm/gic/common/gic_common.c	\
-				plat/common/aarch64/platform_mp_stack.S	\
 				plat/common/aarch64/plat_psci_common.c	\
-				plat/common/aarch64/plat_common.c	\
 				plat/qemu/qemu_pm.c			\
 				plat/qemu/topology.c			\
 				plat/qemu/aarch64/plat_helpers.S	\
diff --git a/plat/rockchip/common/include/plat_private.h b/plat/rockchip/common/include/plat_private.h
index 9638aae..7aa0d85 100644
--- a/plat/rockchip/common/include/plat_private.h
+++ b/plat/rockchip/common/include/plat_private.h
@@ -46,27 +46,6 @@
 extern uint32_t __bl31_sram_data_start, __bl31_sram_data_end;
 extern uint32_t __sram_incbin_start, __sram_incbin_end;
 
-/******************************************************************************
- * For rockchip socs pm ops
- ******************************************************************************/
-struct rockchip_pm_ops_cb {
-	int (*cores_pwr_dm_on)(unsigned long mpidr, uint64_t entrypoint);
-	int (*cores_pwr_dm_off)(void);
-	int (*cores_pwr_dm_on_finish)(void);
-	int (*cores_pwr_dm_suspend)(void);
-	int (*cores_pwr_dm_resume)(void);
-	/* hlvl is used for clusters or system level */
-	int (*hlvl_pwr_dm_suspend)(uint32_t lvl, plat_local_state_t lvl_state);
-	int (*hlvl_pwr_dm_resume)(uint32_t lvl, plat_local_state_t lvl_state);
-	int (*hlvl_pwr_dm_off)(uint32_t lvl, plat_local_state_t lvl_state);
-	int (*hlvl_pwr_dm_on_finish)(uint32_t lvl,
-				     plat_local_state_t lvl_state);
-	int (*sys_pwr_dm_suspend)(void);
-	int (*sys_pwr_dm_resume)(void);
-	void (*sys_gbl_soft_reset)(void) __dead2;
-	void (*system_off)(void) __dead2;
-	void (*sys_pwr_down_wfi)(const psci_power_state_t *state_info) __dead2;
-};
 
 /******************************************************************************
  * The register have write-mask bits, it is mean, if you want to set the bits,
@@ -121,7 +100,6 @@
 void plat_rockchip_pmusram_prepare(void);
 void plat_rockchip_pmu_init(void);
 void plat_rockchip_soc_init(void);
-void plat_setup_rockchip_pm_ops(struct rockchip_pm_ops_cb *ops);
 uintptr_t plat_get_sec_entrypoint(void);
 
 void platform_cpu_warmboot(void);
@@ -132,6 +110,28 @@
 struct apio_info *plat_get_rockchip_suspend_apio(void);
 void plat_rockchip_gpio_init(void);
 
+int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint);
+int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl,
+				 plat_local_state_t lvl_state);
+int rockchip_soc_cores_pwr_dm_off(void);
+int rockchip_soc_sys_pwr_dm_suspend(void);
+int rockchip_soc_cores_pwr_dm_suspend(void);
+int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl,
+				     plat_local_state_t lvl_state);
+int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl,
+				       plat_local_state_t lvl_state);
+int rockchip_soc_cores_pwr_dm_on_finish(void);
+int rockchip_soc_sys_pwr_dm_resume(void);
+
+int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl,
+				    plat_local_state_t lvl_state);
+int rockchip_soc_cores_pwr_dm_resume(void);
+void __dead2 rockchip_soc_soft_reset(void);
+void __dead2 rockchip_soc_system_off(void);
+void __dead2 rockchip_soc_cores_pd_pwr_dn_wfi(
+				const psci_power_state_t *target_state);
+void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void);
+
 extern const unsigned char rockchip_power_domain_tree_desc[];
 
 extern void *pmu_cpuson_entrypoint_start;
diff --git a/plat/rockchip/common/plat_pm.c b/plat/rockchip/common/plat_pm.c
index e926345..09c5397 100644
--- a/plat/rockchip/common/plat_pm.c
+++ b/plat/rockchip/common/plat_pm.c
@@ -48,7 +48,103 @@
 
 static uintptr_t rockchip_sec_entrypoint;
 
-static struct rockchip_pm_ops_cb *rockchip_ops;
+#pragma weak rockchip_soc_cores_pwr_dm_on
+#pragma weak rockchip_soc_hlvl_pwr_dm_off
+#pragma weak rockchip_soc_cores_pwr_dm_off
+#pragma weak rockchip_soc_sys_pwr_dm_suspend
+#pragma weak rockchip_soc_cores_pwr_dm_suspend
+#pragma weak rockchip_soc_hlvl_pwr_dm_suspend
+#pragma weak rockchip_soc_hlvl_pwr_dm_on_finish
+#pragma weak rockchip_soc_cores_pwr_dm_on_finish
+#pragma weak rockchip_soc_sys_pwr_dm_resume
+#pragma weak rockchip_soc_hlvl_pwr_dm_resume
+#pragma weak rockchip_soc_cores_pwr_dm_resume
+#pragma weak rockchip_soc_soft_reset
+#pragma weak rockchip_soc_system_off
+#pragma weak rockchip_soc_sys_pd_pwr_dn_wfi
+#pragma weak rockchip_soc_cores_pd_pwr_dn_wfi
+
+int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl,
+				 plat_local_state_t lvl_state)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_cores_pwr_dm_off(void)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_sys_pwr_dm_suspend(void)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_cores_pwr_dm_suspend(void)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl,
+				     plat_local_state_t lvl_state)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl,
+				       plat_local_state_t lvl_state)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_cores_pwr_dm_on_finish(void)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_sys_pwr_dm_resume(void)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl,
+				    plat_local_state_t lvl_state)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_cores_pwr_dm_resume(void)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+void __dead2 rockchip_soc_soft_reset(void)
+{
+	while (1)
+		;
+}
+
+void __dead2 rockchip_soc_system_off(void)
+{
+	while (1)
+		;
+}
+
+void __dead2 rockchip_soc_cores_pd_pwr_dn_wfi(
+				const psci_power_state_t *target_state)
+{
+	psci_power_down_wfi();
+}
+
+void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void)
+{
+	psci_power_down_wfi();
+}
 
 /*******************************************************************************
  * Rockchip standard platform handler called to check the validity of the power
@@ -131,10 +227,7 @@
  ******************************************************************************/
 int rockchip_pwr_domain_on(u_register_t mpidr)
 {
-	if (rockchip_ops && rockchip_ops->cores_pwr_dm_on)
-		rockchip_ops->cores_pwr_dm_on(mpidr, rockchip_sec_entrypoint);
-
-	return PSCI_E_SUCCESS;
+	return rockchip_soc_cores_pwr_dm_on(mpidr, rockchip_sec_entrypoint);
 }
 
 /*******************************************************************************
@@ -145,6 +238,7 @@
 {
 	uint32_t lvl;
 	plat_local_state_t lvl_state;
+	int ret;
 
 	assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);
 
@@ -153,17 +247,13 @@
 	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
 		plat_cci_disable();
 
-	if (!rockchip_ops || !rockchip_ops->cores_pwr_dm_off)
-		return;
-
-	rockchip_ops->cores_pwr_dm_off();
-
-	if (!rockchip_ops->hlvl_pwr_dm_off)
-		return;
+	rockchip_soc_cores_pwr_dm_off();
 
 	for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
 		lvl_state = target_state->pwr_domain_state[lvl];
-		rockchip_ops->hlvl_pwr_dm_off(lvl, lvl_state);
+		ret = rockchip_soc_hlvl_pwr_dm_off(lvl, lvl_state);
+		if (ret == PSCI_E_NOT_SUPPORTED)
+			break;
 	}
 }
 
@@ -175,18 +265,15 @@
 {
 	uint32_t lvl;
 	plat_local_state_t lvl_state;
+	int ret;
 
 	if (RK_CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
 		return;
 
-	if (rockchip_ops) {
-		if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE &&
-		    rockchip_ops->sys_pwr_dm_suspend) {
-			rockchip_ops->sys_pwr_dm_suspend();
-		} else if (rockchip_ops->cores_pwr_dm_suspend) {
-			rockchip_ops->cores_pwr_dm_suspend();
-		}
-	}
+	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
+		rockchip_soc_sys_pwr_dm_suspend();
+	else
+		rockchip_soc_cores_pwr_dm_suspend();
 
 	/* Prevent interrupts from spuriously waking up this cpu */
 	plat_rockchip_gic_cpuif_disable();
@@ -198,12 +285,11 @@
 	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
 		return;
 
-	if (!rockchip_ops || !rockchip_ops->hlvl_pwr_dm_suspend)
-		return;
-
 	for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
 		lvl_state = target_state->pwr_domain_state[lvl];
-		rockchip_ops->hlvl_pwr_dm_suspend(lvl, lvl_state);
+		ret = rockchip_soc_hlvl_pwr_dm_suspend(lvl, lvl_state);
+		if (ret == PSCI_E_NOT_SUPPORTED)
+			break;
 	}
 }
 
@@ -216,22 +302,18 @@
 {
 	uint32_t lvl;
 	plat_local_state_t lvl_state;
+	int ret;
 
 	assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);
 
-	if (!rockchip_ops)
-		goto comm_finish;
-
-	if (rockchip_ops->hlvl_pwr_dm_on_finish) {
-		for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
-			lvl_state = target_state->pwr_domain_state[lvl];
-			rockchip_ops->hlvl_pwr_dm_on_finish(lvl, lvl_state);
-		}
+	for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+		lvl_state = target_state->pwr_domain_state[lvl];
+		ret = rockchip_soc_hlvl_pwr_dm_on_finish(lvl, lvl_state);
+		if (ret == PSCI_E_NOT_SUPPORTED)
+			break;
 	}
 
-	if (rockchip_ops->cores_pwr_dm_on_finish)
-		rockchip_ops->cores_pwr_dm_on_finish();
-comm_finish:
+	rockchip_soc_cores_pwr_dm_on_finish();
 
 	/* Perform the common cluster specific operations */
 	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
@@ -257,34 +339,30 @@
 {
 	uint32_t lvl;
 	plat_local_state_t lvl_state;
+	int ret;
 
 	/* Nothing to be done on waking up from retention from CPU level */
 	if (RK_CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
 		return;
 
-	/* Perform system domain restore if woken up from system suspend */
-	if (!rockchip_ops)
-		goto comm_finish;
-
 	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
-		if (rockchip_ops->sys_pwr_dm_resume)
-			rockchip_ops->sys_pwr_dm_resume();
+		rockchip_soc_sys_pwr_dm_resume();
 		goto comm_finish;
 	}
 
-	if (rockchip_ops->hlvl_pwr_dm_resume) {
-		for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
-			lvl_state = target_state->pwr_domain_state[lvl];
-			rockchip_ops->hlvl_pwr_dm_resume(lvl, lvl_state);
-		}
+	for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+		lvl_state = target_state->pwr_domain_state[lvl];
+		ret = rockchip_soc_hlvl_pwr_dm_resume(lvl, lvl_state);
+		if (ret == PSCI_E_NOT_SUPPORTED)
+			break;
 	}
 
-	if (rockchip_ops->cores_pwr_dm_resume)
-		rockchip_ops->cores_pwr_dm_resume();
+	rockchip_soc_cores_pwr_dm_resume();
+
 	/*
 	 * Program the gic per-cpu distributor or re-distributor interface.
 	 * For sys power domain operation, resuming of the gic needs to operate
-	 * in rockchip_ops->sys_pwr_dm_resume, according to the sys power mode
+	 * in rockchip_soc_sys_pwr_dm_resume(), according to the sys power mode
 	 * implements.
 	 */
 	plat_rockchip_gic_cpuif_enable();
@@ -302,9 +380,7 @@
  ******************************************************************************/
 static void __dead2 rockchip_system_reset(void)
 {
-	assert(rockchip_ops && rockchip_ops->sys_gbl_soft_reset);
-
-	rockchip_ops->sys_gbl_soft_reset();
+	rockchip_soc_soft_reset();
 }
 
 /*******************************************************************************
@@ -312,9 +388,16 @@
  ******************************************************************************/
 static void __dead2 rockchip_system_poweroff(void)
 {
-	assert(rockchip_ops && rockchip_ops->system_off);
+	rockchip_soc_system_off();
+}
 
-	rockchip_ops->system_off();
+static void __dead2 rockchip_pd_pwr_down_wfi(
+		const psci_power_state_t *target_state)
+{
+	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
+		rockchip_soc_sys_pd_pwr_dn_wfi();
+	else
+		rockchip_soc_cores_pd_pwr_dn_wfi(target_state);
 }
 
 /*******************************************************************************
@@ -348,8 +431,3 @@
 	assert(rockchip_sec_entrypoint);
 	return rockchip_sec_entrypoint;
 }
-
-void plat_setup_rockchip_pm_ops(struct rockchip_pm_ops_cb *ops)
-{
-	rockchip_ops = ops;
-}
diff --git a/plat/rockchip/rk3368/drivers/pmu/pmu.c b/plat/rockchip/rk3368/drivers/pmu/pmu.c
index f44e7cf..81ab90e 100644
--- a/plat/rockchip/rk3368/drivers/pmu/pmu.c
+++ b/plat/rockchip/rk3368/drivers/pmu/pmu.c
@@ -343,7 +343,7 @@
 	}
 }
 
-static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
+int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
 {
 	uint32_t cpu, cluster;
 	uint32_t cpuon_id;
@@ -375,12 +375,12 @@
 	return 0;
 }
 
-static int cores_pwr_domain_on_finish(void)
+int rockchip_soc_cores_pwr_dm_on_finish(void)
 {
 	return 0;
 }
 
-static int sys_pwr_domain_resume(void)
+int rockchip_soc_sys_pwr_dm_resume(void)
 {
 	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
 		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
@@ -394,7 +394,7 @@
 	return 0;
 }
 
-static int sys_pwr_domain_suspend(void)
+int rockchip_soc_sys_pwr_dm_suspend(void)
 {
 	nonboot_cpus_off();
 	pmu_set_sleep_mode();
@@ -404,20 +404,10 @@
 	return 0;
 }
 
-static struct rockchip_pm_ops_cb pm_ops = {
-	.cores_pwr_dm_on = cores_pwr_domain_on,
-	.cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
-	.sys_pwr_dm_suspend = sys_pwr_domain_suspend,
-	.sys_pwr_dm_resume = sys_pwr_domain_resume,
-	.sys_gbl_soft_reset = soc_sys_global_soft_reset,
-};
-
 void plat_rockchip_pmu_init(void)
 {
 	uint32_t cpu;
 
-	plat_setup_rockchip_pm_ops(&pm_ops);
-
 	/* register requires 32bits mode, switch it to 32 bits */
 	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;
 
diff --git a/plat/rockchip/rk3368/drivers/soc/soc.c b/plat/rockchip/rk3368/drivers/soc/soc.c
index 601f438..ecdac01 100644
--- a/plat/rockchip/rk3368/drivers/soc/soc.c
+++ b/plat/rockchip/rk3368/drivers/soc/soc.c
@@ -198,7 +198,7 @@
 		      plls_con[NPLL_ID][3] | PLLS_MODE_WMASK);
 }
 
-void __dead2 soc_sys_global_soft_reset(void)
+void __dead2 rockchip_soc_soft_reset(void)
 {
 	uint32_t temp_val;
 
diff --git a/plat/rockchip/rk3368/drivers/soc/soc.h b/plat/rockchip/rk3368/drivers/soc/soc.h
index f0a892c..b1373d5 100644
--- a/plat/rockchip/rk3368/drivers/soc/soc.h
+++ b/plat/rockchip/rk3368/drivers/soc/soc.h
@@ -157,7 +157,6 @@
 #define regs_updata_bit_clr(addr, shift) \
 		regs_updata_bits((addr), 0x0, 0x1, (shift))
 
-void __dead2 soc_sys_global_soft_reset(void);
 void regs_updata_bits(uintptr_t addr, uint32_t val,
 		      uint32_t mask, uint32_t shift);
 void soc_sleep_config(void);
diff --git a/plat/rockchip/rk3368/platform.mk b/plat/rockchip/rk3368/platform.mk
index 73a56e3..4320446 100644
--- a/plat/rockchip/rk3368/platform.mk
+++ b/plat/rockchip/rk3368/platform.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
@@ -50,7 +50,6 @@
 
 PLAT_BL_COMMON_SOURCES	:=	lib/xlat_tables/xlat_tables_common.c		\
 				lib/xlat_tables/aarch64/xlat_tables.c		\
-				plat/common/aarch64/plat_common.c		\
 				plat/common/plat_psci_common.c
 
 BL31_SOURCES		+=	${RK_GIC_SOURCES}				\
@@ -60,7 +59,6 @@
 				drivers/delay_timer/delay_timer.c		\
 				drivers/delay_timer/generic_delay_timer.c	\
 				lib/cpus/aarch64/cortex_a53.S			\
-				plat/common/aarch64/platform_mp_stack.S		\
 				${RK_PLAT_COMMON}/aarch64/plat_helpers.S	\
 				${RK_PLAT_COMMON}/bl31_plat_setup.c		\
 				${RK_PLAT_COMMON}/params_setup.c                \
diff --git a/plat/rockchip/rk3399/drivers/pmu/pmu.c b/plat/rockchip/rk3399/drivers/pmu/pmu.c
index e04d474..31b4f07 100644
--- a/plat/rockchip/rk3399/drivers/pmu/pmu.c
+++ b/plat/rockchip/rk3399/drivers/pmu/pmu.c
@@ -625,7 +625,7 @@
 	}
 }
 
-static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
+int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
 {
 	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);
 
@@ -637,19 +637,20 @@
 
 	cpus_power_domain_on(cpu_id);
 
-	return 0;
+	return PSCI_E_SUCCESS;
 }
 
-static int cores_pwr_domain_off(void)
+int rockchip_soc_cores_pwr_dm_off(void)
 {
 	uint32_t cpu_id = plat_my_core_pos();
 
 	cpus_power_domain_off(cpu_id, core_pwr_wfi);
 
-	return 0;
+	return PSCI_E_SUCCESS;
 }
 
-static int hlvl_pwr_domain_off(uint32_t lvl, plat_local_state_t lvl_state)
+int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl,
+				 plat_local_state_t lvl_state)
 {
 	switch (lvl) {
 	case MPIDR_AFFLVL1:
@@ -659,10 +660,10 @@
 		break;
 	}
 
-	return 0;
+	return PSCI_E_SUCCESS;
 }
 
-static int cores_pwr_domain_suspend(void)
+int rockchip_soc_cores_pwr_dm_suspend(void)
 {
 	uint32_t cpu_id = plat_my_core_pos();
 
@@ -674,10 +675,10 @@
 
 	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);
 
-	return 0;
+	return PSCI_E_SUCCESS;
 }
 
-static int hlvl_pwr_domain_suspend(uint32_t lvl, plat_local_state_t lvl_state)
+int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl, plat_local_state_t lvl_state)
 {
 	switch (lvl) {
 	case MPIDR_AFFLVL1:
@@ -687,20 +688,20 @@
 		break;
 	}
 
-	return 0;
+	return PSCI_E_SUCCESS;
 }
 
-static int cores_pwr_domain_on_finish(void)
+int rockchip_soc_cores_pwr_dm_on_finish(void)
 {
 	uint32_t cpu_id = plat_my_core_pos();
 
 	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
 		      CORES_PM_DISABLE);
-	return 0;
+	return PSCI_E_SUCCESS;
 }
 
-static int hlvl_pwr_domain_on_finish(uint32_t lvl,
-				     plat_local_state_t lvl_state)
+int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl,
+				       plat_local_state_t lvl_state)
 {
 	switch (lvl) {
 	case MPIDR_AFFLVL1:
@@ -710,20 +711,20 @@
 		break;
 	}
 
-	return 0;
+	return PSCI_E_SUCCESS;
 }
 
-static int cores_pwr_domain_resume(void)
+int rockchip_soc_cores_pwr_dm_resume(void)
 {
 	uint32_t cpu_id = plat_my_core_pos();
 
 	/* Disable core_pm */
 	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE);
 
-	return 0;
+	return PSCI_E_SUCCESS;
 }
 
-static int hlvl_pwr_domain_resume(uint32_t lvl, plat_local_state_t lvl_state)
+int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl, plat_local_state_t lvl_state)
 {
 	switch (lvl) {
 	case MPIDR_AFFLVL1:
@@ -732,7 +733,7 @@
 		break;
 	}
 
-	return 0;
+	return PSCI_E_SUCCESS;
 }
 
 /**
@@ -1073,7 +1074,7 @@
 	mmio_write_32(M0_PARAM_ADDR + PARAM_M0_FUNC, M0_FUNC_SUSPEND);
 }
 
-static int sys_pwr_domain_suspend(void)
+int rockchip_soc_sys_pwr_dm_suspend(void)
 {
 	uint32_t wait_cnt = 0;
 	uint32_t status = 0;
@@ -1138,7 +1139,7 @@
 	return 0;
 }
 
-static int sys_pwr_domain_resume(void)
+int rockchip_soc_sys_pwr_dm_resume(void)
 {
 	uint32_t wait_cnt = 0;
 	uint32_t status = 0;
@@ -1226,7 +1227,7 @@
 	return 0;
 }
 
-void __dead2 soc_soft_reset(void)
+void __dead2 rockchip_soc_soft_reset(void)
 {
 	struct gpio_info *rst_gpio;
 
@@ -1243,7 +1244,7 @@
 		;
 }
 
-void __dead2 soc_system_off(void)
+void __dead2 rockchip_soc_system_off(void)
 {
 	struct gpio_info *poweroff_gpio;
 
@@ -1268,28 +1269,11 @@
 		;
 }
 
-static struct rockchip_pm_ops_cb pm_ops = {
-	.cores_pwr_dm_on = cores_pwr_domain_on,
-	.cores_pwr_dm_off = cores_pwr_domain_off,
-	.cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
-	.cores_pwr_dm_suspend = cores_pwr_domain_suspend,
-	.cores_pwr_dm_resume = cores_pwr_domain_resume,
-	.hlvl_pwr_dm_suspend = hlvl_pwr_domain_suspend,
-	.hlvl_pwr_dm_resume = hlvl_pwr_domain_resume,
-	.hlvl_pwr_dm_off = hlvl_pwr_domain_off,
-	.hlvl_pwr_dm_on_finish = hlvl_pwr_domain_on_finish,
-	.sys_pwr_dm_suspend = sys_pwr_domain_suspend,
-	.sys_pwr_dm_resume = sys_pwr_domain_resume,
-	.sys_gbl_soft_reset = soc_soft_reset,
-	.system_off = soc_system_off,
-};
-
 void plat_rockchip_pmu_init(void)
 {
 	uint32_t cpu;
 
 	rockchip_pd_lock_init();
-	plat_setup_rockchip_pm_ops(&pm_ops);
 
 	/* register requires 32bits mode, switch it to 32 bits */
 	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;
diff --git a/plat/rockchip/rk3399/platform.mk b/plat/rockchip/rk3399/platform.mk
index c72119c..308a5cb 100644
--- a/plat/rockchip/rk3399/platform.mk
+++ b/plat/rockchip/rk3399/platform.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
@@ -53,7 +53,6 @@
 
 PLAT_BL_COMMON_SOURCES	:=	lib/xlat_tables/xlat_tables_common.c	\
 				lib/xlat_tables/aarch64/xlat_tables.c	\
-				plat/common/aarch64/plat_common.c	\
 				plat/common/plat_psci_common.c
 
 BL31_SOURCES	+=	${RK_GIC_SOURCES}				\
@@ -65,7 +64,6 @@
 			drivers/gpio/gpio.c				\
 			lib/cpus/aarch64/cortex_a53.S			\
 			lib/cpus/aarch64/cortex_a72.S			\
-			plat/common/aarch64/platform_mp_stack.S		\
 			${RK_PLAT_COMMON}/aarch64/plat_helpers.S	\
 			${RK_PLAT_COMMON}/bl31_plat_setup.c		\
 			${RK_PLAT_COMMON}/params_setup.c		\
diff --git a/plat/xilinx/zynqmp/platform.mk b/plat/xilinx/zynqmp/platform.mk
index d00b694..9d612dc 100644
--- a/plat/xilinx/zynqmp/platform.mk
+++ b/plat/xilinx/zynqmp/platform.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
@@ -78,7 +78,6 @@
 				plat/arm/common/arm_common.c			\
 				plat/arm/common/arm_gicv2.c			\
 				plat/common/plat_gicv2.c			\
-				plat/common/aarch64/plat_common.c		\
 				plat/xilinx/zynqmp/aarch64/zynqmp_helpers.S	\
 				plat/xilinx/zynqmp/aarch64/zynqmp_common.c
 
@@ -86,7 +85,6 @@
 				lib/cpus/aarch64/aem_generic.S			\
 				lib/cpus/aarch64/cortex_a53.S			\
 				plat/common/plat_psci_common.c			\
-				plat/common/aarch64/platform_mp_stack.S		\
 				plat/xilinx/zynqmp/bl31_zynqmp_setup.c		\
 				plat/xilinx/zynqmp/plat_psci.c			\
 				plat/xilinx/zynqmp/plat_zynqmp.c		\
diff --git a/services/spd/tlkd/tlkd_main.c b/services/spd/tlkd/tlkd_main.c
index 0fddcb2..5ebe4fd 100644
--- a/services/spd/tlkd/tlkd_main.c
+++ b/services/spd/tlkd/tlkd_main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -58,6 +58,11 @@
  ******************************************************************************/
 tlk_context_t tlk_ctx;
 
+/*******************************************************************************
+ * CPU number on which TLK booted up
+ ******************************************************************************/
+static int boot_cpu;
+
 /* TLK UID: RFC-4122 compliant UUID (version-5, sha-1) */
 DEFINE_SVC_UUID(tlk_uuid,
 		0xbd11e9c9, 0x2bba, 0x52ee, 0xb1, 0x72,
@@ -133,6 +138,12 @@
 	cm_init_my_context(tlk_entry_point);
 
 	/*
+	 * TLK runs only on a single CPU. Store the value of the boot
+	 * CPU for sanity checking later.
+	 */
+	boot_cpu = plat_my_core_pos();
+
+	/*
 	 * Arrange for an entry into the test secure payload.
 	 */
 	return tlkd_synchronous_sp_entry(&tlk_ctx);
@@ -163,8 +174,8 @@
 	/* Passing a NULL context is a critical programming error */
 	assert(handle);
 
-	/* These SMCs are only supported by CPU0 */
-	if ((read_mpidr() & MPIDR_CPU_MASK) != 0)
+	/* These SMCs are only supported by a single CPU */
+	if (boot_cpu != plat_my_core_pos())
 		SMC_RET1(handle, SMC_UNK);
 
 	/* Determine which security state this SMC originated from */
diff --git a/services/spd/trusty/smcall.h b/services/spd/trusty/smcall.h
index a1d91e5..2abcee6 100644
--- a/services/spd/trusty/smcall.h
+++ b/services/spd/trusty/smcall.h
@@ -94,5 +94,6 @@
 
 #define SMC_SC_VDEV_RESET	  SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23)
 #define SMC_SC_VDEV_KICK_VQ	  SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24)
+#define SMC_SC_SET_ROT_PARAMS	  SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 65535)
 
 #endif /* __LIB_SM_SMCALL_H */
diff --git a/services/spd/trusty/trusty.c b/services/spd/trusty/trusty.c
index b21ce71..9c9de91 100644
--- a/services/spd/trusty/trusty.c
+++ b/services/spd/trusty/trusty.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -28,7 +28,8 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <assert.h>
+#include <arch_helpers.h>
+#include <assert.h> /* for context_mgmt.h */
 #include <bl_common.h>
 #include <bl31.h>
 #include <context_mgmt.h>
@@ -41,8 +42,15 @@
 #include "smcall.h"
 #include "sm_err.h"
 
+/* macro to check if Hypervisor is enabled in the HCR_EL2 register */
+#define HYP_ENABLE_FLAG		0x286001
+
+/* length of Trusty's input parameters (in bytes) */
+#define TRUSTY_PARAMS_LEN_BYTES	(4096*2)
+
 struct trusty_stack {
 	uint8_t space[PLATFORM_STACK_SIZE] __aligned(16);
+	uint32_t end;
 };
 
 struct trusty_cpu_ctx {
@@ -65,31 +73,60 @@
 	uint64_t	r1;
 	uint64_t	r2;
 	uint64_t	r3;
+	uint64_t	r4;
+	uint64_t	r5;
+	uint64_t	r6;
+	uint64_t	r7;
 };
 
 struct trusty_cpu_ctx trusty_cpu_ctx[PLATFORM_CORE_COUNT];
 
 struct args trusty_init_context_stack(void **sp, void *new_stack);
-struct args trusty_context_switch_helper(void **sp, uint64_t r0, uint64_t r1,
-					 uint64_t r2, uint64_t r3);
+struct args trusty_context_switch_helper(void **sp, void *smc_params);
+
+static uint32_t current_vmid;
 
 static struct trusty_cpu_ctx *get_trusty_ctx(void)
 {
 	return &trusty_cpu_ctx[plat_my_core_pos()];
 }
 
+static uint32_t is_hypervisor_mode(void)
+{
+	uint64_t hcr = read_hcr();
+
+	return !!(hcr & HYP_ENABLE_FLAG);
+}
+
 static struct args trusty_context_switch(uint32_t security_state, uint64_t r0,
 					 uint64_t r1, uint64_t r2, uint64_t r3)
 {
 	struct args ret;
 	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+	struct trusty_cpu_ctx *ctx_smc;
 
 	assert(ctx->saved_security_state != security_state);
 
+	ret.r7 = 0;
+	if (is_hypervisor_mode()) {
+		/* According to the ARM DEN0028A spec, VMID is stored in x7 */
+		ctx_smc = cm_get_context(NON_SECURE);
+		assert(ctx_smc);
+		ret.r7 = SMC_GET_GP(ctx_smc, CTX_GPREG_X7);
+	}
+	/* r4, r5, r6 reserved for future use. */
+	ret.r6 = 0;
+	ret.r5 = 0;
+	ret.r4 = 0;
+	ret.r3 = r3;
+	ret.r2 = r2;
+	ret.r1 = r1;
+	ret.r0 = r0;
+
 	cm_el1_sysregs_context_save(security_state);
 
 	ctx->saved_security_state = security_state;
-	ret = trusty_context_switch_helper(&ctx->saved_sp, r0, r1, r2, r3);
+	ret = trusty_context_switch_helper(&ctx->saved_sp, &ret);
 
 	assert(ctx->saved_security_state == !security_state);
 
@@ -200,11 +237,25 @@
 			 uint64_t flags)
 {
 	struct args ret;
+	uint32_t vmid = 0;
+	entry_point_info_t *ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+
+	/*
+	 * Return success for SET_ROT_PARAMS if Trusty is not present, as
+	 * Verified Boot is not even supported and returning success here
+	 * would not compromise the boot process.
+	 */
+	if (!ep_info && (smc_fid == SMC_SC_SET_ROT_PARAMS)) {
+		SMC_RET1(handle, 0);
+	} else if (!ep_info) {
+		SMC_RET1(handle, SMC_UNK);
+	}
 
 	if (is_caller_secure(flags)) {
 		if (smc_fid == SMC_SC_NS_RETURN) {
 			ret = trusty_context_switch(SECURE, x1, 0, 0, 0);
-			SMC_RET4(handle, ret.r0, ret.r1, ret.r2, ret.r3);
+			SMC_RET8(handle, ret.r0, ret.r1, ret.r2, ret.r3,
+				 ret.r4, ret.r5, ret.r6, ret.r7);
 		}
 		INFO("%s (0x%x, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %p, %p, 0x%lx) \
 		     cpu %d, unknown smc\n",
@@ -220,8 +271,21 @@
 		case SMC_FC_FIQ_EXIT:
 			return trusty_fiq_exit(handle, x1, x2, x3);
 		default:
+			if (is_hypervisor_mode())
+				vmid = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+			if ((current_vmid != 0) && (current_vmid != vmid)) {
+				/* This message will cause SMC mechanism
+				 * abnormal in multi-guest environment.
+				 * Change it to WARN in case you need it.
+				 */
+				VERBOSE("Previous SMC not finished.\n");
+				SMC_RET1(handle, SM_ERR_BUSY);
+			}
+			current_vmid = vmid;
 			ret = trusty_context_switch(NON_SECURE, smc_fid, x1,
 				x2, x3);
+			current_vmid = 0;
 			SMC_RET1(handle, ret.r0);
 		}
 	}
@@ -231,6 +295,7 @@
 {
 	void el3_exit(void);
 	entry_point_info_t *ep_info;
+	struct args zero_args = {0};
 	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
 	uint32_t cpu = plat_my_core_pos();
 	int reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
@@ -262,9 +327,9 @@
 	cm_set_next_eret_context(SECURE);
 
 	ctx->saved_security_state = ~0; /* initial saved state is invalid */
-	trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack);
+	trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack.end);
 
-	trusty_context_switch_helper(&ctx->saved_sp, 0, 0, 0, 0);
+	trusty_context_switch_helper(&ctx->saved_sp, &zero_args);
 
 	cm_el1_sysregs_context_restore(NON_SECURE);
 	cm_set_next_eret_context(NON_SECURE);
@@ -332,43 +397,35 @@
 static int32_t trusty_setup(void)
 {
 	entry_point_info_t *ep_info;
-	uint32_t instr;
 	uint32_t flags;
 	int ret;
-	int aarch32 = 0;
 
+	/* Get trusty's entry point info */
 	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
 	if (!ep_info) {
 		INFO("Trusty image missing.\n");
 		return -1;
 	}
 
-	instr = *(uint32_t *)ep_info->pc;
-
-	if (instr >> 24 == 0xea) {
-		INFO("trusty: Found 32 bit image\n");
-		aarch32 = 1;
-	} else if (instr >> 8 == 0xd53810) {
-		INFO("trusty: Found 64 bit image\n");
-	} else {
-		INFO("trusty: Found unknown image, 0x%x\n", instr);
-	}
-
+	/* Trusty runs in AARCH64 mode */
 	SET_PARAM_HEAD(ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
-	if (!aarch32)
-		ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
-					DISABLE_ALL_EXCEPTIONS);
-	else
-		ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
-					    SPSR_E_LITTLE,
-					    DAIF_FIQ_BIT |
-					    DAIF_IRQ_BIT |
-					    DAIF_ABT_BIT);
+	ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
 
+	/*
+	 * arg0 = TZDRAM aperture available for BL32
+	 * arg1 = BL32 boot params
+	 * arg2 = BL32 boot params length
+	 */
+	ep_info->args.arg1 = ep_info->args.arg2;
+	ep_info->args.arg2 = TRUSTY_PARAMS_LEN_BYTES;
+
+	/* register init handler */
 	bl31_register_bl32_init(trusty_init);
 
+	/* register power management hooks */
 	psci_register_spd_pm_hook(&trusty_pm);
 
+	/* register interrupt handler */
 	flags = 0;
 	set_interrupt_rm_flag(flags, NON_SECURE);
 	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
diff --git a/services/spd/trusty/trusty_helpers.S b/services/spd/trusty/trusty_helpers.S
index 9bbb044..7e459ac 100644
--- a/services/spd/trusty/trusty_helpers.S
+++ b/services/spd/trusty/trusty_helpers.S
@@ -60,8 +60,20 @@
 	pop	x21, x22
 	pop	x19, x20
 	pop	x8, xzr
-	stp	x1, x2, [x8]
-	stp	x3, x4, [x8, #16]
+
+        ldr     x2, [x1]
+        ldr     x3, [x1, #0x08]
+        ldr     x4, [x1, #0x10]
+        ldr     x5, [x1, #0x18]
+        ldr     x6, [x1, #0x20]
+        ldr     x7, [x1, #0x28]
+        ldr     x10, [x1, #0x30]
+        ldr     x11, [x1, #0x38]
+
+        stp     x2, x3, [x8]
+        stp     x4, x5, [x8, #16]
+        stp     x6, x7, [x8, #32]
+        stp     x10, x11, [x8, #48]
 
 	ret
 endfunc trusty_context_switch_helper
diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c
index 2850e70..ff515cc 100644
--- a/services/spd/tspd/tspd_main.c
+++ b/services/spd/tspd/tspd_main.c
@@ -612,15 +612,26 @@
 			break;
 		}
 
+		assert(handle == cm_get_context(NON_SECURE));
+		cm_el1_sysregs_context_save(NON_SECURE);
+
 		/* Abort the preempted SMC request */
-		if (!tspd_abort_preempted_smc(tsp_ctx))
+		if (!tspd_abort_preempted_smc(tsp_ctx)) {
 			/*
 			 * If there was no preempted SMC to abort, return
 			 * SMC_UNK.
+			 *
+			 * Restoring the NON_SECURE context is not necessary as
+			 * the synchronous entry did not take place if the
+			 * return code of tspd_abort_preempted_smc is zero.
 			 */
-			SMC_RET1(handle, SMC_UNK);
+			cm_set_next_eret_context(NON_SECURE);
+			break;
+		}
 
-		break;
+		cm_el1_sysregs_context_restore(NON_SECURE);
+		cm_set_next_eret_context(NON_SECURE);
+		SMC_RET0(handle);
 
 		/*
 		 * Request from non secure world to resume the preempted
diff --git a/tools/fiptool/fiptool.c b/tools/fiptool/fiptool.c
index f3f831b..542a946 100644
--- a/tools/fiptool/fiptool.c
+++ b/tools/fiptool/fiptool.c
@@ -80,8 +80,6 @@
 
 static image_desc_t *image_desc_head;
 static size_t nr_image_descs;
-static image_t *image_head;
-static size_t nr_images;
 static uuid_t uuid_null = { 0 };
 static int verbose;
 
@@ -200,6 +198,7 @@
 	free(desc->name);
 	free(desc->cmdline_name);
 	free(desc->action_arg);
+	free(desc->image);
 	free(desc);
 }
 
@@ -244,74 +243,6 @@
 	}
 }
 
-static void add_image(image_t *image)
-{
-	image_t **p = &image_head;
-
-	while (*p)
-		p = &(*p)->next;
-
-	assert(*p == NULL);
-	*p = image;
-
-	nr_images++;
-}
-
-static void replace_image(image_t *image)
-{
-	image_t **p = &image_head;
-
-	while (*p) {
-		if (!memcmp(&(*p)->toc_e.uuid, &image->toc_e.uuid,
-			    sizeof(image->toc_e.uuid)))
-			break;
-		p = &(*p)->next;
-	}
-
-	assert(*p != NULL);
-
-	image->next = (*p)->next;
-	*p = image;
-}
-
-static void free_image(image_t *image)
-{
-	free(image->buffer);
-	free(image);
-}
-
-static void remove_image(image_t *image)
-{
-	image_t *tmp, **p = &image_head;
-
-	while (*p) {
-		if (*p == image)
-			break;
-		p = &(*p)->next;
-	}
-
-	assert(*p != NULL);
-
-	tmp = *p;
-	*p = tmp->next;
-	free_image(tmp);
-
-	nr_images--;
-}
-
-static void free_images(void)
-{
-	image_t *image = image_head, *tmp;
-
-	while (image != NULL) {
-		tmp = image->next;
-		free_image(image);
-		image = tmp;
-		nr_images--;
-	}
-	assert(nr_images == 0);
-}
-
 static image_desc_t *lookup_image_desc_from_uuid(const uuid_t *uuid)
 {
 	image_desc_t *desc;
@@ -332,16 +263,6 @@
 	return NULL;
 }
 
-static image_t *lookup_image_from_uuid(const uuid_t *uuid)
-{
-	image_t *image;
-
-	for (image = image_head; image != NULL; image = image->next)
-		if (!memcmp(&image->toc_e.uuid, uuid, sizeof(*uuid)))
-			return image;
-	return NULL;
-}
-
 static void uuid_to_str(char *s, size_t len, const uuid_t *u)
 {
 	assert(len >= (_UUID_STR_LEN + 1));
@@ -457,7 +378,8 @@
 			add_image_desc(desc);
 		}
 
-		add_image(image);
+		assert(desc->image == NULL);
+		desc->image = image;
 
 		toc_entry++;
 	}
@@ -542,7 +464,7 @@
 
 static int info_cmd(int argc, char *argv[])
 {
-	image_t *image;
+	image_desc_t *desc;
 	fip_toc_header_t toc_header;
 
 	if (argc != 2)
@@ -560,11 +482,11 @@
 		    (unsigned long long)toc_header.flags);
 	}
 
-	for (image = image_head; image != NULL; image = image->next) {
-		image_desc_t *desc;
+	for (desc = image_desc_head; desc != NULL; desc = desc->next) {
+		image_t *image = desc->image;
 
-		desc = lookup_image_desc_from_uuid(&image->toc_e.uuid);
-		assert(desc != NULL);
+		if (image == NULL)
+			continue;
 		printf("%s: offset=0x%llX, size=0x%llX, cmdline=\"--%s\"",
 		       desc->name,
 		       (unsigned long long)image->toc_e.offset_address,
@@ -580,7 +502,6 @@
 		putchar('\n');
 	}
 
-	free_images();
 	return 0;
 }
 
@@ -593,11 +514,16 @@
 static int pack_images(const char *filename, uint64_t toc_flags, unsigned long align)
 {
 	FILE *fp;
-	image_t *image;
+	image_desc_t *desc;
 	fip_toc_header_t *toc_header;
 	fip_toc_entry_t *toc_entry;
 	char *buf;
 	uint64_t entry_offset, buf_size, payload_size = 0;
+	size_t nr_images = 0;
+
+	for (desc = image_desc_head; desc != NULL; desc = desc->next)
+		if (desc->image != NULL)
+			nr_images++;
 
 	buf_size = sizeof(fip_toc_header_t) +
 	    sizeof(fip_toc_entry_t) * (nr_images + 1);
@@ -614,7 +540,11 @@
 	toc_entry = (fip_toc_entry_t *)(toc_header + 1);
 
 	entry_offset = buf_size;
-	for (image = image_head; image != NULL; image = image->next) {
+	for (desc = image_desc_head; desc != NULL; desc = desc->next) {
+		image_t *image = desc->image;
+
+		if (image == NULL)
+			continue;
 		payload_size += image->toc_e.size;
 		entry_offset = (entry_offset + align - 1) & ~(align - 1);
 		image->toc_e.offset_address = entry_offset;
@@ -640,7 +570,11 @@
 	if (verbose)
 		log_dbgx("Payload size: %zu bytes", payload_size);
 
-	for (image = image_head; image != NULL; image = image->next) {
+	for (desc = image_desc_head; desc != NULL; desc = desc->next) {
+		image_t *image = desc->image;
+
+		if (image == NULL)
+			continue;
 		if (fseek(fp, image->toc_e.offset_address, SEEK_SET))
 			log_errx("Failed to set file position");
 
@@ -664,26 +598,26 @@
 
 	/* Add or replace images in the FIP file. */
 	for (desc = image_desc_head; desc != NULL; desc = desc->next) {
-		image_t *new_image, *old_image;
+		image_t *image;
 
 		if (desc->action != DO_PACK)
 			continue;
 
-		new_image = read_image_from_file(&desc->uuid,
+		image = read_image_from_file(&desc->uuid,
 		    desc->action_arg);
-		old_image = lookup_image_from_uuid(&desc->uuid);
-		if (old_image != NULL) {
+		if (desc->image != NULL) {
 			if (verbose) {
 				log_dbgx("Replacing %s with %s",
 				    desc->cmdline_name,
 				    desc->action_arg);
 			}
-			replace_image(new_image);
+			free(desc->image);
+			desc->image = image;
 		} else {
 			if (verbose)
 				log_dbgx("Adding image %s",
 				    desc->action_arg);
-			add_image(new_image);
+			desc->image = image;
 		}
 	}
 }
@@ -808,7 +742,6 @@
 	update_fip();
 
 	pack_images(argv[0], toc_flags, align);
-	free_images();
 	return 0;
 }
 
@@ -922,7 +855,6 @@
 	update_fip();
 
 	pack_images(outfile, toc_flags, align);
-	free_images();
 	return 0;
 }
 
@@ -1028,7 +960,7 @@
 	/* Unpack all specified images. */
 	for (desc = image_desc_head; desc != NULL; desc = desc->next) {
 		char file[PATH_MAX];
-		image_t *image;
+		image_t *image = desc->image;
 
 		if (!unpack_all && desc->action != DO_UNPACK)
 			continue;
@@ -1041,7 +973,6 @@
 			snprintf(file, sizeof(file), "%s",
 			    desc->action_arg);
 
-		image = lookup_image_from_uuid(&desc->uuid);
 		if (image == NULL) {
 			if (!unpack_all)
 				log_warnx("%s does not exist in %s",
@@ -1059,7 +990,6 @@
 		}
 	}
 
-	free_images();
 	return 0;
 }
 
@@ -1168,17 +1098,15 @@
 	parse_fip(argv[0], &toc_header);
 
 	for (desc = image_desc_head; desc != NULL; desc = desc->next) {
-		image_t *image;
-
 		if (desc->action != DO_REMOVE)
 			continue;
 
-		image = lookup_image_from_uuid(&desc->uuid);
-		if (image != NULL) {
+		if (desc->image != NULL) {
 			if (verbose)
 				log_dbgx("Removing %s",
 				    desc->cmdline_name);
-			remove_image(image);
+			free(desc->image);
+			desc->image = NULL;
 		} else {
 			log_warnx("%s does not exist in %s",
 			    desc->cmdline_name, argv[0]);
@@ -1186,7 +1114,6 @@
 	}
 
 	pack_images(outfile, toc_header.flags, align);
-	free_images();
 	return 0;
 }
 
diff --git a/tools/fiptool/fiptool.h b/tools/fiptool/fiptool.h
index be0c6f0..c4c86bc 100644
--- a/tools/fiptool/fiptool.h
+++ b/tools/fiptool/fiptool.h
@@ -58,13 +58,13 @@
 	char              *cmdline_name;
 	int                action;
 	char              *action_arg;
+	struct image      *image;
 	struct image_desc *next;
 } image_desc_t;
 
 typedef struct image {
 	struct fip_toc_entry toc_e;
 	void                *buffer;
-	struct image        *next;
 } image_t;
 
 typedef struct cmd {