Posted to commits@mynewt.apache.org by GitBox <gi...@apache.org> on 2017/11/03 13:07:11 UTC

[GitHub] IMGJulian closed pull request #639: BSP MIPS Ci40: brought in the MIPS HAL, fixes startup

IMGJulian closed pull request #639: BSP MIPS Ci40: brought in the MIPS HAL, fixes startup
URL: https://github.com/apache/mynewt-core/pull/639

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

diff --git a/compiler/mips/compiler.yml b/compiler/mips/compiler.yml
index 6b451f9b9..0769bb013 100644
--- a/compiler/mips/compiler.yml
+++ b/compiler/mips/compiler.yml
@@ -31,6 +31,6 @@ compiler.flags.debug: [compiler.flags.base, -g3]
 
 compiler.as.flags: [-x, assembler-with-cpp]
 
-compiler.ld.flags: -Wl,-defsym,__app_start=0x80000000
+compiler.ld.flags: -Wl,-defsym,__app_start=0x80000000 -Wl,-defsym,__use_excpt_boot=0 -nostartfiles
 compiler.ld.resolve_circular_deps: true
 compiler.ld.mapfile: true
diff --git a/hw/bsp/ci40/pkg.yml b/hw/bsp/ci40/pkg.yml
index 194b5f508..218a35df2 100644
--- a/hw/bsp/ci40/pkg.yml
+++ b/hw/bsp/ci40/pkg.yml
@@ -31,6 +31,10 @@ pkg.keywords:
 pkg.cflags:
 pkg.deps:
     - hw/mcu/mips/danube
+    - libc/baselibc
+    - hw/mips-hal
+
+pkg.arch: mips
 
 pkg.deps.UART_0:
     - hw/drivers/uart/uart_hal
diff --git a/hw/bsp/ci40/src/arch/mips/abiflags.S b/hw/bsp/ci40/src/arch/mips/abiflags.S
new file mode 100644
index 000000000..024f0fc3c
--- /dev/null
+++ b/hw/bsp/ci40/src/arch/mips/abiflags.S
@@ -0,0 +1,111 @@
+/*
+ * abiflags.S - MIPS ABI flags.
+ */
+
+/*
+* Copyright (c) Imagination Technologies Ltd.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+* 1. Redistributions of source code must retain the above copyright notice,
+* this list of conditions and the following disclaimer.
+* 2. Redistributions in binary form must reproduce the above copyright notice,
+* this list of conditions and the following disclaimer in the documentation
+* and/or other materials provided with the distribution.
+* 3. Neither the name of the copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived from this
+* software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+* POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/* Values for the xxx_size bytes of an ABI flags structure.  */
+#define AFL_REG_NONE         0x00       /* No registers.  */
+#define AFL_REG_32           0x01       /* 32-bit registers.  */
+#define AFL_REG_64           0x02       /* 64-bit registers.  */
+#define AFL_REG_128          0x03       /* 128-bit registers.  */
+
+/* Masks for the ases word of an ABI flags structure.  */
+#define AFL_ASE_DSP          0x00000001  /* DSP ASE.  */
+#define AFL_ASE_DSPR2        0x00000002  /* DSP R2 ASE.  */
+#define AFL_ASE_EVA          0x00000004  /* Enhanced VA Scheme.  */
+#define AFL_ASE_MCU          0x00000008  /* MCU (MicroController) ASE.  */
+#define AFL_ASE_MDMX         0x00000010  /* MDMX ASE.  */
+#define AFL_ASE_MIPS3D       0x00000020  /* MIPS-3D ASE.  */
+#define AFL_ASE_MT           0x00000040  /* MT ASE.  */
+#define AFL_ASE_SMARTMIPS    0x00000080  /* SmartMIPS ASE.  */
+#define AFL_ASE_VIRT         0x00000100  /* VZ ASE.  */
+#define AFL_ASE_MSA          0x00000200  /* MSA ASE.  */
+#define AFL_ASE_MIPS16       0x00000400  /* MIPS16 ASE.  */
+#define AFL_ASE_MICROMIPS    0x00000800  /* MICROMIPS ASE.  */
+#define AFL_ASE_XPA          0x00001000  /* XPA ASE.  */
+
+/* Values for the isa_ext word of an ABI flags structure.  */
+#define AFL_EXT_XLR           1  /* RMI Xlr instruction.  */
+#define AFL_EXT_OCTEON2       2  /* Cavium Networks Octeon2.  */
+#define AFL_EXT_OCTEONP       3  /* Cavium Networks OcteonP.  */
+#define AFL_EXT_LOONGSON_3A   4  /* Loongson 3A.  */
+#define AFL_EXT_OCTEON        5  /* Cavium Networks Octeon.  */
+#define AFL_EXT_5900          6  /* MIPS R5900 instruction.  */
+#define AFL_EXT_4650          7  /* MIPS R4650 instruction.  */
+#define AFL_EXT_4010          8  /* LSI R4010 instruction.  */
+#define AFL_EXT_4100          9  /* NEC VR4100 instruction.  */
+#define AFL_EXT_3900         10  /* Toshiba R3900 instruction.  */
+#define AFL_EXT_10000        11  /* MIPS R10000 instruction.  */
+#define AFL_EXT_SB1          12  /* Broadcom SB-1 instruction.  */
+#define AFL_EXT_4111         13  /* NEC VR4111/VR4181 instruction.  */
+#define AFL_EXT_4120         14  /* NEC VR4120 instruction.  */
+#define AFL_EXT_5400         15  /* NEC VR5400 instruction.  */
+#define AFL_EXT_5500         16  /* NEC VR5500 instruction.  */
+#define AFL_EXT_LOONGSON_2E  17  /* ST Microelectronics Loongson 2E.  */
+#define AFL_EXT_LOONGSON_2F  18  /* ST Microelectronics Loongson 2F.  */
+
+/* Values defined for Tag_GNU_MIPS_ABI_FP.  */
+#define Val_GNU_MIPS_ABI_FP_ANY    0  /* Not tagged or not using any ABIs affected by the differences.  */
+#define Val_GNU_MIPS_ABI_FP_DOUBLE 1  /* Using hard-float -mdouble-float.  */
+#define Val_GNU_MIPS_ABI_FP_SINGLE 2  /* Using hard-float -msingle-float.  */
+#define Val_GNU_MIPS_ABI_FP_SOFT   3  /* Using soft-float.  */
+#define Val_GNU_MIPS_ABI_FP_OLD_64 4  /* Using -mips32r2 -mfp64.  */
+#define Val_GNU_MIPS_ABI_FP_XX     5  /* Using -mfpxx */
+#define Val_GNU_MIPS_ABI_FP_64     6  /* Using -mips32r2 -mfp64.  */
+#define Val_GNU_MIPS_ABI_MSA_ANY   0  /* Not tagged or not using any ABIs affected by the differences.  */
+#define Val_GNU_MIPS_ABI_MSA_128   1  /* Using 128-bit MSA.  */
+
+/* MIPS ABI flags structure */
+  .struct 0
+ABIFlags_version:
+  .struct ABIFlags_version + 2
+ABIFlags_isa_level:
+  .struct ABIFlags_isa_level + 1
+ABIFlags_isa_rev:
+  .struct ABIFlags_isa_rev + 1
+ABIFlags_gpr_size:
+  .struct ABIFlags_gpr_size + 1
+ABIFlags_cpr1_size:
+  .struct ABIFlags_cpr1_size + 1
+ABIFlags_cpr2_size:
+  .struct ABIFlags_cpr2_size + 1
+ABIFlags_fp_abi:
+  .struct ABIFlags_fp_abi + 1
+ABIFlags_isa_ext:
+  .struct ABIFlags_isa_ext + 4
+ABIFlags_ases:
+  .struct ABIFlags_ases + 4
+ABIFlags_flags1:
+  .struct ABIFlags_flags1 + 4
+ABIFlags_flags2:
+  .struct ABIFlags_flags2 + 4
+
+/*> EOF abiflags.S <*/
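
For orientation (not part of the diff): the .struct directives above only assign byte offsets within the 24-byte .MIPS.abiflags record that the new startup code reads. A rough C view of that layout, with illustrative field names mirroring the ABIFlags_* labels:

    #include <stdint.h>

    /* Illustrative C layout of the 24-byte .MIPS.abiflags record; offsets
     * match the ABIFlags_* labels defined above. */
    struct mips_abiflags {
        uint16_t version;       /* offset  0: structure version */
        uint8_t  isa_level;     /* offset  2: e.g. 32 or 64 */
        uint8_t  isa_rev;       /* offset  3: ISA revision */
        uint8_t  gpr_size;      /* offset  4: AFL_REG_* general register width */
        uint8_t  cpr1_size;     /* offset  5: AFL_REG_* co-processor 1 width */
        uint8_t  cpr2_size;     /* offset  6: AFL_REG_* co-processor 2 width */
        uint8_t  fp_abi;        /* offset  7: Val_GNU_MIPS_ABI_FP_* value */
        uint32_t isa_ext;       /* offset  8: AFL_EXT_* processor extension */
        uint32_t ases;          /* offset 12: AFL_ASE_* bit mask */
        uint32_t flags1;        /* offset 16 */
        uint32_t flags2;        /* offset 20: 24 bytes in total */
    };
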
diff --git a/hw/bsp/ci40/src/arch/mips/gcc_startup_mips.S b/hw/bsp/ci40/src/arch/mips/gcc_startup_mips.S
new file mode 100644
index 000000000..e4f81be38
--- /dev/null
+++ b/hw/bsp/ci40/src/arch/mips/gcc_startup_mips.S
@@ -0,0 +1,392 @@
+/*
+ * gcc_startup_mips.S -- startup file for MIPS.
+ *
+ * Copyright (c) 1995, 1996, 1997, 2001 Cygnus Support
+ *
+ * The authors hereby grant permission to use, copy, modify, distribute,
+ * and license this software and its documentation for any purpose, provided
+ * that existing copyright notices are retained in all copies and that this
+ * notice is included verbatim in any distributions. No written agreement,
+ * license, or royalty fee is required for any of the authorized uses.
+ * Modifications to this software may be copyrighted by their authors
+ * and need not follow the licensing terms described here, provided that
+ * the new terms are clearly indicated on the first page of each file where
+ * they apply.
+ */
+
+/* This file does not use any floating-point ABI.  */
+	.gnu_attribute 4,0
+	.set nomips16
+
+#include <mips/regdef.h>
+#include <mips/m32c0.h>
+#include "abiflags.S"
+
+#define STARTUP_STACK_SIZE	0x0400	  /* Temporary stack size to run C code */
+
+/* This is for referencing addresses that are not in the .sdata or
+   .sbss section under embedded-pic, or before we've set up gp.  */
+#ifdef __mips_embedded_pic
+# ifdef __mips64
+#  define LA(t,x) la t,x-PICBASE ; daddu t,s0,t
+# else
+#  define LA(t,x) la t,x-PICBASE ; addu t,s0,t
+# endif
+#else /* __mips_embedded_pic */
+# if (defined (__mips64) && (defined (_MIPS_SIM) && _MIPS_SIM == _ABI64) \
+      || defined (__mips_eabi))
+#  define LA(t,x)	dla	t,x
+#  define PTR_ADDU	daddu
+#  define PTR_L		ld
+# else
+#  define LA(t,x)	la	t,x
+#  define PTR_ADDU	addu
+#  define PTR_L		lw
+# endif
+#endif /* __mips_embedded_pic */
+
+#if (defined (__mips64) && (_MIPS_SIM == _ABI64 || _MIPS_SIM == _ABIO64 || \
+     _MIPS_SIM == _ABIN32 || defined (__mips_eabi)))
+# define REGSIZE 8
+# define LD ld
+# define ST sd
+#else
+# define REGSIZE 4
+# define LD lw
+# define ST sw
+#endif
+
+	.section .startdata, "aw", @nobits
+	.balign 16
+	.space	STARTUP_STACK_SIZE
+__lstack: # Points to the end of the stack
+__ram_extent:
+	.space 8
+
+	.data
+
+__temp_space:	   /* Temporary space to save arguments */
+	.space	REGSIZE * 3
+
+	.text
+	.align	2
+
+/* Without the following nop, GDB thinks _start0 is a data variable.
+ * This is probably a bug in GDB in handling a symbol that is at the
+ * start of the .text section.
+ */
+	nop
+
+	.globl	hardware_hazard_hook .text
+	.globl	_start0
+	.ent	_start0
+_start0:
+#ifdef __mips_embedded_pic
+#define PICBASE start_PICBASE
+	.set	noreorder
+	PICBASE = .+8
+        bal	PICBASE
+	nop
+	move	s0,$31
+	.set	reorder
+#endif
+#if __mips<3
+#  define STATUS_MASK (SR_CU1|SR_PE)
+#else
+/* Post-mips2 has no SR_PE bit.  */
+#  ifdef __mips64
+/* Turn on 64-bit addressing and additional float regs.  */
+#    define STATUS_MASK (SR_CU1|SR_FR|SR_KX|SR_SX|SR_UX)
+#  else
+#    if __mips_fpr==32
+#      define STATUS_MASK (SR_CU1)
+#    else
+/* Turn on additional float regs.  */
+#      define STATUS_MASK (SR_CU1|SR_FR)
+#    endif
+#  endif
+#endif
+
+	/* Save argument registers */
+	LA (t0, __temp_space)
+	ST	a0, (REGSIZE * 0)(t0)
+	ST	a1, (REGSIZE * 1)(t0)
+	ST	a2, (REGSIZE * 2)(t0)
+
+	/*
+	 * Save k0, k1, ra and sp and register
+	 * default exception handler.
+	*/
+	.weak	__register_excpt_handler
+	LA	(t9, __register_excpt_handler)
+	beqz	t9, 1f
+	move	a0, ra		/* save ra */
+	jalr	t9
+	b	2f
+1:
+	/* Clear Cause register.  */
+	mtc0	zero,C0_CAUSE
+	nop
+	move	v0,zero			/* Mask for C0_SR.  */
+2:
+	/* Read MIPS_abiflags structure and set status/config registers
+	   accordingly.  */
+	.weak	__MIPS_abiflags_start
+	.weak	__MIPS_abiflags_end
+	LA	(t0,__MIPS_abiflags_start)
+	LA	(t1,__MIPS_abiflags_end)
+	PTR_ADDU t1,t1,-24
+
+	/* Branch to 1f if the .MIPS.abiflags section is not 24 bytes.  This
+	   indicates it is either missing or corrupt.  */
+	bne	t0,t1,1f
+
+	/* Check isa_level.  */
+	lbu	t1,ABIFlags_isa_level(t0)
+	sltu	v1,t1,3			/* Is MIPS < 3?  */
+	xori	t1,t1,64		/* Is MIPS64?  */
+	beq	v1,zero,4f
+	li	v1,SR_PE
+	or	v0,v0,v1		/* Enable soft reset.  */
+4:
+	li	v1,(SR_KX|SR_SX|SR_UX)
+	bne	t1,zero,5f
+	or	v0,v0,v1		/* Enable extended addressing.  */
+5:
+	/* Check DSP,DSP2,MDMX ase. */
+	lw      t1,ABIFlags_ases(t0)
+	andi    t1,t1,(AFL_ASE_DSP|AFL_ASE_DSPR2|AFL_ASE_MDMX)
+	li	v1,SR_MX
+	beq	t1,zero,6f
+	or	v0,v0,v1
+6:
+	/* Check fp_abi.  */
+	lbu	t1,ABIFlags_fp_abi(t0)
+	xori	t1,t1,Val_GNU_MIPS_ABI_FP_SOFT
+	li	v1,SR_CU1
+	beq	t1,zero,2f		/* Skip MSA and cpr1_size checks.  */
+	or	v0,v0,v1		/* Enable co-processor 1.  */
+
+	/* Check cpr1_size.  */
+	lbu	t1,ABIFlags_cpr1_size(t0)
+	xori	t1,t1,AFL_REG_64
+	li	v1,SR_FR
+	bne	t1,zero,3f
+	or	v0,v0,v1		/* Enable 64-bit FPU registers.  */
+3:
+	/* Check MSA ase.  */
+	lw	t1,ABIFlags_ases(t0)
+	andi	t1,t1,AFL_ASE_MSA
+	li	v1,SR_FR
+	beq	t1,zero,2f
+	or	v0,v0,v1		/* Enable 64-bit FPU registers.  */
+	li	v1,CFG5_MSAEN
+	.set	push
+	.set	mips32
+	mtc0	v1,C0_CONFIG,5		/* Enable MSA.  */
+	.set	pop
+	b	2f
+
+1:
+	/* MIPS_abiflags structure is not available.  Set status/config
+	   registers based on flags defined by compiler.  */
+#ifdef __mips_soft_float
+	li	v0,(STATUS_MASK-(STATUS_MASK & SR_CU1))
+#else
+	li	v0,STATUS_MASK
+#endif
+
+2:
+	/* Set C0_SR,  */
+	mtc0	v0,C0_SR
+	nop
+
+	/* Avoid hazard from C0_SR changes.  */
+	LA	(t0, hardware_hazard_hook)
+	beq	t0,zero,2f
+	jalr	t0
+2:
+
+
+/* Fix high bits, if any, of the PC so that exception handling doesn't get
+   confused.  */
+	LA (v0, 3f)
+	jr	v0
+3:
+	LA (gp, _gp)				# set the global data pointer
+	.end _start0
+
+/*
+ * zero out the bss section.
+ */
+	.globl	_get_ram_info .text
+	.globl	__stack
+	.globl	__global
+	.ent	zerobss
+zerobss:
+	LA (v0, _fbss)
+	LA (v1, _end)
+	beq	v0,v1,2f
+1:
+	PTR_ADDU v0,v0,4
+	sw	zero,-4(v0)
+	bne	v0,v1,1b
+2:
+	/* setup the stack pointer */
+	LA (t0, __stack)			# is __stack set ?
+	bne	t0,zero,4f
+
+	LA (sp, __lstack)			# make a small stack so we can
+						# run some C code
+	li	a0,0				# no need for the ram base
+	LA (a1, __ram_extent)			# storage for the extent of ram
+	jal	_get_ram_range
+
+	/* NOTE: a0[0] contains the last address+1 of memory. */
+	LA (a0, __ram_extent)
+	PTR_L	t0,0(a0)			# the extent of ram
+	lw	$0,-4(t0)			# check for valid memory
+	/* Allocate 32 bytes for the register parameters.  Allocate 16
+	   bytes for a null argv and envp.  Round the result up to 64
+	   bytes to preserve alignment.  */
+4:
+	PTR_ADDU t0,t0,-64
+	move	sp,t0				# set stack pointer
+	.end	zerobss
+
+/*
+ * initialize target-specific stuff. Only execute these
+ * functions if they exist.
+ */
+	.globl	hardware_init_hook .text
+	.globl	software_init_hook .text
+    .globl  _start .text
+	.globl	atexit .text
+	.globl	exit .text
+	.ent	init
+init:
+	LA (t9, hardware_init_hook)		# init the hardware if needed
+	beq	t9,zero,6f
+	jalr	t9
+6:
+	LA (t9, software_init_hook)		# init the software if needed
+	beq	t9,zero,7f
+	jalr	t9
+7:
+	LA (a0, 0)
+	jal	atexit
+
+#ifdef GCRT0
+	.globl	_ftext
+	.globl	_extext
+	LA (a0, _ftext)
+	LA (a1, _etext)
+	jal	monstartup
+#endif
+
+	/* restore argument registers */
+	LA (t0, __temp_space)
+	LD	a0,(REGSIZE * 0)(t0)
+	LD	a1,(REGSIZE * 1)(t0)
+	LD	a2,(REGSIZE * 2)(t0)
+
+	/* Convert pointers potentially */
+	.weak	__convert_argv_pointers
+	LA (t0, __convert_argv_pointers)
+	beqz	t0, 1f
+	jalr	t0
+1:
+	/* if a0 > 0 then we have arguments ready in a0 to a2 registers */
+	bgtz	a0,.Lmain
+	/* if a0 == 0 then no arguments have been set up */
+	beqz	a0, 1f
+	/* if a0 < -1 then we have undefined behaviour so assume no
+	   arguments have been set up */
+	slti	a0, a0, -1
+	bnez	a0, 1f
+
+	/* a0 == -1 */
+	.weak	__getargs
+	LA (t0, __getargs)
+	beqz	t0, 1f
+	jalr	t0				# get arguments
+	b	.Lmain
+1:
+	/* no arguments */
+	move	a0,zero				# set argc to 0
+	PTR_ADDU a1,sp,32			# argv = sp + 32
+	PTR_ADDU a2,sp,40			# envp = sp + 40
+	ST	zero,(a1)			# argv[argc] = 0
+	ST	zero,(a2)			# envp[0] = 0
+
+.Lmain:
+	jal	_start				# call the program start function
+
+	# fall through to the "exit" routine
+	move	a0,v0				# pass through the exit code
+	jal	exit				# call libc exit to run the G++
+						# destructors
+	.end	init
+
+
+/* Assume the PICBASE set up above is no longer valid below here.  */
+#ifdef __mips_embedded_pic
+#undef PICBASE
+#endif
+
+/*
+ * _exit -- Exit from the application. Normally we cause a user trap
+ *          to return to the ROM monitor for another run. NOTE: This is
+ *	    the only other routine we provide in the crt0.o object, since
+ *          it may be tied to the "_start0" routine. It also allows
+ *          executables that contain a complete world to be linked with
+ *          just the crt0.o object.
+ */
+	.globl	hardware_exit_hook .text
+	.globl	_exit
+	.ent _exit
+_exit:
+7:
+
+	# save exit code
+	LA (t0, __temp_space)
+	ST	a0,0(t0)
+
+#ifdef __mips_embedded_pic
+	/* Need to reinit PICBASE, since we might be called via exit()
+	   rather than via a return path which would restore old s0.  */
+#define PICBASE exit_PICBASE
+	.set	noreorder
+	PICBASE = .+8
+	bal	PICBASE
+	nop
+	move	s0,$31
+	.set	reorder
+#endif
+#ifdef GCRT0
+	LA (t0, _mcleanup)
+	jalr	t0
+#endif
+	LA (t0, hardware_exit_hook)
+	beq	t0,zero,1f
+	jalr	t0
+1:
+
+	# restore return value from main
+	LA (t0, __temp_space)
+	LD	a0,0(t0)
+
+	.global __exit .text
+	jal	__exit
+
+	# break instruction can cope with 0xfffff, but GAS limits the range:
+	break	1023
+	b	7b				# but loop back just in-case
+	.end _exit
+
+/* Assume the PICBASE set up above is no longer valid below here.  */
+#ifdef __mips_embedded_pic
+#undef PICBASE
+#endif
+
+/* EOF crt0.S */
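
As a side note (not part of the diff): hardware_init_hook, software_init_hook, hardware_exit_hook and hardware_hazard_hook are optional; the startup code above only calls each one when its symbol resolves to a non-zero address. A hypothetical board could supply them in C, for example:

    /* Hypothetical, board-specific hooks; the names match the symbols the
     * startup file checks for. The init hooks run from init, after the BSS
     * clear and stack setup, before _start is called; the exit hook runs
     * from _exit. */
    void
    hardware_init_hook(void)
    {
        /* e.g. bring up clocks or pin muxing before the OS starts */
    }

    void
    software_init_hook(void)
    {
        /* e.g. board-specific runtime setup */
    }

    void
    hardware_exit_hook(void)
    {
        /* e.g. park peripherals before returning to the boot monitor */
    }
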
diff --git a/hw/bsp/ci40/src/hal_bsp.c b/hw/bsp/ci40/src/hal_bsp.c
index d0dd0b7fe..a25f00d19 100644
--- a/hw/bsp/ci40/src/hal_bsp.c
+++ b/hw/bsp/ci40/src/hal_bsp.c
@@ -18,11 +18,46 @@
  */
 #include "hal/hal_bsp.h"
 #include "bsp/bsp.h"
+#include "syscfg/syscfg.h"
+#include "uart/uart.h"
+#if MYNEWT_VAL(UART_0) || MYNEWT_VAL(UART_1)
+#include "uart_hal/uart_hal.h"
+#endif
+
 #include <assert.h>
 
 const struct hal_flash *
-bsp_flash_dev(uint8_t id)
+hal_bsp_flash_dev(uint8_t id)
 {
     return 0;
 }
 
+#if MYNEWT_VAL(UART_0)
+static struct uart_dev os_bsp_uart0;
+#endif
+
+#if MYNEWT_VAL(UART_1)
+static struct uart_dev os_bsp_uart1;
+#endif
+
+void _close(int fd);
+
+void
+hal_bsp_init(void)
+{
+    int rc;
+
+#if MYNEWT_VAL(UART_0)
+    rc = os_dev_create((struct os_dev *) &os_bsp_uart0, "uart0",
+        OS_DEV_INIT_PRIMARY, 0, uart_hal_init, 0);
+    assert(rc == 0);
+#endif
+
+#if MYNEWT_VAL(UART_1)
+    rc = os_dev_create((struct os_dev *) &os_bsp_uart1, "uart1",
+        OS_DEV_INIT_PRIMARY, 0, uart_hal_init, 0);
+    assert(rc == 0);
+#endif
+
+    (void)rc;
+}
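
As an aside (not part of the diff): with hal_bsp_init() creating the "uart0"/"uart1" devices via os_dev_create(), an application would open them through the usual Mynewt device API. A hypothetical sketch, assuming the stock uart driver interface; names prefixed example_ are made up:

    #include <assert.h>
    #include <stdint.h>
    #include "os/os.h"
    #include "uart/uart.h"

    /* Hypothetical usage: open "uart0" once hal_bsp_init() has registered
     * it. The line settings and callbacks below are illustrative. */
    static int
    example_tx_char(void *arg)
    {
        return -1;                      /* nothing more to transmit */
    }

    static int
    example_rx_char(void *arg, uint8_t byte)
    {
        return 0;                       /* byte consumed */
    }

    static struct uart_conf example_uc = {
        .uc_speed = 115200,
        .uc_databits = 8,
        .uc_stopbits = 1,
        .uc_parity = UART_PARITY_NONE,
        .uc_flow_ctl = UART_FLOW_CTL_NONE,
        .uc_tx_char = example_tx_char,
        .uc_rx_char = example_rx_char,
    };

    void
    example_open_uart0(void)
    {
        struct uart_dev *dev;

        dev = (struct uart_dev *)os_dev_open("uart0", OS_TIMEOUT_NEVER,
                                             &example_uc);
        assert(dev != NULL);
    }
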
diff --git a/hw/bsp/ci40/src/os_bsp.c b/hw/bsp/ci40/src/os_bsp.c
deleted file mode 100644
index 8980c1e9b..000000000
--- a/hw/bsp/ci40/src/os_bsp.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-#include "hal/hal_bsp.h"
-#include "syscfg/syscfg.h"
-#include "uart/uart.h"
-#if MYNEWT_VAL(UART_0) || MYNEWT_VAL(UART_1)
-#include "uart_hal/uart_hal.h"
-#endif
-
-#include <assert.h>
-
-#if MYNEWT_VAL(UART_0)
-static struct uart_dev os_bsp_uart0;
-#endif
-
-#if MYNEWT_VAL(UART_1)
-static struct uart_dev os_bsp_uart1;
-#endif
-
-void _close(int fd);
-
-void
-hal_bsp_init(void)
-{
-    int rc;
-
-#if MYNEWT_VAL(UART_0)
-    rc = os_dev_create((struct os_dev *) &os_bsp_uart0, "uart0",
-        OS_DEV_INIT_PRIMARY, 0, uart_hal_init, 0);
-    assert(rc == 0);
-#endif
-
-#if MYNEWT_VAL(UART_1)
-    rc = os_dev_create((struct os_dev *) &os_bsp_uart1, "uart1",
-        OS_DEV_INIT_PRIMARY, 0, uart_hal_init, 0);
-    assert(rc == 0);
-#endif
-
-    (void)rc;
-}
diff --git a/hw/bsp/ci40/uhi32.ld b/hw/bsp/ci40/uhi32.ld
index c6e73c932..23827981b 100644
--- a/hw/bsp/ci40/uhi32.ld
+++ b/hw/bsp/ci40/uhi32.ld
@@ -34,13 +34,15 @@
  * support.
  */
 
-__entry = DEFINED(__reset_vector) ? 0xbfc00000 : _start;
+EXTERN(_start0)
+EXTERN(_start)
+__entry = DEFINED(__reset_vector) ? 0xbfc00000 : _start0;
 ENTRY(__entry)
 OUTPUT_FORMAT("elf32-tradlittlemips", "elf32-tradbigmips", "elf32-tradlittlemips")
 GROUP(-lc -luhi -lgcc -lhal)
 SEARCH_DIR(.)
 __DYNAMIC  =  0;
-STARTUP(crt0.o)
+
 /* Force the exception handler to be registered */
 EXTERN(__register_excpt_handler)
 /* Force the exception handler to be included in the link */
diff --git a/hw/mcu/mips/danube/src/gic.h b/hw/mcu/mips/danube/include/mcu/gic.h
similarity index 100%
rename from hw/mcu/mips/danube/src/gic.h
rename to hw/mcu/mips/danube/include/mcu/gic.h
diff --git a/hw/mcu/mips/danube/src/gic.c b/hw/mcu/mips/danube/src/gic.c
index 872317e9b..a0584aade 100644
--- a/hw/mcu/mips/danube/src/gic.c
+++ b/hw/mcu/mips/danube/src/gic.c
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-#include "gic.h"
+#include "mcu/gic.h"
 
 #include <mips/cpu.h>
 #include <mips/hal.h>
@@ -127,7 +127,7 @@ gic_place(uint32_t base)
 int
 gic_init(void)
 {
-    /* Check for GCR and get GIC location */
+    /* Check for GCR */
     if (!((mips32_getconfig0() & CFG0_M) && (mips32_getconfig1() & CFG1_M)
         && (mips32_getconfig2() & CFG2_M)
         && (mips32_getconfig3() & CFG3_CMGCR))) {
diff --git a/hw/mcu/mips/danube/src/hal_os_tick.c b/hw/mcu/mips/danube/src/hal_os_tick.c
index 8fc68d645..5f67dc82e 100644
--- a/hw/mcu/mips/danube/src/hal_os_tick.c
+++ b/hw/mcu/mips/danube/src/hal_os_tick.c
@@ -20,6 +20,7 @@
 #include <assert.h>
 #include <os/os.h>
 #include <hal/hal_os_tick.h>
+#include "mcu/gic.h"
 
 /*
  * XXX implement tickless mode.
diff --git a/hw/mcu/mips/danube/src/hal_uart.c b/hw/mcu/mips/danube/src/hal_uart.c
index 9729f0c35..6d28dd757 100644
--- a/hw/mcu/mips/danube/src/hal_uart.c
+++ b/hw/mcu/mips/danube/src/hal_uart.c
@@ -26,7 +26,7 @@
 #include <mips/cpu.h>
 #include <mips/hal.h>
 
-#include "gic.h"
+#include "mcu/gic.h"
 
 static const uint32_t UART_0_INT_NO = 24;
 static const uint32_t UART_1_INT_NO = 25;
diff --git a/hw/mips-hal/pkg.yml b/hw/mips-hal/pkg.yml
new file mode 100644
index 000000000..147a25493
--- /dev/null
+++ b/hw/mips-hal/pkg.yml
@@ -0,0 +1,28 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+pkg.name: hw/mips-hal
+pkg.description: hardware abstraction layer for MIPS processors.
+pkg.author: "MIPS"
+pkg.homepage: "http://www.mips.com"
+pkg.keywords:
+    - hal
+    - mips
+
+pkg.arch: mips
diff --git a/hw/mips-hal/src/arch/mips/__exit.c b/hw/mips-hal/src/arch/mips/__exit.c
new file mode 100644
index 000000000..7013cb3dc
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/__exit.c
@@ -0,0 +1,52 @@
+/*
+ * __exit.c
+*/
+
+/*
+ * Copyright (c) 2014, Imagination Technologies Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*
+ * @Synopsis     void __exit (int32_t exit_code);
+ *
+ *               Parameters:
+ *                 $4 - Exit code
+ *
+ *               Return:
+ *                 None
+ *
+ * @Description  Transfer control to the debug port
+*/
+
+#include <stdint.h>
+
+__attribute__ ((weak)) void __exit (int32_t exit_code)
+{
+  return;
+}
+
diff --git a/hw/mips-hal/src/arch/mips/cache.S b/hw/mips-hal/src/arch/mips/cache.S
new file mode 100644
index 000000000..42d66ccdc
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/cache.S
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2014-2015, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+.set nomips16
+#include "cache.h"
+
+DECL(mips_icache_size,-1)
+DECL(mips_icache_linesize,-1)
+DECL(mips_icache_ways,1)
+
+DECL(mips_dcache_size,-1)
+DECL(mips_dcache_linesize,-1)
+DECL(mips_dcache_ways,1)
+
+DECL(mips_scache_size,-1)
+DECL(mips_scache_linesize,-1)
+DECL(mips_scache_ways,1)
+
+DECL(mips_tcache_size,-1)
+DECL(mips_tcache_linesize,-1)
+DECL(mips_tcache_ways,1)
+
+/*
+ * void mips_size_cache (void)
+ *
+ * Size caches without reinitialising and losing dirty cache lines.
+ */
+SWCACHE(size_cache)
+
+/*
+ * void mips_clean_icache (vaddr_t va, unsigned int size)
+ *
+ * Writeback and invalidate a virtual address range in instruction caches.
+ * Joint caches (i.e. combined I & D) will be cleaned too.
+ */
+SWCACHE(clean_icache)
diff --git a/hw/mips-hal/src/arch/mips/cache.h b/hw/mips-hal/src/arch/mips/cache.h
new file mode 100644
index 000000000..a1b6c40f3
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/cache.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2014-2015, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <mips/asm.h>
+#include <mips/regdef.h>
+#include <mips/prid.h>
+
+/*
+ * The function of this module is to support boards which can handle
+ * multiple MIPS cpus with different cache architectures.
+ */
+
+#define DECL(x, val)		\
+	.sdata;			\
+	EXPORTS(x, 4);		\
+	.word	val
+
+#define SWCACHE(name) 		\
+LEAF(_ASMCONCAT(mips_, name));	\
+	j	_ASMCONCAT(m32_, name); \
+	jr	ra;		\
+END(_ASMCONCAT(mips_, name))
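
For orientation (not part of the diff): DECL(x, val) emits a 4-byte word named x in .sdata, and SWCACHE(name) emits a generic mips_<name> entry point that simply forwards to the MIPS32-specific m32_<name> routine. A rough C analogue:

    #include <stdint.h>

    /* Rough, illustrative C analogue of the macros above. */

    /* DECL(mips_icache_size, -1) is roughly: */
    int32_t mips_icache_size = -1;      /* -1 means "not sized yet" */

    /* SWCACHE(flush_cache) is roughly: */
    extern void m32_flush_cache(void);

    void
    mips_flush_cache(void)
    {
        m32_flush_cache();              /* the asm uses a "j" tail jump */
    }
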
diff --git a/hw/mips-hal/src/arch/mips/cache_ops.S b/hw/mips-hal/src/arch/mips/cache_ops.S
new file mode 100644
index 000000000..e21e14eb9
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/cache_ops.S
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2014-2015, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+.set nomips16
+#include "cache.h"
+
+/*
+ * void mips_flush_cache (void)
+ *
+ * Writeback and invalidate all caches in the quickest way possible.
+ */
+SWCACHE(flush_cache)
+
+/*
+ * void mips_flush_dcache (void)
+ *
+ * Writeback and invalidate only the data caches.
+ * Joint caches (i.e. combined I & D) will be flushed too.
+ */
+SWCACHE(flush_dcache)
+
+/*
+ * void mips_flush_icache (void)
+ *
+ * Writeback and invalidate only the instruction caches.
+ * Joint caches (i.e. combined I & D) will be flushed too.
+ */
+SWCACHE(flush_icache)
+
+/*
+ * void mips_clean_cache (vaddr_t va, unsigned int size)
+ *
+ * Writeback and invalidate a virtual address range in all caches.
+ */
+SWCACHE(clean_cache)
+
+/*
+ * void mips_sync_icache (vaddr_t va, unsigned int size)
+ *
+ * Synchronise i-cache with d-cache in this virtual address range;
+ * often same as mips_clean_cache.
+ */
+SWCACHE(sync_icache)
+
+/*
+ * void mips_clean_dcache (vaddr_t va, unsigned int size)
+ *
+ * Writeback and invalidate a virtual address range in data caches.
+ * Joint caches (i.e. combined I & D) will be cleaned too.
+ */
+SWCACHE(clean_dcache)
+
+/*
+ * void mips_clean_dcache_nowrite (vaddr_t va, unsigned int size)
+ *
+ * Invalidate a virtual address range in data caches.
+ * Joint caches (i.e. combined I & D) will be cleaned too.
+ * XXX Only safe if region is totally cache line aligned,
+ * i.e. it doesn't share a cache line with other data.
+ */
+SWCACHE(clean_dcache_nowrite)
+
+/*
+ * void mips_lock_dcache (vaddr_t va, unsigned int size)
+ *
+ * Load and lock a block of data into the primary data cache
+ */
+SWCACHE(lock_dcache)
+
+/*
+ * void mips_lock_icache (vaddr_t va, unsigned int size)
+ *
+ * Load and lock a block of instructions into the primary instruction cache
+ */
+SWCACHE(lock_icache)
+
+/*
+ * void mips_lock_scache (vaddr_t va, unsigned int size)
+ *
+ * Load and lock a block of memory into the secondary cache
+ */
+SWCACHE(lock_scache)
diff --git a/hw/mips-hal/src/arch/mips/link.c b/hw/mips-hal/src/arch/mips/link.c
new file mode 100644
index 000000000..8287ef3dd
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/link.c
@@ -0,0 +1,44 @@
+/*
+ * link.c
+*/
+
+/*
+ * Copyright (c) 2014, Imagination Technologies Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <errno.h>
+
+int
+link (const char *oldname, const char *newname)
+{
+  (void) oldname;
+  (void) newname;
+  errno = EIO;
+  return -1;
+}
+
diff --git a/hw/mips-hal/src/arch/mips/m32cache.S b/hw/mips-hal/src/arch/mips/m32cache.S
new file mode 100644
index 000000000..467816c4e
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/m32cache.S
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2014-2015, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+.set nomips16
+#include "m32cache.h"
+/*
+ * void m32_size_cache()
+ *
+ * Work out size of I, D & S caches (assume already initialised)
+ */
+LEAF(m32_size_cache)
+	lw	t0, mips_icache_size
+	move	tmp3, ra
+	bgtz	t0, 8f				# already known?
+
+	bal	_size_cache
+	move	ra, tmp3
+
+8:	# Return
+	jr	ra
+END(m32_size_cache)
+
+/*
+ * void m32_clean_icache (unsigned kva, size_t n)
+ *
+ * Writeback and invalidate address range in instruction caches
+ */
+LEAF(m32_clean_icache)
+	SIZE_CACHE(a2, mips_icache_linesize)
+	vcacheop(a0,a1,a2,Hit_Invalidate_I)
+
+	lw	a2, mips_scache_linesize
+	blez	a2, 9f
+	vcacheop(a0,a1,a2,Hit_Writeback_Inv_S)
+	sync
+
+9:	jr.hb	ra
+END(m32_clean_icache)
+
+/*
+ * static void _size_cache()
+ *
+ * Internal routine to determine cache sizes by looking at config
+ * registers.  Sizes are returned in registers, as follows:
+ *
+ * Do not use tmp3 (reg a1) and tmp1 (reg v1) in this function.
+ */
+LEAF(_size_cache)
+	# Read $config, 0 to check presence of $config, 1
+	mfc0	cfg, C0_CONFIG
+
+	# Read Configuration register, select 1
+	mfc0	cfg, C0_CONFIG1
+
+	# Get I-cache line size
+	ext	tmp, cfg, CFG1_IL_SHIFT, CFG1_IL_BITS
+	beqz	tmp, 8f		# No I-cache
+
+	# Get number of I-cache ways
+	ext	iways, cfg, CFG1_IA_SHIFT, CFG1_IA_BITS
+	addiu	iways, iways, 1
+	move	icachesize,iways
+
+	# Total icache size = lines/way * linesize * ways
+	li	ilinesize, 1
+	addiu	tmp, tmp, 1
+	sllv	ilinesize, ilinesize, tmp
+	sllv	icachesize, icachesize, tmp
+
+	# Get I-cache lines per way
+	ext	tmp, cfg, CFG1_IS_SHIFT, CFG1_IS_BITS
+	addiu	tmp, tmp, 1
+	andi	tmp, tmp, 7
+	addiu	tmp, tmp, 5
+	sllv	icachesize, icachesize, tmp
+
+	# Store icache config
+	sw	icachesize, mips_icache_size
+	sw	ilinesize, mips_icache_linesize
+	sw	iways, mips_icache_ways
+
+8:	# No I-cache, check for D-cache
+	ext	tmp, cfg, CFG1_DL_SHIFT, CFG1_DL_BITS
+	beqz	tmp, 9f					# No D-cache
+
+	# Get number of dcache ways
+	ext	dways, cfg, CFG1_DA_SHIFT, CFG1_DA_BITS
+	addiu	dways, dways, 1
+	move	dcachesize,dways
+
+	# Total dcache size = lines/way * linesize * ways
+	li	dlinesize, 1
+	addiu	tmp, tmp, 1
+	sllv	dlinesize, dlinesize, tmp
+	sllv	dcachesize, dcachesize, tmp
+
+	# Get dcache lines per way
+	ext	tmp, cfg, CFG1_DS_SHIFT, CFG1_DS_BITS
+	addiu	tmp, tmp, 1
+	andi	tmp, tmp, 7
+	addiu	tmp, tmp, 5
+	sllv	dcachesize, dcachesize, tmp
+
+	# Store dcache config
+	sw	dcachesize, mips_dcache_size
+	sw	dlinesize, mips_dcache_linesize
+	sw	dways, mips_dcache_ways
+9:
+	LA	tmp, __cache_size_hook
+	move	tmp4, ra
+	jal	tmp
+	move	ra, tmp4
+
+	# Return
+	jr	ra
+END(_size_cache)
diff --git a/hw/mips-hal/src/arch/mips/m32cache.h b/hw/mips-hal/src/arch/mips/m32cache.h
new file mode 100644
index 000000000..d58a16dd9
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/m32cache.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2014-2015, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <mips/asm.h>
+#include <mips/regdef.h>
+#include <mips/m32c0.h>
+
+/*
+ * MIPS32 cache operations.
+ *
+ * The _flush and _clean functions are complex composites that do whatever
+ * is necessary to flush/clean ALL caches, in the quickest possible way.
+ * The other functions are targeted explicitly at a particular cache
+ * I or D; it is up to the user to call the correct set of functions
+ * for a given system.
+ */
+
+IMPORT(mips_icache_size,4)
+IMPORT(mips_icache_linesize,4)
+IMPORT(mips_icache_ways,4)
+
+IMPORT(mips_dcache_size,4)
+IMPORT(mips_dcache_linesize,4)
+IMPORT(mips_dcache_ways,4)
+
+IMPORT(mips_scache_size,4)
+IMPORT(mips_scache_linesize,4)
+IMPORT(mips_scache_ways,4)
+
+/*
+ * Macros to automate cache operations
+ */
+
+#define addr	t0
+#define maxaddr	t1
+#define mask	t2
+
+#define cacheop(kva, n, linesize, op)	\
+	/* check for bad size */	\
+	blez	n,11f ;			\
+	PTR_ADDU maxaddr,kva,n ;	\
+	/* align to line boundaries */	\
+	PTR_SUBU mask,linesize,1 ;	\
+	not	mask ;			\
+	and	addr,kva,mask ;		\
+	PTR_SUBU addr,linesize ;	\
+	PTR_ADDU maxaddr,-1 ;		\
+	and	maxaddr,mask ;		\
+	/* the cacheop loop */		\
+10:	PTR_ADDU addr,linesize ;	\
+	cache	op,0(addr) ;	 	\
+	bne	addr,maxaddr,10b ;	\
+11:
+
+/* virtual cache op: no limit on size of region */
+#define vcacheop(kva, n, linesize, op)	\
+	cacheop(kva, n, linesize, op)
+
+/* indexed cache op: region limited to cache size */
+#define icacheop(kva, n, linesize, size, op) \
+	move	t3,n;			\
+	bltu	n,size,12f ;		\
+	move	t3,size ;		\
+12:	cacheop(kva, t3, linesize, op)
+
+
+/* caches may not have been sized yet */
+#define SIZE_CACHE(reg,which)		\
+	lw	reg,which;		\
+	move	v1,ra;			\
+	bgez	reg,9f;			\
+	bal	m32_size_cache;		\
+	lw	reg,which;		\
+	move	ra,v1;			\
+9:	blez	reg,9f;			\
+	sync
+
+#define tmp		t0
+#define cfg		t1
+#define icachesize	t2
+#define ilinesize	t3
+#define iways		ta0
+#define dcachesize	ta1
+#define dlinesize	ta2
+#define dways		ta3
+#define scachesize	t8
+#define slinesize	t9
+#define sways		v0
+#define tmp1		v1
+#define tmp2		a0
+#define tmp3		a1
+#define tmp4		a2
+#define tmp5		a3
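
For orientation (not part of the diff): the cacheop() macro above issues one CACHE instruction per cache line covering the byte range [kva, kva + n). A rough C rendering of its loop, with do_cache_op standing in for the actual "cache op,0(addr)" instruction (illustrative only):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative rendering of the cacheop() loop; linesize must be a
     * power of two, as it is for real MIPS cache lines. */
    static void
    cacheop_sketch(uintptr_t kva, size_t n, size_t linesize,
                   void (*do_cache_op)(uintptr_t line))
    {
        uintptr_t addr, maxaddr, mask;

        if (n == 0) {
            return;                         /* "blez n,11f": nothing to do */
        }
        mask = ~(uintptr_t)(linesize - 1);  /* line-alignment mask */
        addr = (kva & mask) - linesize;     /* one line before the start */
        maxaddr = (kva + n - 1) & mask;     /* last line to touch */
        do {
            addr += linesize;
            do_cache_op(addr);              /* stands in for cache op,0(addr) */
        } while (addr != maxaddr);
    }
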
diff --git a/hw/mips-hal/src/arch/mips/m32cache_ops.S b/hw/mips-hal/src/arch/mips/m32cache_ops.S
new file mode 100644
index 000000000..a751a455d
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/m32cache_ops.S
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2014-2015, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+.set nomips16
+#include "m32cache.h"
+
+/*
+ * void m32_flush_cache (void)
+ *
+ * Writeback and invalidate all caches
+ */
+LEAF(m32_flush_cache)
+	SIZE_CACHE(a1,mips_dcache_size)
+
+	/* writeback and invalidate primary caches individually */
+	lw	a2,mips_dcache_linesize
+	li	a0,KSEG0_BASE
+	cacheop(a0,a1,a2,Index_Writeback_Inv_D)
+
+9:	lw	a1,mips_icache_size
+	lw	a2,mips_icache_linesize
+	blez	a1,9f
+	li	a0,KSEG0_BASE
+	cacheop(a0,a1,a2,Index_Invalidate_I)
+
+9:	lw	a1,mips_scache_size
+	lw	a2,mips_scache_linesize
+	blez	a1,9f
+	sync
+	li	a0,KSEG0_BASE
+	cacheop(a0,a1,a2,Index_Writeback_Inv_S)
+
+9:	sync
+	jr.hb	ra
+END(m32_flush_cache)
+
+/*
+ * void m32_flush_dcache (void)
+ *
+ * Writeback and invalidate data caches only
+ */
+LEAF(m32_flush_dcache)
+	SIZE_CACHE(a1,mips_dcache_size)
+
+	/* writeback and invalidate primary data cache */
+	lw	a2,mips_dcache_linesize
+	li	a0,KSEG0_BASE
+	cacheop(a0,a1,a2,Index_Writeback_Inv_D)
+
+9:	lw	a1,mips_scache_size
+	lw	a2,mips_scache_linesize
+	blez	a1,9f
+	sync
+	li	a0,KSEG0_BASE
+	cacheop(a0,a1,a2,Index_Writeback_Inv_S)
+
+9:	sync
+	jr.hb	ra
+END(m32_flush_dcache)
+
+/*
+ * void m32_flush_icache (void)
+ *
+ * Writeback and invalidate instruction cache only
+ */
+LEAF(m32_flush_icache)
+	SIZE_CACHE(a1,mips_icache_size)
+
+	/* writeback and invalidate primary instruction cache */
+	lw	a2,mips_icache_linesize
+	li	a0,KSEG0_BASE
+	cacheop(a0,a1,a2,Index_Invalidate_I)
+
+9:	lw	a1,mips_scache_size
+	blez	a1,9f
+	lw	a2,mips_scache_linesize
+	li	a0,KSEG0_BASE
+	cacheop(a0,a1,a2,Index_Writeback_Inv_S)
+
+9:	sync
+	jr.hb	ra
+END(m32_flush_icache)
+
+/*
+ * void m32_clean_cache (unsigned kva, size_t n)
+ *
+ * Writeback and invalidate address range in all caches
+ */
+LEAF(m32_clean_cache)
+	SIZE_CACHE(a2,mips_dcache_linesize)
+	vcacheop(a0,a1,a2,Hit_Writeback_Inv_D)
+
+9:	lw	a2,mips_icache_linesize
+	blez	a2,9f
+	vcacheop(a0,a1,a2,Hit_Invalidate_I)
+
+9:	lw	a2,mips_scache_linesize
+	blez	a2,9f
+
+	sync
+	vcacheop(a0,a1,a2,Hit_Writeback_Inv_S)
+
+9:	sync
+	jr.hb	ra
+END(m32_clean_cache)
+
+/*
+ * void m32_sync_icache (unsigned kva, size_t n)
+ *
+ * Synchronise icache and dcache for virtual address range
+ */
+LEAF(m32_sync_icache)
+	/* check for bad size */
+	PTR_ADDU	maxaddr,a0,a1
+	blez	a1,9f
+
+	/* get synci step and skip if not required */
+	rdhwr	a2,$1
+	PTR_ADDU	maxaddr,-1
+	beqz	a2,9f
+
+	/* ensure stores complete */
+	sync
+
+	/* align to line boundaries */
+	PTR_SUBU	mask,a2,1
+	not	mask
+	and	addr,a0,mask
+	PTR_SUBU   	addr,a2
+	and	maxaddr,mask
+
+	/* the cacheop loop */
+10:	PTR_ADDU   	addr,a2
+	synci	0(addr)
+	bne     addr,maxaddr,10b
+
+9:	sync
+	jr.hb	ra
+END(m32_sync_icache)
+
+/*
+ * void m32_clean_dcache (unsigned kva, size_t n)
+ *
+ * Writeback and invalidate address range in data caches
+ */
+LEAF(m32_clean_dcache)
+	SIZE_CACHE(a2,mips_dcache_linesize)
+	vcacheop(a0,a1,a2,Hit_Writeback_Inv_D)
+
+9:	lw	a2,mips_scache_linesize
+	blez	a2,9f
+	sync
+	vcacheop(a0,a1,a2,Hit_Writeback_Inv_S)
+
+9:	sync
+	jr.hb	ra
+END(m32_clean_dcache)
+
+/*
+ * void m32_clean_dcache_nowrite (unsigned kva, size_t n)
+ *
+ * Invalidate (but don't writeback) address range in data caches
+ * XXX Only safe if region is totally cache-line aligned.
+ */
+LEAF(m32_clean_dcache_nowrite)
+	SIZE_CACHE(a2,mips_dcache_linesize)
+	vcacheop(a0,a1,a2,Hit_Invalidate_D)
+
+9:	lw	a2,mips_scache_linesize
+	blez	a2,9f
+	vcacheop(a0,a1,a2,Hit_Invalidate_S)
+
+9:	sync
+	jr.hb	ra
+END(m32_clean_dcache_nowrite)
+
+/*
+ * Cache locking
+ *
+ * The MIPS32 cache architecture does support per-line cache locking.
+ *
+ * WARNING: if you lock any cache lines, then don't call the
+ * mips_flush_xcache routines, because these will flush the
+ * locked data out of the cache too; use only mips_clean_xcache.
+ */
+
+/*
+ * void m32_lock_dcache (void *data, size_t n)
+ *
+ * Load and lock a block of data into the d-cache
+ */
+LEAF(m32_lock_dcache)
+	SIZE_CACHE(a2,mips_dcache_linesize)
+	vcacheop(a0,a1,a2,Fetch_Lock_D)
+	sync
+9:	jr.hb	ra
+END(m32_lock_dcache)
+
+/*
+ * void m32_lock_icache (void *code, size_t n)
+ *
+ * Load and lock a block of instructions into the i-cache
+ */
+LEAF(m32_lock_icache)
+	SIZE_CACHE(a2,mips_icache_linesize)
+	vcacheop(a0,a1,a2,Fetch_Lock_I)
+	sync
+9:	jr.hb	ra
+END(m32_lock_icache)
+
+/*
+ * void m32_lock_scache (void * data, size_t n)
+ *
+ * Load and lock a block of data into the s-cache
+ */
+LEAF(m32_lock_scache)
+	SIZE_CACHE(a2,mips_scache_linesize)
+	vcacheop(a0,a1,a2,Fetch_Lock_S)
+	sync
+9:	jr.hb	ra
+END(m32_lock_scache)
diff --git a/hw/mips-hal/src/arch/mips/m32tlb_ops.S b/hw/mips-hal/src/arch/mips/m32tlb_ops.S
new file mode 100644
index 000000000..bb6a09b76
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/m32tlb_ops.S
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2014-2015, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+.set nomips16
+#include <mips/asm.h>
+#include <mips/regdef.h>
+#include <mips/m32c0.h>
+#include <mips/m32tlb.h>
+#include <mips/asm.h>
+
+/*
+ * void mips_tlbwi2(tlbhi_t hi, tlblo_t lo0, tlblo_t lo1, unsigned mask,
+ *			unsigned idx)
+ *
+ * Writes hi,lo0,lo1 and msk into the TLB entry specified by index.
+ *
+ */
+LEAF(mips_tlbwi2)
+#if _MIPS_SIM==_ABIO32
+	lw	ta0, 16(sp)
+#endif
+	mtc0	a0, C0_ENTRYHI
+	mtc0	a1, C0_ENTRYLO0
+	mtc0	a2, C0_ENTRYLO1
+	mtc0	a3, C0_PAGEMASK
+	mtc0	ta0, C0_INDEX
+	ehb				# mtc0, Hazard on tlbwi
+
+	tlbwi
+	.set	push
+	.set	noreorder
+	jr.hb	ra
+	nop
+	.set	pop
+END(mips_tlbwi2)
+
+
+/*
+ * void mips_tlbwr2(tlbhi_t hi, tlblo_t lo0, tlblo_t lo1, unsigned mask)
+ *
+ * Writes hi, lo0, lo1 and msk into the TLB entry specified by the
+ * Random register.
+ *
+ */
+LEAF(mips_tlbwr2)
+	mtc0	a0, C0_ENTRYHI
+	mtc0	a1, C0_ENTRYLO0
+	mtc0	a2, C0_ENTRYLO1
+	mtc0	a3, C0_PAGEMASK
+
+	ehb				# mtc0, hazard on tlbwr
+	tlbwr
+	.set	push
+	.set	noreorder
+	jr.hb	ra
+	nop
+	.set	pop
+END(mips_tlbwr2)
+
+/*
+ * int mips_tlbrwr2(tlbhi_t hi, tlblo_t lo0, tlblo_t lo1, unsigned mask)
+ *
+ * Probes the TLB for an entry matching hi and if present rewrites that
+ * entry, otherwise updates a random entry. A safe way to update the TLB.
+ *
+ */
+LEAF(mips_tlbrwr2)
+	mfc0	t0, C0_ENTRYHI
+	mtc0	a0, C0_ENTRYHI
+	ehb		# MTCO, hazard on tlbp
+
+	tlbp
+	ehb		# tlbp, hazard on MFCO C0_INDEX
+
+	mfc0	v0, C0_INDEX
+	mtc0	a1, C0_ENTRYLO0
+	mtc0	a2, C0_ENTRYLO1
+	mtc0	a3, C0_PAGEMASK
+
+	ehb		# mtc0, hazard on tlbwi
+	bltz	v0, 1f	# no matching entry
+
+	tlbwi
+	mtc0	t0, C0_ENTRYHI
+	.set	push
+	.set	noreorder
+	jr.hb	ra
+	nop
+	.set	pop
+
+1:	tlbwr
+	mtc0	t0, C0_ENTRYHI
+	.set	push
+	.set	noreorder
+	jr.hb	ra
+	nop
+	.set	pop
+END(mips_tlbrwr2)
+
+
+/*
+ * void mips_tlbri2(tlbhi_t *hi, tlblo_t *lo0, tlblo_t *lo1, unsigned *mask,
+ *		unsigned index)
+ *
+ * Reads the TLB entry specified by index, and returns the EntryHi, EntryLo0,
+ * EntryLo1 and PageMask parts in *phi, *plo0, *plo1 and *pmsk respectively.
+ *
+ */
+LEAF(mips_tlbri2)
+#if _MIPS_SIM==_ABIO32
+	lw	ta0,16(sp)      	# index
+#endif
+	mtc0	ta0, C0_INDEX
+	ehb				# mtc0, hazard on tlbr
+
+	tlbr
+	ehb				# tlbr, hazard on entry*, pagemask
+
+	mfc0	t0, C0_ENTRYHI
+	mfc0	t1, C0_ENTRYLO0
+	mfc0	t2, C0_ENTRYLO1
+	mfc0	t3, C0_PAGEMASK
+	sw	t0, 0(a0)
+	sw	t1, 0(a1)
+	sw	t2, 0(a2)
+	sw	t3, 0(a3)
+	jr	ra
+END(mips_tlbri2)
+
+
+/*
+ * int mips_tlbprobe2(tlbhi_t hi, tlblo_t *lo0, tlblo_t *lo1,
+ * 	unsigned int *mask)
+ *
+ * Probes the TLB for an entry matching hi and returns its index, or -1 if
+ * not found. If found, then the EntryLo0, EntryLo1 and PageMask parts of the
+ * entry are also returned in *plo0, *plo1 and *pmsk respectively.
+ *
+ */
+LEAF(mips_tlbprobe2)
+	mfc0	t0, C0_ENTRYHI
+	mtc0	a0, C0_ENTRYHI
+	ehb				# mtc0, hazard on tlbp
+
+	tlbp
+	ehb				# tlbp, hazard on index.
+
+	mfc0	v0, C0_INDEX
+	bltz	v0, 1f			# Return -1 if not found.
+
+	tlbr
+	ehb				# tlbr, hazard on entry*, pagemask
+
+	mfc0	v1, C0_ENTRYLO0
+	mfc0	t1, C0_ENTRYLO1
+	mfc0	t2, C0_PAGEMASK
+
+	mtc0	t0, C0_ENTRYHI		# restore entry hi
+
+	sw	v1, 0(a1)
+	sw	t1, 0(a2)
+	sw	t2, 0(a3)
+	.set	push
+	.set	noreorder
+	jr.hb	ra
+	nop
+	.set	pop
+
+1:	mtc0	t0, C0_ENTRYHI		# restore entry hi
+	li	v0, -1
+	.set	push
+	.set	noreorder
+	jr.hb	ra
+	nop
+	.set	pop
+END(mips_tlbprobe2)
+
+
+/*
+ * void mips_tlbinval(tlbhi_t a0)
+ *
+ * Probes the TLB for an entry matching hi, and if present invalidates it.
+ *
+ */
+LEAF(mips_tlbinval)
+	mfc0	t0, C0_ENTRYHI		# save old entry hi
+	mtc0	a0, C0_ENTRYHI
+	ehb				# mtc0, Hazard on tlbp
+
+	tlbp
+	ehb				# tlbp, Hazard on index, entry*
+
+	mfc0	v0, C0_INDEX
+	bltz	v0, 4f
+
+	mtc0	zero, C0_ENTRYLO0
+	mtc0	zero, C0_ENTRYLO1
+
+	mfc0	t1, C0_CONFIG3
+	ext	t1, t1, CFG3_M_SHIFT, 1
+	beqz	t1, 2f
+
+	mfc0	t1, C0_CONFIG4
+	ext	t1, t1, CFG4_IE_SHIFT, CFG4_IE_BITS
+	beqz	t1, 2f
+
+	li	t1, C0_ENTRYHI_EHINV_MASK
+	b	3f
+
+2:	li	t1, (KSEG0_BASE - 2<<13)
+5:	addiu	t1, t1, 2<<13
+	mtc0	t1, C0_ENTRYHI
+	ehb				# mtc0, Hazard on tlbp
+
+	tlbp
+	ehb				# tlbp, hazard on index
+
+	mfc0	t2, C0_INDEX
+	bgez	t2, 5b
+
+	mtc0	v0, C0_INDEX
+
+3:	mtc0	t1, C0_ENTRYHI
+	ehb				# mtc0, hazard on tlbwi
+
+	tlbwi
+	ehb				# tlbwi, hazard
+
+4:	mtc0	t0,C0_ENTRYHI		# restore entry hi
+	.set	push
+	.set	noreorder
+	jr.hb	ra
+	nop
+	.set	pop
+
+END(mips_tlbinval)
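
For orientation, here is a minimal C-side sketch of the 32-bit TLB helpers defined
above. The typedefs and prototypes are assumptions inferred from the assembly entry
points and their comments (the canonical declarations live in the MIPS HAL headers),
and the field encodings of EntryHi/EntryLo are deliberately left to the caller.

    /* Sketch only: types and prototypes assumed from the entry points above. */
    typedef unsigned int tlbhi_t;
    typedef unsigned int tlblo_t;

    extern int  mips_tlbrwr2(tlbhi_t hi, tlblo_t lo0, tlblo_t lo1, unsigned mask);
    extern int  mips_tlbprobe2(tlbhi_t hi, tlblo_t *lo0, tlblo_t *lo1,
                               unsigned int *mask);
    extern void mips_tlbinval(tlbhi_t hi);

    /* Install (or refresh) a mapping for the pair of pages described by hi/lo0/lo1.
       mips_tlbrwr2 rewrites a matching entry if one exists, otherwise it picks a
       random slot, so it is the safe way to update the TLB. */
    static void
    install_mapping(tlbhi_t hi, tlblo_t lo0, tlblo_t lo1)
    {
        (void)mips_tlbrwr2(hi, lo0, lo1, 0 /* PageMask: smallest page size */);
    }

    /* Look a mapping up; returns the matching index, or -1 if hi is not present. */
    static int
    lookup_mapping(tlbhi_t hi)
    {
        tlblo_t lo0, lo1;
        unsigned int mask;

        return mips_tlbprobe2(hi, &lo0, &lo1, &mask);
    }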
diff --git a/hw/mips-hal/src/arch/mips/m64tlb_ops.S b/hw/mips-hal/src/arch/mips/m64tlb_ops.S
new file mode 100644
index 000000000..a4fe3c159
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/m64tlb_ops.S
@@ -0,0 +1,424 @@
+/*
+ * Copyright 2015, Imagination Technologies Limited and/or its
+ *                 affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+/*
+ * m64tlb_ops.S: MIPS XPA TLB support functions
+ *
+ */
+
+.set nomips16
+#include <mips/m64tlb.h>
+#include <mips/asm.h>
+#include <mips/endian.h>
+
+#if BYTE_ORDER == BIG_ENDIAN
+#define PTR_XO32_MTC0(top, bot, reg)	\
+	mtc0	bot, reg; 		\
+	mthc0	top, reg;
+
+#define PTR_XO32_MFC0(top, bot, reg)	\
+	mfc0	bot, reg; 		\
+	mfhc0	top, reg;
+#else
+#define PTR_XO32_MTC0(top, bot, reg)	\
+	mtc0	top, reg;		\
+	mthc0	bot, reg;
+
+#define PTR_XO32_MFC0(top, bot, reg)	\
+	mfc0	top, reg;		\
+	mfhc0	bot, reg;
+#endif
+
+
+/*
+ * void m64_tlbwi2(tlbhi64_t hi, tlblo64_t lo0, tlblo64_t lo1, unsigned long long mask,
+ *		   unsigned int index)
+ *
+ * Writes hi, lo0, lo1 and mask into the TLB entry specified by index.
+ *
+ */
+LEAF(m64_tlbwi2)
+#if _MIPS_SIM==_ABIO32
+	PTR_XO32_MTC0(a0, a1, C0_ENTRYHI)
+	PTR_XO32_MTC0(a2, a3, C0_ENTRYLO0)
+
+	PTR_L	t1, 20(sp)
+	PTR_L	t2, 16(sp)
+	PTR_XO32_MTC0(t2, t1, C0_ENTRYLO1)
+
+	PTR_L	t1, 28(sp)
+	PTR_L	t2, 24(sp)
+	PTR_XO32_MTC0(t2, t1, C0_PAGEMASK)
+
+	lw	ta0, 32(sp)
+#else /* _MIPS_SIM==N32 || _MIPS_SIM==N64 */
+	PTR_MTC0 a0, C0_ENTRYHI
+	PTR_MTC0 a1, C0_ENTRYLO0
+	PTR_MTC0 a2, C0_ENTRYLO1
+	PTR_MTC0 a3, C0_PAGEMASK
+#endif
+
+	mtc0	ta0, C0_INDEX
+	ehb			# mtc0, hazard barrier on tlbwi
+	tlbwi
+	.set	push
+	.set	noreorder
+	jr.hb	ra
+	nop
+	.set	pop
+
+END(m64_tlbwi2)
+
+/*
+ * void m64_tlbwr2(tlbhi64_t hi, tlblo64_t lo0, tlblo64_t lo1, unsigned long long mask)
+ *
+ * Writes hi, lo0, lo1 and mask into the TLB entry specified by the
+ * Random register.
+ *
+ */
+LEAF(m64_tlbwr2)
+#if _MIPS_SIM==_ABIO32
+	PTR_XO32_MTC0(a0, a1, C0_ENTRYHI)
+	PTR_XO32_MTC0(a2, a3, C0_ENTRYLO0)
+
+	PTR_L	t1, 20(sp)
+	PTR_L	t2, 16(sp)
+	PTR_XO32_MTC0(t2, t1, C0_ENTRYLO1)
+
+	PTR_L	t1, 28(sp)
+	PTR_L	t2, 24(sp)
+	PTR_XO32_MTC0(t2, t1, C0_PAGEMASK)
+#else
+	PTR_MTC0 a0, C0_ENTRYHI
+	PTR_MTC0 a1, C0_ENTRYLO0
+	PTR_MTC0 a2, C0_ENTRYLO1
+	PTR_MTC0 a3, C0_PAGEMASK
+#endif
+	ehb
+	tlbwr
+	.set	push
+	.set	noreorder
+	jr.hb	ra
+	nop
+	.set	pop
+
+END(m64_tlbwr2)
+
+/*
+ * int m64_tlbrwr2(tlbhi64_t hi, tlblo64_t lo0, tlblo64_t lo1, unsigned long long mask)
+ *
+ * Probes the TLB for an entry matching hi and if present rewrites that
+ * entry, otherwise updates a random entry. A safe way to update the TLB.
+ *
+ */
+LEAF(m64_tlbrwr2)
+#if _MIPS_SIM==_ABIO32
+	PTR_XO32_MFC0(t1, t0, C0_ENTRYHI)
+	PTR_XO32_MTC0(a0, a1, C0_ENTRYHI)
+#else /* _MIPS_SIM==_ABIO32 */
+	PTR_MFC0 t0, C0_ENTRYHI
+	PTR_MTC0 a0, C0_ENTRYHI
+#endif /* _MIPS_SIM==_ABIO32 */
+	ehb		# mtc0, hazard on tlbp
+
+	tlbp
+	ehb		# tlbp, hazard on mfc0 C0_INDEX
+
+	mfc0	v0, C0_INDEX
+#if _MIPS_SIM==_ABIO32
+	PTR_XO32_MTC0(a2, a3, C0_ENTRYLO0)
+
+	PTR_L	t2, 20(sp)
+	PTR_L	t3, 16(sp)
+	PTR_XO32_MTC0(t3, t2, C0_ENTRYLO1)
+
+	PTR_L	t2, 28(sp)
+	PTR_L	t3, 24(sp)
+	PTR_XO32_MTC0(t3, t2, C0_PAGEMASK)
+#else /* _MIPS_SIM==_ABIO32 */
+	PTR_MTC0 a1, C0_ENTRYLO0
+	PTR_MTC0 a2, C0_ENTRYLO1
+	PTR_MTC0 a3, C0_PAGEMASK
+#endif /* _MIPS_SIM==_ABIO32 */
+	ehb		# mtc0, hazard on tlbwi
+	bltz	v0, 1f	# no matching entry
+
+	tlbwi
+#if _MIPS_SIM==_ABIO32
+	PTR_XO32_MTC0(t1, t0, C0_ENTRYHI)
+#else /* _MIPS_SIM==_ABIO32 */
+	PTR_MTC0 t0, C0_ENTRYHI
+#endif /* _MIPS_SIM==_ABIO32 */
+	.set	push
+	.set	noreorder
+	jr.hb	ra
+	nop
+	.set	pop
+
+1:	tlbwr
+#if _MIPS_SIM==_ABIO32
+	PTR_XO32_MTC0(t1, t0, C0_ENTRYHI)
+#else /* _MIPS_SIM==_ABIO32 */
+	PTR_MTC0 t0, C0_ENTRYHI
+#endif /* _MIPS_SIM==_ABIO32 */
+	.set	push
+	.set	noreorder
+	jr.hb	ra
+	nop
+	.set	pop
+
+END(m64_tlbrwr2)
+
+/*
+ * void m64_tlbri2(tlbhi64_t *phi, tlblo64_t *plo0, tlblo64_t *plo1,
+ *		   unsigned long long * mask, unsigned int index)
+ *
+ * Reads the TLB entry specified by index, and returns the EntryHi, EntryLo0,
+ * EntryLo1 and PageMask parts in *phi, *plo0, *plo1 and *mask respectively.
+ *
+ */
+LEAF(m64_tlbri2)
+#if _MIPS_SIM==_ABIO32
+	PTR_L   ta0, 16(sp)	# index
+#endif /* _MIPS_SIM==_ABIO32 */
+	mtc0	ta0, C0_INDEX
+	ehb			# mtc0, hazard on tlbr
+
+	tlbr
+	ehb			# tlbr, hazard on entry*, pagemask
+#if _MIPS_SIM==_ABIO32
+	PTR_XO32_MFC0(t0, t1, C0_ENTRYHI)
+	PTR_XO32_MFC0(t2, t3, C0_ENTRYLO0)
+	PTR_XO32_MFC0(t4, t5, C0_ENTRYLO1)
+	PTR_XO32_MFC0(t6, t7, C0_PAGEMASK)
+	PTR_S	t0, 0(a0)
+	PTR_S	t1, 4(a0)
+	PTR_S	t2, 0(a1)
+	PTR_S	t3, 4(a1)
+	PTR_S	t4, 0(a2)
+	PTR_S	t5, 4(a2)
+	PTR_S	t6, 0(a3)
+	PTR_S	t7, 4(a3)
+#else /* _MIPS_SIM==_ABIO32 */
+	PTR_MFC0 t0, C0_ENTRYHI
+	PTR_MFC0 t1, C0_ENTRYLO0
+	PTR_MFC0 t2, C0_ENTRYLO1
+	PTR_MFC0 t3, C0_PAGEMASK
+	PTR_S	t0, 0(a0)
+	PTR_S	t1, 0(a1)
+	PTR_S	t2, 0(a2)
+	PTR_S	t3, 0(a3)
+#endif /* _MIPS_SIM==_ABIO32 */
+	jr	ra
+END(m64_tlbri2)
+
+/*
+ * int m64_tlbprobe2(tlbhi64_t hi, tlblo64_t *lo0, tlblo64_t *lo1,
+ * 	unsigned int *mask)
+ *
+ * Probes the TLB for an entry matching hi and returns its index, or -1 if
+ * not found. If found, then the EntryLo0, EntryLo1 and PageMask parts of the
+ * entry are also returned in *plo0, *plo1 and *pmsk respectively.
+ *
+ */
+LEAF(m64_tlbprobe2)
+#if _MIPS_SIM==_ABIO32
+	PTR_XO32_MFC0(t9, t8, C0_ENTRYHI)
+	PTR_L	t0, 16(sp)
+	PTR_XO32_MTC0(a0, a1, C0_ENTRYHI)
+
+#else /* _MIPS_SIM==_ABIO32 */
+	PTR_MFC0 t8, C0_ENTRYHI
+	PTR_MTC0 a0, C0_ENTRYHI
+#endif /* _MIPS_SIM==_ABIO32 */
+	ehb			# mtc0, hazard on tlbp
+
+	tlbp
+	ehb			# tlbp, hazard on index.
+
+	mfc0	v0, C0_INDEX
+	bltz	v0, 1f		# Return -1 if not found.
+
+	tlbr
+	ehb			# tlbr, hazard on entry*, pagemask
+# return entrylo0, entrylo1, pagemask
+#if _MIPS_SIM==_ABIO32
+	PTR_XO32_MFC0(t2, t3, C0_ENTRYLO0)
+	PTR_XO32_MFC0(t4, t5, C0_ENTRYLO1)
+	PTR_XO32_MFC0(t6, t7, C0_PAGEMASK)
+	PTR_S	t2, 0(a2)
+	PTR_S	t3, 4(a2)
+	PTR_S	t4, 0(a3)
+	PTR_S	t5, 4(a3)
+	PTR_S	t6, 0(t0)
+	PTR_S	t7, 4(t0)
+#else /* _MIPS_SIM==_ABIO32 */
+	PTR_MFC0 t1, C0_ENTRYLO0
+	PTR_MFC0 t2, C0_ENTRYLO1
+	PTR_MFC0 t3, C0_PAGEMASK
+	PTR_S	t1, 0(a1)
+	PTR_S	t2, 0(a2)
+	PTR_S	t3, 0(a3)
+#endif /* _MIPS_SIM==_ABIO32 */
+
+#if _MIPS_SIM==_ABIO32
+	PTR_XO32_MTC0(t9, t8, C0_ENTRYHI)
+#else /* _MIPS_SIM==_ABIO32 */
+	PTR_MTC0 t8, C0_ENTRYHI
+#endif /* _MIPS_SIM==_ABIO32 */
+	.set	push
+	.set	noreorder
+	jr.hb	ra
+	nop
+	.set	pop
+
+1:	li	v0,-1
+#if _MIPS_SIM==_ABIO32
+	PTR_XO32_MTC0(t9, t8, C0_ENTRYHI)
+#else /* _MIPS_SIM==_ABIO32 */
+	PTR_MTC0 t8, C0_ENTRYHI
+#endif /* _MIPS_SIM==_ABIO32 */
+
+	.set	push
+	.set	noreorder
+	jr.hb	ra
+	nop
+	.set	pop
+
+END(m64_tlbprobe2)
+
+/*
+ * void m64_tlbinval(tlbhi64_t a0)
+ *
+ * Probes the TLB for an entry matching hi, and if present invalidates it.
+ *
+ */
+LEAF(m64_tlbinval)
+
+#if _MIPS_SIM==_ABIO32
+	PTR_XO32_MFC0(t9, t8, C0_ENTRYHI)
+	PTR_XO32_MTC0(a0, a1, C0_ENTRYHI)
+#else /* _MIPS_SIM==_ABIO32 */
+	PTR_MFC0 t8, C0_ENTRYHI
+	PTR_MTC0 a0, C0_ENTRYHI
+#endif /* _MIPS_SIM==_ABIO32 */
+	ehb			# mtc0, Hazard on tlbp
+
+	tlbp
+	ehb			# tlbp, Hazard on index, entry*
+
+	mfc0	v0, C0_INDEX
+	bltz	v0, 4f
+
+#if _MIPS_SIM==_ABIO32
+	PTR_XO32_MTC0(zero, zero, C0_ENTRYLO0)
+	PTR_XO32_MTC0(zero, zero, C0_ENTRYLO1)
+#else
+	PTR_MTC0 zero, C0_ENTRYLO0
+	PTR_MTC0 zero, C0_ENTRYLO1
+#endif /* _MIPS_SIM==_ABIO32 */
+
+	mfc0	v1, C0_CONFIG3
+	ext	v1, v1, CFG3_M_SHIFT, 1
+	beqz	v1, 2f
+
+	mfc0	v1, C0_CONFIG4
+	ext	v1, v1, CFG4_IE_SHIFT, CFG4_IE_BITS
+	beqz	v1, 2f
+
+	li	v1, C0_ENTRYHI_EHINV_MASK
+	b	3f
+
+#if _MIPS_SIM==_ABIO32
+2:	li	t1, -1
+	li	v1, (KSEG0_BASE - 2<<13)
+5:	addiu	v1, v1, (2<<13)
+	PTR_XO32_MTC0(t1, v1, C0_ENTRYHI)
+	ehb				# mtc0, Hazard on tlbp
+
+	tlbp
+	ehb				# tlbp, hazard on index
+
+	mfc0	t2, C0_INDEX
+	bgez	t2, 5b
+
+	mtc0	v0, C0_INDEX
+3:	PTR_XO32_MTC0(t1, v1, C0_ENTRYHI)
+#else
+2:	li	v1, (KSEG0_BASE-2<<13)	# replace with this
+5:	addiu	v1, v1, (2<<13)
+	PTR_MTC0 v1, C0_ENTRYHI
+	ehb				# mtc0, Hazard on tlbp
+
+	tlbp
+	ehb				# tlbp, hazard on index
+
+	mfc0	t2, C0_INDEX
+	bgez	t2, 5b
+
+	mtc0	v0, C0_INDEX
+3:	PTR_MTC0 v1, C0_ENTRYHI
+#endif
+	ehb			# mtc0, hazard on tlbwi
+
+	tlbwi
+	ehb			# tlbwi, hazard on C0_ENTRYHI
+
+4:
+#if _MIPS_SIM==_ABIO32
+	PTR_XO32_MTC0(t9, t8, C0_ENTRYHI)
+#else /* _MIPS_SIM==_ABIO32 */
+	PTR_MTC0 t8, C0_ENTRYHI
+#endif /* _MIPS_SIM==_ABIO32 */
+	.set	push
+	.set	noreorder
+	jr.hb	ra
+	nop
+	.set	pop
+
+END(m64_tlbinval)
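
The XPA variants above take 64-bit EntryHi/EntryLo/PageMask values even on a 32-bit
ABI; under O32 each 64-bit argument is split across a register pair (endianness
handled by the PTR_XO32_* helpers) with the trailing arguments spilled to the stack,
which the compiler arranges automatically. A hedged sketch of the C-side view, with
the types and prototypes assumed from the assembly and its comments:

    /* Sketch only: types and prototypes assumed from the entry points above. */
    typedef unsigned long long tlbhi64_t;
    typedef unsigned long long tlblo64_t;

    extern int m64_tlbrwr2(tlbhi64_t hi, tlblo64_t lo0, tlblo64_t lo1,
                           unsigned long long mask);

    /* Install a mapping whose physical address needs more than 32 bits (XPA).
       As with the 32-bit variant, a matching entry is rewritten in place and a
       random slot is used otherwise. */
    static void
    install_xpa_mapping(tlbhi64_t hi, tlblo64_t lo0, tlblo64_t lo1)
    {
        (void)m64_tlbrwr2(hi, lo0, lo1, 0ULL /* PageMask: smallest page size */);
    }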
diff --git a/hw/mips-hal/src/arch/mips/mips_cm3_l2size.S b/hw/mips-hal/src/arch/mips/mips_cm3_l2size.S
new file mode 100644
index 000000000..9345e50e7
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/mips_cm3_l2size.S
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2015, Imagination Technologies Limited and/or its
+ *                 affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+.set nomips16
+#include <mips/asm.h>
+#include <mips/regdef.h>
+#include <mips/cm3.h>
+#include <mips/m32c0.h>
+
+#define tmp		t0
+#define cfg		t1
+#define scachesize	t8
+#define slinesize	t9
+#define sways		v0
+#define tmp1		v1
+#define tmp2		a0
+#define tmp3		a1
+#define tmp4		a2
+#define tmp5		a3
+/*
+ * static void __cache_size_hook()
+ *
+ * Routine for calculating L2 cache size from CM3 configuration
+ * registers.  Sizing information is stored directly to memory.
+ *
+ * Do not use tmp3 (reg a1), tmp1 (reg v1) or tmp4 (a2) in this function.
+ */
+
+LEAF(__cache_size_hook)
+
+	# Check if Coherency Manager memory-mapped
+	# Global Configuration Register Space is implemented.
+	mfc0	tmp, C0_CONFIG3
+	ext	tmp, tmp, CFG3_M_SHIFT, 1
+	beqz	tmp, 1f		# Fall back to config2 based L2
+
+	mfc0	tmp, C0_CONFIG4
+	ext	tmp, tmp, CFG4_M_SHIFT, 1
+	beqz	tmp, 1f		# Fall back to config2 based L2
+
+	# Do we have a memory mapped L2 cache config?
+	mfc0	tmp, C0_CONFIG5
+	ext	tmp, tmp, CFG5_L2C_SHIFT, 1
+	bnez	tmp, 2f
+
+1:
+	# Jump to the standard Config2 based scache config
+	j	__def_cache_size_hook
+
+2:
+	# Read CMGCRBase to find CMGCR_BASE_ADDR
+	PTR_MFC0 tmp,C0_CMGCRBASE
+	sll	tmp, tmp, 4
+	lui	tmp2, 0xb000	 # Make it virtual
+	or	tmp, tmp, tmp2
+
+	# Read GCR_L2_CONFIG
+	PTR_L	tmp, GCR_L2_CONFIG(tmp)
+
+	# Extract line size
+	ext	slinesize, tmp, GCR_L2_SL_SHIFT, GCR_L2_SL_BITS
+
+	# Check for no cache
+	beqz	slinesize, 3f
+	li	tmp2, 2
+	sllv	slinesize, tmp2, slinesize	# Now have true L2 line size
+
+	# Extract sets/way
+	ext	sways, tmp, GCR_L2_SS_SHIFT, GCR_L2_SS_BITS
+	li	tmp2, 64
+	sllv	sways, tmp2, sways		# Now we have true L2 sets/way
+
+	# Extract L2 associativity
+	ext	tmp, tmp, GCR_L2_SA_SHIFT, GCR_L2_SA_BITS
+	addiu	tmp, tmp, 1
+	mul	tmp, tmp, sways			# Get total number of sets
+	mul	scachesize, slinesize, tmp	# L2 cache size
+
+	sw	scachesize, mips_scache_size
+	sw	slinesize, mips_scache_linesize
+	sw	sways, mips_scache_ways
+
+3:
+	# Return
+	jr	ra
+END(__cache_size_hook)
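
Both the CM3 path above and the Config2 fallback (__def_cache_size_hook, later in
this PR) publish their results through the mips_scache_* words. A minimal sketch of
consuming them from C; the extern declarations are assumptions based on the 32-bit
stores above, not the canonical HAL header declarations.

    /* Sketch only: these words are written by __cache_size_hook above. */
    extern unsigned int mips_scache_size;       /* total L2 size in bytes (0 if none) */
    extern unsigned int mips_scache_linesize;   /* L2 line size in bytes  (0 if none) */

    /* A zero line size means no L2 cache was detected during sizing. */
    static int
    l2_cache_present(void)
    {
        return mips_scache_linesize != 0;
    }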
diff --git a/hw/mips-hal/src/arch/mips/mips_excpt_boot.S b/hw/mips-hal/src/arch/mips/mips_excpt_boot.S
new file mode 100644
index 000000000..835fe5d71
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/mips_excpt_boot.S
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2014-2015, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+.set nomips16
+#include <mips/asm.h>
+#include <mips/cpu.h>
+#include <mips/hal.h>
+#include <mips/endian.h>
+#include <mips/regdef.h>
+
+	# Create space to store k0, k1, ra and sp
+	.data
+	.global	__start_ctx
+	.balign	SZREG
+__start_ctx:
+	.space	SZREG * 18
+#define	start_ctx_sr	(SZREG * 0)
+#define	start_ctx_s0	(SZREG * 1)
+#define	start_ctx_s1	(SZREG * 2)
+#define	start_ctx_s2	(SZREG * 3)
+#define	start_ctx_s3	(SZREG * 4)
+#define	start_ctx_s4	(SZREG * 5)
+#define	start_ctx_s5	(SZREG * 6)
+#define	start_ctx_s6	(SZREG * 7)
+#define	start_ctx_s7	(SZREG * 8)
+#define	start_ctx_k0	(SZREG * 9)
+#define	start_ctx_k1	(SZREG * 10)
+#define	start_ctx_gp	(SZREG * 11)
+#define	start_ctx_sp	(SZREG * 12)
+#define	start_ctx_fp	(SZREG * 13)
+#define	start_ctx_ra	(SZREG * 14)
+#define	start_ctx_ictl	(SZREG * 15)
+#define	start_ctx_ebase	(SZREG * 16)	/* saved EBASE */
+#define	chain_ebase	(SZREG * 17)	/* chained EBASE */
+
+#if defined (__mips_micromips)
+	.space	SZREG
+#define	start_ctx_conf3	(SZREG * 18)	/* saved Config3 $16,3 for micromips */
+#endif
+
+#
+# FUNCTION:	__register_excpt_boot
+#
+# DESCRIPTION: Save all boot state. Some state is already clobbered:
+#              $4 = Boot ra
+#	       $5 = Boot SR
+#	       $6 = caller's RA to be preserved and returned in $2
+#
+WLEAF(__register_excpt_boot)
+	.set	push
+	.set	noat
+
+	# Save C0_SR IE and BEV
+	LA	$9, __start_ctx
+	REG_S	$4, start_ctx_ra($9)	/* $4 holds $31 */
+	REG_S	$5, start_ctx_sr($9)	/* $5 holds SR */
+
+	REG_S	$16, start_ctx_s0($9)
+	REG_S	$17, start_ctx_s1($9)
+	REG_S	$18, start_ctx_s2($9)
+	REG_S	$19, start_ctx_s3($9)
+	REG_S	$20, start_ctx_s4($9)
+	REG_S	$21, start_ctx_s5($9)
+	REG_S	$22, start_ctx_s6($9)
+	REG_S	$23, start_ctx_s7($9)
+	REG_S	$26, start_ctx_k0($9)
+	REG_S	$27, start_ctx_k1($9)
+	REG_S	$28, start_ctx_gp($9)
+	REG_S	$29, start_ctx_sp($9)
+	REG_S	$30, start_ctx_fp($9)
+
+	mfc0	$12, C0_CONFIG3
+#if defined (__mips_micromips)
+	# Save Config3
+	REG_S	$12, start_ctx_conf3($9)
+#endif
+	mfc0	$12, C0_INTCTL
+	REG_S	$12, start_ctx_ictl($9)
+
+	# Save C0_EBASE
+	PTR_MFC0 $10, C0_EBASE
+	REG_S	$10, start_ctx_ebase($9)
+
+	# Check if we booted with BEV==1
+	lui	$11, %hi(SR_BEV)
+	and	$11, $8, $11
+	beqz	$11, 1f
+
+	# BEV==1 - set chain_ebase to 0xbfc00200
+	# Apply the offset of 0x200 so that the boot vector entries line up
+	# with the offsets in a non-boot vector
+	lui	$10, 0xbfc0
+	ori	$10, $10, 0x200
+
+	# BEV==0 - chain_ebase is the saved C0_EBASE
+1:	REG_S	$10, chain_ebase($9)
+
+	# Return the third argument
+	move	$2, $6
+	jr	$31
+
+	.set	pop
+WEND(__register_excpt_boot)
+
+#
+# FUNCTION:	__return_to_boot (int exit_code)
+#
+# DESCRIPTION: UHI EXIT wasn't handled, return back to caller of _start
+#
+WLEAF(__return_to_boot)
+	.set	push
+	.set	noat
+	# Disable interrupts for safety
+	di
+	ehb
+	# Set BEV=1 to allow changing EBASE
+	mfc0	$9, C0_SR
+	lui	$10, %hi(SR_BEV)
+	or	$9, $9, $10
+	mtc0	$9, C0_SR
+	ehb
+
+	# Restore C0_EBASE
+	LA	$9, __start_ctx
+	REG_L	$9, start_ctx_ebase($9)
+	# Set the write gate to potentially change upper bits
+	ori	$10, $9, EBASE_WG
+	PTR_MTC0 $10, C0_EBASE
+	# Check if the write gate was set on startup
+	andi	$11, $9, EBASE_WG
+	bnez	$11, 1f
+
+	# If write gate wasn't set then clear the write gate again
+	PTR_MTC0 $9, C0_EBASE
+1:	ehb
+
+	# Restore original state
+	LA	$9, __start_ctx
+	REG_L	$16, start_ctx_s0($9)
+	REG_L	$17, start_ctx_s1($9)
+	REG_L	$18, start_ctx_s2($9)
+	REG_L	$19, start_ctx_s3($9)
+	REG_L	$20, start_ctx_s4($9)
+	REG_L	$21, start_ctx_s5($9)
+	REG_L	$22, start_ctx_s6($9)
+	REG_L	$23, start_ctx_s7($9)
+	REG_L	$26, start_ctx_k0($9)
+	REG_L	$27, start_ctx_k1($9)
+	REG_L	$28, start_ctx_gp($9)
+	REG_L	$29, start_ctx_sp($9)
+	REG_L	$30, start_ctx_fp($9)
+	REG_L	$31, start_ctx_ra($9)
+
+#if defined (__mips_micromips)
+	# Restore Config3
+	REG_L	$2, start_ctx_conf3($9)
+	mtc0	$2, C0_CONFIG3
+#endif
+	# Restore IntCtl
+	REG_L	$2, start_ctx_ictl($9)
+	mtc0	$2, C0_INTCTL
+
+	REG_L	$9, start_ctx_sr($9)
+
+	# Restore C0_STATUS IE and BEV to boot value
+	mtc0	$9, C0_SR
+	mtc0	$0, C0_CAUSE
+
+	# Return with exit code
+	move	$2, $4
+	jr.hb	$31
+	.set	pop
+WEND(__return_to_boot)
+
+#
+# FUNCTION:	int __chain_uhi_excpt (struct gpctx *ctx);
+#
+# DESCRIPTION: Call exception handler of the boot
+#
+WLEAF(__chain_uhi_excpt)
+	.set	push
+	.set	noat
+
+	# Move context pointer into position.  Use $3 as scratch
+	# as it is the only register that is clobbered by all
+	# UHI calls and is not used as an input.
+	move	$3, $4
+
+#if (__mips_isa_rev < 6)
+	REG_L	$9, CTX_HI0($3)
+	REG_L	$10, CTX_LO0($3)
+	mthi	$9
+	mtlo	$10
+#endif
+
+	lw	$9, CTX_STATUS($3)
+	mtc0	$9, C0_SR
+	REG_L	$9, CTX_EPC($3)
+	PTR_MTC0 $9, C0_EPC
+	ehb
+
+	# Restore the common context
+	REG_L	$1, CTX_REG(1)($3)
+	REG_L	$2, CTX_REG(2)($3)
+	REG_L	$4, CTX_REG(4)($3)
+	REG_L	$5, CTX_REG(5)($3)
+	REG_L	$6, CTX_REG(6)($3)
+	REG_L	$7, CTX_REG(7)($3)
+	REG_L	$8, CTX_REG(8)($3)
+	REG_L	$9, CTX_REG(9)($3)
+	REG_L	$10, CTX_REG(10)($3)
+	REG_L	$11, CTX_REG(11)($3)
+	REG_L	$12, CTX_REG(12)($3)
+	REG_L	$13, CTX_REG(13)($3)
+	REG_L	$14, CTX_REG(14)($3)
+	REG_L	$15, CTX_REG(15)($3)
+	REG_L	$16, CTX_REG(16)($3)
+	REG_L	$17, CTX_REG(17)($3)
+	REG_L	$18, CTX_REG(18)($3)
+	REG_L	$19, CTX_REG(19)($3)
+	REG_L	$20, CTX_REG(20)($3)
+	REG_L	$21, CTX_REG(21)($3)
+	REG_L	$22, CTX_REG(22)($3)
+	REG_L	$23, CTX_REG(23)($3)
+	REG_L	$24, CTX_REG(24)($3)
+	REG_L	$25, CTX_REG(25)($3)
+	REG_L	$28, CTX_REG(28)($3)
+	REG_L	$29, CTX_REG(29)($3)
+	REG_L	$30, CTX_REG(30)($3)
+	REG_L	$31, CTX_REG(31)($3)
+
+	# Restore chained exception handlers kernel regs
+	LA	$3, __start_ctx
+	REG_L	$26, start_ctx_k0($3)
+	REG_L	$27, start_ctx_k1($3)
+
+#if defined (__mips_micromips)
+	# OR the address with Config3.ISAOnExc bit
+	REG_L	$3, start_ctx_conf3($3)
+	srl	$3, $3, 16
+	andi	$3, $3, 1
+	beqz	$3, 1f
+
+	# Compute exception vector
+	LA	$3, __start_ctx
+	REG_L	$3, chain_ebase($3)
+	PTR_ADDU $3, $3, 0x181		# OR ISAOnExc bit
+
+	# Chain
+	jr	$3
+1:
+	# Compute exception vector
+	LA	$3, __start_ctx
+#endif
+
+	REG_L	$3, chain_ebase($3)
+	PTR_ADDU $3, $3, 0x180
+
+	# Chain
+	jr	$3
+
+	.set	pop
+WEND(__chain_uhi_excpt)
+
+#
+# FUNCTION:	int __get_startup_BEV (void)
+#
+# DESCRIPTION: Return value of BEV flag saved in
+#	       __register_excpt_handler.
+#
+WLEAF(__get_startup_BEV)
+	.set	push
+	.set	noat
+
+	LA	$2, __start_ctx
+	REG_L	$2, start_ctx_sr($2)
+	lui	$3, %hi(SR_BEV)
+	and	$2, $2, $3
+	jr	$31
+
+	.set	pop
+WEND(__get_startup_BEV)
+
+
+EXPORTS(__MIPS_UHI_BAD_POINTER, 32)
+	.ascii "UHI: BAD POINTER\000"
+
+#
+# FUNCTION: __convert_argv_pointers
+#
+# DESCRIPTION: Convert 64bit pointers to 32bit.
+#
+#if _MIPS_SIM==_ABIO32 || _MIPS_SIM==_ABIN32
+WLEAF(__convert_argv_pointers)
+	/* Early out if a0 <= 0 */
+	blez	a0, .Lend
+
+	/* Verify we came from 64-bit mode */
+	LA      t0, __start_ctx
+	REG_L   t0, start_ctx_sr(t0)
+	ext	t1, t0, SR_KX_SHIFT, 1
+	beqz	t1, .Lend
+
+	/* Set up stack pointer */
+	move	t0, a0
+	sll	t1, t0, 2
+	addiu   t1, t1, ALSZ            /* Round to stack alignment */
+	and     t1, t1, ALMASK
+
+	PTR_SUBU sp, sp, t1
+	move	t2, sp
+	move	t3, a1
+	li	t1, -1
+
+.Lloop:
+#if BYTE_ORDER == LITTLE_ENDIAN
+	lw	t8, 0(t3)
+	lw	t9, 4(t3)
+#elif BYTE_ORDER == BIG_ENDIAN
+	lw	t9, 0(t3)
+	lw	t8, 4(t3)
+#else
+#error BYTE_ORDER
+#endif
+	/* Pointer is bad if the upper word (t9) is neither 0 nor 0xFFFFFFFF */
+	beqz	t9, .LGoodp
+	beq	t9, t1, .LGoodp
+	/* Overwrite bad pointer with stock bad value */
+	LA	t8, __MIPS_UHI_BAD_POINTER
+.LGoodp:
+	sw	t8, 0(t2)
+
+	PTR_ADDU t2, t2, 4
+	PTR_ADDU t3, t3, 8
+	addiu	t0, t0, -1
+	bnez	t0, .Lloop
+
+	move	a1, sp
+	PTR_SUBU sp, sp, (NARGSAVE*SZARG)
+
+	move	a2, zero
+.Lend:
+	jr	ra
+WEND(__convert_argv_pointers)
+#endif /* ABI TEST */
diff --git a/hw/mips-hal/src/arch/mips/mips_excpt_entry.S b/hw/mips-hal/src/arch/mips/mips_excpt_entry.S
new file mode 100644
index 000000000..a81e5a1d4
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/mips_excpt_entry.S
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2014-2015, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+# Keep each function in a separate named section
+#define _FUNCTION_SECTIONS_
+.set nomips16
+
+#include <mips/asm.h>
+#include <mips/regdef.h>
+#include <mips/cpu.h>
+#include <mips/hal.h>
+
+# Context size, adjusted for ABI parameter area
+#define ADJ (NARGSAVE * SZARG)
+# Round up to 16-byte boundary (maximum stack alignment required for any
+# supported ABI)
+#define CTX_SIZEROUND ((CTX_SIZE + ALSZ) & ALMASK)
+#define CTX_SIZEADJ (CTX_SIZEROUND + ADJ)
+
+#define e_ISR	s1
+#define e_CR	s3
+#define e_BADV	s4
+#define e_SR	s5
+#define e_EPC	s6
+#define e_RA	s7
+
+# DESCRIPTION: Exception entry point. This is small because it must go at
+#			   EBASE+0x180. It saves enough context to chain onwards to
+#			   __exception_save.
+#
+LEAF(__exception_entry)
+	.set	push
+	.set	noat
+.weak   _mips_tlb_refill
+	_mips_tlb_refill = __exception_save
+__tlb_refill_loop:
+	# Support an alternative entry point at the start of the exception
+	# vector.  Since the exception vector is normally placed first
+	# in the link map this allows a user to start execution from the
+	# same address that an executable is loaded to.
+	LA	k1, __first_boot
+	lw	k1, 0(k1)
+	beqz	k1, 1f
+	# The start code is responsible for clearing __first_boot prior
+	# to installing the exception handlers.
+	j	_start0
+1:
+	LA	k1, _mips_tlb_refill
+	beqz	k1, __tlb_refill_loop
+	jr	k1
+
+	.org 0x80
+.weak   _mips_xtlb_refill
+	_mips_xtlb_refill = __exception_save
+__xtlb_refill_loop:
+	LA	k1, _mips_xtlb_refill
+	beqz	k1, __xtlb_refill_loop
+	jr	k1
+
+	.org 0x100
+.weak   _mips_cache_error
+__cache_error_loop:
+	LA	k1, _mips_cache_error
+	beqz	k1, __cache_error_loop
+	jr	k1
+
+	.org 0x180
+	# Free up k1, deferring sp adjustment until later
+	REG_S	k1, (-CTX_SIZEROUND + CTX_K1)(sp)
+
+	# Use k1 to invoke __exception_save
+	LA	k1, _mips_general_exception
+	jr	k1
+	.set    pop
+END(__exception_entry)
+
+#
+# FUNCTION:	__exception_save
+#
+# DESCRIPTION: Exception context save. Save the context, then fake up a call
+#              frame.
+#
+ANESTED(__exception_save, _mips_general_exception, CTX_SIZEADJ, zero)
+	.globl  __exception_save;
+	.set	push
+	.set	noat
+
+	# k1 is already saved, so use it to save the users sp
+	move	k1, sp
+	# Finally adjust sp
+	PTR_ADDU sp, sp, -CTX_SIZEADJ	# This should be picked up by the backtracer
+
+	# Save context
+	REG_S	$1, CTX_REG(1) + ADJ(sp)
+	REG_S	$2, CTX_REG(2) + ADJ(sp)
+	REG_S	$3, CTX_REG(3) + ADJ(sp)
+	REG_S	$4, CTX_REG(4) + ADJ(sp)
+	REG_S	$5, CTX_REG(5) + ADJ(sp)
+	REG_S	$6, CTX_REG(6) + ADJ(sp)
+	REG_S	$7, CTX_REG(7) + ADJ(sp)
+	REG_S	$8, CTX_REG(8) + ADJ(sp)
+	REG_S	$9, CTX_REG(9) + ADJ(sp)
+	REG_S	$10, CTX_REG(10) + ADJ(sp)
+	REG_S	$11, CTX_REG(11) + ADJ(sp)
+	REG_S	$12, CTX_REG(12) + ADJ(sp)
+	REG_S	$13, CTX_REG(13) + ADJ(sp)
+	REG_S	$14, CTX_REG(14) + ADJ(sp)
+	REG_S	$15, CTX_REG(15) + ADJ(sp)
+	REG_S	$16, CTX_REG(16) + ADJ(sp)
+	REG_S	$17, CTX_REG(17) + ADJ(sp)
+	REG_S	$18, CTX_REG(18) + ADJ(sp)
+	REG_S	$19, CTX_REG(19) + ADJ(sp)
+	REG_S	$20, CTX_REG(20) + ADJ(sp)
+	REG_S	$21, CTX_REG(21) + ADJ(sp)
+	REG_S	$22, CTX_REG(22) + ADJ(sp)
+	REG_S	$23, CTX_REG(23) + ADJ(sp)
+	REG_S	$24, CTX_REG(24) + ADJ(sp)
+	REG_S	$25, CTX_REG(25) + ADJ(sp)
+	REG_S	$26, CTX_REG(26) + ADJ(sp)
+	# k1/$27 has already been saved
+	REG_S	$28, CTX_REG(28) + ADJ(sp)
+	REG_S	k1, CTX_REG(29) + ADJ(sp) # Use saved sp from earlier
+	REG_S	$30, CTX_REG(30) + ADJ(sp)
+	REG_S	$31, CTX_REG(31) + ADJ(sp)
+	PTR_S	$0, CTX_LINK + ADJ(sp) # Clear the link field
+
+#if (__mips_isa_rev < 6)
+	mfhi	$9
+	mflo	$10
+	REG_S	$9, CTX_HI0 + ADJ(sp)
+	REG_S	$10, CTX_LO0 + ADJ(sp)
+#endif
+
+	# Trick the backtracer into stepping back to the point where the exception
+	# occurred.
+	PTR_MFC0 ra, C0_EPC
+	mfc0	e_CR, C0_CR
+	REG_S	ra, CTX_EPC + ADJ(sp)
+
+	# Finish storing the rest of the CP0 registers
+	PTR_MFC0 $9, C0_BADVADDR
+	REG_S	$9, CTX_BADVADDR + ADJ(sp)
+	sw	e_CR, CTX_CAUSE + ADJ(sp)
+
+	move	$11, $0
+	move	$12, $0
+	mfc0	$9, C0_CONFIG3
+	ext	$10, $9, CFG3_BP_SHIFT, 1
+	beqz	$10, 1f
+	mfc0	$11, C0_BADPINSTR
+1:
+	ext	$9, $9, CFG3_BI_SHIFT, 1
+	beqz	$9, 1f
+	mfc0	$12, C0_BADINSTR
+1:
+	sw	$11, CTX_BADPINSTR + ADJ(sp)
+	sw	$12, CTX_BADINSTR + ADJ(sp)
+
+	# Start computing the address of the context for a0
+	move	a0, sp
+
+	# Clear EXL.  Exceptions can now nest.
+	mfc0	e_SR, C0_SR
+	sw	e_SR, CTX_STATUS + ADJ(sp)
+	lui	$9, %hi(~SR_EXL)
+	addiu	$9, $9, %lo(~SR_EXL)
+	and	e_SR, e_SR, $9
+	mtc0	e_SR, C0_SR
+
+	# Manually set up the return address to restore the context below
+	LA	ra, 1f
+	# Extract the cause code
+	and	a1, e_CR, CR_XMASK
+
+	# Finish computing the address of the context for a0
+	addiu	a0, a0, ADJ
+
+	# Shift exception number down into expected range
+	srl	a1, a1, 2
+
+	# Call the handler, indirect through t9 albeit not for any specific
+	# reason
+	LA	t9, _mips_handle_exception
+	jr	t9
+
+1:	# Return point from handler
+	# Load context
+
+#if (__mips_isa_rev < 6)
+	REG_L	$9, CTX_HI0 + ADJ(sp)
+	REG_L	$10, CTX_LO0 + ADJ(sp)
+	mthi	$9
+	mtlo	$10
+#endif
+
+	REG_L	$1, CTX_REG(1) + ADJ(sp)
+	REG_L	$2, CTX_REG(2) + ADJ(sp)
+	REG_L	$3, CTX_REG(3) + ADJ(sp)
+	REG_L	$4, CTX_REG(4) + ADJ(sp)
+	REG_L	$5, CTX_REG(5) + ADJ(sp)
+	REG_L	$6, CTX_REG(6) + ADJ(sp)
+	REG_L	$7, CTX_REG(7) + ADJ(sp)
+	REG_L	$8, CTX_REG(8) + ADJ(sp)
+	REG_L	$9, CTX_REG(9) + ADJ(sp)
+	REG_L	$10, CTX_REG(10) + ADJ(sp)
+	REG_L	$11, CTX_REG(11) + ADJ(sp)
+	REG_L	$12, CTX_REG(12) + ADJ(sp)
+	REG_L	$13, CTX_REG(13) + ADJ(sp)
+	REG_L	$14, CTX_REG(14) + ADJ(sp)
+	REG_L	$15, CTX_REG(15) + ADJ(sp)
+	REG_L	$16, CTX_REG(16) + ADJ(sp)
+	REG_L	$17, CTX_REG(17) + ADJ(sp)
+	REG_L	$18, CTX_REG(18) + ADJ(sp)
+	REG_L	$19, CTX_REG(19) + ADJ(sp)
+	REG_L	$20, CTX_REG(20) + ADJ(sp)
+	REG_L	$21, CTX_REG(21) + ADJ(sp)
+	REG_L	$22, CTX_REG(22) + ADJ(sp)
+	REG_L	$23, CTX_REG(23) + ADJ(sp)
+	REG_L	$24, CTX_REG(24) + ADJ(sp)
+	REG_L	$25, CTX_REG(25) + ADJ(sp)
+	# $26/K0 and $27/K1 are restored with interrupts disabled
+	REG_L	$28, CTX_REG(28) + ADJ(sp)
+	# $29/SP is restored last
+	REG_L	$30, CTX_REG(30) + ADJ(sp)
+	REG_L	$31, CTX_REG(31) + ADJ(sp)
+	di
+	lw	k0, CTX_STATUS + ADJ(sp)
+	REG_L	k1, CTX_EPC + ADJ(sp)
+	mtc0	k0, C0_SR
+	PTR_MTC0 k1, C0_EPC
+	ehb
+	REG_L	k0, CTX_K0 + ADJ(sp)
+	REG_L	k1, CTX_K1 + ADJ(sp)
+	REG_L	sp, CTX_SP + ADJ(sp)
+	# Return from exception
+	eret
+	.set	pop
+END(__exception_save)
diff --git a/hw/mips-hal/src/arch/mips/mips_excpt_handler.c b/hw/mips-hal/src/arch/mips/mips_excpt_handler.c
new file mode 100644
index 000000000..ed5408cdc
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/mips_excpt_handler.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2014-2015, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <mips/cpu.h>
+#include <mips/fpa.h>
+#include <mips/hal.h>
+#include <mips/uhi_syscalls.h>
+
+/* Defined in .ld file */
+extern char __use_excpt_boot[];
+extern char __attribute__((weak)) __flush_to_zero[];
+
+#ifdef VERBOSE_EXCEPTIONS
+/*
+ * Write a string, a formatted number, then a string.
+ */
+static void
+putsnds (const char *pre, reg_t value, int digits, const char *post)
+{
+  char buf[digits];
+  int shift;
+  int idx = 0;
+
+  if (pre != NULL)
+    write (1, pre, strlen (pre));
+
+  for (shift = ((digits - 1) * 4) ; shift >= 0 ; shift -= 4)
+    buf[idx++] = "0123456789ABCDEF"[(value >> shift) & 0xf];
+  write (1, buf, digits);
+
+  if (post != NULL)
+    write (1, post, strlen (post));
+}
+
+static void
+putsns (const char *pre, reg_t value, const char *post)
+{
+  putsnds (pre, value, sizeof (reg_t) * 2, post);
+}
+
+# define WRITE(MSG) write (1, (MSG), strlen (MSG))
+# define PUTSNDS(PRE, VALUE, DIGITS, POST) \
+    putsnds ((PRE), (VALUE), (DIGITS), (POST))
+# define PUTSNS(PRE, VALUE, POST) \
+    putsns ((PRE), (VALUE), (POST))
+
+#else
+
+# define WRITE(MSG)
+# define PUTSNDS(PRE, VALUE, DIGITS, POST)
+# define PUTSNS(PRE, VALUE, POST)
+
+#endif // !VERBOSE_EXCEPTIONS
+
+/* Handle an exception */
+#ifdef VERBOSE_EXCEPTIONS
+void __attribute__((nomips16))
+__exception_handle_verbose (struct gpctx *ctx, int exception)
+#else
+void __attribute__((nomips16))
+__exception_handle_quiet (struct gpctx *ctx, int exception)
+#endif
+{
+  switch (exception)
+    {
+    case EXC_MOD:
+      WRITE ("TLB modification exception\n");
+      break;
+    case EXC_TLBL:
+      PUTSNS ("TLB error on load from 0x", ctx->badvaddr, NULL);
+      PUTSNS (" @0x", ctx->epc, "\n");
+      break;
+    case EXC_TLBS:
+      PUTSNS ("TLB error on store to 0x", ctx->badvaddr, NULL);
+      PUTSNS (" @0x", ctx->epc, "\n");
+      break;
+    case EXC_ADEL:
+      PUTSNS ("Address error on load from 0x", ctx->badvaddr, NULL);
+      PUTSNS (" @0x", ctx->epc, "\n");
+      break;
+    case EXC_ADES:
+      PUTSNS ("Address error on store to 0x", ctx->badvaddr, NULL);
+      PUTSNS (" @0x", ctx->epc, "\n");
+      break;
+    case EXC_IBE:
+      WRITE ("Instruction bus error\n");
+      break;
+    case EXC_DBE:
+      WRITE ("Data bus error\n");
+      break;
+    case EXC_SYS:
+      /* Process a UHI SYSCALL, all other SYSCALLs should have been processed
+	 by our caller.  __use_excpt_boot has following values:
+	 0 = Do not use exception handler present in boot.
+	 1 = Use exception handler present in boot if BEV
+	     is 0 at startup.
+	 2 = Always use exception handler present in boot.   */
+
+      /* Special handling for boot/low level failures.  */
+      if (ctx->t2[1] == __MIPS_UHI_BOOTFAIL)
+	{
+	  switch (ctx->a[0])
+	    {
+	    case __MIPS_UHI_BF_CACHE:
+	      WRITE ("L2 cache configuration error\n");
+	      break;
+	    default:
+	      WRITE ("Unknown boot failure error\n");
+	      break;
+	    }
+
+	  /* These are unrecoverable.  Abort.  */
+	  ctx->epc = (sreg_t)(long)&__exit;
+	  /* Exit code of 255 */
+	  ctx->a[0] = 0xff;
+	  return;
+	}
+
+      if (((long) __use_excpt_boot == 2
+	   || ((long) __use_excpt_boot == 1
+	       && __get_startup_BEV
+	       && __get_startup_BEV () == 0))
+	  && __chain_uhi_excpt)
+	/* This will not return.  */
+	__chain_uhi_excpt (ctx);
+      else
+	__uhi_indirect (ctx);
+      return;
+    case EXC_BP:
+      PUTSNS ("Breakpoint @0x", ctx->epc, "\n");
+      break;
+    case EXC_RI:
+      PUTSNS ("Illegal instruction @0x", ctx->epc, "\n");
+      break;
+    case EXC_CPU:
+      PUTSNS ("Coprocessor unusable @0x", ctx->epc, "\n");
+      break;
+    case EXC_OVF:
+      WRITE ("Overflow\n");
+      break;
+    case EXC_TRAP:
+      WRITE ("Trap\n");
+      break;
+    case EXC_MSAFPE:
+#if !(__mips_isa_rev == 6 && defined (__mips_micromips))
+      if (__flush_to_zero
+	  && (msa_getsr () & FPA_CSR_UNI_X)
+	  && (msa_getsr () & FPA_CSR_FS) == 0)
+	{
+	  unsigned int sr = msa_getsr ();
+	  sr &= ~FPA_CSR_UNI_X;
+	  sr |= FPA_CSR_FS;
+	  msa_setsr (sr);
+	  return;
+	}
+#endif
+      WRITE ("MSA Floating point error\n");
+      break;
+    case EXC_FPE:
+      /* Turn on flush to zero the first time we hit an unimplemented
+	 operation.  If we hit it again then stop.  */
+      if (__flush_to_zero
+	  && (fpa_getsr () & FPA_CSR_UNI_X)
+	  && (fpa_getsr () & FPA_CSR_FS) == 0)
+	{
+	  unsigned int sr = fpa_getsr ();
+	  sr &= ~FPA_CSR_UNI_X;
+	  sr |= FPA_CSR_FS;
+	  fpa_setsr (sr);
+
+	  return;
+	}
+      WRITE ("Floating point error\n");
+      break;
+    case EXC_IS1:
+      WRITE ("Implementation specific exception (16)\n");
+      break;
+    case EXC_IS2:
+      WRITE ("Implementation specific exception (17)\n");
+      break;
+    case EXC_C2E:
+      WRITE ("Precise Coprocessor 2 exception\n");
+      break;
+    case EXC_TLBRI:
+      WRITE ("TLB read inhibit exception\n");
+      break;
+    case EXC_TLBXI:
+      WRITE ("TLB execute inhibit exception\n");
+      break;
+    case EXC_MSAU:
+      PUTSNS ("MSA unusable @0x", ctx->epc, "\n");
+      break;
+    case EXC_MDMX:
+      PUTSNS ("MDMX exception @0x", ctx->epc, "\n");
+      break;
+    case EXC_WATCH:
+      PUTSNS ("Watchpoint @0x", ctx->epc, "\n");
+      break;
+    case EXC_MCHECK:
+      WRITE ("Machine check error\n");
+      break;
+    case EXC_THREAD:
+      WRITE ("Thread exception\n");
+      break;
+    case EXC_DSPU:
+      WRITE ("DSP unusable\n");
+      break;
+    case EXC_RES30:
+      WRITE ("Cache error\n");
+      break;
+    default:
+      PUTSNS ("Unhandled exception ", exception, "\n");
+    }
+
+  /* Dump registers */
+  PUTSNS (" 0:\t", 0, "\t");
+  PUTSNS ("at:\t", ctx->at, "\t");
+  PUTSNS ("v0:\t", ctx->v[0], "\t");
+  PUTSNS ("v1:\t", ctx->v[1], "\n");
+
+  PUTSNS ("a0:\t", ctx->a[0], "\t");
+  PUTSNS ("a1:\t", ctx->a[1], "\t");
+  PUTSNS ("a2:\t", ctx->a[2], "\t");
+  PUTSNS ("a3:\t", ctx->a[3], "\n");
+
+  PUTSNS ("t0:\t", ctx->t[0], "\t");
+  PUTSNS ("t1:\t", ctx->t[1], "\t");
+  PUTSNS ("t2:\t", ctx->t[2], "\t");
+  PUTSNS ("t3:\t", ctx->t[3], "\n");
+
+  PUTSNS ("t4:\t", ctx->t[4], "\t");
+  PUTSNS ("t5:\t", ctx->t[5], "\t");
+  PUTSNS ("t6:\t", ctx->t[6], "\t");
+  PUTSNS ("t7:\t", ctx->t[7], "\n");
+
+  PUTSNS ("s0:\t", ctx->s[0], "\t");
+  PUTSNS ("s1:\t", ctx->s[1], "\t");
+  PUTSNS ("s2:\t", ctx->s[2], "\t");
+  PUTSNS ("s3:\t", ctx->s[3], "\n");
+
+  PUTSNS ("s4:\t", ctx->s[4], "\t");
+  PUTSNS ("s5:\t", ctx->s[5], "\t");
+  PUTSNS ("s6:\t", ctx->s[6], "\t");
+  PUTSNS ("s7:\t", ctx->s[7], "\n");
+
+  PUTSNS ("t8:\t", ctx->t2[0], "\t");
+  PUTSNS ("t9:\t", ctx->t2[1], "\t");
+  PUTSNS ("k0:\t", ctx->k[0], "\t");
+  PUTSNS ("k1:\t", ctx->k[1], "\n");
+
+  PUTSNS ("gp:\t", ctx->gp, "\t");
+  PUTSNS ("sp:\t", ctx->sp, "\t");
+  PUTSNS ("fp:\t", ctx->fp, "\t");
+  PUTSNS ("ra:\t", ctx->ra, "\n");
+
+#if __mips_isa_rev < 6
+  PUTSNS ("hi:\t", ctx->hi, "\t");
+  PUTSNS ("lo:\t", ctx->lo, "\n");
+#endif
+
+  PUTSNS ("epc:     \t", ctx->epc, "\n");
+  PUTSNS ("BadVAddr:\t", ctx->badvaddr, "\n");
+
+  PUTSNDS ("Status:   \t", ctx->status, 8, "\n");
+  PUTSNDS ("Cause:    \t", ctx->cause, 8, "\n");
+  PUTSNDS ("BadInstr: \t", ctx->badinstr, 8, "\n");
+  PUTSNDS ("BadPInstr:\t", ctx->badpinstr, 8, "\n");
+
+  /* Raise UHI exception which may or may not return.  */
+  if (__uhi_exception (ctx, UHI_ABI) != 0)
+    {
+      /* The exception was acknowledged but not handled.  Abort.  */
+      ctx->epc = (sreg_t)(long)&__exit;
+      /* Exit code of 255 */
+      ctx->a[0] = 0xff;
+    }
+}
diff --git a/hw/mips-hal/src/arch/mips/mips_excpt_isr.S b/hw/mips-hal/src/arch/mips/mips_excpt_isr.S
new file mode 100644
index 000000000..29c598271
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/mips_excpt_isr.S
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2014-2015, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#
+# Keep each function in a separate named section
+#define _FUNCTION_SECTIONS_
+.set nomips16
+
+#include <mips/regdef.h>
+#include <mips/asm.h>
+#include <mips/cpu.h>
+
+#define VEC_SPACE (SZPTR * 8)
+
+LEAF(__isr_vec)
+	.set	push
+	.set	noat
+AENT(__isr_vec_sw0)
+.weak   _mips_isr_sw0
+	LA	k1, _mips_isr_sw0
+	beqz	k1, 1f
+	jr	k1
+.org	VEC_SPACE
+AENT(__isr_vec_sw1)
+.weak   _mips_isr_sw1
+	LA	k1, _mips_isr_sw1
+	beqz	k1, 1f
+	jr	k1
+.org	2 * VEC_SPACE
+AENT(__isr_vec_hw0)
+.weak   _mips_isr_hw0
+	LA	k1, _mips_isr_hw0
+	beqz	k1, 1f
+	jr	k1
+.org	3 * VEC_SPACE
+AENT(__isr_vec_hw1)
+.weak   _mips_isr_hw1
+	LA	k1, _mips_isr_hw1
+	beqz	k1, 1f
+	jr	k1
+.org	4 * VEC_SPACE
+AENT(__isr_vec_hw2)
+.weak   _mips_isr_hw2
+	LA	k1, _mips_isr_hw2
+	beqz	k1, 1f
+	jr	k1
+.org	5 * VEC_SPACE
+AENT(__isr_vec_hw3)
+.weak   _mips_isr_hw3
+	LA	k1, _mips_isr_hw3
+	beqz	k1, 1f
+	jr	k1
+.org	6 * VEC_SPACE
+AENT(__isr_vec_hw4)
+.weak   _mips_isr_hw4
+	LA	k1, _mips_isr_hw4
+	beqz	k1, 1f
+	jr	k1
+.org	7 * VEC_SPACE
+AENT(__isr_vec_hw5)
+.weak   _mips_isr_hw5
+	LA	k1, _mips_isr_hw5
+	beqz	k1, 1f
+	jr	k1
+.org	8 * VEC_SPACE
+AENT(__isr_vec_fallback)
+.weak   _mips_interrupt
+1:
+	LA      k1, _mips_interrupt
+	beqz    k1, 1b
+	jr      k1
+	.set    pop
+END(__isr_vec)
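
Each slot above resolves one of the weak _mips_isr_* symbols and falls back to
_mips_interrupt when the specific handler is undefined. A hedged sketch of supplying
one of those handlers from C: the vector jumps to the symbol in exception context,
so the handler must save/restore state and end with eret, which is what GCC's MIPS
"interrupt" function attribute is meant to generate; whether a plain C handler is
sufficient for a given ISR slot is an assumption here, not something this PR states.

    /* Sketch only: overrides the weak _mips_isr_hw0 slot referenced above.
       GCC (MIPS targets) emits the exception prologue/epilogue and eret for
       functions marked with the interrupt attribute. */
    void __attribute__((interrupt))
    _mips_isr_hw0(void)
    {
        /* Acknowledge the interrupting device and do the minimum work here;
           defer anything lengthy to task context. */
    }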
diff --git a/hw/mips-hal/src/arch/mips/mips_excpt_register.S b/hw/mips-hal/src/arch/mips/mips_excpt_register.S
new file mode 100644
index 000000000..499243947
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/mips_excpt_register.S
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2014-2015, Imagination Technologies Limited and/or its
+ *                      affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+.set nomips16
+#include <mips/regdef.h>
+#include <mips/asm.h>
+#include <mips/cpu.h>
+#include <mips/hal.h>
+
+# Used to support an alternate entry point that overlays the TLB refill
+# exception entry point.  This flag must be cleared before exceptions
+# are ready to be handled.
+.data
+EXPORTS(__first_boot, 4)
+	.word	0x1
+
+_TEXT_SECTION
+
+#
+# FUNCTION:	__register_excpt_handler
+#
+# DESCRIPTION: Register __exception_entry at EBASE+0x180. Return the new
+#	       value for C0_SR.
+#
+WLEAF(__register_excpt_handler)
+	.set	push
+	.set	noat
+
+	# Fetch initial status
+	mfc0	$5, C0_SR
+
+	# Get into a sane state.
+	# Important things: base mode is kernel and ERL, ESL, IE are clear
+	# Set BEV=1 to allow changing EBASE later
+	lui	$10, %hi(SR_BEV)
+	mtc0	$10, C0_SR
+	ehb
+
+	# Enable use of a boot state hook
+	# $4 = Boot time RA
+	# $5 = Boot time SR
+	# $6 = Current RA. There is no stack so get the callee to pass this
+	#      back.
+.weak	__register_excpt_boot
+	LA	$9, __register_excpt_boot
+	beqz	$9, 1f
+	move	$6, ra
+	jalr	$9
+	move	ra, $2
+1:
+	# Clear first boot flag
+	LA	$9, __first_boot
+	sw	$0, 0($9)
+
+	mfc0	$12, C0_CONFIG3
+#if defined (__mips_micromips)
+	# Set Config3.ISAOnExc for micromips
+	lui	$4, 1			/* 0x10000 */
+	or	$4, $4, $12
+	mtc0	$4, C0_CONFIG3
+#endif
+
+	# Set desired EBASE
+	LA	$10, __excpt_ebase
+	# Always set the write gate as the requested EBASE may not be in kseg0.
+	# This may or may not exist in hardware but if it doesn't then the
+	# ebase address will simply get masked with inevitable consequences.
+	ori	$10, $10, EBASE_WG
+	PTR_MTC0 $10, C0_EBASE
+	ehb
+
+	# Set up new empty status value
+	move	$2, $0
+
+	# Set up vector spacing
+	LA	$9, __isr_vec_space
+
+	# Check for vectored interrupt support
+	ext	$10, $12, CFG3_VI_SHIFT, 1
+	ext     $11, $12, CFG3_VEIC_SHIFT, 1
+	or	$10, $10, $11
+	# Skip vector spacing setup if neither VINT nor VEIC is present
+	beqz	$10, 1f
+
+	# Set vector spacing
+	mfc0	$10, C0_INTCTL
+	ins	$10, $9, 0, 10
+	mtc0	$10, C0_INTCTL
+	b	2f
+1:
+	# Check non-zero vector spacing without vectored interrupt support.
+	# If so, do not enable interrupts.
+	bnez	$9, 3f
+2:
+	# Turn on use of the special exception vector and enable interrupts
+	lui	$9, %hi(CR_IV)
+	mtc0	$9, C0_CAUSE
+	ehb
+
+	# Check for VEIC and do not enable interrupts if EIC is active
+	ext     $10, $12, CFG3_VEIC_SHIFT, 1
+	bnez	$10, 3f
+
+	# Enable interrupts in the new status value
+	ori	$2, $2, SR_IE
+3:
+	jr	$31
+
+	.set	pop
+WEND(__register_excpt_handler)
diff --git a/hw/mips-hal/src/arch/mips/mips_fp.S b/hw/mips-hal/src/arch/mips/mips_fp.S
new file mode 100644
index 000000000..1e989d4a9
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/mips_fp.S
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2015, Imagination Technologies Limited and/or its
+ *                 affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+.module hardfloat
+.module doublefloat
+.set nomips16
+#include <mips/asm.h>
+#include <mips/regdef.h>
+#include <mips/m32c0.h>
+#include <mips/hal.h>
+
+#undef fp
+
+#
+# FUNCTION:	_fpctx_save
+#
+# DESCRIPTION:	save floating point registers to memory starting at a0
+#
+# RETURNS:	int
+#			0:	No context saved
+#			CTX_*:	Type of context stored
+#
+LEAF(_fpctx_save)
+	PTR_S 	zero, LINKCTX_NEXT(a0)
+	mfc0	t0, C0_STATUS
+	li	t1, SR_CU1
+	and	t1, t0, t1
+	bnez	t1, 1f
+	# FP not enabled, bail out
+	move	v0, zero
+	jr	ra
+
+1:	# Save FP32 base
+	li	t1, SR_FR
+	and	t0, t0, t1
+	cfc1	t2, $31
+	REG_S	t2, FP32CTX_CSR(a0)
+	sdc1	$f0, FP32CTX_0(a0)
+	sdc1	$f2, FP32CTX_2(a0)
+	sdc1	$f4, FP32CTX_4(a0)
+	sdc1	$f6, FP32CTX_6(a0)
+	sdc1	$f8, FP32CTX_8(a0)
+	sdc1	$f10, FP32CTX_10(a0)
+	sdc1	$f12, FP32CTX_12(a0)
+	sdc1	$f14, FP32CTX_14(a0)
+	sdc1	$f16, FP32CTX_16(a0)
+	sdc1	$f18, FP32CTX_18(a0)
+	sdc1	$f20, FP32CTX_20(a0)
+	sdc1	$f22, FP32CTX_22(a0)
+	sdc1	$f24, FP32CTX_24(a0)
+	sdc1	$f26, FP32CTX_26(a0)
+	sdc1	$f28, FP32CTX_28(a0)
+	sdc1	$f30, FP32CTX_30(a0)
+	bnez	t0, 2f
+	li	v0, LINKCTX_TYPE_FP32
+	REG_S	v0, LINKCTX_ID(a0)
+	jr	ra
+
+2:	# Save FP64 extra
+.set	push
+.set	fp=64
+	sdc1	$f1, FP64CTX_1(a0)
+	sdc1	$f3, FP64CTX_3(a0)
+	sdc1	$f5, FP64CTX_5(a0)
+	sdc1	$f7, FP64CTX_7(a0)
+	sdc1	$f9, FP64CTX_9(a0)
+	sdc1	$f11, FP64CTX_11(a0)
+	sdc1	$f13, FP64CTX_13(a0)
+	sdc1	$f15, FP64CTX_15(a0)
+	sdc1	$f17, FP64CTX_17(a0)
+	sdc1	$f19, FP64CTX_19(a0)
+	sdc1	$f21, FP64CTX_21(a0)
+	sdc1	$f23, FP64CTX_23(a0)
+	sdc1	$f25, FP64CTX_25(a0)
+	sdc1	$f27, FP64CTX_27(a0)
+	sdc1	$f29, FP64CTX_29(a0)
+	sdc1	$f31, FP64CTX_31(a0)
+.set	pop
+	li	v0, LINKCTX_TYPE_FP64
+	REG_S	v0, LINKCTX_ID(a0)
+	jr	ra
+END(_fpctx_save)
+
+#
+# FUNCTION:	_fpctx_load
+#
+# DESCRIPTION:	load floating point registers from context chain starting at a0
+#
+# RETURNS:	int
+#			0:	Unrecognised context
+#			CTX_*:	Type of context restored
+#
+LEAF(_fpctx_load)
+	REG_L	v0, LINKCTX_ID(a0)
+	# Detect type
+	li	t0, LINKCTX_TYPE_FP64
+	li	t1, LINKCTX_TYPE_FP32
+	li	t2, SR_CU1
+	beq	v0, t0, 0f
+	beq	v0, t1, 1f
+	# Don't recognise this context, fail
+	move	v0, zero
+	jr	ra
+
+0: 	# FP64 context
+	# Enable CU1
+	di	t3
+	ehb
+	or	t3, t3, t2
+	mtc0	t3, C0_STATUS
+	ehb
+	# Load FP64 extra
+.set	push
+.set	fp=64
+	ldc1	$f1, FP64CTX_1(a0)
+	ldc1	$f3, FP64CTX_3(a0)
+	ldc1	$f5, FP64CTX_5(a0)
+	ldc1	$f7, FP64CTX_7(a0)
+	ldc1	$f9, FP64CTX_9(a0)
+	ldc1	$f11, FP64CTX_11(a0)
+	ldc1	$f13, FP64CTX_13(a0)
+	ldc1	$f15, FP64CTX_15(a0)
+	ldc1	$f17, FP64CTX_17(a0)
+	ldc1	$f19, FP64CTX_19(a0)
+	ldc1	$f21, FP64CTX_21(a0)
+	ldc1	$f23, FP64CTX_23(a0)
+	ldc1	$f25, FP64CTX_25(a0)
+	ldc1	$f27, FP64CTX_27(a0)
+	ldc1	$f29, FP64CTX_29(a0)
+	ldc1	$f31, FP64CTX_31(a0)
+.set	pop
+1: 	# FP32 context
+	# Enable CU1
+	di	t3
+	ehb
+	or	t3, t3, t2
+	mtc0	t3, C0_STATUS
+	ehb
+	# Load FP32 base
+	REG_L	t1, FP32CTX_CSR(a0)
+	ctc1	t1, $31
+	ldc1	$f0, FP32CTX_0(a0)
+	ldc1	$f2, FP32CTX_2(a0)
+	ldc1	$f4, FP32CTX_4(a0)
+	ldc1	$f6, FP32CTX_6(a0)
+	ldc1	$f8, FP32CTX_8(a0)
+	ldc1	$f10, FP32CTX_10(a0)
+	ldc1	$f12, FP32CTX_12(a0)
+	ldc1	$f14, FP32CTX_14(a0)
+	ldc1	$f16, FP32CTX_16(a0)
+	ldc1	$f18, FP32CTX_18(a0)
+	ldc1	$f20, FP32CTX_20(a0)
+	ldc1	$f22, FP32CTX_22(a0)
+	ldc1	$f24, FP32CTX_24(a0)
+	ldc1	$f26, FP32CTX_26(a0)
+	ldc1	$f28, FP32CTX_28(a0)
+	ldc1	$f30, FP32CTX_30(a0)
+	# Return CTX_FP32/64
+	jr	ra
+END(_fpctx_load)
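
A minimal sketch of how a scheduler might use the two routines above for an FPU
context switch. The prototypes, the buffer size, and the alignment are assumptions;
in the real HAL the layout comes from the FP32CTX_*/FP64CTX_* offsets in mips/hal.h,
and both buffers are assumed to have been initialised by an earlier _fpctx_save.

    #include <stdint.h>

    /* Sketch only: prototypes assumed from the assembly above.
       Both return 0 when nothing was saved/recognised, else a LINKCTX_TYPE_* id. */
    extern int _fpctx_save(void *ctx);
    extern int _fpctx_load(void *ctx);

    /* Hypothetical per-task buffer: link header + CSR + 32 double registers,
       rounded up; the real required size is defined by the HAL context layout. */
    struct task_fpctx {
        uint64_t space[36];
    } __attribute__((aligned(8)));

    static void
    fp_context_switch(struct task_fpctx *outgoing, struct task_fpctx *incoming)
    {
        /* Saving is a no-op (returns 0) if the outgoing task never enabled CU1. */
        (void)_fpctx_save(outgoing);
        (void)_fpctx_load(incoming);
    }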
diff --git a/hw/mips-hal/src/arch/mips/mips_l2size.S b/hw/mips-hal/src/arch/mips/mips_l2size.S
new file mode 100644
index 000000000..75f709ea7
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/mips_l2size.S
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2015, Imagination Technologies Limited and/or its
+ *	           affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+.set nomips16
+#include "m32cache.h"
+
+/*
+ * static void __cache_size_hook()
+ *
+ * Internal routine to determine cache sizes by looking at config
+ * registers.  Sizing information is stored directly to memory.
+ *
+ * Do not use tmp3 (reg a1), tmp1 (reg v1) or tmp4 (a2) in this function.
+ */
+.global __def_cache_size_hook
+ALEAF(__def_cache_size_hook, __cache_size_hook)
+
+	# If we are operating with a coherency manager, abort.
+	# Check if we have config 5 register present
+	mfc0	tmp, C0_CONFIG3
+	ext	tmp, tmp, CFG3_M_SHIFT, 1
+	beqz	tmp, 2f
+
+	mfc0	tmp, C0_CONFIG4
+	ext	tmp, tmp, CFG4_M_SHIFT, 1
+	beqz	tmp, 2f
+
+	# Do we have a memory mapped L2 cache config?
+	mfc0	tmp, C0_CONFIG5
+	ext	tmp, tmp, CFG5_L2C_SHIFT, 1
+	beqz	tmp, 2f
+
+	# No CM3 code supplied but we have a memory mapped L2 config
+	# Report a Boot failure through UHI
+	li	t9, 23
+	# Reason - L2 cache config
+	li	a0, 1
+	# Syscall number
+	li	v0, 1
+	# Trigger the UHI operation
+	syscall	1
+	# Should never return
+1:
+	b	1b
+
+2:	mfc0	cfg, C0_CONFIG2
+
+	# Get scache line size (log2)
+	ext	tmp, cfg, CFG2_SL_SHIFT, CFG2_SL_BITS
+	beqz	tmp, 3f		# no s-cache
+	addiu	tmp, tmp, 1
+
+	# Get number of scache ways
+	ext	sways, cfg, CFG2_SA_SHIFT, CFG2_SA_BITS
+	addiu	sways, sways, 1
+	move	scachesize, sways
+
+	# Total scache size = lines/way * linesize * ways
+	li	slinesize, 1
+	sllv	slinesize, slinesize, tmp
+	sllv	scachesize, scachesize, tmp
+
+	# Get scache lines per way
+	ext	tmp, cfg, CFG2_SS_SHIFT, CFG2_SS_BITS
+	addiu	tmp, tmp, 6
+	sllv	scachesize, scachesize, tmp
+
+	sw	scachesize, mips_scache_size
+	sw	slinesize, mips_scache_linesize
+	sw	sways, mips_scache_ways
+3:
+	# Return
+	jr	ra
+END(__def_cache_size_hook)
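
   The sizing logic above decodes Config2: line size is 2^(SL+1) bytes,
   associativity is SA+1, and sets per way is 64 << SS, so the total
   secondary cache size is ways * linesize * sets. A hedged C restatement
   (the helper and struct are illustrative; the bit positions are the
   standard Config2 layout, SA bits 3:0, SL bits 7:4, SS bits 11:8):

       #include <stdint.h>

       struct scache_geom {
           uint32_t linesize;   /* bytes per line              */
           uint32_t ways;       /* associativity               */
           uint32_t total;      /* total secondary cache bytes */
       };

       static struct scache_geom
       decode_config2(uint32_t cfg2)
       {
           struct scache_geom g = { 0, 0, 0 };
           uint32_t sa = (cfg2 >> 0) & 0xf;   /* Config2.SA */
           uint32_t sl = (cfg2 >> 4) & 0xf;   /* Config2.SL */
           uint32_t ss = (cfg2 >> 8) & 0xf;   /* Config2.SS */

           if (sl == 0) {
               return g;                      /* no L2 cache present */
           }
           g.linesize = 1u << (sl + 1);
           g.ways = sa + 1;
           g.total = g.ways * g.linesize * (64u << ss);
           return g;
       }
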
diff --git a/hw/mips-hal/src/arch/mips/mips_msa.S b/hw/mips-hal/src/arch/mips/mips_msa.S
new file mode 100644
index 000000000..f67a13541
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/mips_msa.S
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2015, Imagination Technologies Limited and/or its
+ *                 affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#if __mips_isa_rev < 6 || !defined(__mips_micromips)
+.module hardfloat
+.module doublefloat
+#undef fp
+.module fp=64
+.module msa
+
+.set nomips16
+#include <mips/asm.h>
+#include <mips/regdef.h>
+#include <mips/m32c0.h>
+#include <mips/hal.h>
+
+#
+# FUNCTION:	_msactx_save
+#
+# DESCRIPTION:	save MSA registers to memory starting at a0
+#
+# RETURNS:	int
+#			0:	No context saved
+#			CTX_*:	Type of context stored
+#
+LEAF(_msactx_save)
+	PTR_S	zero, LINKCTX_NEXT(a0)
+	mfc0	t0, C0_CONFIG5
+	ext	t0, t0, CFG5_MSAEN_SHIFT, 1
+	bnez	t0, 1f
+	# MSA not enabled, bail out
+	move	v0, zero
+	jr	ra
+
+	# Save FCSR if necessary
+1:	mfc0	t0, C0_STATUS
+	ext	t1, t0, SR_CU1_SHIFT, 1
+	lui	v0, %hi(LINKCTX_TYPE_MSA)
+	beqz	t1, 2f
+	lui	v0, %hi(LINKCTX_TYPE_FMSA)
+	cfc1	t2, $31
+	REG_S	t2, MSACTX_FCSR(a0)
+	# Save MSA
+2:	ori	v0, v0, %lo(LINKCTX_TYPE_MSA)
+	cfcmsa	t0, $1
+	REG_S	t0, MSACTX_MSACSR(a0)
+	st.d	 $w0, MSACTX_0(a0)
+	st.d	 $w1, MSACTX_1(a0)
+	st.d	 $w2, MSACTX_2(a0)
+	st.d	 $w3, MSACTX_3(a0)
+	st.d	 $w4, MSACTX_4(a0)
+	st.d	 $w5, MSACTX_5(a0)
+	st.d	 $w6, MSACTX_6(a0)
+	st.d	 $w7, MSACTX_7(a0)
+	st.d	 $w8, MSACTX_8(a0)
+	st.d	 $w9, MSACTX_9(a0)
+	st.d	$w10, MSACTX_10(a0)
+	st.d	$w11, MSACTX_11(a0)
+	st.d	$w12, MSACTX_12(a0)
+	st.d	$w13, MSACTX_13(a0)
+	st.d	$w14, MSACTX_14(a0)
+	st.d	$w15, MSACTX_15(a0)
+	st.d	$w16, MSACTX_16(a0)
+	st.d	$w17, MSACTX_17(a0)
+	st.d	$w18, MSACTX_18(a0)
+	st.d	$w19, MSACTX_19(a0)
+	st.d	$w20, MSACTX_20(a0)
+	st.d	$w21, MSACTX_21(a0)
+	st.d	$w22, MSACTX_22(a0)
+	st.d	$w23, MSACTX_23(a0)
+	st.d	$w24, MSACTX_24(a0)
+	st.d	$w25, MSACTX_25(a0)
+	st.d	$w26, MSACTX_26(a0)
+	st.d	$w27, MSACTX_27(a0)
+	st.d	$w28, MSACTX_28(a0)
+	st.d	$w29, MSACTX_29(a0)
+	st.d	$w30, MSACTX_30(a0)
+	st.d	$w31, MSACTX_31(a0)
+	REG_S	v0, LINKCTX_ID(a0)
+	jr	ra
+END(_msactx_save)
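
   A minimal sketch of how a context-switch path might use _msactx_save,
   assuming a hypothetical task structure (the field name and buffer
   management are illustrative; the buffer must match the HAL's MSACTX_*
   layout, i.e. 32 vector registers plus the MSACSR/FCSR words):

       extern int _msactx_save(void *msactx_buffer);

       struct hypothetical_task {
           void *extra_ctx;   /* head of the task's saved context chain */
       };

       static void
       save_msa_on_switch_out(struct hypothetical_task *t, void *buffer)
       {
           /* _msactx_save clears LINKCTX_NEXT itself and returns 0 when
            * MSA is disabled, so only link the record when something was
            * actually stored. */
           if (_msactx_save(buffer) != 0) {
               t->extra_ctx = buffer;
           }
       }
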
+
+#
+# FUNCTION:	_msactx_load
+#
+# DESCRIPTION:	load MSA/floating point registers from memory starting at a0
+#
+# RETURNS:	int
+#		0:	Unrecognised context
+#		CTX_*:	Type of context restored
+#
+LEAF(_msactx_load)
+	REG_L	v0, LINKCTX_ID(a0)
+	# Detect type
+	li	t0, LINKCTX_TYPE_FMSA
+	li	t1, LINKCTX_TYPE_MSA
+	li	t2, SR_CU1
+	beq	v0, t0, 0f
+	beq	v0, t1, 1f
+	# Don't recognise this context, fail
+	move	v0, zero
+	jr	ra
+
+0:	# FPU+MSA context
+	# Enable CU1
+	di	t3
+	ehb
+	or	t3, t3, t2
+	mtc0	t3, C0_STATUS
+	ehb
+	REG_L	t1, MSACTX_FCSR(a0)
+	ctc1	t1, $31
+1:  # MSA context
+	# Enable MSA
+	li	t3, CFG5_MSAEN
+	mfc0	t2, C0_CONFIG5
+	or	t2, t3, t2
+	mtc0	t2, C0_CONFIG5
+	ehb
+	# Load MSA
+	lw	t3, MSACTX_MSACSR(a0)
+	ctcmsa	$1, t3
+	ld.d	 $w0, MSACTX_0(a0)
+	ld.d	 $w1, MSACTX_1(a0)
+	ld.d	 $w2, MSACTX_2(a0)
+	ld.d	 $w3, MSACTX_3(a0)
+	ld.d	 $w4, MSACTX_4(a0)
+	ld.d	 $w5, MSACTX_5(a0)
+	ld.d	 $w6, MSACTX_6(a0)
+	ld.d	 $w7, MSACTX_7(a0)
+	ld.d	 $w8, MSACTX_8(a0)
+	ld.d	 $w9, MSACTX_9(a0)
+	ld.d	$w10, MSACTX_10(a0)
+	ld.d	$w11, MSACTX_11(a0)
+	ld.d	$w12, MSACTX_12(a0)
+	ld.d	$w13, MSACTX_13(a0)
+	ld.d	$w14, MSACTX_14(a0)
+	ld.d	$w15, MSACTX_15(a0)
+	ld.d	$w16, MSACTX_16(a0)
+	ld.d	$w17, MSACTX_17(a0)
+	ld.d	$w18, MSACTX_18(a0)
+	ld.d	$w19, MSACTX_19(a0)
+	ld.d	$w20, MSACTX_20(a0)
+	ld.d	$w21, MSACTX_21(a0)
+	ld.d	$w22, MSACTX_22(a0)
+	ld.d	$w23, MSACTX_23(a0)
+	ld.d	$w24, MSACTX_24(a0)
+	ld.d	$w25, MSACTX_25(a0)
+	ld.d	$w26, MSACTX_26(a0)
+	ld.d	$w27, MSACTX_27(a0)
+	ld.d	$w28, MSACTX_28(a0)
+	ld.d	$w29, MSACTX_29(a0)
+	ld.d	$w30, MSACTX_30(a0)
+	ld.d	$w31, MSACTX_31(a0)
+	# Return CTX_(F)MSA
+	jr	ra
+END(_msactx_load)
+
+#endif // __mips_isa_rev < 6 || !defined(__mips_micromips)
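
   Since both loaders return 0 for records they do not recognise, a restore
   path can simply offer every saved record to each of them in turn. A
   sketch, with the struct mirroring the LINKCTX_ID/LINKCTX_NEXT offsets
   used above (the field names here are an assumption):

       extern int _fpctx_load(void *ctx);
       extern int _msactx_load(void *ctx);

       struct linkctx_sketch {
           unsigned long          id;    /* LINKCTX_ID   */
           struct linkctx_sketch *next;  /* LINKCTX_NEXT */
       };

       static void
       restore_chain(struct linkctx_sketch *c)
       {
           for (; c != NULL; c = c->next) {
               if (_msactx_load(c) != 0) {
                   continue;          /* MSA or FPU+MSA record restored */
               }
               (void)_fpctx_load(c);  /* FP32/FP64 record, or ignored   */
           }
       }
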
diff --git a/hw/mips-hal/src/arch/mips/mips_xpa.S b/hw/mips-hal/src/arch/mips/mips_xpa.S
new file mode 100644
index 000000000..865dc8bf9
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/mips_xpa.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015, Imagination Technologies Limited and/or its
+ *                 affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+.set nomips16
+#include <mips/asm.h>
+#include <mips/regdef.h>
+#include <mips/m32c0.h>
+#include <mips/hal.h>
+#include <mips/endian.h>
+
+#
+# FUNCTION:	_xpa_save
+#
+# DESCRIPTION:	save the XPA version of badvaddr.
+#
+# RETURNS:	int
+#
+#			0:	No context saved
+#			CTX_*:	Type of context stored
+#
+LEAF(_xpa_save)
+	PTR_S	zero, LINKCTX_NEXT(a0)
+	# Test for LPA support
+	mfc0	t0, C0_CONFIG3
+	ext	t0, t0, CFG3_LPA_SHIFT, 1
+	beqz	t0, 1f
+	# Test for LPA enabled
+	mfc0	t0, C0_PAGEGRAIN
+	ext	t0, t0, PAGEGRAIN_ELPA_SHIFT, PAGEGRAIN_ELPA_BITS
+	bnez	t0, 2f
+
+	# LPA either unavailable or not enabled
+	# return 0
+1:	move	v0, zero
+	jr	ra
+
+2:	lui	v0, %hi(LINKCTX_TYPE_XPA)
+	addiu	v0, v0, %lo(LINKCTX_TYPE_XPA)
+	mfc0	t0, C0_BADVADDR
+	.set push
+	.set mips32r5
+	.set xpa
+	mfhc0	t1, C0_BADVADDR
+	.set pop
+#if BYTE_ORDER == BIG_ENDIAN
+	sw	t0, XPACTX_BADVADDR(a0)
+	sw	t1, (XPACTX_BADVADDR+4)(a0)
+#else /* BYTE_ORDER == LITTLE_ENDIAN */
+	sw	t1, XPACTX_BADVADDR(a0)
+	sw	t0, (XPACTX_BADVADDR+4)(a0)
+#endif
+	REG_S	v0, LINKCTX_ID(a0)
+	jr ra
+END(_xpa_save)
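
   _xpa_save stores BadVAddr as two 32-bit words whose order depends on
   BYTE_ORDER, with the CP0 low half written first on big-endian targets.
   A sketch of reading the pair back (assumes <mips/endian.h> is usable
   from C, as it is from the assembly above; the helper is illustrative):

       #include <stdint.h>
       #include <mips/endian.h>

       static uint64_t
       xpa_badvaddr(const uint32_t w[2])
       {
       #if BYTE_ORDER == BIG_ENDIAN
           /* low half at offset 0, high half at offset 4 */
           return ((uint64_t)w[1] << 32) | w[0];
       #else
           /* high half at offset 0, low half at offset 4 */
           return ((uint64_t)w[0] << 32) | w[1];
       #endif
       }
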
diff --git a/hw/mips-hal/src/arch/mips/mxxtlb_ops.S b/hw/mips-hal/src/arch/mips/mxxtlb_ops.S
new file mode 100644
index 000000000..ad987667f
--- /dev/null
+++ b/hw/mips-hal/src/arch/mips/mxxtlb_ops.S
@@ -0,0 +1,304 @@
+/*
+ * Copyright 2015, Imagination Technologies Limited and/or its
+ *                 affiliated group companies.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+/*
+ * mxxtlb_ops.S: Generic MIPS TLB support functions
+ *
+ */
+.set nomips16
+#include <mips/m32c0.h>
+#include <mips/asm.h>
+#include <mips/regdef.h>
+
+
+/*
+ * int m64_tlb_size();
+ * int mips_tlb_size();
+ *
+ * Return number of entries in TLB.
+ * Entries in v0, number of sets in v1.
+ * Must not use registers t8 or a3
+ */
+LEAF(mips_tlb_size)
+AENT(m64_tlb_size)
+	/* first see if we've got a TLB */
+	mfc0	t0, C0_CONFIG
+	mfc0	t1, C0_CONFIG1
+	move	v0, zero
+
+	ext	t0, t0, CFG0_MT_SHIFT, CFG0_MT_BITS
+	# No MMU test, 0 entries
+	beqz	t0, 9f
+
+	# Fixed Address Translation, 0 entries
+	li	t2, (CFG0_MT_FIXED >> CFG0_MT_SHIFT)
+	beq	t0, t2, 9f
+
+	# Block Address Translator, 0 entries
+	li	t2, (CFG0_MT_BAT >> CFG0_MT_SHIFT)
+	beq	t0, t2, 9f
+
+	# (D)TLB or not ?
+	andi	t2, t0, (CFG0_MT_TLB | CFG0_MT_DUAL) >> CFG0_MT_SHIFT
+	beqz	t2, 9f
+
+	# As per PRA, field holds No. of entries -1
+	# Standard TLBs and Dual TLBs have extension fields.
+	ext	v0, t1, CFG1_MMUS_SHIFT, CFG1_MMUS_BITS
+	addiu	v0, v0, 1
+
+	mfc0	t1, C0_CONFIG3
+	ext	t1, t1, CFG3_M_SHIFT, 1
+	beqz	t1, 9f
+
+	mfc0	t1, C0_CONFIG4
+#if __mips_isa_rev < 6
+	ext	t3, t1, CFG4_MMUED_SHIFT, CFG4_MMUED_BITS
+
+	li	t2, (CFG4_MMUED_FTLBVEXT >> CFG4_MMUED_SHIFT)
+	beq	t3, t2, 8f			# FTLB + VTLBExt
+
+	li	t2, (CFG4_MMUED_SIZEEXT >> CFG4_MMUED_SHIFT)
+	beq	t3, t2, 7f			# SizeExt for VTLBEXT
+
+	beqz	t3, 9f				# Reserved, nothing more to do
+
+	b	10f				# FTLB Size
+7:
+	ext	t3, t1, CFG4_MMUSE_SHIFT, CFG4_MMUSE_BITS
+	sll	t2, t3, CFG1_MMUS_BITS
+	addu	v0, v0, t2
+	b	9f
+#endif /* __mips_isa_rev < 6 */
+8:
+	ext	t2, t1, CFG4_VTLBSEXT_SHIFT, CFG4_VTLBSEXT_BITS
+	sll	t2, t2, CFG1_MMUS_BITS
+	addu	v0, v0, t2
+10:
+	# Skip FTLB size calc if Config MT != 4
+	li	t3, (CFG0_MT_DUAL >> CFG0_MT_SHIFT)
+	bne	t3, t0, 9f
+
+	# Ways
+	li	t2, 2
+	ext	t3, t1, CFG4_FTLBW_SHIFT, CFG4_FTLBW_BITS
+	addu	t2, t2, t3
+
+	# Sets per way
+	ext	t3, t1, CFG4_FTLBS_SHIFT, CFG4_FTLBS_BITS
+	li	v1, 1
+	sllv	v1, v1, t3
+
+	# Total sets
+	sllv	t2, t2, t3
+	addu	v0, v0, t2
+
+9:	jr	ra
+END(mips_tlb_size)
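
   Spelling the arithmetic out: the VTLB contributes Config1.MMUSize + 1
   entries (plus any VTLBSExt extension), and a dual FTLB contributes
   (FTLBWays + 2) ways times (1 << FTLBSets) sets per way. A worked example
   with illustrative field values (not read from real hardware):

       static unsigned
       tlb_entries_example(void)
       {
           unsigned mmusize   = 63;   /* Config1.MMUSize          */
           unsigned ftlb_ways = 2;    /* Config4.FTLBWays: 2 + n  */
           unsigned ftlb_sets = 7;    /* Config4.FTLBSets: 1 << n */

           unsigned vtlb = mmusize + 1;                         /* 64      */
           unsigned ftlb = (2 + ftlb_ways) * (1u << ftlb_sets); /* 4 * 128 */

           return vtlb + ftlb;                                  /* 576     */
       }
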
+
+/*
+ * void m64_tlbinvalall()
+ * void mips_tlbinvalall()
+ *
+ * Invalidate the TLB.
+ */
+LEAF(mips_tlbinvalall)
+AENT(m64_tlbinvalall)
+
+	mfc0	t0, C0_CONFIG
+	ext	t0, t0, CFG0_MT_SHIFT, CFG0_MT_BITS
+	# No MMU test, 0 entries
+	beqz	t0, 11f
+
+	# Fixed Address Translation, 0 entries
+	li	t2, (CFG0_MT_FIXED >> CFG0_MT_SHIFT)
+	beq	t0, t2, 11f
+
+	# Block Address Translator, 0 entries
+	li	t2, (CFG0_MT_BAT >> CFG0_MT_SHIFT)
+	beq	t0, t2, 11f
+
+	PTR_MTC0 zero, C0_ENTRYLO0
+	PTR_MTC0 zero, C0_ENTRYLO1
+	PTR_MTC0 zero, C0_PAGEMASK
+
+	// Fetch size & number of sets in v0, v1.
+	move	t8, ra
+	jal	mips_tlb_size
+	move	ra, t8
+
+	mfc0	t9, C0_CONFIG3
+	ext	t9, t9, CFG3_M_SHIFT, 1
+	beqz	t9, 9f
+
+	// If Config4[IE] = 0, use old method for invalidation
+	mfc0	t9, C0_CONFIG4
+	ext     t2, t9, CFG4_IE_SHIFT, CFG4_IE_BITS
+	beqz	t2, 9f
+
+	// If Config4[IE] = 1, EHINV loop.
+	li	t1, (CFG4_IE_EHINV >> CFG4_IE_SHIFT)
+	beq	t1, t2, 14f
+
+	// If Config[MT] = 1,  one instruction required
+	li	t0, (CFG0_MT_TLB >> CFG0_MT_SHIFT)
+	beq	t3, t0, 7f
+
+	// If Config4[IE] = 3, one instruction required
+	li	t1, (CFG4_IE_INVALL >> CFG4_IE_SHIFT)
+	beq	t1, t2, 7f
+
+	// If Config4[IE] = 2, many instructions required
+	// No other options
+	b	8f
+
+7:	# TLB walk done by hardware, Config4[IE] = 3 or Config[MT] = 1
+	mtc0	zero, C0_INDEX
+	ehb
+	.set	push
+	.set	mips32r3
+	.set	eva
+	tlbinvf
+	.set	pop
+	b	11f
+
+8:	/* TLB walk done by software, Config4[IE] = 2, Config[MT] = 4
+	 *
+	 * one TLBINVF is executed with an index in VTLB range to
+	 * invalidate all VTLB entries.
+	 *
+	 * One TLBINVF is executed per FTLB set.
+	 *
+	 * We'll clean out the TLB by computing the Size of the VTLB
+	 * but not add the 1. This will give us a finger that points
+	 * at the last VTLB entry.
+	 */
+
+	# Clear VTLB
+	mtc0	zero, C0_INDEX
+	ehb
+	.set	push
+	.set	mips32r3
+	.set	eva
+	tlbinvf
+	.set	pop
+
+	# v0 contains number of TLB entries
+	# v1 contains number of sets per way
+	lui	t9, %hi(__tlb_stride_length)	# Fetch the tlb stride for
+	addiu	t9, %lo(__tlb_stride_length)	# stepping through FTLB sets.
+	mul	v1, v1, t9
+	subu	t2, v0, v1			# End pointer
+
+12:	subu	v0, v0, t9
+	mtc0	v0, C0_INDEX
+	ehb					# mtc0, hazard on tlbinvf
+	.set	push
+	.set	mips32r3
+	.set	eva
+	tlbinvf
+	.set	pop
+	bne	v0, t2, 12b
+
+	b	11f
+
+14:	/*
+	 * Config4[IE] = 1. EHINV supported, but not tlbinvf.
+	 *
+	 * Invalidate the TLB for R3 onwards by loading EHINV and writing to all
+	 * TLB entries.
+	 */
+
+	move	v1, zero
+	li	t1, C0_ENTRYHI_EHINV_MASK
+	mtc0	t1, C0_ENTRYHI
+15:
+	mtc0	v1, C0_INDEX
+	ehb					# mtc0, hazard on tlbwi
+
+	tlbwi
+	addiu	v1, v1, 1
+	bne	v0, v1, 15b
+
+	b	11f
+
+9:	# Perform a basic invalidation of the TLB for R1 onwards by loading
+	# 0x(FFFFFFFF)KSEG0_BASE into EntryHi and writing it into index 0
+	# incrementing by a pagesize, writing into index 1, etc.
+
+	# If large physical addressing is enabled, load 0xFFFFFFFF
+	# into the top half of EntryHi.
+	move	t0, zero			# t0 == 0 if XPA disabled
+	mfc0	t9, C0_CONFIG3			# or not present.
+	and	t9, t9, CFG3_LPA
+	beqz	t9, 10f
+
+	mfc0	t9, C0_PAGEGRAIN
+	ext	t9, t9, PAGEGRAIN_ELPA_SHIFT, PAGEGRAIN_ELPA_BITS
+	bnez	t9, 10f
+
+	li	t0, -1				# t0 == 0xFFFFFFFF if XPA
+						# is used.
+10:	li	t1, (KSEG0_BASE - 2<<13)
+
+	move	v1, zero
+12:	addiu	t1, t1, (2<<13)
+	PTR_MTC0 t1, C0_ENTRYHI
+
+	beqz	t0, 13f
+	.set	push
+	.set	xpa
+	mthc0	t0, C0_ENTRYHI		# Store 0xFFFFFFFF to upper half of EntryHI
+	.set	pop
+
+13:	ehb				# mtc0, hazard on tlbp
+
+	tlbp				# Probe for a match.
+	ehb				# tlbp, Hazard on mfc0
+
+	mfc0	t8, C0_INDEX
+	bgez	t8, 12b			# Skip this address if it exists.
+
+	mtc0	v1, C0_INDEX
+	ehb				# mtc0, hazard on tlbwi
+
+	tlbwi
+	addiu	v1, v1, 1
+	bne	v0, v1, 12b
+
+11:	PTR_MTC0 zero,C0_ENTRYHI	# Unset EntryHi, upper half is cleared
+					# automatically as mtc0 writes zeroes
+	.set	push
+	.set	noreorder
+	jr.hb	ra
+	nop
+	.set	pop
+END(mips_tlbinvalall)
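
   The invalidation routine picks one of four strategies from Config.MT and
   Config4.IE. A C restatement of that decision, purely illustrative (the
   numeric encodings follow the comments in the assembly above):

       enum tlb_inval_method {
           TLB_INVAL_NONE,         /* no TLB: MT is none, BAT or fixed  */
           TLB_INVAL_LEGACY,       /* probe-and-write unique EntryHi    */
           TLB_INVAL_EHINV_LOOP,   /* Config4.IE == 1: EHINV + tlbwi    */
           TLB_INVAL_ONE_TLBINVF,  /* Config4.IE == 3, or a single VTLB */
           TLB_INVAL_PER_SET,      /* Config4.IE == 2 on dual VTLB/FTLB */
       };

       static enum tlb_inval_method
       pick_tlb_inval(unsigned mt, int has_config4, unsigned ie)
       {
           if (mt == 0 || mt == 2 /* BAT */ || mt == 3 /* fixed */) {
               return TLB_INVAL_NONE;
           }
           if (!has_config4 || ie == 0) {
               return TLB_INVAL_LEGACY;
           }
           if (ie == 1) {
               return TLB_INVAL_EHINV_LOOP;
           }
           if (mt == 1 || ie == 3) {
               return TLB_INVAL_ONE_TLBINVF;
           }
           return TLB_INVAL_PER_SET;   /* ie == 2, MT == 4 (dual) */
       }
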
diff --git a/kernel/os/src/arch/mips/os_arch_mips.c b/kernel/os/src/arch/mips/os_arch_mips.c
index 348fae045..112be4d57 100644
--- a/kernel/os/src/arch/mips/os_arch_mips.c
+++ b/kernel/os/src/arch/mips/os_arch_mips.c
@@ -17,6 +17,7 @@
  * under the License.
  */
 
+
 #include "os/os.h"
 #include "os/os_arch.h"
 #include "syscfg/syscfg.h"
@@ -44,8 +45,9 @@ extern struct os_task g_idle_task;
 void __attribute__((interrupt, keep_interrupts_masked))
 _mips_isr_hw5(void)
 {
-    mips_setcompare(mips_getcompare() + ((MYNEWT_VAL(CLOCK_FREQ) / 2) /
-       OS_TICKS_PER_SEC));
+    unsigned long int inc = (MYNEWT_VAL(CLOCK_FREQ) / 2) / OS_TICKS_PER_SEC;
+    unsigned long int compare = mips_getcompare();
+    mips_setcompare(compare + inc);
     timer_handler();
 }
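
   The rewritten handler advances Compare by (CLOCK_FREQ / 2) /
   OS_TICKS_PER_SEC each tick, since the CP0 Count register typically
   increments at half the CPU clock, hence the divide by two. With purely
   illustrative values (not the Ci40 BSP settings), a 100 MHz clock and
   1000 ticks per second give:

       /* (100000000 / 2) / 1000 == 50000 Count increments per OS tick */
       unsigned long clock_freq    = 100000000UL;  /* assumed */
       unsigned long ticks_per_sec = 1000UL;       /* assumed */
       unsigned long inc = (clock_freq / 2) / ticks_per_sec;  /* 50000 */
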
 
@@ -123,6 +125,7 @@ os_arch_task_stack_init(struct os_task *t, os_stack_t *stack_top, int size)
 void
 os_arch_init(void)
 {
+    /* unmask the timer interrupt (HW5/IM7) and software interrupt 0 (IM0) */
     mips_bissr((1 << 15) | (1 << 8));
     os_init_idle_task();
 }
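
   For reference, mips_bissr((1 << 15) | (1 << 8)) sets two interrupt mask
   bits in the Status register: bit 8 is IM0 (software interrupt 0) and
   bit 15 is IM7, which the core timer interrupt (HW5) is routed to. The
   macro names below are illustrative, not from the BSP:

       #define STATUS_IM0_SW0    (1u << 8)    /* software interrupt 0 */
       #define STATUS_IM7_TIMER  (1u << 15)   /* hardware interrupt 5 */

       /* equivalent to the call above:
        * mips_bissr(STATUS_IM7_TIMER | STATUS_IM0_SW0);
        */
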
diff --git a/kernel/os/src/arch/mips/os_fault.c b/kernel/os/src/arch/mips/os_fault.c
index 5dc952a8b..48591425d 100644
--- a/kernel/os/src/arch/mips/os_fault.c
+++ b/kernel/os/src/arch/mips/os_fault.c
@@ -17,7 +17,6 @@
  * under the License.
  */
 
-#include <console/console.h>
 #include <hal/hal_system.h>
 #ifdef COREDUMP_PRESENT
 #include <coredump/coredump.h>
@@ -82,17 +81,8 @@ void
 __assert_func(const char *file, int line, const char *func, const char *e)
 {
     int sr;
-
     OS_ENTER_CRITICAL(sr);
     (void)sr;
-    console_blocking_mode();
-    console_printf("Assert @ 0x%x\n",
-                   (unsigned int)__builtin_return_address(0));
-    if (hal_debugger_connected()) {
-       /*
-        * If debugger is attached, breakpoint before the trap.
-        */
-    }
     hal_system_reset();
 }
 
@@ -103,17 +93,6 @@ os_default_irq(struct trap_frame *tf)
     struct coredump_regs regs;
 #endif
 
-    console_blocking_mode();
-    console_printf("Unhandled interrupt, exception sp 0x%08lx\n",
-      (uint32_t)tf->ef);
-    console_printf(" r0:0x%08lx  r1:0x%08lx  r2:0x%08lx  r3:0x%08lx\n",
-      tf->ef->r0, tf->ef->r1, tf->ef->r2, tf->ef->r3);
-    console_printf(" r4:0x%08lx  r5:0x%08lx  r6:0x%08lx  r7:0x%08lx\n",
-      tf->r4, tf->r5, tf->r6, tf->r7);
-    console_printf(" r8:0x%08lx  r9:0x%08lx r10:0x%08lx r11:0x%08lx\n",
-      tf->r8, tf->r9, tf->r10, tf->r11);
-    console_printf("r12:0x%08lx  lr:0x%08lx  pc:0x%08lx psr:0x%08lx\n",
-      tf->ef->r12, tf->ef->lr, tf->ef->pc, tf->ef->psr);
 #ifdef COREDUMP_PRESENT
     trap_to_coredump(tf, &regs);
     coredump_dump(&regs, sizeof(regs));


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services