Posted to dev@tomcat.apache.org by rj...@apache.org on 2017/01/02 21:21:09 UTC

svn commit: r1777023 [8/12] - in /tomcat/jk/trunk/native/iis/pcre: ./ doc/ doc/html/ sljit/ testdata/

Modified: tomcat/jk/trunk/native/iis/pcre/sljit/sljitLir.h
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/native/iis/pcre/sljit/sljitLir.h?rev=1777023&r1=1777022&r2=1777023&view=diff
==============================================================================
--- tomcat/jk/trunk/native/iis/pcre/sljit/sljitLir.h (original)
+++ tomcat/jk/trunk/native/iis/pcre/sljit/sljitLir.h Mon Jan  2 21:21:08 2017
@@ -226,7 +226,7 @@ of sljitConfigInternal.h */
 /*  Floating point registers                                             */
 /* --------------------------------------------------------------------- */
 
-/* Each floating point register can store a double or single precision
+/* Each floating point register can store either a 32 bit or a 64 bit
    floating point value. The FR and FS register sets overlap in the same
    way as the R and S register sets. See above. */
 
@@ -271,7 +271,7 @@ struct sljit_memory_fragment {
 	struct sljit_memory_fragment *next;
 	sljit_uw used_size;
 	/* Must be aligned to sljit_sw. */
-	sljit_ub memory[1];
+	sljit_u8 memory[1];
 };
 
 struct sljit_label {
@@ -297,8 +297,8 @@ struct sljit_const {
 };
 
 struct sljit_compiler {
-	sljit_si error;
-	sljit_si options;
+	sljit_s32 error;
+	sljit_s32 options;
 
 	struct sljit_label *labels;
 	struct sljit_jump *jumps;
@@ -312,36 +312,36 @@ struct sljit_compiler {
 	struct sljit_memory_fragment *abuf;
 
 	/* Used scratch registers. */
-	sljit_si scratches;
+	sljit_s32 scratches;
 	/* Used saved registers. */
-	sljit_si saveds;
+	sljit_s32 saveds;
 	/* Used float scratch registers. */
-	sljit_si fscratches;
+	sljit_s32 fscratches;
 	/* Used float saved registers. */
-	sljit_si fsaveds;
+	sljit_s32 fsaveds;
 	/* Local stack size. */
-	sljit_si local_size;
+	sljit_s32 local_size;
 	/* Code size. */
 	sljit_uw size;
 	/* For statistical purposes. */
 	sljit_uw executable_size;
 
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-	sljit_si args;
+	sljit_s32 args;
 #endif
 
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-	sljit_si mode32;
+	sljit_s32 mode32;
 #endif
 
 #if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86)
-	sljit_si flags_saved;
+	sljit_s32 flags_saved;
 #endif
 
 #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
 	/* Constant pool handling. */
 	sljit_uw *cpool;
-	sljit_ub *cpool_unique;
+	sljit_u8 *cpool_unique;
 	sljit_uw cpool_diff;
 	sljit_uw cpool_fill;
 	/* Other members. */
@@ -352,40 +352,40 @@ struct sljit_compiler {
 #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
 	/* Temporary fields. */
 	sljit_uw shift_imm;
-	sljit_si cache_arg;
+	sljit_s32 cache_arg;
 	sljit_sw cache_argw;
 #endif
 
 #if (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2)
-	sljit_si cache_arg;
+	sljit_s32 cache_arg;
 	sljit_sw cache_argw;
 #endif
 
 #if (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64)
-	sljit_si cache_arg;
+	sljit_s32 cache_arg;
 	sljit_sw cache_argw;
 #endif
 
 #if (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
 	sljit_sw imm;
-	sljit_si cache_arg;
+	sljit_s32 cache_arg;
 	sljit_sw cache_argw;
 #endif
 
 #if (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS)
-	sljit_si delay_slot;
-	sljit_si cache_arg;
+	sljit_s32 delay_slot;
+	sljit_s32 cache_arg;
 	sljit_sw cache_argw;
 #endif
 
 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
-	sljit_si delay_slot;
-	sljit_si cache_arg;
+	sljit_s32 delay_slot;
+	sljit_s32 cache_arg;
 	sljit_sw cache_argw;
 #endif
 
 #if (defined SLJIT_CONFIG_TILEGX && SLJIT_CONFIG_TILEGX)
-	sljit_si cache_arg;
+	sljit_s32 cache_arg;
 	sljit_sw cache_argw;
 #endif
 
@@ -396,13 +396,13 @@ struct sljit_compiler {
 #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) \
 		|| (defined SLJIT_DEBUG && SLJIT_DEBUG)
 	/* Local size passed to the functions. */
-	sljit_si logical_local_size;
+	sljit_s32 logical_local_size;
 #endif
 
 #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) \
 		|| (defined SLJIT_DEBUG && SLJIT_DEBUG) \
 		|| (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
-	sljit_si skip_checks;
+	sljit_s32 skip_checks;
 #endif
 };
 
@@ -427,7 +427,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_free
    error code. Thus there is no need for checking the error after every
    call, it is enough to do it before the code is compiled. Removing
    these checks increases the performance of the compiling process. */
-static SLJIT_INLINE sljit_si sljit_get_compiler_error(struct sljit_compiler *compiler) { return compiler->error; }
+static SLJIT_INLINE sljit_s32 sljit_get_compiler_error(struct sljit_compiler *compiler) { return compiler->error; }
 
 /* Sets the compiler error code to SLJIT_ERR_ALLOC_FAILED except
    if an error was detected before. After the error code is set
@@ -448,7 +448,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_
    indicate that there is no more memory (does not set the current error code
    of the compiler to out-of-memory status).
 */
-SLJIT_API_FUNC_ATTRIBUTE void* sljit_alloc_memory(struct sljit_compiler *compiler, sljit_si size);
+SLJIT_API_FUNC_ATTRIBUTE void* sljit_alloc_memory(struct sljit_compiler *compiler, sljit_s32 size);
 
 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
 /* Passing NULL disables verbose. */
@@ -518,9 +518,9 @@ offset 0 is aligned to sljit_d. Otherwis
 /* The local_size must be >= 0 and <= SLJIT_MAX_LOCAL_SIZE. */
 #define SLJIT_MAX_LOCAL_SIZE	65536
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler,
-	sljit_si options, sljit_si args, sljit_si scratches, sljit_si saveds,
-	sljit_si fscratches, sljit_si fsaveds, sljit_si local_size);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
+	sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size);
 
 /* The machine code has a context (which contains the local stack space size,
    number of used registers, etc.) which is initialized by sljit_emit_enter. Several
@@ -532,9 +532,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
    Note: every call of sljit_emit_enter and sljit_set_context overwrites
          the previous context. */
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_set_context(struct sljit_compiler *compiler,
-	sljit_si options, sljit_si args, sljit_si scratches, sljit_si saveds,
-	sljit_si fscratches, sljit_si fsaveds, sljit_si local_size);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
+	sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size);
 
 /* Return from machine code.  The op argument can be SLJIT_UNUSED which means the
    function does not return with anything or any opcode between SLJIT_MOV and
@@ -542,8 +542,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
    is SLJIT_UNUSED, otherwise see below the description about source and
    destination arguments. */
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op,
-	sljit_si src, sljit_sw srcw);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 src, sljit_sw srcw);
 
 /* Fast calling mechanism for utility functions (see SLJIT_FAST_CALL). All registers and
    even the stack frame is passed to the callee. The return address is preserved in
@@ -560,8 +560,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 /* Note: although sljit_emit_fast_return could be replaced by an ijump, it is not suggested,
    since many architectures do clever branch prediction on call / return instruction pairs. */
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw);
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw);
 
 /*
    Source and destination values for arithmetical instructions
@@ -624,31 +624,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 #define SLJIT_MEM2(r1, r2)	(SLJIT_MEM | (r1) | ((r2) << 8))
 #define SLJIT_IMM		0x40
 
-/* Set 32 bit operation mode (I) on 64 bit CPUs. The flag is totally ignored on
-   32 bit CPUs. If this flag is set for an arithmetic operation, it uses only the
-   lower 32 bit of the input register(s), and set the CPU status flags according
-   to the 32 bit result. The higher 32 bits are undefined for both the input and
-   output. However, the CPU might not ignore those higher 32 bits, like MIPS, which
-   expects it to be the sign extension of the lower 32 bit. All 32 bit operations
-   are undefined, if this condition is not fulfilled. Therefore, when SLJIT_INT_OP
-   is specified, all register arguments must be the result of other operations with
-   the same SLJIT_INT_OP flag. In other words, although a register can hold either
-   a 64 or 32 bit value, these values cannot be mixed. The only exceptions are
-   SLJIT_IMOV and SLJIT_IMOVU (SLJIT_MOV_SI/SLJIT_MOVU_SI with SLJIT_INT_OP flag)
-   which can convert any source argument to SLJIT_INT_OP compatible result. This
-   conversion might be unnecessary on some CPUs like x86-64, since the upper 32
-   bit is always ignored. In this case SLJIT is clever enough to not generate any
-   instructions if the source and destination operands are the same registers.
-   Affects sljit_emit_op0, sljit_emit_op1 and sljit_emit_op2. */
-#define SLJIT_INT_OP		0x100
+/* Set 32 bit operation mode (I) on 64 bit CPUs. This flag is ignored on 32
+   bit CPUs. When this flag is set for an arithmetic operation, only the
+   lower 32 bit of the input register(s) are used, and the CPU status flags
+   are set according to the 32 bit result. Although the higher 32 bit of
+   the input and the result registers are not defined by SLJIT, they might be
+   defined by the CPU architecture (e.g. MIPS). To satisfy these requirements
+   all source registers must be computed by operations where this flag is
+   also set. In other words 32 and 64 bit arithmetic operations cannot be
+   mixed. The only exceptions are SLJIT_MOV32 and SLJIT_MOVU32, whose source
+   register can hold any 32 or 64 bit value. This source register is
+   converted to a 32 bit compatible format. SLJIT does not generate any
+   instructions on certain CPUs (e.g. on x86 and ARM) if the source and
+   destination operands are the same registers. Affects sljit_emit_op0,
+   sljit_emit_op1 and sljit_emit_op2. */
+#define SLJIT_I32_OP		0x100
 
-/* Single precision mode (SP). This flag is similar to SLJIT_INT_OP, just
+/* F32 precision mode (SP). This flag is similar to SLJIT_I32_OP, except
    that it applies to floating point registers (it is even the same bit). When
-   this flag is passed, the CPU performs single precision floating point
-   operations. Similar to SLJIT_INT_OP, all register arguments must be the
-   result of other floating point operations with this flag. Affects
+   this flag is passed, the CPU performs 32 bit floating point operations.
+   Similar to SLJIT_I32_OP, all register arguments must be computed by
+   floating point operations where this flag is also set. Affects
    sljit_emit_fop1, sljit_emit_fop2 and sljit_emit_fcmp. */
-#define SLJIT_SINGLE_OP		0x100
+#define SLJIT_F32_OP		0x100
 
 /* Common CPU status flags for all architectures (x86, ARM, PPC)
     - carry flag
@@ -697,43 +695,41 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 /* Flags: - (may destroy flags)
    Unsigned multiplication of SLJIT_R0 and SLJIT_R1.
    Result is placed into SLJIT_R1:SLJIT_R0 (high:low) word */
-#define SLJIT_LUMUL			(SLJIT_OP0_BASE + 2)
+#define SLJIT_LMUL_UW			(SLJIT_OP0_BASE + 2)
 /* Flags: - (may destroy flags)
    Signed multiplication of SLJIT_R0 and SLJIT_R1.
    Result is placed into SLJIT_R1:SLJIT_R0 (high:low) word */
-#define SLJIT_LSMUL			(SLJIT_OP0_BASE + 3)
+#define SLJIT_LMUL_SW			(SLJIT_OP0_BASE + 3)
 /* Flags: I - (may destroy flags)
    Unsigned divide of the value in SLJIT_R0 by the value in SLJIT_R1.
    The result is placed into SLJIT_R0 and the remainder into SLJIT_R1.
    Note: if SLJIT_R1 is 0, the behaviour is undefined. */
-#define SLJIT_UDIVMOD			(SLJIT_OP0_BASE + 4)
-#define SLJIT_IUDIVMOD			(SLJIT_UDIVMOD | SLJIT_INT_OP)
+#define SLJIT_DIVMOD_UW			(SLJIT_OP0_BASE + 4)
+#define SLJIT_DIVMOD_U32		(SLJIT_DIVMOD_UW | SLJIT_I32_OP)
 /* Flags: I - (may destroy flags)
    Signed divide of the value in SLJIT_R0 by the value in SLJIT_R1.
    The result is placed into SLJIT_R0 and the remainder into SLJIT_R1.
    Note: if SLJIT_R1 is 0, the behaviour is undefined.
    Note: if SLJIT_R1 is -1 and SLJIT_R0 is integer min (0x800..00),
          the behaviour is undefined. */
-#define SLJIT_SDIVMOD			(SLJIT_OP0_BASE + 5)
-#define SLJIT_ISDIVMOD			(SLJIT_SDIVMOD | SLJIT_INT_OP)
+#define SLJIT_DIVMOD_SW			(SLJIT_OP0_BASE + 5)
+#define SLJIT_DIVMOD_S32		(SLJIT_DIVMOD_SW | SLJIT_I32_OP)
 /* Flags: I - (may destroy flags)
    Unsigned divide of the value in SLJIT_R0 by the value in SLJIT_R1.
    The result is placed into SLJIT_R0. SLJIT_R1 preserves its value.
-   Note: if SLJIT_R1 is 0, the behaviour is undefined.
-   Note: SLJIT_SDIV is single precision divide. */
-#define SLJIT_UDIVI			(SLJIT_OP0_BASE + 6)
-#define SLJIT_IUDIVI			(SLJIT_UDIVI | SLJIT_INT_OP)
+   Note: if SLJIT_R1 is 0, the behaviour is undefined. */
+#define SLJIT_DIV_UW			(SLJIT_OP0_BASE + 6)
+#define SLJIT_DIV_U32			(SLJIT_DIV_UW | SLJIT_I32_OP)
 /* Flags: I - (may destroy flags)
    Signed divide of the value in SLJIT_R0 by the value in SLJIT_R1.
    The result is placed into SLJIT_R0. SLJIT_R1 preserves its value.
    Note: if SLJIT_R1 is 0, the behaviour is undefined.
    Note: if SLJIT_R1 is -1 and SLJIT_R0 is integer min (0x800..00),
-         the behaviour is undefined.
-   Note: SLJIT_SDIV is single precision divide. */
-#define SLJIT_SDIVI			(SLJIT_OP0_BASE + 7)
-#define SLJIT_ISDIVI			(SLJIT_SDIVI | SLJIT_INT_OP)
+         the behaviour is undefined. */
+#define SLJIT_DIV_SW			(SLJIT_OP0_BASE + 7)
+#define SLJIT_DIV_S32			(SLJIT_DIV_SW | SLJIT_I32_OP)
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op);
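As a quick orientation for the renamed op0 codes, a 32 bit unsigned divide now reads as below. This is an illustrative sketch, not part of the patch; it assumes a valid struct sljit_compiler pointer named compiler.

    /* Divide R0 by R1: quotient goes to R0, remainder to R1.
       The old spelling was SLJIT_IUDIVMOD. */
    sljit_emit_op0(compiler, SLJIT_DIVMOD_U32);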
 
 /* Starting index of opcodes for sljit_emit_op1. */
 #define SLJIT_OP1_BASE			32
@@ -752,188 +748,188 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 /* Flags: - (never set any flags) */
 #define SLJIT_MOV			(SLJIT_OP1_BASE + 0)
 /* Flags: I - (never set any flags) */
-#define SLJIT_MOV_UB			(SLJIT_OP1_BASE + 1)
-#define SLJIT_IMOV_UB			(SLJIT_MOV_UB | SLJIT_INT_OP)
+#define SLJIT_MOV_U8			(SLJIT_OP1_BASE + 1)
+#define SLJIT_MOV32_U8			(SLJIT_MOV_U8 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags) */
-#define SLJIT_MOV_SB			(SLJIT_OP1_BASE + 2)
-#define SLJIT_IMOV_SB			(SLJIT_MOV_SB | SLJIT_INT_OP)
+#define SLJIT_MOV_S8			(SLJIT_OP1_BASE + 2)
+#define SLJIT_MOV32_S8			(SLJIT_MOV_S8 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags) */
-#define SLJIT_MOV_UH			(SLJIT_OP1_BASE + 3)
-#define SLJIT_IMOV_UH			(SLJIT_MOV_UH | SLJIT_INT_OP)
+#define SLJIT_MOV_U16			(SLJIT_OP1_BASE + 3)
+#define SLJIT_MOV32_U16			(SLJIT_MOV_U16 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags) */
-#define SLJIT_MOV_SH			(SLJIT_OP1_BASE + 4)
-#define SLJIT_IMOV_SH			(SLJIT_MOV_SH | SLJIT_INT_OP)
+#define SLJIT_MOV_S16			(SLJIT_OP1_BASE + 4)
+#define SLJIT_MOV32_S16			(SLJIT_MOV_S16 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags)
-   Note: see SLJIT_INT_OP for further details. */
-#define SLJIT_MOV_UI			(SLJIT_OP1_BASE + 5)
-/* No SLJIT_INT_OP form, since it is the same as SLJIT_IMOV. */
+   Note: no SLJIT_MOV32_U32 form, since it is the same as SLJIT_MOV32 */
+#define SLJIT_MOV_U32			(SLJIT_OP1_BASE + 5)
 /* Flags: I - (never set any flags)
-   Note: see SLJIT_INT_OP for further details. */
-#define SLJIT_MOV_SI			(SLJIT_OP1_BASE + 6)
-#define SLJIT_IMOV			(SLJIT_MOV_SI | SLJIT_INT_OP)
+   Note: no SLJIT_MOV32_S32 form, since it is the same as SLJIT_MOV32 */
+#define SLJIT_MOV_S32			(SLJIT_OP1_BASE + 6)
+/* Flags: I - (never set any flags) */
+#define SLJIT_MOV32			(SLJIT_MOV_S32 | SLJIT_I32_OP)
 /* Flags: - (never set any flags) */
 #define SLJIT_MOV_P			(SLJIT_OP1_BASE + 7)
 /* Flags: - (never set any flags) */
 #define SLJIT_MOVU			(SLJIT_OP1_BASE + 8)
 /* Flags: I - (never set any flags) */
-#define SLJIT_MOVU_UB			(SLJIT_OP1_BASE + 9)
-#define SLJIT_IMOVU_UB			(SLJIT_MOVU_UB | SLJIT_INT_OP)
+#define SLJIT_MOVU_U8			(SLJIT_OP1_BASE + 9)
+#define SLJIT_MOVU32_U8			(SLJIT_MOVU_U8 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags) */
-#define SLJIT_MOVU_SB			(SLJIT_OP1_BASE + 10)
-#define SLJIT_IMOVU_SB			(SLJIT_MOVU_SB | SLJIT_INT_OP)
+#define SLJIT_MOVU_S8			(SLJIT_OP1_BASE + 10)
+#define SLJIT_MOVU32_S8			(SLJIT_MOVU_S8 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags) */
-#define SLJIT_MOVU_UH			(SLJIT_OP1_BASE + 11)
-#define SLJIT_IMOVU_UH			(SLJIT_MOVU_UH | SLJIT_INT_OP)
+#define SLJIT_MOVU_U16			(SLJIT_OP1_BASE + 11)
+#define SLJIT_MOVU32_U16			(SLJIT_MOVU_U16 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags) */
-#define SLJIT_MOVU_SH			(SLJIT_OP1_BASE + 12)
-#define SLJIT_IMOVU_SH			(SLJIT_MOVU_SH | SLJIT_INT_OP)
+#define SLJIT_MOVU_S16			(SLJIT_OP1_BASE + 12)
+#define SLJIT_MOVU32_S16		(SLJIT_MOVU_S16 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags)
-   Note: see SLJIT_INT_OP for further details. */
-#define SLJIT_MOVU_UI			(SLJIT_OP1_BASE + 13)
-/* No SLJIT_INT_OP form, since it is the same as SLJIT_IMOVU. */
+   Note: no SLJIT_MOVU32_U32 form, since it is the same as SLJIT_MOVU32 */
+#define SLJIT_MOVU_U32			(SLJIT_OP1_BASE + 13)
 /* Flags: I - (never set any flags)
-   Note: see SLJIT_INT_OP for further details. */
-#define SLJIT_MOVU_SI			(SLJIT_OP1_BASE + 14)
-#define SLJIT_IMOVU			(SLJIT_MOVU_SI | SLJIT_INT_OP)
+   Note: no SLJIT_MOVU32_S32 form, since it is the same as SLJIT_MOVU32 */
+#define SLJIT_MOVU_S32			(SLJIT_OP1_BASE + 14)
+/* Flags: I - (never set any flags) */
+#define SLJIT_MOVU32			(SLJIT_MOVU_S32 | SLJIT_I32_OP)
 /* Flags: - (never set any flags) */
 #define SLJIT_MOVU_P			(SLJIT_OP1_BASE + 15)
 /* Flags: I | E | K */
 #define SLJIT_NOT			(SLJIT_OP1_BASE + 16)
-#define SLJIT_INOT			(SLJIT_NOT | SLJIT_INT_OP)
+#define SLJIT_NOT32			(SLJIT_NOT | SLJIT_I32_OP)
 /* Flags: I | E | O | K */
 #define SLJIT_NEG			(SLJIT_OP1_BASE + 17)
-#define SLJIT_INEG			(SLJIT_NEG | SLJIT_INT_OP)
+#define SLJIT_NEG32			(SLJIT_NEG | SLJIT_I32_OP)
 /* Count leading zeroes
    Flags: I | E | K
    Important note! Sparc 32 does not support K flag, since
    the required popc instruction is introduced only in sparc 64. */
 #define SLJIT_CLZ			(SLJIT_OP1_BASE + 18)
-#define SLJIT_ICLZ			(SLJIT_CLZ | SLJIT_INT_OP)
+#define SLJIT_CLZ32			(SLJIT_CLZ | SLJIT_I32_OP)
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op,
-	sljit_si dst, sljit_sw dstw,
-	sljit_si src, sljit_sw srcw);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 dst, sljit_sw dstw,
+	sljit_s32 src, sljit_sw srcw);
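For comparison, a typical op1 call with the new names (again illustrative only, using the same assumed compiler variable):

    /* Zero extend the low byte of R1 into R0; formerly SLJIT_MOV_UB. */
    sljit_emit_op1(compiler, SLJIT_MOV_U8, SLJIT_R0, 0, SLJIT_R1, 0);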
 
 /* Starting index of opcodes for sljit_emit_op2. */
 #define SLJIT_OP2_BASE			96
 
 /* Flags: I | E | O | C | K */
 #define SLJIT_ADD			(SLJIT_OP2_BASE + 0)
-#define SLJIT_IADD			(SLJIT_ADD | SLJIT_INT_OP)
+#define SLJIT_ADD32			(SLJIT_ADD | SLJIT_I32_OP)
 /* Flags: I | C | K */
 #define SLJIT_ADDC			(SLJIT_OP2_BASE + 1)
-#define SLJIT_IADDC			(SLJIT_ADDC | SLJIT_INT_OP)
+#define SLJIT_ADDC32			(SLJIT_ADDC | SLJIT_I32_OP)
 /* Flags: I | E | U | S | O | C | K */
 #define SLJIT_SUB			(SLJIT_OP2_BASE + 2)
-#define SLJIT_ISUB			(SLJIT_SUB | SLJIT_INT_OP)
+#define SLJIT_SUB32			(SLJIT_SUB | SLJIT_I32_OP)
 /* Flags: I | C | K */
 #define SLJIT_SUBC			(SLJIT_OP2_BASE + 3)
-#define SLJIT_ISUBC			(SLJIT_SUBC | SLJIT_INT_OP)
+#define SLJIT_SUBC32			(SLJIT_SUBC | SLJIT_I32_OP)
 /* Note: integer mul
    Flags: I | O (see SLJIT_C_MUL_*) | K */
 #define SLJIT_MUL			(SLJIT_OP2_BASE + 4)
-#define SLJIT_IMUL			(SLJIT_MUL | SLJIT_INT_OP)
+#define SLJIT_MUL32			(SLJIT_MUL | SLJIT_I32_OP)
 /* Flags: I | E | K */
 #define SLJIT_AND			(SLJIT_OP2_BASE + 5)
-#define SLJIT_IAND			(SLJIT_AND | SLJIT_INT_OP)
+#define SLJIT_AND32			(SLJIT_AND | SLJIT_I32_OP)
 /* Flags: I | E | K */
 #define SLJIT_OR			(SLJIT_OP2_BASE + 6)
-#define SLJIT_IOR			(SLJIT_OR | SLJIT_INT_OP)
+#define SLJIT_OR32			(SLJIT_OR | SLJIT_I32_OP)
 /* Flags: I | E | K */
 #define SLJIT_XOR			(SLJIT_OP2_BASE + 7)
-#define SLJIT_IXOR			(SLJIT_XOR | SLJIT_INT_OP)
+#define SLJIT_XOR32			(SLJIT_XOR | SLJIT_I32_OP)
 /* Flags: I | E | K
    Let bit_length be the length of the shift operation: 32 or 64.
    If src2 is immediate, src2w is masked by (bit_length - 1).
    Otherwise, if the content of src2 is outside the range from 0
    to bit_length - 1, the result is undefined. */
 #define SLJIT_SHL			(SLJIT_OP2_BASE + 8)
-#define SLJIT_ISHL			(SLJIT_SHL | SLJIT_INT_OP)
+#define SLJIT_SHL32			(SLJIT_SHL | SLJIT_I32_OP)
 /* Flags: I | E | K
    Let bit_length be the length of the shift operation: 32 or 64.
    If src2 is immediate, src2w is masked by (bit_length - 1).
    Otherwise, if the content of src2 is outside the range from 0
    to bit_length - 1, the result is undefined. */
 #define SLJIT_LSHR			(SLJIT_OP2_BASE + 9)
-#define SLJIT_ILSHR			(SLJIT_LSHR | SLJIT_INT_OP)
+#define SLJIT_LSHR32			(SLJIT_LSHR | SLJIT_I32_OP)
 /* Flags: I | E | K
    Let bit_length be the length of the shift operation: 32 or 64.
    If src2 is immediate, src2w is masked by (bit_length - 1).
    Otherwise, if the content of src2 is outside the range from 0
    to bit_length - 1, the result is undefined. */
 #define SLJIT_ASHR			(SLJIT_OP2_BASE + 10)
-#define SLJIT_IASHR			(SLJIT_ASHR | SLJIT_INT_OP)
+#define SLJIT_ASHR32			(SLJIT_ASHR | SLJIT_I32_OP)
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op,
-	sljit_si dst, sljit_sw dstw,
-	sljit_si src1, sljit_sw src1w,
-	sljit_si src2, sljit_sw src2w);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 dst, sljit_sw dstw,
+	sljit_s32 src1, sljit_sw src1w,
+	sljit_s32 src2, sljit_sw src2w);
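The op2 renaming follows the same pattern; a hypothetical before/after pair under the same assumptions:

    /* Before this change: sljit_emit_op2(compiler, SLJIT_IADD, ...); */
    /* After: the I prefix (SLJIT_INT_OP) becomes a 32 suffix (SLJIT_I32_OP). */
    sljit_emit_op2(compiler, SLJIT_ADD32,
        SLJIT_R0, 0,   /* dst  */
        SLJIT_R0, 0,   /* src1 */
        SLJIT_R1, 0);  /* src2 */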
 
 /* Returns with non-zero if fpu is available. */
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_is_fpu_available(void);
 
 /* Starting index of opcodes for sljit_emit_fop1. */
 #define SLJIT_FOP1_BASE			128
 
 /* Flags: SP - (never set any flags) */
-#define SLJIT_DMOV			(SLJIT_FOP1_BASE + 0)
-#define SLJIT_SMOV			(SLJIT_DMOV | SLJIT_SINGLE_OP)
+#define SLJIT_MOV_F64			(SLJIT_FOP1_BASE + 0)
+#define SLJIT_MOV_F32			(SLJIT_MOV_F64 | SLJIT_F32_OP)
 /* Convert opcodes: CONV[DST_TYPE].FROM[SRC_TYPE]
    SRC/DST TYPE can be: D - double, S - single, W - signed word, I - signed int
    Rounding mode when the destination is W or I: round towards zero. */
 /* Flags: SP - (never set any flags) */
-#define SLJIT_CONVD_FROMS		(SLJIT_FOP1_BASE + 1)
-#define SLJIT_CONVS_FROMD		(SLJIT_CONVD_FROMS | SLJIT_SINGLE_OP)
+#define SLJIT_CONV_F64_FROM_F32		(SLJIT_FOP1_BASE + 1)
+#define SLJIT_CONV_F32_FROM_F64		(SLJIT_CONV_F64_FROM_F32 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_CONVW_FROMD		(SLJIT_FOP1_BASE + 2)
-#define SLJIT_CONVW_FROMS		(SLJIT_CONVW_FROMD | SLJIT_SINGLE_OP)
+#define SLJIT_CONV_SW_FROM_F64		(SLJIT_FOP1_BASE + 2)
+#define SLJIT_CONV_SW_FROM_F32		(SLJIT_CONV_SW_FROM_F64 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_CONVI_FROMD		(SLJIT_FOP1_BASE + 3)
-#define SLJIT_CONVI_FROMS		(SLJIT_CONVI_FROMD | SLJIT_SINGLE_OP)
+#define SLJIT_CONV_S32_FROM_F64		(SLJIT_FOP1_BASE + 3)
+#define SLJIT_CONV_S32_FROM_F32		(SLJIT_CONV_S32_FROM_F64 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_CONVD_FROMW		(SLJIT_FOP1_BASE + 4)
-#define SLJIT_CONVS_FROMW		(SLJIT_CONVD_FROMW | SLJIT_SINGLE_OP)
+#define SLJIT_CONV_F64_FROM_SW		(SLJIT_FOP1_BASE + 4)
+#define SLJIT_CONV_F32_FROM_SW		(SLJIT_CONV_F64_FROM_SW | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_CONVD_FROMI		(SLJIT_FOP1_BASE + 5)
-#define SLJIT_CONVS_FROMI		(SLJIT_CONVD_FROMI | SLJIT_SINGLE_OP)
+#define SLJIT_CONV_F64_FROM_S32		(SLJIT_FOP1_BASE + 5)
+#define SLJIT_CONV_F32_FROM_S32		(SLJIT_CONV_F64_FROM_S32 | SLJIT_F32_OP)
 /* Note: dst is the left and src is the right operand for SLJIT_CMPD.
    Note: NaN check is always performed. If SLJIT_C_FLOAT_UNORDERED flag
          is set, the comparison result is unpredictable.
    Flags: SP | E | S (see SLJIT_C_FLOAT_*) */
-#define SLJIT_DCMP			(SLJIT_FOP1_BASE + 6)
-#define SLJIT_SCMP			(SLJIT_DCMP | SLJIT_SINGLE_OP)
+#define SLJIT_CMP_F64			(SLJIT_FOP1_BASE + 6)
+#define SLJIT_CMP_F32			(SLJIT_CMP_F64 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_DNEG			(SLJIT_FOP1_BASE + 7)
-#define SLJIT_SNEG			(SLJIT_DNEG | SLJIT_SINGLE_OP)
+#define SLJIT_NEG_F64			(SLJIT_FOP1_BASE + 7)
+#define SLJIT_NEG_F32			(SLJIT_NEG_F64 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_DABS			(SLJIT_FOP1_BASE + 8)
-#define SLJIT_SABS			(SLJIT_DABS | SLJIT_SINGLE_OP)
+#define SLJIT_ABS_F64			(SLJIT_FOP1_BASE + 8)
+#define SLJIT_ABS_F32			(SLJIT_ABS_F64 | SLJIT_F32_OP)
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op,
-	sljit_si dst, sljit_sw dstw,
-	sljit_si src, sljit_sw srcw);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 dst, sljit_sw dstw,
+	sljit_s32 src, sljit_sw srcw);
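An illustrative fop1 conversion with the new CONV_ naming; SLJIT_FR0 is assumed to be the first register of the FR set described earlier:

    /* Convert the signed word in R0 to a 64 bit float in FR0;
       formerly SLJIT_CONVD_FROMW. */
    sljit_emit_fop1(compiler, SLJIT_CONV_F64_FROM_SW, SLJIT_FR0, 0, SLJIT_R0, 0);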
 
 /* Starting index of opcodes for sljit_emit_fop2. */
 #define SLJIT_FOP2_BASE			160
 
 /* Flags: SP - (never set any flags) */
-#define SLJIT_DADD			(SLJIT_FOP2_BASE + 0)
-#define SLJIT_SADD			(SLJIT_DADD | SLJIT_SINGLE_OP)
+#define SLJIT_ADD_F64			(SLJIT_FOP2_BASE + 0)
+#define SLJIT_ADD_F32			(SLJIT_ADD_F64 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_DSUB			(SLJIT_FOP2_BASE + 1)
-#define SLJIT_SSUB			(SLJIT_DSUB | SLJIT_SINGLE_OP)
+#define SLJIT_SUB_F64			(SLJIT_FOP2_BASE + 1)
+#define SLJIT_SUB_F32			(SLJIT_SUB_F64 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_DMUL			(SLJIT_FOP2_BASE + 2)
-#define SLJIT_SMUL			(SLJIT_DMUL | SLJIT_SINGLE_OP)
+#define SLJIT_MUL_F64			(SLJIT_FOP2_BASE + 2)
+#define SLJIT_MUL_F32			(SLJIT_MUL_F64 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_DDIV			(SLJIT_FOP2_BASE + 3)
-#define SLJIT_SDIV			(SLJIT_DDIV | SLJIT_SINGLE_OP)
-
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op,
-	sljit_si dst, sljit_sw dstw,
-	sljit_si src1, sljit_sw src1w,
-	sljit_si src2, sljit_sw src2w);
+#define SLJIT_DIV_F64			(SLJIT_FOP2_BASE + 3)
+#define SLJIT_DIV_F32			(SLJIT_DIV_F64 | SLJIT_F32_OP)
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 dst, sljit_sw dstw,
+	sljit_s32 src1, sljit_sw src1w,
+	sljit_s32 src2, sljit_sw src2w);
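And a two operand floating point example under the same assumptions:

    /* FR0 = FR1 + FR2 in 32 bit (single) precision; formerly SLJIT_SADD. */
    sljit_emit_fop2(compiler, SLJIT_ADD_F32,
        SLJIT_FR0, 0, SLJIT_FR1, 0, SLJIT_FR2, 0);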
 
 /* Label and jump instructions. */
 
@@ -943,58 +939,58 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_la
 
 /* Integer comparison types. */
 #define SLJIT_EQUAL			0
-#define SLJIT_I_EQUAL			(SLJIT_EQUAL | SLJIT_INT_OP)
+#define SLJIT_EQUAL32			(SLJIT_EQUAL | SLJIT_I32_OP)
 #define SLJIT_ZERO			0
-#define SLJIT_I_ZERO			(SLJIT_ZERO | SLJIT_INT_OP)
+#define SLJIT_ZERO32			(SLJIT_ZERO | SLJIT_I32_OP)
 #define SLJIT_NOT_EQUAL			1
-#define SLJIT_I_NOT_EQUAL		(SLJIT_NOT_EQUAL | SLJIT_INT_OP)
+#define SLJIT_NOT_EQUAL32		(SLJIT_NOT_EQUAL | SLJIT_I32_OP)
 #define SLJIT_NOT_ZERO			1
-#define SLJIT_I_NOT_ZERO		(SLJIT_NOT_ZERO | SLJIT_INT_OP)
+#define SLJIT_NOT_ZERO32		(SLJIT_NOT_ZERO | SLJIT_I32_OP)
 
 #define SLJIT_LESS			2
-#define SLJIT_I_LESS			(SLJIT_LESS | SLJIT_INT_OP)
+#define SLJIT_LESS32			(SLJIT_LESS | SLJIT_I32_OP)
 #define SLJIT_GREATER_EQUAL		3
-#define SLJIT_I_GREATER_EQUAL		(SLJIT_GREATER_EQUAL | SLJIT_INT_OP)
+#define SLJIT_GREATER_EQUAL32		(SLJIT_GREATER_EQUAL | SLJIT_I32_OP)
 #define SLJIT_GREATER			4
-#define SLJIT_I_GREATER			(SLJIT_GREATER | SLJIT_INT_OP)
+#define SLJIT_GREATER32			(SLJIT_GREATER | SLJIT_I32_OP)
 #define SLJIT_LESS_EQUAL		5
-#define SLJIT_I_LESS_EQUAL		(SLJIT_LESS_EQUAL | SLJIT_INT_OP)
+#define SLJIT_LESS_EQUAL32		(SLJIT_LESS_EQUAL | SLJIT_I32_OP)
 #define SLJIT_SIG_LESS			6
-#define SLJIT_I_SIG_LESS		(SLJIT_SIG_LESS | SLJIT_INT_OP)
+#define SLJIT_SIG_LESS32		(SLJIT_SIG_LESS | SLJIT_I32_OP)
 #define SLJIT_SIG_GREATER_EQUAL		7
-#define SLJIT_I_SIG_GREATER_EQUAL	(SLJIT_SIG_GREATER_EQUAL | SLJIT_INT_OP)
+#define SLJIT_SIG_GREATER_EQUAL32	(SLJIT_SIG_GREATER_EQUAL | SLJIT_I32_OP)
 #define SLJIT_SIG_GREATER		8
-#define SLJIT_I_SIG_GREATER		(SLJIT_SIG_GREATER | SLJIT_INT_OP)
+#define SLJIT_SIG_GREATER32		(SLJIT_SIG_GREATER | SLJIT_I32_OP)
 #define SLJIT_SIG_LESS_EQUAL		9
-#define SLJIT_I_SIG_LESS_EQUAL		(SLJIT_SIG_LESS_EQUAL | SLJIT_INT_OP)
+#define SLJIT_SIG_LESS_EQUAL32		(SLJIT_SIG_LESS_EQUAL | SLJIT_I32_OP)
 
 #define SLJIT_OVERFLOW			10
-#define SLJIT_I_OVERFLOW		(SLJIT_OVERFLOW | SLJIT_INT_OP)
+#define SLJIT_OVERFLOW32		(SLJIT_OVERFLOW | SLJIT_I32_OP)
 #define SLJIT_NOT_OVERFLOW		11
-#define SLJIT_I_NOT_OVERFLOW		(SLJIT_NOT_OVERFLOW | SLJIT_INT_OP)
+#define SLJIT_NOT_OVERFLOW32		(SLJIT_NOT_OVERFLOW | SLJIT_I32_OP)
 
 #define SLJIT_MUL_OVERFLOW		12
-#define SLJIT_I_MUL_OVERFLOW		(SLJIT_MUL_OVERFLOW | SLJIT_INT_OP)
+#define SLJIT_MUL_OVERFLOW32		(SLJIT_MUL_OVERFLOW | SLJIT_I32_OP)
 #define SLJIT_MUL_NOT_OVERFLOW		13
-#define SLJIT_I_MUL_NOT_OVERFLOW	(SLJIT_MUL_NOT_OVERFLOW | SLJIT_INT_OP)
+#define SLJIT_MUL_NOT_OVERFLOW32	(SLJIT_MUL_NOT_OVERFLOW | SLJIT_I32_OP)
 
 /* Floating point comparison types. */
-#define SLJIT_D_EQUAL			14
-#define SLJIT_S_EQUAL			(SLJIT_D_EQUAL | SLJIT_SINGLE_OP)
-#define SLJIT_D_NOT_EQUAL		15
-#define SLJIT_S_NOT_EQUAL		(SLJIT_D_NOT_EQUAL | SLJIT_SINGLE_OP)
-#define SLJIT_D_LESS			16
-#define SLJIT_S_LESS			(SLJIT_D_LESS | SLJIT_SINGLE_OP)
-#define SLJIT_D_GREATER_EQUAL		17
-#define SLJIT_S_GREATER_EQUAL		(SLJIT_D_GREATER_EQUAL | SLJIT_SINGLE_OP)
-#define SLJIT_D_GREATER			18
-#define SLJIT_S_GREATER			(SLJIT_D_GREATER | SLJIT_SINGLE_OP)
-#define SLJIT_D_LESS_EQUAL		19
-#define SLJIT_S_LESS_EQUAL		(SLJIT_D_LESS_EQUAL | SLJIT_SINGLE_OP)
-#define SLJIT_D_UNORDERED		20
-#define SLJIT_S_UNORDERED		(SLJIT_D_UNORDERED | SLJIT_SINGLE_OP)
-#define SLJIT_D_ORDERED			21
-#define SLJIT_S_ORDERED			(SLJIT_D_ORDERED | SLJIT_SINGLE_OP)
+#define SLJIT_EQUAL_F64			14
+#define SLJIT_EQUAL_F32			(SLJIT_EQUAL_F64 | SLJIT_F32_OP)
+#define SLJIT_NOT_EQUAL_F64		15
+#define SLJIT_NOT_EQUAL_F32		(SLJIT_NOT_EQUAL_F64 | SLJIT_F32_OP)
+#define SLJIT_LESS_F64			16
+#define SLJIT_LESS_F32			(SLJIT_LESS_F64 | SLJIT_F32_OP)
+#define SLJIT_GREATER_EQUAL_F64		17
+#define SLJIT_GREATER_EQUAL_F32		(SLJIT_GREATER_EQUAL_F64 | SLJIT_F32_OP)
+#define SLJIT_GREATER_F64		18
+#define SLJIT_GREATER_F32		(SLJIT_GREATER_F64 | SLJIT_F32_OP)
+#define SLJIT_LESS_EQUAL_F64		19
+#define SLJIT_LESS_EQUAL_F32		(SLJIT_LESS_EQUAL_F64 | SLJIT_F32_OP)
+#define SLJIT_UNORDERED_F64		20
+#define SLJIT_UNORDERED_F32		(SLJIT_UNORDERED_F64 | SLJIT_F32_OP)
+#define SLJIT_ORDERED_F64		21
+#define SLJIT_ORDERED_F32		(SLJIT_ORDERED_F64 | SLJIT_F32_OP)
 
 /* Unconditional jump types. */
 #define SLJIT_JUMP			22
@@ -1014,7 +1010,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_la
     type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP
    Flags: - (never set any flags) for both conditional and unconditional jumps.
    Flags: destroy all flags for calls. */
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type);
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type);
 
 /* Basic arithmetic comparison. In most architectures it is implemented as
    an SLJIT_SUB operation (with SLJIT_UNUSED destination and setting
@@ -1024,23 +1020,23 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_ju
     type must be between SLJIT_EQUAL and SLJIT_I_SIG_LESS_EQUAL
     type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP
    Flags: destroy flags. */
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_si type,
-	sljit_si src1, sljit_sw src1w,
-	sljit_si src2, sljit_sw src2w);
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_s32 type,
+	sljit_s32 src1, sljit_sw src1w,
+	sljit_s32 src2, sljit_sw src2w);
 
 /* Basic floating point comparison. In most architectures it is implemented as
    an SLJIT_FCMP operation (setting appropriate flags) followed by a
    sljit_emit_jump. However, some architectures (e.g. MIPS) may employ
    special optimizations here. It is suggested to use this comparison form
    when appropriate.
-    type must be between SLJIT_D_EQUAL and SLJIT_S_ORDERED
+    type must be between SLJIT_EQUAL_F64 and SLJIT_ORDERED_F32
     type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP
    Flags: destroy flags.
    Note: if either operand is NaN, the behaviour is undefined for
          types up to SLJIT_S_LESS_EQUAL. */
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_si type,
-	sljit_si src1, sljit_sw src1w,
-	sljit_si src2, sljit_sw src2w);
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_s32 type,
+	sljit_s32 src1, sljit_sw src1w,
+	sljit_s32 src2, sljit_sw src2w);
 
 /* Set the destination of the jump to this label. */
 SLJIT_API_FUNC_ATTRIBUTE void sljit_set_label(struct sljit_jump *jump, struct sljit_label* label);
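A sketch of how the renamed comparison types combine with these calls; sljit_emit_label and the FR0/FR1 register macros are assumed from the surrounding header:

    /* Jump when FR0 < FR1; SLJIT_LESS_F64 was spelled SLJIT_D_LESS before. */
    struct sljit_jump *less = sljit_emit_fcmp(compiler, SLJIT_LESS_F64,
        SLJIT_FR0, 0, SLJIT_FR1, 0);
    /* ... instructions executed only when the jump is not taken ... */
    sljit_set_label(less, sljit_emit_label(compiler));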
@@ -1053,14 +1049,14 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_
     Indirect form: any other valid addressing mode
    Flags: - (never set any flags) for unconditional jumps.
    Flags: destroy all flags for calls. */
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw);
 
 /* Perform the operation using the conditional flags as the second argument.
    Type must always be between SLJIT_EQUAL and SLJIT_S_ORDERED. The value
    represented by the type is 1, if the condition represented by the type
    is fulfilled, and 0 otherwise.
 
-   If op == SLJIT_MOV, SLJIT_MOV_SI, SLJIT_MOV_UI:
+   If op == SLJIT_MOV, SLJIT_MOV_S32, SLJIT_MOV_U32:
      Set dst to the value represented by the type (0 or 1).
      Src must be SLJIT_UNUSED, and srcw must be 0
      Flags: - (never set any flags)
@@ -1070,18 +1066,18 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
      Important note: only dst=src and dstw=srcw is supported at the moment!
      Flags: I | E | K
    Note: sljit_emit_op_flags does nothing, if dst is SLJIT_UNUSED (regardless of op). */
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op,
-	sljit_si dst, sljit_sw dstw,
-	sljit_si src, sljit_sw srcw,
-	sljit_si type);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 dst, sljit_sw dstw,
+	sljit_s32 src, sljit_sw srcw,
+	sljit_s32 type);
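To illustrate with the renamed move opcodes (again a sketch, not patch content), materialising the equal flag into a register looks like this:

    /* R0 = 1 if the last flag setting operation reported equality, else 0.
       With op == SLJIT_MOV the src argument must be SLJIT_UNUSED and srcw 0. */
    sljit_emit_op_flags(compiler, SLJIT_MOV, SLJIT_R0, 0,
        SLJIT_UNUSED, 0, SLJIT_EQUAL);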
 
 /* Copies the base address of SLJIT_SP + offset to dst.
    Flags: - (never set any flags) */
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_local_base(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw offset);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset);
 
 /* The constant can be changed runtime (see: sljit_set_const)
    Flags: - (never set any flags) */
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw init_value);
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value);
 
 /* After the code generation the address for label, jump and const instructions
    are computed. Since these structures are freed by sljit_free_compiler, the
@@ -1104,7 +1100,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_
 /* Get the human readable name of the platform. Can be useful on platforms
    like ARM, where ARM and Thumb2 functions can be mixed, and
    it is useful to know the type of the code generator. */
-SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void);
+SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void);
 
 /* Portable helper function to get an offset of a member. */
 #define SLJIT_OFFSETOF(base, member) ((sljit_sw)(&((base*)0x10)->member) - 0x10)
@@ -1196,14 +1192,14 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_
 
    Note: it returns with -1 for virtual registers (only on x86-32). */
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_register_index(sljit_si reg);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg);
 
 /* The following function is a helper function for sljit_emit_op_custom.
    It returns with the real machine register index of any SLJIT_FLOAT register.
 
    Note: the index is always an even number on ARM (except ARM-64), MIPS, and SPARC. */
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_float_register_index(sljit_si reg);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg);
 
 /* Any instruction can be inserted into the instruction stream by
    sljit_emit_op_custom. It has a similar purpose as inline assembly.
@@ -1215,18 +1211,18 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
               if size == 4, the instruction argument must be 4 byte aligned.
    Otherwise: size must be 4 and instruction argument must be 4 byte aligned. */
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *compiler,
-	void *instruction, sljit_si size);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
+	void *instruction, sljit_s32 size);
 
 #if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86)
 
 /* Returns with non-zero if sse2 is available. */
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_x86_is_sse2_available(void);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_x86_is_sse2_available(void);
 
 /* Returns with non-zero if cmov instruction is available. */
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_x86_is_cmov_available(void);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_x86_is_cmov_available(void);
 
 /* Emit a conditional mov instruction on x86 CPUs. This instruction
    moves src to destination, if the condition is satisfied. Unlike
@@ -1235,14 +1231,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
    checked by sljit_x86_is_cmov_available function.
     type must be between SLJIT_EQUAL and SLJIT_S_ORDERED
     dst_reg must be a valid register and it can be combined
-      with SLJIT_INT_OP to perform 32 bit arithmetic
+      with SLJIT_I32_OP to perform 32 bit arithmetic
    Flags: I - (never set any flags)
  */
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_x86_emit_cmov(struct sljit_compiler *compiler,
-	sljit_si type,
-	sljit_si dst_reg,
-	sljit_si src, sljit_sw srcw);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_x86_emit_cmov(struct sljit_compiler *compiler,
+	sljit_s32 type,
+	sljit_s32 dst_reg,
+	sljit_s32 src, sljit_sw srcw);
 
 #endif
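Taken together, a minimal end to end sketch of the renamed API might look like the following. This is only an illustration of the declarations shown in the diff above, not code from the commit; it assumes compiler came from sljit_create_compiler() and that integer arguments arrive in SLJIT_S0 and SLJIT_S1, per the usual sljit calling convention.

    /* Emit: sljit_sw f(sljit_sw a, sljit_sw b) { return a + b; } */
    sljit_s32 status;

    /* options, args, scratches, saveds, fscratches, fsaveds, local_size */
    status = sljit_emit_enter(compiler, 0, 2, 1, 2, 0, 0, 0);
    if (status == SLJIT_SUCCESS)
        status = sljit_emit_op2(compiler, SLJIT_ADD,
            SLJIT_R0, 0, SLJIT_S0, 0, SLJIT_S1, 0);
    if (status == SLJIT_SUCCESS)
        status = sljit_emit_return(compiler, SLJIT_MOV, SLJIT_R0, 0);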
 

Modified: tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeARM_32.c
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeARM_32.c?rev=1777023&r1=1777022&r2=1777023&view=diff
==============================================================================
--- tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeARM_32.c (original)
+++ tomcat/jk/trunk/native/iis/pcre/sljit/sljitNativeARM_32.c Mon Jan  2 21:21:08 2017
@@ -24,7 +24,7 @@
  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void)
+SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
 {
 #if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
 	return "ARMv7" SLJIT_CPUINFO;
@@ -52,10 +52,10 @@ SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST cha
 #define ALIGN_INSTRUCTION(ptr) \
 	(sljit_uw*)(((sljit_uw)(ptr) + (CONST_POOL_ALIGNMENT * sizeof(sljit_uw)) - 1) & ~((CONST_POOL_ALIGNMENT * sizeof(sljit_uw)) - 1))
 #define MAX_DIFFERENCE(max_diff) \
-	(((max_diff) / (sljit_si)sizeof(sljit_uw)) - (CONST_POOL_ALIGNMENT - 1))
+	(((max_diff) / (sljit_s32)sizeof(sljit_uw)) - (CONST_POOL_ALIGNMENT - 1))
 
 /* See sljit_emit_enter and sljit_emit_op0 if you want to change them. */
-static SLJIT_CONST sljit_ub reg_map[SLJIT_NUMBER_OF_REGISTERS + 6] = {
+static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 6] = {
 	0, 0, 1, 2, 11, 10, 9, 8, 7, 6, 5, 4, 13, 3, 12, 14, 15
 };
 
@@ -126,13 +126,13 @@ static SLJIT_CONST sljit_ub reg_map[SLJI
 
 #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
 
-static sljit_si push_cpool(struct sljit_compiler *compiler)
+static sljit_s32 push_cpool(struct sljit_compiler *compiler)
 {
 	/* Pushing the constant pool into the instruction stream. */
 	sljit_uw* inst;
 	sljit_uw* cpool_ptr;
 	sljit_uw* cpool_end;
-	sljit_si i;
+	sljit_s32 i;
 
 	/* The label could point to the address after the constant pool. */
 	if (compiler->last_label && compiler->last_label->size == compiler->size)
@@ -164,7 +164,7 @@ static sljit_si push_cpool(struct sljit_
 	return SLJIT_SUCCESS;
 }
 
-static sljit_si push_inst(struct sljit_compiler *compiler, sljit_uw inst)
+static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_uw inst)
 {
 	sljit_uw* ptr;
 
@@ -178,13 +178,13 @@ static sljit_si push_inst(struct sljit_c
 	return SLJIT_SUCCESS;
 }
 
-static sljit_si push_inst_with_literal(struct sljit_compiler *compiler, sljit_uw inst, sljit_uw literal)
+static sljit_s32 push_inst_with_literal(struct sljit_compiler *compiler, sljit_uw inst, sljit_uw literal)
 {
 	sljit_uw* ptr;
 	sljit_uw cpool_index = CPOOL_SIZE;
 	sljit_uw* cpool_ptr;
 	sljit_uw* cpool_end;
-	sljit_ub* cpool_unique_ptr;
+	sljit_u8* cpool_unique_ptr;
 
 	if (SLJIT_UNLIKELY(compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092)))
 		FAIL_IF(push_cpool(compiler));
@@ -228,7 +228,7 @@ static sljit_si push_inst_with_literal(s
 	return SLJIT_SUCCESS;
 }
 
-static sljit_si push_inst_with_unique_literal(struct sljit_compiler *compiler, sljit_uw inst, sljit_uw literal)
+static sljit_s32 push_inst_with_unique_literal(struct sljit_compiler *compiler, sljit_uw inst, sljit_uw literal)
 {
 	sljit_uw* ptr;
 	if (SLJIT_UNLIKELY((compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092)) || compiler->cpool_fill >= CPOOL_SIZE))
@@ -248,7 +248,7 @@ static sljit_si push_inst_with_unique_li
 	return SLJIT_SUCCESS;
 }
 
-static SLJIT_INLINE sljit_si prepare_blx(struct sljit_compiler *compiler)
+static SLJIT_INLINE sljit_s32 prepare_blx(struct sljit_compiler *compiler)
 {
 	/* Place for at least two instructions (doesn't matter whether the first has a literal). */
 	if (SLJIT_UNLIKELY(compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4088)))
@@ -256,7 +256,7 @@ static SLJIT_INLINE sljit_si prepare_blx
 	return SLJIT_SUCCESS;
 }
 
-static SLJIT_INLINE sljit_si emit_blx(struct sljit_compiler *compiler)
+static SLJIT_INLINE sljit_s32 emit_blx(struct sljit_compiler *compiler)
 {
 	/* Must follow tightly the previous instruction (to be able to convert it to bl instruction). */
 	SLJIT_ASSERT(compiler->cpool_diff == CONST_POOL_EMPTY || compiler->size - compiler->cpool_diff < MAX_DIFFERENCE(4092));
@@ -286,7 +286,7 @@ static sljit_uw patch_pc_relative_loads(
 
 			/* Must be a load instruction with immediate offset. */
 			SLJIT_ASSERT(ind < cpool_size && !(*last_pc_patch & (1 << 25)) && (*last_pc_patch & (1 << 20)));
-			if ((sljit_si)const_pool[ind] < 0) {
+			if ((sljit_s32)const_pool[ind] < 0) {
 				const_pool[ind] = counter;
 				ind = counter;
 				counter++;
@@ -311,26 +311,26 @@ static sljit_uw patch_pc_relative_loads(
 /* On some rare occasions we may need future patches. The probability is close to 0 in practice. */
 struct future_patch {
 	struct future_patch* next;
-	sljit_si index;
-	sljit_si value;
+	sljit_s32 index;
+	sljit_s32 value;
 };
 
-static sljit_si resolve_const_pool_index(struct sljit_compiler *compiler, struct future_patch **first_patch, sljit_uw cpool_current_index, sljit_uw *cpool_start_address, sljit_uw *buf_ptr)
+static sljit_s32 resolve_const_pool_index(struct sljit_compiler *compiler, struct future_patch **first_patch, sljit_uw cpool_current_index, sljit_uw *cpool_start_address, sljit_uw *buf_ptr)
 {
-	sljit_si value;
+	sljit_s32 value;
 	struct future_patch *curr_patch, *prev_patch;
 
 	SLJIT_UNUSED_ARG(compiler);
 
 	/* Using the values generated by patch_pc_relative_loads. */
 	if (!*first_patch)
-		value = (sljit_si)cpool_start_address[cpool_current_index];
+		value = (sljit_s32)cpool_start_address[cpool_current_index];
 	else {
 		curr_patch = *first_patch;
-		prev_patch = 0;
+		prev_patch = NULL;
 		while (1) {
 			if (!curr_patch) {
-				value = (sljit_si)cpool_start_address[cpool_current_index];
+				value = (sljit_s32)cpool_start_address[cpool_current_index];
 				break;
 			}
 			if ((sljit_uw)curr_patch->index == cpool_current_index) {
@@ -370,7 +370,7 @@ static sljit_si resolve_const_pool_index
 
 #else
 
-static sljit_si push_inst(struct sljit_compiler *compiler, sljit_uw inst)
+static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_uw inst)
 {
 	sljit_uw* ptr;
 
@@ -381,7 +381,7 @@ static sljit_si push_inst(struct sljit_c
 	return SLJIT_SUCCESS;
 }
 
-static SLJIT_INLINE sljit_si emit_imm(struct sljit_compiler *compiler, sljit_si reg, sljit_sw imm)
+static SLJIT_INLINE sljit_s32 emit_imm(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw imm)
 {
 	FAIL_IF(push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | (imm & 0xfff)));
 	return push_inst(compiler, MOVT | RD(reg) | ((imm >> 12) & 0xf0000) | ((imm >> 16) & 0xfff));
@@ -389,7 +389,7 @@ static SLJIT_INLINE sljit_si emit_imm(st
 
 #endif
 
-static SLJIT_INLINE sljit_si detect_jump_type(struct sljit_jump *jump, sljit_uw *code_ptr, sljit_uw *code)
+static SLJIT_INLINE sljit_s32 detect_jump_type(struct sljit_jump *jump, sljit_uw *code_ptr, sljit_uw *code)
 {
 	sljit_sw diff;
 
@@ -446,13 +446,13 @@ static SLJIT_INLINE sljit_si detect_jump
 	return 0;
 }
 
-static SLJIT_INLINE void inline_set_jump_addr(sljit_uw addr, sljit_uw new_addr, sljit_si flush)
+static SLJIT_INLINE void inline_set_jump_addr(sljit_uw addr, sljit_uw new_addr, sljit_s32 flush)
 {
 #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
 	sljit_uw *ptr = (sljit_uw*)addr;
 	sljit_uw *inst = (sljit_uw*)ptr[0];
 	sljit_uw mov_pc = ptr[1];
-	sljit_si bl = (mov_pc & 0x0000f000) != RD(TMP_PC);
+	sljit_s32 bl = (mov_pc & 0x0000f000) != RD(TMP_PC);
 	sljit_sw diff = (sljit_sw)(((sljit_sw)new_addr - (sljit_sw)(inst + 2)) >> 2);
 
 	if (diff <= 0x7fffff && diff >= -0x800000) {
@@ -504,7 +504,7 @@ static SLJIT_INLINE void inline_set_jump
 
 static sljit_uw get_imm(sljit_uw imm);
 
-static SLJIT_INLINE void inline_set_const(sljit_uw addr, sljit_sw new_constant, sljit_si flush)
+static SLJIT_INLINE void inline_set_const(sljit_uw addr, sljit_sw new_constant, sljit_s32 flush)
 {
 #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
 	sljit_uw *ptr = (sljit_uw*)addr;
@@ -789,7 +789,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_gen
 	}
 #endif
 
-	SLJIT_ASSERT(code_ptr - code <= (sljit_si)size);
+	SLJIT_ASSERT(code_ptr - code <= (sljit_s32)size);
 
 	compiler->error = SLJIT_ERR_COMPILED;
 	compiler->executable_size = (code_ptr - code) * sizeof(sljit_uw);
@@ -820,16 +820,16 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_gen
 #define EMIT_DATA_PROCESS_INS(opcode, set_flags, dst, src1, src2) \
 	(0xe0000000 | ((opcode) << 21) | (set_flags) | RD(dst) | RN(src1) | (src2))
 
-static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si inp_flags,
-	sljit_si dst, sljit_sw dstw,
-	sljit_si src1, sljit_sw src1w,
-	sljit_si src2, sljit_sw src2w);
-
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler,
-	sljit_si options, sljit_si args, sljit_si scratches, sljit_si saveds,
-	sljit_si fscratches, sljit_si fsaveds, sljit_si local_size)
+static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 inp_flags,
+	sljit_s32 dst, sljit_sw dstw,
+	sljit_s32 src1, sljit_sw src1w,
+	sljit_s32 src2, sljit_sw src2w);
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
+	sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
 {
-	sljit_si size, i, tmp;
+	sljit_s32 size, i, tmp;
 	sljit_uw push;
 
 	CHECK_ERROR();
@@ -866,11 +866,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 	return SLJIT_SUCCESS;
 }
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_set_context(struct sljit_compiler *compiler,
-	sljit_si options, sljit_si args, sljit_si scratches, sljit_si saveds,
-	sljit_si fscratches, sljit_si fsaveds, sljit_si local_size)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
+	sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
 {
-	sljit_si size;
+	sljit_s32 size;
 
 	CHECK_ERROR();
 	CHECK(check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
@@ -881,9 +881,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 	return SLJIT_SUCCESS;
 }
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
 {
-	sljit_si i, tmp;
+	sljit_s32 i, tmp;
 	sljit_uw pop;
 
 	CHECK_ERROR();
@@ -983,8 +983,8 @@ static sljit_sw data_transfer_insts[16]
 	} \
 	return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, flags & SET_FLAGS, dst, SLJIT_UNUSED, (reg_map[(flags & ARGS_SWAPPED) ? src1 : src2] << 8) | (opcode << 5) | 0x10 | ((flags & ARGS_SWAPPED) ? reg_map[src2] : reg_map[src1])));
 
-static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, sljit_si op, sljit_si flags,
-	sljit_si dst, sljit_si src1, sljit_si src2)
+static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
+	sljit_s32 dst, sljit_s32 src1, sljit_s32 src2)
 {
 	sljit_sw mul_inst;
 
@@ -1001,17 +1001,17 @@ static SLJIT_INLINE sljit_si emit_single
 		}
 		return SLJIT_SUCCESS;
 
-	case SLJIT_MOV_UB:
-	case SLJIT_MOV_SB:
+	case SLJIT_MOV_U8:
+	case SLJIT_MOV_S8:
 		SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED));
 		if ((flags & (REG_DEST | REG_SOURCE)) == (REG_DEST | REG_SOURCE)) {
 #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
-			if (op == SLJIT_MOV_UB)
+			if (op == SLJIT_MOV_U8)
 				return push_inst(compiler, EMIT_DATA_PROCESS_INS(AND_DP, 0, dst, src2, SRC2_IMM | 0xff));
 			FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (24 << 7) | reg_map[src2])));
-			return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (24 << 7) | (op == SLJIT_MOV_UB ? 0x20 : 0x40) | reg_map[dst]));
+			return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (24 << 7) | (op == SLJIT_MOV_U8 ? 0x20 : 0x40) | reg_map[dst]));
 #else
-			return push_inst(compiler, (op == SLJIT_MOV_UB ? UXTB : SXTB) | RD(dst) | RM(src2));
+			return push_inst(compiler, (op == SLJIT_MOV_U8 ? UXTB : SXTB) | RD(dst) | RM(src2));
 #endif
 		}
 		else if (dst != src2) {
@@ -1022,15 +1022,15 @@ static SLJIT_INLINE sljit_si emit_single
 		}
 		return SLJIT_SUCCESS;
 
-	case SLJIT_MOV_UH:
-	case SLJIT_MOV_SH:
+	case SLJIT_MOV_U16:
+	case SLJIT_MOV_S16:
 		SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED));
 		if ((flags & (REG_DEST | REG_SOURCE)) == (REG_DEST | REG_SOURCE)) {
 #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
 			FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (16 << 7) | reg_map[src2])));
-			return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (16 << 7) | (op == SLJIT_MOV_UH ? 0x20 : 0x40) | reg_map[dst]));
+			return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (16 << 7) | (op == SLJIT_MOV_U16 ? 0x20 : 0x40) | reg_map[dst]));
 #else
-			return push_inst(compiler, (op == SLJIT_MOV_UH ? UXTH : SXTH) | RD(dst) | RM(src2));
+			return push_inst(compiler, (op == SLJIT_MOV_U16 ? UXTH : SXTH) | RD(dst) | RM(src2));
 #endif
 		}
 		else if (dst != src2) {
@@ -1139,7 +1139,7 @@ static SLJIT_INLINE sljit_si emit_single
    Returns with 0 if not possible. */
 static sljit_uw get_imm(sljit_uw imm)
 {
-	sljit_si rol;
+	sljit_s32 rol;
 
 	if (imm <= 0xff)
 		return SRC2_IMM | imm;
@@ -1175,12 +1175,12 @@ static sljit_uw get_imm(sljit_uw imm)
 }
 
 #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
-static sljit_si generate_int(struct sljit_compiler *compiler, sljit_si reg, sljit_uw imm, sljit_si positive)
+static sljit_s32 generate_int(struct sljit_compiler *compiler, sljit_s32 reg, sljit_uw imm, sljit_s32 positive)
 {
 	sljit_uw mask;
 	sljit_uw imm1;
 	sljit_uw imm2;
-	sljit_si rol;
+	sljit_s32 rol;
 
 	/* Step 1: Search for a zero byte (8 continuous zero bits). */
 	mask = 0xff000000;
@@ -1286,7 +1286,7 @@ static sljit_si generate_int(struct slji
 }
 #endif
 
-static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si reg, sljit_uw imm)
+static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, sljit_uw imm)
 {
 	sljit_uw tmp;
 
@@ -1317,7 +1317,7 @@ static sljit_si load_immediate(struct sl
 }
 
 /* Helper function. Dst should be reg + value, using at most 1 instruction; flags are not set. */
-static sljit_si emit_set_delta(struct sljit_compiler *compiler, sljit_si dst, sljit_si reg, sljit_sw value)
+static sljit_s32 emit_set_delta(struct sljit_compiler *compiler, sljit_s32 dst, sljit_s32 reg, sljit_sw value)
 {
 	if (value >= 0) {
 		value = get_imm(value);
@@ -1333,7 +1333,7 @@ static sljit_si emit_set_delta(struct sl
 }
 
 /* Can perform an operation using at most 1 instruction. */
-static sljit_si getput_arg_fast(struct sljit_compiler *compiler, sljit_si inp_flags, sljit_si reg, sljit_si arg, sljit_sw argw)
+static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_s32 inp_flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
 {
 	sljit_uw imm;
 
@@ -1408,7 +1408,7 @@ static sljit_si getput_arg_fast(struct s
 /* See getput_arg below.
    Note: can_cache is called only for binary operators. Those
    operators always use word arguments without write back. */
-static sljit_si can_cache(sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw)
+static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
 {
 	/* Immediate caching is not supported as it would be an operation on constant arguments. */
 	if (arg & SLJIT_IMM)
@@ -1456,9 +1456,9 @@ static sljit_si can_cache(sljit_si arg,
 	}
 
 /* Emit the necessary instructions. See can_cache above. */
-static sljit_si getput_arg(struct sljit_compiler *compiler, sljit_si inp_flags, sljit_si reg, sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw)
+static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 inp_flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
 {
-	sljit_si tmp_r;
+	sljit_s32 tmp_r;
 	sljit_sw max_delta;
 	sljit_sw sign;
 	sljit_uw imm;
@@ -1583,7 +1583,7 @@ static sljit_si getput_arg(struct sljit_
 	return push_inst(compiler, EMIT_DATA_TRANSFER(inp_flags, 1, inp_flags & WRITE_BACK, reg, arg & REG_MASK, reg_map[tmp_r] | (max_delta & 0xf00 ? SRC2_IMM : 0)));
 }
 
-static SLJIT_INLINE sljit_si emit_op_mem(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw)
+static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
 {
 	if (getput_arg_fast(compiler, flags, reg, arg, argw))
 		return compiler->error;
@@ -1592,17 +1592,17 @@ static SLJIT_INLINE sljit_si emit_op_mem
 	return getput_arg(compiler, flags, reg, arg, argw, 0, 0);
 }
 
-static SLJIT_INLINE sljit_si emit_op_mem2(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg1, sljit_sw arg1w, sljit_si arg2, sljit_sw arg2w)
+static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg1, sljit_sw arg1w, sljit_s32 arg2, sljit_sw arg2w)
 {
 	if (getput_arg_fast(compiler, flags, reg, arg1, arg1w))
 		return compiler->error;
 	return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w);
 }
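
emit_op_mem2() above hands getput_arg() the address of the next memory operand as well, so an expensive base-plus-offset computation can be parked in TMP_REG3 and reused for the second access; can_cache() decides whether that is worthwhile. A much-simplified illustration of the profitability test, assuming the 12-bit unsigned displacement that plain ARM word loads and stores allow (the byte/halfword forms are narrower, which is what max_delta in getput_arg() tracks):

/* Two word accesses off the same base can share one materialized
   address when the second offset is still reachable from the first
   within a single 12-bit displacement. */
static int worth_caching(long argw, long next_argw)
{
	long diff = next_argw - argw;
	if (diff < 0)
		diff = -diff;
	return diff <= 0xfff;
}
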
 
-static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si inp_flags,
-	sljit_si dst, sljit_sw dstw,
-	sljit_si src1, sljit_sw src1w,
-	sljit_si src2, sljit_sw src2w)
+static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 inp_flags,
+	sljit_s32 dst, sljit_sw dstw,
+	sljit_s32 src1, sljit_sw src1w,
+	sljit_s32 src2, sljit_sw src2w)
 {
 	/* arg1 goes to TMP_REG1 or src reg
 	   arg2 goes to TMP_REG2, imm or src reg
@@ -1610,25 +1610,25 @@ static sljit_si emit_op(struct sljit_com
 	   result goes to TMP_REG2, so storing the result can use TMP_REG1 and TMP_REG3. */
 
 	/* We prefer registers and simple constants. */
-	sljit_si dst_r;
-	sljit_si src1_r;
-	sljit_si src2_r = 0;
-	sljit_si sugg_src2_r = TMP_REG2;
-	sljit_si flags = GET_FLAGS(op) ? SET_FLAGS : 0;
+	sljit_s32 dst_r;
+	sljit_s32 src1_r;
+	sljit_s32 src2_r = 0;
+	sljit_s32 sugg_src2_r = TMP_REG2;
+	sljit_s32 flags = GET_FLAGS(op) ? SET_FLAGS : 0;
 
 	compiler->cache_arg = 0;
 	compiler->cache_argw = 0;
 
 	/* Destination check. */
 	if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED)) {
-		if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI && !(src2 & SLJIT_MEM))
+		if (op >= SLJIT_MOV && op <= SLJIT_MOVU_S32 && !(src2 & SLJIT_MEM))
 			return SLJIT_SUCCESS;
 		dst_r = TMP_REG2;
 	}
 	else if (FAST_IS_REG(dst)) {
 		dst_r = dst;
 		flags |= REG_DEST;
-		if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI)
+		if (op >= SLJIT_MOV && op <= SLJIT_MOVU_S32)
 			sugg_src2_r = dst_r;
 	}
 	else {
@@ -1695,7 +1695,7 @@ static sljit_si emit_op(struct sljit_com
 		if (FAST_IS_REG(src2)) {
 			src2_r = src2;
 			flags |= REG_SOURCE;
-			if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOVU_SI)
+			if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOVU_S32)
 				dst_r = src2_r;
 		}
 		else do { /* do { } while(0) is used because of breaks. */
@@ -1804,7 +1804,7 @@ extern int __aeabi_idivmod(int numerator
 }
 #endif
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
 {
 	CHECK_ERROR();
 	CHECK(check_sljit_emit_op0(compiler, op));
@@ -1817,58 +1817,58 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 	case SLJIT_NOP:
 		FAIL_IF(push_inst(compiler, NOP));
 		break;
-	case SLJIT_LUMUL:
-	case SLJIT_LSMUL:
+	case SLJIT_LMUL_UW:
+	case SLJIT_LMUL_SW:
 #if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
-		return push_inst(compiler, (op == SLJIT_LUMUL ? UMULL : SMULL)
+		return push_inst(compiler, (op == SLJIT_LMUL_UW ? UMULL : SMULL)
 			| (reg_map[SLJIT_R1] << 16)
 			| (reg_map[SLJIT_R0] << 12)
 			| (reg_map[SLJIT_R0] << 8)
 			| reg_map[SLJIT_R1]);
 #else
 		FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG1, SLJIT_UNUSED, RM(SLJIT_R1))));
-		return push_inst(compiler, (op == SLJIT_LUMUL ? UMULL : SMULL)
+		return push_inst(compiler, (op == SLJIT_LMUL_UW ? UMULL : SMULL)
 			| (reg_map[SLJIT_R1] << 16)
 			| (reg_map[SLJIT_R0] << 12)
 			| (reg_map[SLJIT_R0] << 8)
 			| reg_map[TMP_REG1]);
 #endif
-	case SLJIT_UDIVMOD:
-	case SLJIT_SDIVMOD:
-	case SLJIT_UDIVI:
-	case SLJIT_SDIVI:
-		SLJIT_COMPILE_ASSERT((SLJIT_UDIVMOD & 0x2) == 0 && SLJIT_UDIVI - 0x2 == SLJIT_UDIVMOD, bad_div_opcode_assignments);
+	case SLJIT_DIVMOD_UW:
+	case SLJIT_DIVMOD_SW:
+	case SLJIT_DIV_UW:
+	case SLJIT_DIV_SW:
+		SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
 		SLJIT_COMPILE_ASSERT(reg_map[2] == 1 && reg_map[3] == 2, bad_register_mapping);
 
-		if ((op >= SLJIT_UDIVI) && (compiler->scratches >= 3)) {
+		if ((op >= SLJIT_DIV_UW) && (compiler->scratches >= 3)) {
 			FAIL_IF(push_inst(compiler, 0xe52d2008 /* str r2, [sp, #-8]! */));
 			FAIL_IF(push_inst(compiler, 0xe58d1004 /* str r1, [sp, #4] */));
 		}
-		else if ((op >= SLJIT_UDIVI) || (compiler->scratches >= 3))
-			FAIL_IF(push_inst(compiler, 0xe52d0008 | (op >= SLJIT_UDIVI ? 0x1000 : 0x2000) /* str r1/r2, [sp, #-8]! */));
+		else if ((op >= SLJIT_DIV_UW) || (compiler->scratches >= 3))
+			FAIL_IF(push_inst(compiler, 0xe52d0008 | (op >= SLJIT_DIV_UW ? 0x1000 : 0x2000) /* str r1/r2, [sp, #-8]! */));
 
 #if defined(__GNUC__)
 		FAIL_IF(sljit_emit_ijump(compiler, SLJIT_FAST_CALL, SLJIT_IMM,
-			((op | 0x2) == SLJIT_UDIVI ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod))));
+			((op | 0x2) == SLJIT_DIV_UW ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod))));
 #else
 #error "Software divmod functions are needed"
 #endif
 
-		if ((op >= SLJIT_UDIVI) && (compiler->scratches >= 3)) {
+		if ((op >= SLJIT_DIV_UW) && (compiler->scratches >= 3)) {
 			FAIL_IF(push_inst(compiler, 0xe59d1004 /* ldr r1, [sp, #4] */));
 			FAIL_IF(push_inst(compiler, 0xe49d2008 /* ldr r2, [sp], #8 */));
 		}
-		else if ((op >= SLJIT_UDIVI) || (compiler->scratches >= 3))
-			return push_inst(compiler, 0xe49d0008 | (op >= SLJIT_UDIVI ? 0x1000 : 0x2000) /* ldr r1/r2, [sp], #8 */);
+		else if ((op >= SLJIT_DIV_UW) || (compiler->scratches >= 3))
+			return push_inst(compiler, 0xe49d0008 | (op >= SLJIT_DIV_UW ? 0x1000 : 0x2000) /* ldr r1/r2, [sp], #8 */);
 		return SLJIT_SUCCESS;
 	}
 
 	return SLJIT_SUCCESS;
 }
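
The stack shuffling around the SLJIT_DIVMOD_* / SLJIT_DIV_* cases above exists because the call goes out to the EABI helpers __aeabi_uidivmod/__aeabi_idivmod, which return the quotient in r0 and the remainder in r1 and may clobber the other argument registers, which is why r1/r2 are spilled around the call when they hold live values. In plain C the produced pair is simply:

#include <stdio.h>

int main(void)
{
	int num = 17, den = 5;
	/* For SLJIT_DIVMOD_SW the quotient ends up in SLJIT_R0 and the
	   remainder in SLJIT_R1, matching the two expressions below. */
	printf("quotient = %d, remainder = %d\n", num / den, num % den);
	return 0;
}
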
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op,
-	sljit_si dst, sljit_sw dstw,
-	sljit_si src, sljit_sw srcw)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 dst, sljit_sw dstw,
+	sljit_s32 src, sljit_sw srcw)
 {
 	CHECK_ERROR();
 	CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw));
@@ -1877,40 +1877,40 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 
 	switch (GET_OPCODE(op)) {
 	case SLJIT_MOV:
-	case SLJIT_MOV_UI:
-	case SLJIT_MOV_SI:
+	case SLJIT_MOV_U32:
+	case SLJIT_MOV_S32:
 	case SLJIT_MOV_P:
 		return emit_op(compiler, SLJIT_MOV, ALLOW_ANY_IMM, dst, dstw, TMP_REG1, 0, src, srcw);
 
-	case SLJIT_MOV_UB:
-		return emit_op(compiler, SLJIT_MOV_UB, ALLOW_ANY_IMM | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_ub)srcw : srcw);
+	case SLJIT_MOV_U8:
+		return emit_op(compiler, SLJIT_MOV_U8, ALLOW_ANY_IMM | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw);
 
-	case SLJIT_MOV_SB:
-		return emit_op(compiler, SLJIT_MOV_SB, ALLOW_ANY_IMM | SIGNED_DATA | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sb)srcw : srcw);
+	case SLJIT_MOV_S8:
+		return emit_op(compiler, SLJIT_MOV_S8, ALLOW_ANY_IMM | SIGNED_DATA | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw);
 
-	case SLJIT_MOV_UH:
-		return emit_op(compiler, SLJIT_MOV_UH, ALLOW_ANY_IMM | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_uh)srcw : srcw);
+	case SLJIT_MOV_U16:
+		return emit_op(compiler, SLJIT_MOV_U16, ALLOW_ANY_IMM | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw);
 
-	case SLJIT_MOV_SH:
-		return emit_op(compiler, SLJIT_MOV_SH, ALLOW_ANY_IMM | SIGNED_DATA | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sh)srcw : srcw);
+	case SLJIT_MOV_S16:
+		return emit_op(compiler, SLJIT_MOV_S16, ALLOW_ANY_IMM | SIGNED_DATA | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);
 
 	case SLJIT_MOVU:
-	case SLJIT_MOVU_UI:
-	case SLJIT_MOVU_SI:
+	case SLJIT_MOVU_U32:
+	case SLJIT_MOVU_S32:
 	case SLJIT_MOVU_P:
 		return emit_op(compiler, SLJIT_MOV, ALLOW_ANY_IMM | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw);
 
-	case SLJIT_MOVU_UB:
-		return emit_op(compiler, SLJIT_MOV_UB, ALLOW_ANY_IMM | BYTE_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_ub)srcw : srcw);
+	case SLJIT_MOVU_U8:
+		return emit_op(compiler, SLJIT_MOV_U8, ALLOW_ANY_IMM | BYTE_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw);
 
-	case SLJIT_MOVU_SB:
-		return emit_op(compiler, SLJIT_MOV_SB, ALLOW_ANY_IMM | SIGNED_DATA | BYTE_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sb)srcw : srcw);
+	case SLJIT_MOVU_S8:
+		return emit_op(compiler, SLJIT_MOV_S8, ALLOW_ANY_IMM | SIGNED_DATA | BYTE_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw);
 
-	case SLJIT_MOVU_UH:
-		return emit_op(compiler, SLJIT_MOV_UH, ALLOW_ANY_IMM | HALF_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_uh)srcw : srcw);
+	case SLJIT_MOVU_U16:
+		return emit_op(compiler, SLJIT_MOV_U16, ALLOW_ANY_IMM | HALF_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw);
 
-	case SLJIT_MOVU_SH:
-		return emit_op(compiler, SLJIT_MOV_SH, ALLOW_ANY_IMM | SIGNED_DATA | HALF_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sh)srcw : srcw);
+	case SLJIT_MOVU_S16:
+		return emit_op(compiler, SLJIT_MOV_S16, ALLOW_ANY_IMM | SIGNED_DATA | HALF_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);
 
 	case SLJIT_NOT:
 		return emit_op(compiler, op, ALLOW_ANY_IMM, dst, dstw, TMP_REG1, 0, src, srcw);
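
The casts applied to immediate sources above ((sljit_u8)srcw, (sljit_s8)srcw and their 16-bit counterparts) pre-truncate the constant so that an immediate behaves exactly like a value loaded with that width: the U* variants zero-extend, the S* variants sign-extend. A tiny standalone demonstration, assuming the sljit_u8/sljit_s8 typedefs are the usual fixed-width C types:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	intptr_t srcw = 0x1234f0;               /* arbitrary wide immediate */
	uint8_t u = (uint8_t)srcw;              /* MOV_U8: keeps 0xf0       */
	int8_t  s = (int8_t)srcw;               /* MOV_S8: 0xf0 becomes -16 */
	printf("u8 -> 0x%08x, s8 -> 0x%08x\n",
		(unsigned)u, (unsigned)(int32_t)s); /* 0x000000f0 vs 0xfffffff0 */
	return 0;
}
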
@@ -1929,10 +1929,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 	return SLJIT_SUCCESS;
 }
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op,
-	sljit_si dst, sljit_sw dstw,
-	sljit_si src1, sljit_sw src1w,
-	sljit_si src2, sljit_sw src2w)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 dst, sljit_sw dstw,
+	sljit_s32 src1, sljit_sw src1w,
+	sljit_s32 src2, sljit_sw src2w)
 {
 	CHECK_ERROR();
 	CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
@@ -1971,20 +1971,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 	return SLJIT_SUCCESS;
 }
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_register_index(sljit_si reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
 {
 	CHECK_REG_INDEX(check_sljit_get_register_index(reg));
 	return reg_map[reg];
 }
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_float_register_index(sljit_si reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
 {
 	CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
 	return reg << 1;
 }
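
The reg << 1 in sljit_get_float_register_index() above is consistent with the VFP register layout, where each double-precision register dN overlays the pair of singles s(2N) and s(2N+1); presumably the returned index is therefore in single-precision units. As a one-line illustration (not sljit API):

/* dN overlays s(2N)/s(2N+1) on VFP. */
static int low_single_of_double(int d)
{
	return d << 1;   /* d0 -> s0, d1 -> s2, d2 -> s4, ... */
}
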
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *compiler,
-	void *instruction, sljit_si size)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
+	void *instruction, sljit_s32 size)
 {
 	CHECK_ERROR();
 	CHECK(check_sljit_emit_op_custom(compiler, instruction, size));
@@ -2000,7 +2000,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 
 /* 0 - no fpu
    1 - vfp */
-static sljit_si arm_fpu_type = -1;
+static sljit_s32 arm_fpu_type = -1;
 
 static void init_compiler(void)
 {
@@ -2011,7 +2011,7 @@ static void init_compiler(void)
 	arm_fpu_type = 1;
 }
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_is_fpu_available(void)
 {
 #ifdef SLJIT_IS_FPU_AVAILABLE
 	return SLJIT_IS_FPU_AVAILABLE;
@@ -2026,7 +2026,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 
 #define arm_fpu_type 1
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_is_fpu_available(void)
 {
 	/* Always available. */
 	return 1;
@@ -2040,11 +2040,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 #define EMIT_FPU_OPERATION(opcode, mode, dst, src1, src2) \
 	((opcode) | (mode) | ((dst) << 12) | (src1) | ((src2) << 16))
 
-static sljit_si emit_fop_mem(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw)
+static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
 {
 	sljit_sw tmp;
 	sljit_uw imm;
-	sljit_sw inst = VSTR_F32 | (flags & (SLJIT_SINGLE_OP | FPU_LOAD));
+	sljit_sw inst = VSTR_F32 | (flags & (SLJIT_F32_OP | FPU_LOAD));
 	SLJIT_ASSERT(arg & SLJIT_MEM);
 
 	if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
@@ -2104,16 +2104,16 @@ static sljit_si emit_fop_mem(struct slji
 	return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG3, reg, 0));
 }
 
-static SLJIT_INLINE sljit_si sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_si op,
-	sljit_si dst, sljit_sw dstw,
-	sljit_si src, sljit_sw srcw)
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 dst, sljit_sw dstw,
+	sljit_s32 src, sljit_sw srcw)
 {
 	if (src & SLJIT_MEM) {
-		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src, srcw));
+		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src, srcw));
 		src = TMP_FREG1;
 	}
 
-	FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_S32_F32, op & SLJIT_SINGLE_OP, TMP_FREG1, src, 0)));
+	FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_S32_F32, op & SLJIT_F32_OP, TMP_FREG1, src, 0)));
 
 	if (dst == SLJIT_UNUSED)
 		return SLJIT_SUCCESS;
@@ -2125,11 +2125,11 @@ static SLJIT_INLINE sljit_si sljit_emit_
 	return emit_fop_mem(compiler, 0, TMP_FREG1, dst, dstw);
 }
 
-static SLJIT_INLINE sljit_si sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_si op,
-	sljit_si dst, sljit_sw dstw,
-	sljit_si src, sljit_sw srcw)
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 dst, sljit_sw dstw,
+	sljit_s32 src, sljit_sw srcw)
 {
-	sljit_si dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
+	sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
 
 	if (FAST_IS_REG(src))
 		FAIL_IF(push_inst(compiler, VMOV | RD(src) | (TMP_FREG1 << 16)));
@@ -2142,85 +2142,85 @@ static SLJIT_INLINE sljit_si sljit_emit_
 		FAIL_IF(push_inst(compiler, VMOV | RD(TMP_REG1) | (TMP_FREG1 << 16)));
 	}
 
-	FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F32_S32, op & SLJIT_SINGLE_OP, dst_r, TMP_FREG1, 0)));
+	FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F32_S32, op & SLJIT_F32_OP, dst_r, TMP_FREG1, 0)));
 
 	if (dst & SLJIT_MEM)
-		return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw);
+		return emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw);
 	return SLJIT_SUCCESS;
 }
 
-static SLJIT_INLINE sljit_si sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_si op,
-	sljit_si src1, sljit_sw src1w,
-	sljit_si src2, sljit_sw src2w)
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 src1, sljit_sw src1w,
+	sljit_s32 src2, sljit_sw src2w)
 {
 	if (src1 & SLJIT_MEM) {
-		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src1, src1w));
+		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w));
 		src1 = TMP_FREG1;
 	}
 
 	if (src2 & SLJIT_MEM) {
-		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src2, src2w));
+		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w));
 		src2 = TMP_FREG2;
 	}
 
-	FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCMP_F32, op & SLJIT_SINGLE_OP, src1, src2, 0)));
+	FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCMP_F32, op & SLJIT_F32_OP, src1, src2, 0)));
 	return push_inst(compiler, VMRS);
 }
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op,
-	sljit_si dst, sljit_sw dstw,
-	sljit_si src, sljit_sw srcw)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 dst, sljit_sw dstw,
+	sljit_s32 src, sljit_sw srcw)
 {
-	sljit_si dst_r;
+	sljit_s32 dst_r;
 
 	CHECK_ERROR();
 	compiler->cache_arg = 0;
 	compiler->cache_argw = 0;
-	if (GET_OPCODE(op) != SLJIT_CONVD_FROMS)
-		op ^= SLJIT_SINGLE_OP;
+	if (GET_OPCODE(op) != SLJIT_CONV_F64_FROM_F32)
+		op ^= SLJIT_F32_OP;
 
-	SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100), float_transfer_bit_error);
+	SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100), float_transfer_bit_error);
 	SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
 
 	dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
 
 	if (src & SLJIT_MEM) {
-		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, dst_r, src, srcw));
+		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, dst_r, src, srcw));
 		src = dst_r;
 	}
 
 	switch (GET_OPCODE(op)) {
-	case SLJIT_DMOV:
+	case SLJIT_MOV_F64:
 		if (src != dst_r) {
 			if (dst_r != TMP_FREG1)
-				FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32, op & SLJIT_SINGLE_OP, dst_r, src, 0)));
+				FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
 			else
 				dst_r = src;
 		}
 		break;
-	case SLJIT_DNEG:
-		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VNEG_F32, op & SLJIT_SINGLE_OP, dst_r, src, 0)));
+	case SLJIT_NEG_F64:
+		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VNEG_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
 		break;
-	case SLJIT_DABS:
-		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VABS_F32, op & SLJIT_SINGLE_OP, dst_r, src, 0)));
+	case SLJIT_ABS_F64:
+		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VABS_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
 		break;
-	case SLJIT_CONVD_FROMS:
-		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F64_F32, op & SLJIT_SINGLE_OP, dst_r, src, 0)));
-		op ^= SLJIT_SINGLE_OP;
+	case SLJIT_CONV_F64_FROM_F32:
+		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F64_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
+		op ^= SLJIT_F32_OP;
 		break;
 	}
 
 	if (dst & SLJIT_MEM)
-		return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), dst_r, dst, dstw);
+		return emit_fop_mem(compiler, (op & SLJIT_F32_OP), dst_r, dst, dstw);
 	return SLJIT_SUCCESS;
 }
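
A note on the SLJIT_F32_OP handling visible above: the compile-time assert pins SLJIT_F32_OP to 0x100, which lines up with the VFP precision ("sz") bit of the data-processing encodings, and the op ^= SLJIT_F32_OP step inverts the flag so that OR-ing (op & SLJIT_F32_OP) onto one of the *_F32 base opcodes selects the double-precision form whenever the original op was a 64-bit one. A minimal sketch of that trick; the encoding constant is illustrative:

#include <stdint.h>

#define F32_OP_BIT   0x100u        /* stand-in for SLJIT_F32_OP               */
#define VADD_F32_ENC 0xee300a00u   /* VADD.F32 base; setting bit 8 gives .F64 */

/* After the XOR, the bit is set exactly for the 64-bit variants, so a
   single OR picks the precision. */
static uint32_t select_precision(uint32_t base_f32_enc, uint32_t op)
{
	return base_f32_enc | (op & F32_OP_BIT);
}
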
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op,
-	sljit_si dst, sljit_sw dstw,
-	sljit_si src1, sljit_sw src1w,
-	sljit_si src2, sljit_sw src2w)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 dst, sljit_sw dstw,
+	sljit_s32 src1, sljit_sw src1w,
+	sljit_s32 src2, sljit_sw src2w)
 {
-	sljit_si dst_r;
+	sljit_s32 dst_r;
 
 	CHECK_ERROR();
 	CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
@@ -2230,40 +2230,40 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 
 	compiler->cache_arg = 0;
 	compiler->cache_argw = 0;
-	op ^= SLJIT_SINGLE_OP;
+	op ^= SLJIT_F32_OP;
 
 	dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
 
 	if (src2 & SLJIT_MEM) {
-		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src2, src2w));
+		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w));
 		src2 = TMP_FREG2;
 	}
 
 	if (src1 & SLJIT_MEM) {
-		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src1, src1w));
+		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w));
 		src1 = TMP_FREG1;
 	}
 
 	switch (GET_OPCODE(op)) {
-	case SLJIT_DADD:
-		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VADD_F32, op & SLJIT_SINGLE_OP, dst_r, src2, src1)));
+	case SLJIT_ADD_F64:
+		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VADD_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
 		break;
 
-	case SLJIT_DSUB:
-		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VSUB_F32, op & SLJIT_SINGLE_OP, dst_r, src2, src1)));
+	case SLJIT_SUB_F64:
+		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VSUB_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
 		break;
 
-	case SLJIT_DMUL:
-		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMUL_F32, op & SLJIT_SINGLE_OP, dst_r, src2, src1)));
+	case SLJIT_MUL_F64:
+		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMUL_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
 		break;
 
-	case SLJIT_DDIV:
-		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VDIV_F32, op & SLJIT_SINGLE_OP, dst_r, src2, src1)));
+	case SLJIT_DIV_F64:
+		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VDIV_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
 		break;
 	}
 
 	if (dst_r == TMP_FREG1)
-		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw));
+		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw));
 
 	return SLJIT_SUCCESS;
 }
@@ -2276,7 +2276,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 /*  Other instructions                                                   */
 /* --------------------------------------------------------------------- */
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
 {
 	CHECK_ERROR();
 	CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
@@ -2299,7 +2299,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 	return getput_arg(compiler, WORD_DATA, TMP_REG2, dst, dstw, 0, 0);
 }
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw)
 {
 	CHECK_ERROR();
 	CHECK(check_sljit_emit_fast_return(compiler, src, srcw));
@@ -2326,33 +2326,33 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 /*  Conditional instructions                                             */
 /* --------------------------------------------------------------------- */
 
-static sljit_uw get_cc(sljit_si type)
+static sljit_uw get_cc(sljit_s32 type)
 {
 	switch (type) {
 	case SLJIT_EQUAL:
 	case SLJIT_MUL_NOT_OVERFLOW:
-	case SLJIT_D_EQUAL:
+	case SLJIT_EQUAL_F64:
 		return 0x00000000;
 
 	case SLJIT_NOT_EQUAL:
 	case SLJIT_MUL_OVERFLOW:
-	case SLJIT_D_NOT_EQUAL:
+	case SLJIT_NOT_EQUAL_F64:
 		return 0x10000000;
 
 	case SLJIT_LESS:
-	case SLJIT_D_LESS:
+	case SLJIT_LESS_F64:
 		return 0x30000000;
 
 	case SLJIT_GREATER_EQUAL:
-	case SLJIT_D_GREATER_EQUAL:
+	case SLJIT_GREATER_EQUAL_F64:
 		return 0x20000000;
 
 	case SLJIT_GREATER:
-	case SLJIT_D_GREATER:
+	case SLJIT_GREATER_F64:
 		return 0x80000000;
 
 	case SLJIT_LESS_EQUAL:
-	case SLJIT_D_LESS_EQUAL:
+	case SLJIT_LESS_EQUAL_F64:
 		return 0x90000000;
 
 	case SLJIT_SIG_LESS:
@@ -2368,11 +2368,11 @@ static sljit_uw get_cc(sljit_si type)
 		return 0xd0000000;
 
 	case SLJIT_OVERFLOW:
-	case SLJIT_D_UNORDERED:
+	case SLJIT_UNORDERED_F64:
 		return 0x60000000;
 
 	case SLJIT_NOT_OVERFLOW:
-	case SLJIT_D_ORDERED:
+	case SLJIT_ORDERED_F64:
 		return 0x70000000;
 
 	default:
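
The constants returned by get_cc() above are ARM condition codes shifted into the instruction's top nibble (bits 31..28): 0x0 = EQ, 0x1 = NE, 0x2 = CS/HS, 0x3 = CC/LO, 0x6 = VS, 0x7 = VC, 0x8 = HI, 0x9 = LS, 0xd = LE, and so on. A sketch of how such a value is typically combined with an encoding (illustrative helper, not the sljit emitters):

#include <stdint.h>

/* Bits 31..28 of an ARM instruction hold its condition; replace the
   default AL (0xe) nibble with the one get_cc() produced. */
static uint32_t make_conditional(uint32_t inst, uint32_t cc)
{
	return (inst & 0x0fffffffu) | cc;
}
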
@@ -2397,7 +2397,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_la
 	return label;
 }
 
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type)
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type)
 {
 	struct sljit_jump *jump;
 
@@ -2438,7 +2438,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_ju
 	return jump;
 }
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
 {
 	struct sljit_jump *jump;
 
@@ -2475,12 +2475,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 	return SLJIT_SUCCESS;
 }
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op,
-	sljit_si dst, sljit_sw dstw,
-	sljit_si src, sljit_sw srcw,
-	sljit_si type)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 dst, sljit_sw dstw,
+	sljit_s32 src, sljit_sw srcw,
+	sljit_s32 type)
 {
-	sljit_si dst_r, flags = GET_ALL_FLAGS(op);
+	sljit_s32 dst_r, flags = GET_ALL_FLAGS(op);
 	sljit_uw cc, ins;
 
 	CHECK_ERROR();
@@ -2528,10 +2528,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_
 	return (flags & SLJIT_SET_E) ? push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, SET_FLAGS, TMP_REG1, SLJIT_UNUSED, RM(dst_r))) : SLJIT_SUCCESS;
 }
 
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw init_value)
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
 {
 	struct sljit_const *const_;
-	sljit_si reg;
+	sljit_s32 reg;
 
 	CHECK_ERROR_PTR();
 	CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value));


