Posted to commits@trafficserver.apache.org by zw...@apache.org on 2015/07/23 13:14:00 UTC

[03/43] trafficserver git commit: TS-3783 TS-3030 Add luajit v2.0.4 as a subtree

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/vm_x86.dasc
----------------------------------------------------------------------
diff --git a/lib/luajit/src/vm_x86.dasc b/lib/luajit/src/vm_x86.dasc
new file mode 100644
index 0000000..6cdb8cb
--- /dev/null
+++ b/lib/luajit/src/vm_x86.dasc
@@ -0,0 +1,6377 @@
+|// Low-level VM code for x86 CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+|
+|.if P64
+|.arch x64
+|.else
+|.arch x86
+|.endif
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|//-----------------------------------------------------------------------
+|
+|.if P64
+|.define X64, 1
+|.define SSE, 1
+|.if WIN
+|.define X64WIN, 1
+|.endif
+|.endif
+|
+|// Fixed register assignments for the interpreter.
+|// This is very fragile and has many dependencies. Caveat emptor.
+|.define BASE,		edx		// Not C callee-save, refetched anyway.
+|.if not X64
+|.define KBASE,		edi		// Must be C callee-save.
+|.define KBASEa,	KBASE
+|.define PC,		esi		// Must be C callee-save.
+|.define PCa,		PC
+|.define DISPATCH,	ebx		// Must be C callee-save.
+|.elif X64WIN
+|.define KBASE,		edi		// Must be C callee-save.
+|.define KBASEa,	rdi
+|.define PC,		esi		// Must be C callee-save.
+|.define PCa,		rsi
+|.define DISPATCH,	ebx		// Must be C callee-save.
+|.else
+|.define KBASE,		r15d		// Must be C callee-save.
+|.define KBASEa,	r15
+|.define PC,		ebx		// Must be C callee-save.
+|.define PCa,		rbx
+|.define DISPATCH,	r14d		// Must be C callee-save.
+|.endif
+|
+|.define RA,		ecx
+|.define RAH,		ch
+|.define RAL,		cl
+|.define RB,		ebp		// Must be ebp (C callee-save).
+|.define RC,		eax		// Must be eax.
+|.define RCW,		ax
+|.define RCH,		ah
+|.define RCL,		al
+|.define OP,		RB
+|.define RD,		RC
+|.define RDW,		RCW
+|.define RDL,		RCL
+|.if X64
+|.define RAa, rcx
+|.define RBa, rbp
+|.define RCa, rax
+|.define RDa, rax
+|.else
+|.define RAa, RA
+|.define RBa, RB
+|.define RCa, RC
+|.define RDa, RD
+|.endif
+|
+|.if not X64
+|.define FCARG1,	ecx		// x86 fastcall arguments.
+|.define FCARG2,	edx
+|.elif X64WIN
+|.define CARG1,		rcx		// x64/WIN64 C call arguments.
+|.define CARG2,		rdx
+|.define CARG3,		r8
+|.define CARG4,		r9
+|.define CARG1d,	ecx
+|.define CARG2d,	edx
+|.define CARG3d,	r8d
+|.define CARG4d,	r9d
+|.define FCARG1,	CARG1d		// Upwards compatible to x86 fastcall.
+|.define FCARG2,	CARG2d
+|.else
+|.define CARG1,		rdi		// x64/POSIX C call arguments.
+|.define CARG2,		rsi
+|.define CARG3,		rdx
+|.define CARG4,		rcx
+|.define CARG5,		r8
+|.define CARG6,		r9
+|.define CARG1d,	edi
+|.define CARG2d,	esi
+|.define CARG3d,	edx
+|.define CARG4d,	ecx
+|.define CARG5d,	r8d
+|.define CARG6d,	r9d
+|.define FCARG1,	CARG1d		// Simulate x86 fastcall.
+|.define FCARG2,	CARG2d
+|.endif
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L,		lua_State
+|.type GL,		global_State
+|.type TVALUE,		TValue
+|.type GCOBJ,		GCobj
+|.type STR,		GCstr
+|.type TAB,		GCtab
+|.type LFUNC,		GCfuncL
+|.type CFUNC,		GCfuncC
+|.type PROTO,		GCproto
+|.type UPVAL,		GCupval
+|.type NODE,		Node
+|.type NARGS,		int
+|.type TRACE,		GCtrace
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|//-----------------------------------------------------------------------
+|.if not X64		// x86 stack layout.
+|
+|.define CFRAME_SPACE,	aword*7			// Delta for esp (see <--).
+|.macro saveregs_
+|  push edi; push esi; push ebx
+|  sub esp, CFRAME_SPACE
+|.endmacro
+|.macro saveregs
+|  push ebp; saveregs_
+|.endmacro
+|.macro restoreregs
+|  add esp, CFRAME_SPACE
+|  pop ebx; pop esi; pop edi; pop ebp
+|.endmacro
+|
+|.define SAVE_ERRF,	aword [esp+aword*15]	// vm_pcall/vm_cpcall only.
+|.define SAVE_NRES,	aword [esp+aword*14]
+|.define SAVE_CFRAME,	aword [esp+aword*13]
+|.define SAVE_L,	aword [esp+aword*12]
+|//----- 16 byte aligned, ^^^ arguments from C caller
+|.define SAVE_RET,	aword [esp+aword*11]	//<-- esp entering interpreter.
+|.define SAVE_R4,	aword [esp+aword*10]
+|.define SAVE_R3,	aword [esp+aword*9]
+|.define SAVE_R2,	aword [esp+aword*8]
+|//----- 16 byte aligned
+|.define SAVE_R1,	aword [esp+aword*7]	//<-- esp after register saves.
+|.define SAVE_PC,	aword [esp+aword*6]
+|.define TMP2,		aword [esp+aword*5]
+|.define TMP1,		aword [esp+aword*4]
+|//----- 16 byte aligned
+|.define ARG4,		aword [esp+aword*3]
+|.define ARG3,		aword [esp+aword*2]
+|.define ARG2,		aword [esp+aword*1]
+|.define ARG1,		aword [esp]		//<-- esp while in interpreter.
+|//----- 16 byte aligned, ^^^ arguments for C callee
+|
+|// FPARGx overlaps ARGx and ARG(x+1) on x86.
+|.define FPARG3,	qword [esp+qword*1]
+|.define FPARG1,	qword [esp]
+|// TMPQ overlaps TMP1/TMP2. ARG5/MULTRES overlap TMP1/TMP2 (and TMPQ).
+|.define TMPQ,		qword [esp+aword*4]
+|.define TMP3,		ARG4
+|.define ARG5,		TMP1
+|.define TMPa,		TMP1
+|.define MULTRES,	TMP2
+|
+|// Arguments for vm_call and vm_pcall.
+|.define INARG_BASE,	SAVE_CFRAME		// Overwritten by SAVE_CFRAME!
+|
+|// Arguments for vm_cpcall.
+|.define INARG_CP_CALL,	SAVE_ERRF
+|.define INARG_CP_UD,	SAVE_NRES
+|.define INARG_CP_FUNC,	SAVE_CFRAME
+|
+|//-----------------------------------------------------------------------
+|.elif X64WIN		// x64/Windows stack layout
+|
+|.define CFRAME_SPACE,	aword*5			// Delta for rsp (see <--).
+|.macro saveregs_
+|  push rdi; push rsi; push rbx
+|  sub rsp, CFRAME_SPACE
+|.endmacro
+|.macro saveregs
+|  push rbp; saveregs_
+|.endmacro
+|.macro restoreregs
+|  add rsp, CFRAME_SPACE
+|  pop rbx; pop rsi; pop rdi; pop rbp
+|.endmacro
+|
+|.define SAVE_CFRAME,	aword [rsp+aword*13]
+|.define SAVE_PC,	dword [rsp+dword*25]
+|.define SAVE_L,	dword [rsp+dword*24]
+|.define SAVE_ERRF,	dword [rsp+dword*23]
+|.define SAVE_NRES,	dword [rsp+dword*22]
+|.define TMP2,		dword [rsp+dword*21]
+|.define TMP1,		dword [rsp+dword*20]
+|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by interpreter
+|.define SAVE_RET,	aword [rsp+aword*9]	//<-- rsp entering interpreter.
+|.define SAVE_R4,	aword [rsp+aword*8]
+|.define SAVE_R3,	aword [rsp+aword*7]
+|.define SAVE_R2,	aword [rsp+aword*6]
+|.define SAVE_R1,	aword [rsp+aword*5]	//<-- rsp after register saves.
+|.define ARG5,		aword [rsp+aword*4]
+|.define CSAVE_4,	aword [rsp+aword*3]
+|.define CSAVE_3,	aword [rsp+aword*2]
+|.define CSAVE_2,	aword [rsp+aword*1]
+|.define CSAVE_1,	aword [rsp]		//<-- rsp while in interpreter.
+|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by callee
+|
+|// TMPQ overlaps TMP1/TMP2. MULTRES overlaps TMP2 (and TMPQ).
+|.define TMPQ,		qword [rsp+aword*10]
+|.define MULTRES,	TMP2
+|.define TMPa,		ARG5
+|.define ARG5d,		dword [rsp+aword*4]
+|.define TMP3,		ARG5d
+|
+|//-----------------------------------------------------------------------
+|.else			// x64/POSIX stack layout
+|
+|.define CFRAME_SPACE,	aword*5			// Delta for rsp (see <--).
+|.macro saveregs_
+|  push rbx; push r15; push r14
+|  sub rsp, CFRAME_SPACE
+|.endmacro
+|.macro saveregs
+|  push rbp; saveregs_
+|.endmacro
+|.macro restoreregs
+|  add rsp, CFRAME_SPACE
+|  pop r14; pop r15; pop rbx; pop rbp
+|.endmacro
+|
+|//----- 16 byte aligned,
+|.define SAVE_RET,	aword [rsp+aword*9]	//<-- rsp entering interpreter.
+|.define SAVE_R4,	aword [rsp+aword*8]
+|.define SAVE_R3,	aword [rsp+aword*7]
+|.define SAVE_R2,	aword [rsp+aword*6]
+|.define SAVE_R1,	aword [rsp+aword*5]	//<-- rsp after register saves.
+|.define SAVE_CFRAME,	aword [rsp+aword*4]
+|.define SAVE_PC,	dword [rsp+dword*7]
+|.define SAVE_L,	dword [rsp+dword*6]
+|.define SAVE_ERRF,	dword [rsp+dword*5]
+|.define SAVE_NRES,	dword [rsp+dword*4]
+|.define TMPa,		aword [rsp+aword*1]
+|.define TMP2,		dword [rsp+dword*1]
+|.define TMP1,		dword [rsp]		//<-- rsp while in interpreter.
+|//----- 16 byte aligned
+|
+|// TMPQ overlaps TMP1/TMP2. MULTRES overlaps TMP2 (and TMPQ).
+|.define TMPQ,		qword [rsp]
+|.define TMP3,		dword [rsp+aword*1]
+|.define MULTRES,	TMP2
+|
+|.endif
+|
+|//-----------------------------------------------------------------------
+|
+|// Instruction headers.
+|.macro ins_A; .endmacro
+|.macro ins_AD; .endmacro
+|.macro ins_AJ; .endmacro
+|.macro ins_ABC; movzx RB, RCH; movzx RC, RCL; .endmacro
+|.macro ins_AB_; movzx RB, RCH; .endmacro
+|.macro ins_A_C; movzx RC, RCL; .endmacro
+|.macro ins_AND; not RDa; .endmacro
+|
+|// Instruction decode+dispatch. Carefully tuned (nope, lodsd is not faster).
+|.macro ins_NEXT
+|  mov RC, [PC]
+|  movzx RA, RCH
+|  movzx OP, RCL
+|  add PC, 4
+|  shr RC, 16
+|.if X64
+|  jmp aword [DISPATCH+OP*8]
+|.else
+|  jmp aword [DISPATCH+OP*4]
+|.endif
+|.endmacro
+|
+|// Instruction footer.
+|.if 1
+|  // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+|  .define ins_next, ins_NEXT
+|  .define ins_next_, ins_NEXT
+|.else
+|  // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+|  // Affects only certain kinds of benchmarks (and only with -j off).
+|  // Around 10%-30% slower on Core2, a lot more slower on P4.
+|  .macro ins_next
+|    jmp ->ins_next
+|  .endmacro
+|  .macro ins_next_
+|  ->ins_next:
+|    ins_NEXT
+|  .endmacro
+|.endif
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+|  // BASE = new base, RB = LFUNC, RD = nargs+1, [BASE-4] = PC
+|  mov PC, LFUNC:RB->pc
+|  mov RA, [PC]
+|  movzx OP, RAL
+|  movzx RA, RAH
+|  add PC, 4
+|.if X64
+|  jmp aword [DISPATCH+OP*8]
+|.else
+|  jmp aword [DISPATCH+OP*4]
+|.endif
+|.endmacro
+|
+|.macro ins_call
+|  // BASE = new base, RB = LFUNC, RD = nargs+1
+|  mov [BASE-4], PC
+|  ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Macros to test operand types.
+|.macro checktp, reg, tp;  cmp dword [BASE+reg*8+4], tp; .endmacro
+|.macro checknum, reg, target; checktp reg, LJ_TISNUM; jae target; .endmacro
+|.macro checkint, reg, target; checktp reg, LJ_TISNUM; jne target; .endmacro
+|.macro checkstr, reg, target; checktp reg, LJ_TSTR; jne target; .endmacro
+|.macro checktab, reg, target; checktp reg, LJ_TTAB; jne target; .endmacro
+|
+|// These operands must be used with movzx.
+|.define PC_OP, byte [PC-4]
+|.define PC_RA, byte [PC-3]
+|.define PC_RB, byte [PC-1]
+|.define PC_RC, byte [PC-2]
+|.define PC_RD, word [PC-2]
+|
+|.macro branchPC, reg
+|  lea PC, [PC+reg*4-BCBIAS_J*4]
+|.endmacro
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field)	(GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field)	(GG_DISP2J + (int)offsetof(jit_State, field))
+|
+#define PC2PROTO(field)  ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
+|// Decrement hashed hotcount and trigger trace recorder if zero.
+|.macro hotloop, reg
+|  mov reg, PC
+|  shr reg, 1
+|  and reg, HOTCOUNT_PCMASK
+|  sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_LOOP
+|  jb ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall, reg
+|  mov reg, PC
+|  shr reg, 1
+|  and reg, HOTCOUNT_PCMASK
+|  sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_CALL
+|  jb ->vm_hotcall
+|.endmacro
+|
+|// Set current VM state.
+|.macro set_vmstate, st
+|  mov dword [DISPATCH+DISPATCH_GL(vmstate)], ~LJ_VMST_..st
+|.endmacro
+|
+|// x87 compares.
+|.macro fcomparepp			// Compare and pop st0 >< st1.
+|  fucomip st1
+|  fpop
+|.endmacro
+|
+|.macro fdup; fld st0; .endmacro
+|.macro fpop1; fstp st1; .endmacro
+|
+|// Synthesize SSE FP constants.
+|.macro sseconst_abs, reg, tmp		// Synthesize abs mask.
+|.if X64
+|  mov64 tmp, U64x(7fffffff,ffffffff); movd reg, tmp
+|.else
+|  pxor reg, reg; pcmpeqd reg, reg; psrlq reg, 1
+|.endif
+|.endmacro
+|
+|.macro sseconst_hi, reg, tmp, val	// Synthesize hi-32 bit const.
+|.if X64
+|  mov64 tmp, U64x(val,00000000); movd reg, tmp
+|.else
+|  mov tmp, 0x .. val; movd reg, tmp; pshufd reg, reg, 0x51
+|.endif
+|.endmacro
+|
+|.macro sseconst_sign, reg, tmp		// Synthesize sign mask.
+|  sseconst_hi reg, tmp, 80000000
+|.endmacro
+|.macro sseconst_1, reg, tmp		// Synthesize 1.0.
+|  sseconst_hi reg, tmp, 3ff00000
+|.endmacro
+|.macro sseconst_m1, reg, tmp		// Synthesize -1.0.
+|  sseconst_hi reg, tmp, bff00000
+|.endmacro
+|.macro sseconst_2p52, reg, tmp		// Synthesize 2^52.
+|  sseconst_hi reg, tmp, 43300000
+|.endmacro
+|.macro sseconst_tobit, reg, tmp	// Synthesize 2^52 + 2^51.
+|  sseconst_hi reg, tmp, 43380000
+|.endmacro
+|
+|// Move table write barrier back. Overwrites reg.
+|.macro barrierback, tab, reg
+|  and byte tab->marked, (uint8_t)~LJ_GC_BLACK	// black2gray(tab)
+|  mov reg, [DISPATCH+DISPATCH_GL(gc.grayagain)]
+|  mov [DISPATCH+DISPATCH_GL(gc.grayagain)], tab
+|  mov tab->gclist, reg
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+  |.code_sub
+  |
+  |//-----------------------------------------------------------------------
+  |//-- Return handling ----------------------------------------------------
+  |//-----------------------------------------------------------------------
+  |
+  |->vm_returnp:
+  |  test PC, FRAME_P
+  |  jz ->cont_dispatch
+  |
+  |  // Return from pcall or xpcall fast func.
+  |  and PC, -8
+  |  sub BASE, PC			// Restore caller base.
+  |  lea RAa, [RA+PC-8]			// Rebase RA and prepend one result.
+  |  mov PC, [BASE-4]			// Fetch PC of previous frame.
+  |  // Prepending may overwrite the pcall frame, so do it at the end.
+  |  mov dword [BASE+RA+4], LJ_TTRUE	// Prepend true to results.
+  |
+  |->vm_returnc:
+  |  add RD, 1				// RD = nresults+1
+  |  jz ->vm_unwind_yield
+  |  mov MULTRES, RD
+  |  test PC, FRAME_TYPE
+  |  jz ->BC_RET_Z			// Handle regular return to Lua.
+  |
+  |->vm_return:
+  |  // BASE = base, RA = resultofs, RD = nresults+1 (= MULTRES), PC = return
+  |  xor PC, FRAME_C
+  |  test PC, FRAME_TYPE
+  |  jnz ->vm_returnp
+  |
+  |  // Return to C.
+  |  set_vmstate C
+  |  and PC, -8
+  |  sub PC, BASE
+  |  neg PC				// Previous base = BASE - delta.
+  |
+  |  sub RD, 1
+  |  jz >2
+  |1:  // Move results down.
+  |.if X64
+  |  mov RBa, [BASE+RA]
+  |  mov [BASE-8], RBa
+  |.else
+  |  mov RB, [BASE+RA]
+  |  mov [BASE-8], RB
+  |  mov RB, [BASE+RA+4]
+  |  mov [BASE-4], RB
+  |.endif
+  |  add BASE, 8
+  |  sub RD, 1
+  |  jnz <1
+  |2:
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, PC
+  |3:
+  |  mov RD, MULTRES
+  |  mov RA, SAVE_NRES			// RA = wanted nresults+1
+  |4:
+  |  cmp RA, RD
+  |  jne >6				// More/less results wanted?
+  |5:
+  |  sub BASE, 8
+  |  mov L:RB->top, BASE
+  |
+  |->vm_leave_cp:
+  |  mov RAa, SAVE_CFRAME		// Restore previous C frame.
+  |  mov L:RB->cframe, RAa
+  |  xor eax, eax			// Ok return status for vm_pcall.
+  |
+  |->vm_leave_unw:
+  |  restoreregs
+  |  ret
+  |
+  |6:
+  |  jb >7				// Less results wanted?
+  |  // More results wanted. Check stack size and fill up results with nil.
+  |  cmp BASE, L:RB->maxstack
+  |  ja >8
+  |  mov dword [BASE-4], LJ_TNIL
+  |  add BASE, 8
+  |  add RD, 1
+  |  jmp <4
+  |
+  |7:  // Less results wanted.
+  |  test RA, RA
+  |  jz <5				// But check for LUA_MULTRET+1.
+  |  sub RA, RD				// Negative result!
+  |  lea BASE, [BASE+RA*8]		// Correct top.
+  |  jmp <5
+  |
+  |8:  // Corner case: need to grow stack for filling up results.
+  |  // This can happen if:
+  |  // - A C function grows the stack (a lot).
+  |  // - The GC shrinks the stack in between.
+  |  // - A return back from a lua_call() with (high) nresults adjustment.
+  |  mov L:RB->top, BASE		// Save current top held in BASE (yes).
+  |  mov MULTRES, RD			// Need to fill only remainder with nil.
+  |  mov FCARG2, RA
+  |  mov FCARG1, L:RB
+  |  call extern lj_state_growstack@8	// (lua_State *L, int n)
+  |  mov BASE, L:RB->top		// Need the (realloced) L->top in BASE.
+  |  jmp <3
+  |
+  |->vm_unwind_yield:
+  |  mov al, LUA_YIELD
+  |  jmp ->vm_unwind_c_eh
+  |
+  |->vm_unwind_c@8:			// Unwind C stack, return from vm_pcall.
+  |  // (void *cframe, int errcode)
+  |.if X64
+  |  mov eax, CARG2d			// Error return status for vm_pcall.
+  |  mov rsp, CARG1
+  |.else
+  |  mov eax, FCARG2			// Error return status for vm_pcall.
+  |  mov esp, FCARG1
+  |.endif
+  |->vm_unwind_c_eh:			// Landing pad for external unwinder.
+  |  mov L:RB, SAVE_L
+  |  mov GL:RB, L:RB->glref
+  |  mov dword GL:RB->vmstate, ~LJ_VMST_C
+  |  jmp ->vm_leave_unw
+  |
+  |->vm_unwind_rethrow:
+  |.if X64 and not X64WIN
+  |  mov FCARG1, SAVE_L
+  |  mov FCARG2, eax
+  |  restoreregs
+  |  jmp extern lj_err_throw@8		// (lua_State *L, int errcode)
+  |.endif
+  |
+  |->vm_unwind_ff@4:			// Unwind C stack, return from ff pcall.
+  |  // (void *cframe)
+  |.if X64
+  |  and CARG1, CFRAME_RAWMASK
+  |  mov rsp, CARG1
+  |.else
+  |  and FCARG1, CFRAME_RAWMASK
+  |  mov esp, FCARG1
+  |.endif
+  |->vm_unwind_ff_eh:			// Landing pad for external unwinder.
+  |  mov L:RB, SAVE_L
+  |  mov RAa, -8			// Results start at BASE+RA = BASE-8.
+  |  mov RD, 1+1			// Really 1+2 results, incr. later.
+  |  mov BASE, L:RB->base
+  |  mov DISPATCH, L:RB->glref		// Setup pointer to dispatch table.
+  |  add DISPATCH, GG_G2DISP
+  |  mov PC, [BASE-4]			// Fetch PC of previous frame.
+  |  mov dword [BASE-4], LJ_TFALSE	// Prepend false to error message.
+  |  set_vmstate INTERP
+  |  jmp ->vm_returnc			// Increments RD/MULTRES and returns.
+  |
+  |//-----------------------------------------------------------------------
+  |//-- Grow stack for calls -----------------------------------------------
+  |//-----------------------------------------------------------------------
+  |
+  |->vm_growstack_c:			// Grow stack for C function.
+  |  mov FCARG2, LUA_MINSTACK
+  |  jmp >2
+  |
+  |->vm_growstack_v:			// Grow stack for vararg Lua function.
+  |  sub RD, 8
+  |  jmp >1
+  |
+  |->vm_growstack_f:			// Grow stack for fixarg Lua function.
+  |  // BASE = new base, RD = nargs+1, RB = L, PC = first PC
+  |  lea RD, [BASE+NARGS:RD*8-8]
+  |1:
+  |  movzx RA, byte [PC-4+PC2PROTO(framesize)]
+  |  add PC, 4				// Must point after first instruction.
+  |  mov L:RB->base, BASE
+  |  mov L:RB->top, RD
+  |  mov SAVE_PC, PC
+  |  mov FCARG2, RA
+  |2:
+  |  // RB = L, L->base = new base, L->top = top
+  |  mov FCARG1, L:RB
+  |  call extern lj_state_growstack@8	// (lua_State *L, int n)
+  |  mov BASE, L:RB->base
+  |  mov RD, L:RB->top
+  |  mov LFUNC:RB, [BASE-8]
+  |  sub RD, BASE
+  |  shr RD, 3
+  |  add NARGS:RD, 1
+  |  // BASE = new base, RB = LFUNC, RD = nargs+1
+  |  ins_callt				// Just retry the call.
+  |
+  |//-----------------------------------------------------------------------
+  |//-- Entry points into the assembler VM ---------------------------------
+  |//-----------------------------------------------------------------------
+  |
+  |->vm_resume:				// Setup C frame and resume thread.
+  |  // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+  |  saveregs
+  |.if X64
+  |  mov L:RB, CARG1d			// Caveat: CARG1d may be RA.
+  |  mov SAVE_L, CARG1d
+  |  mov RA, CARG2d
+  |.else
+  |  mov L:RB, SAVE_L
+  |  mov RA, INARG_BASE			// Caveat: overlaps SAVE_CFRAME!
+  |.endif
+  |  mov PC, FRAME_CP
+  |  xor RD, RD
+  |  lea KBASEa, [esp+CFRAME_RESUME]
+  |  mov DISPATCH, L:RB->glref		// Setup pointer to dispatch table.
+  |  add DISPATCH, GG_G2DISP
+  |  mov L:RB->cframe, KBASEa
+  |  mov SAVE_PC, RD			// Any value outside of bytecode is ok.
+  |  mov SAVE_CFRAME, RDa
+  |.if X64
+  |  mov SAVE_NRES, RD
+  |  mov SAVE_ERRF, RD
+  |.endif
+  |  cmp byte L:RB->status, RDL
+  |  je >3				// Initial resume (like a call).
+  |
+  |  // Resume after yield (like a return).
+  |  set_vmstate INTERP
+  |  mov byte L:RB->status, RDL
+  |  mov BASE, L:RB->base
+  |  mov RD, L:RB->top
+  |  sub RD, RA
+  |  shr RD, 3
+  |  add RD, 1				// RD = nresults+1
+  |  sub RA, BASE			// RA = resultofs
+  |  mov PC, [BASE-4]
+  |  mov MULTRES, RD
+  |  test PC, FRAME_TYPE
+  |  jz ->BC_RET_Z
+  |  jmp ->vm_return
+  |
+  |->vm_pcall:				// Setup protected C frame and enter VM.
+  |  // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+  |  saveregs
+  |  mov PC, FRAME_CP
+  |.if X64
+  |  mov SAVE_ERRF, CARG4d
+  |.endif
+  |  jmp >1
+  |
+  |->vm_call:				// Setup C frame and enter VM.
+  |  // (lua_State *L, TValue *base, int nres1)
+  |  saveregs
+  |  mov PC, FRAME_C
+  |
+  |1:  // Entry point for vm_pcall above (PC = ftype).
+  |.if X64
+  |  mov SAVE_NRES, CARG3d
+  |  mov L:RB, CARG1d			// Caveat: CARG1d may be RA.
+  |  mov SAVE_L, CARG1d
+  |  mov RA, CARG2d
+  |.else
+  |  mov L:RB, SAVE_L
+  |  mov RA, INARG_BASE			// Caveat: overlaps SAVE_CFRAME!
+  |.endif
+  |
+  |  mov KBASEa, L:RB->cframe		// Add our C frame to cframe chain.
+  |  mov SAVE_CFRAME, KBASEa
+  |  mov SAVE_PC, L:RB			// Any value outside of bytecode is ok.
+  |.if X64
+  |  mov L:RB->cframe, rsp
+  |.else
+  |  mov L:RB->cframe, esp
+  |.endif
+  |
+  |2:  // Entry point for vm_cpcall below (RA = base, RB = L, PC = ftype).
+  |  mov DISPATCH, L:RB->glref		// Setup pointer to dispatch table.
+  |  add DISPATCH, GG_G2DISP
+  |
+  |3:  // Entry point for vm_resume above (RA = base, RB = L, PC = ftype).
+  |  set_vmstate INTERP
+  |  mov BASE, L:RB->base		// BASE = old base (used in vmeta_call).
+  |  add PC, RA
+  |  sub PC, BASE			// PC = frame delta + frame type
+  |
+  |  mov RD, L:RB->top
+  |  sub RD, RA
+  |  shr NARGS:RD, 3
+  |  add NARGS:RD, 1			// RD = nargs+1
+  |
+  |->vm_call_dispatch:
+  |  mov LFUNC:RB, [RA-8]
+  |  cmp dword [RA-4], LJ_TFUNC
+  |  jne ->vmeta_call			// Ensure KBASE defined and != BASE.
+  |
+  |->vm_call_dispatch_f:
+  |  mov BASE, RA
+  |  ins_call
+  |  // BASE = new base, RB = func, RD = nargs+1, PC = caller PC
+  |
+  |->vm_cpcall:				// Setup protected C frame, call C.
+  |  // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+  |  saveregs
+  |.if X64
+  |  mov L:RB, CARG1d			// Caveat: CARG1d may be RA.
+  |  mov SAVE_L, CARG1d
+  |.else
+  |  mov L:RB, SAVE_L
+  |  // Caveat: INARG_CP_* and SAVE_CFRAME/SAVE_NRES/SAVE_ERRF overlap!
+  |  mov RC, INARG_CP_UD		// Get args before they are overwritten.
+  |  mov RA, INARG_CP_FUNC
+  |  mov BASE, INARG_CP_CALL
+  |.endif
+  |  mov SAVE_PC, L:RB			// Any value outside of bytecode is ok.
+  |
+  |  mov KBASE, L:RB->stack		// Compute -savestack(L, L->top).
+  |  sub KBASE, L:RB->top
+  |  mov SAVE_ERRF, 0			// No error function.
+  |  mov SAVE_NRES, KBASE		// Neg. delta means cframe w/o frame.
+  |  // Handler may change cframe_nres(L->cframe) or cframe_errfunc(L->cframe).
+  |
+  |.if X64
+  |  mov KBASEa, L:RB->cframe		// Add our C frame to cframe chain.
+  |  mov SAVE_CFRAME, KBASEa
+  |  mov L:RB->cframe, rsp
+  |
+  |  call CARG4			// (lua_State *L, lua_CFunction func, void *ud)
+  |.else
+  |  mov ARG3, RC			// Have to copy args downwards.
+  |  mov ARG2, RA
+  |  mov ARG1, L:RB
+  |
+  |  mov KBASE, L:RB->cframe		// Add our C frame to cframe chain.
+  |  mov SAVE_CFRAME, KBASE
+  |  mov L:RB->cframe, esp
+  |
+  |  call BASE			// (lua_State *L, lua_CFunction func, void *ud)
+  |.endif
+  |  // TValue * (new base) or NULL returned in eax (RC).
+  |  test RC, RC
+  |  jz ->vm_leave_cp			// No base? Just remove C frame.
+  |  mov RA, RC
+  |  mov PC, FRAME_CP
+  |  jmp <2				// Else continue with the call.
+  |
+  |//-----------------------------------------------------------------------
+  |//-- Metamethod handling ------------------------------------------------
+  |//-----------------------------------------------------------------------
+  |
+  |//-- Continuation dispatch ----------------------------------------------
+  |
+  |->cont_dispatch:
+  |  // BASE = meta base, RA = resultofs, RD = nresults+1 (also in MULTRES)
+  |  add RA, BASE
+  |  and PC, -8
+  |  mov RB, BASE
+  |  sub BASE, PC			// Restore caller BASE.
+  |  mov dword [RA+RD*8-4], LJ_TNIL	// Ensure one valid arg.
+  |  mov RC, RA				// ... in [RC]
+  |  mov PC, [RB-12]			// Restore PC from [cont|PC].
+  |.if X64
+  |  movsxd RAa, dword [RB-16]		// May be negative on WIN64 with debug.
+  |.if FFI
+  |  cmp RA, 1
+  |  jbe >1
+  |.endif
+  |  lea KBASEa, qword [=>0]
+  |  add RAa, KBASEa
+  |.else
+  |  mov RA, dword [RB-16]
+  |.if FFI
+  |  cmp RA, 1
+  |  jbe >1
+  |.endif
+  |.endif
+  |  mov LFUNC:KBASE, [BASE-8]
+  |  mov KBASE, LFUNC:KBASE->pc
+  |  mov KBASE, [KBASE+PC2PROTO(k)]
+  |  // BASE = base, RC = result, RB = meta base
+  |  jmp RAa				// Jump to continuation.
+  |
+  |.if FFI
+  |1:
+  |  je ->cont_ffi_callback		// cont = 1: return from FFI callback.
+  |  // cont = 0: Tail call from C function.
+  |  sub RB, BASE
+  |  shr RB, 3
+  |  lea RD, [RB-1]
+  |  jmp ->vm_call_tail
+  |.endif
+  |
+  |->cont_cat:				// BASE = base, RC = result, RB = mbase
+  |  movzx RA, PC_RB
+  |  sub RB, 16
+  |  lea RA, [BASE+RA*8]
+  |  sub RA, RB
+  |  je ->cont_ra
+  |  neg RA
+  |  shr RA, 3
+  |.if X64WIN
+  |  mov CARG3d, RA
+  |  mov L:CARG1d, SAVE_L
+  |  mov L:CARG1d->base, BASE
+  |  mov RCa, [RC]
+  |  mov [RB], RCa
+  |  mov CARG2d, RB
+  |.elif X64
+  |  mov L:CARG1d, SAVE_L
+  |  mov L:CARG1d->base, BASE
+  |  mov CARG3d, RA
+  |  mov RAa, [RC]
+  |  mov [RB], RAa
+  |  mov CARG2d, RB
+  |.else
+  |  mov ARG3, RA
+  |  mov RA, [RC+4]
+  |  mov RC, [RC]
+  |  mov [RB+4], RA
+  |  mov [RB], RC
+  |  mov ARG2, RB
+  |.endif
+  |  jmp ->BC_CAT_Z
+  |
+  |//-- Table indexing metamethods -----------------------------------------
+  |
+  |->vmeta_tgets:
+  |  mov TMP1, RC			// RC = GCstr *
+  |  mov TMP2, LJ_TSTR
+  |  lea RCa, TMP1			// Store temp. TValue in TMP1/TMP2.
+  |  cmp PC_OP, BC_GGET
+  |  jne >1
+  |  lea RA, [DISPATCH+DISPATCH_GL(tmptv)]  // Store fn->l.env in g->tmptv.
+  |  mov [RA], TAB:RB			// RB = GCtab *
+  |  mov dword [RA+4], LJ_TTAB
+  |  mov RB, RA
+  |  jmp >2
+  |
+  |->vmeta_tgetb:
+  |  movzx RC, PC_RC
+  |.if DUALNUM
+  |  mov TMP2, LJ_TISNUM
+  |  mov TMP1, RC
+  |.elif SSE
+  |  cvtsi2sd xmm0, RC
+  |  movsd TMPQ, xmm0
+  |.else
+  |  mov ARG4, RC
+  |  fild ARG4
+  |  fstp TMPQ
+  |.endif
+  |  lea RCa, TMPQ			// Store temp. TValue in TMPQ.
+  |  jmp >1
+  |
+  |->vmeta_tgetv:
+  |  movzx RC, PC_RC			// Reload TValue *k from RC.
+  |  lea RC, [BASE+RC*8]
+  |1:
+  |  movzx RB, PC_RB			// Reload TValue *t from RB.
+  |  lea RB, [BASE+RB*8]
+  |2:
+  |.if X64
+  |  mov L:CARG1d, SAVE_L
+  |  mov L:CARG1d->base, BASE		// Caveat: CARG2d/CARG3d may be BASE.
+  |  mov CARG2d, RB
+  |  mov CARG3, RCa			// May be 64 bit ptr to stack.
+  |  mov L:RB, L:CARG1d
+  |.else
+  |  mov ARG2, RB
+  |  mov L:RB, SAVE_L
+  |  mov ARG3, RC
+  |  mov ARG1, L:RB
+  |  mov L:RB->base, BASE
+  |.endif
+  |  mov SAVE_PC, PC
+  |  call extern lj_meta_tget		// (lua_State *L, TValue *o, TValue *k)
+  |  // TValue * (finished) or NULL (metamethod) returned in eax (RC).
+  |  mov BASE, L:RB->base
+  |  test RC, RC
+  |  jz >3
+  |->cont_ra:				// BASE = base, RC = result
+  |  movzx RA, PC_RA
+  |.if X64
+  |  mov RBa, [RC]
+  |  mov [BASE+RA*8], RBa
+  |.else
+  |  mov RB, [RC+4]
+  |  mov RC, [RC]
+  |  mov [BASE+RA*8+4], RB
+  |  mov [BASE+RA*8], RC
+  |.endif
+  |  ins_next
+  |
+  |3:  // Call __index metamethod.
+  |  // BASE = base, L->top = new base, stack = cont/func/t/k
+  |  mov RA, L:RB->top
+  |  mov [RA-12], PC			// [cont|PC]
+  |  lea PC, [RA+FRAME_CONT]
+  |  sub PC, BASE
+  |  mov LFUNC:RB, [RA-8]		// Guaranteed to be a function here.
+  |  mov NARGS:RD, 2+1			// 2 args for func(t, k).
+  |  jmp ->vm_call_dispatch_f
+  |
+  |//-----------------------------------------------------------------------
+  |
+  |->vmeta_tsets:
+  |  mov TMP1, RC			// RC = GCstr *
+  |  mov TMP2, LJ_TSTR
+  |  lea RCa, TMP1			// Store temp. TValue in TMP1/TMP2.
+  |  cmp PC_OP, BC_GSET
+  |  jne >1
+  |  lea RA, [DISPATCH+DISPATCH_GL(tmptv)]  // Store fn->l.env in g->tmptv.
+  |  mov [RA], TAB:RB			// RB = GCtab *
+  |  mov dword [RA+4], LJ_TTAB
+  |  mov RB, RA
+  |  jmp >2
+  |
+  |->vmeta_tsetb:
+  |  movzx RC, PC_RC
+  |.if DUALNUM
+  |  mov TMP2, LJ_TISNUM
+  |  mov TMP1, RC
+  |.elif SSE
+  |  cvtsi2sd xmm0, RC
+  |  movsd TMPQ, xmm0
+  |.else
+  |  mov ARG4, RC
+  |  fild ARG4
+  |  fstp TMPQ
+  |.endif
+  |  lea RCa, TMPQ			// Store temp. TValue in TMPQ.
+  |  jmp >1
+  |
+  |->vmeta_tsetv:
+  |  movzx RC, PC_RC			// Reload TValue *k from RC.
+  |  lea RC, [BASE+RC*8]
+  |1:
+  |  movzx RB, PC_RB			// Reload TValue *t from RB.
+  |  lea RB, [BASE+RB*8]
+  |2:
+  |.if X64
+  |  mov L:CARG1d, SAVE_L
+  |  mov L:CARG1d->base, BASE		// Caveat: CARG2d/CARG3d may be BASE.
+  |  mov CARG2d, RB
+  |  mov CARG3, RCa			// May be 64 bit ptr to stack.
+  |  mov L:RB, L:CARG1d
+  |.else
+  |  mov ARG2, RB
+  |  mov L:RB, SAVE_L
+  |  mov ARG3, RC
+  |  mov ARG1, L:RB
+  |  mov L:RB->base, BASE
+  |.endif
+  |  mov SAVE_PC, PC
+  |  call extern lj_meta_tset		// (lua_State *L, TValue *o, TValue *k)
+  |  // TValue * (finished) or NULL (metamethod) returned in eax (RC).
+  |  mov BASE, L:RB->base
+  |  test RC, RC
+  |  jz >3
+  |  // NOBARRIER: lj_meta_tset ensures the table is not black.
+  |  movzx RA, PC_RA
+  |.if X64
+  |  mov RBa, [BASE+RA*8]
+  |  mov [RC], RBa
+  |.else
+  |  mov RB, [BASE+RA*8+4]
+  |  mov RA, [BASE+RA*8]
+  |  mov [RC+4], RB
+  |  mov [RC], RA
+  |.endif
+  |->cont_nop:				// BASE = base, (RC = result)
+  |  ins_next
+  |
+  |3:  // Call __newindex metamethod.
+  |  // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+  |  mov RA, L:RB->top
+  |  mov [RA-12], PC			// [cont|PC]
+  |  movzx RC, PC_RA
+  |  // Copy value to third argument.
+  |.if X64
+  |  mov RBa, [BASE+RC*8]
+  |  mov [RA+16], RBa
+  |.else
+  |  mov RB, [BASE+RC*8+4]
+  |  mov RC, [BASE+RC*8]
+  |  mov [RA+20], RB
+  |  mov [RA+16], RC
+  |.endif
+  |  lea PC, [RA+FRAME_CONT]
+  |  sub PC, BASE
+  |  mov LFUNC:RB, [RA-8]		// Guaranteed to be a function here.
+  |  mov NARGS:RD, 3+1			// 3 args for func(t, k, v).
+  |  jmp ->vm_call_dispatch_f
+  |
+  |//-- Comparison metamethods ---------------------------------------------
+  |
+  |->vmeta_comp:
+  |.if X64
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, BASE		// Caveat: CARG2d/CARG3d == BASE.
+  |.if X64WIN
+  |  lea CARG3d, [BASE+RD*8]
+  |  lea CARG2d, [BASE+RA*8]
+  |.else
+  |  lea CARG2d, [BASE+RA*8]
+  |  lea CARG3d, [BASE+RD*8]
+  |.endif
+  |  mov CARG1d, L:RB			// Caveat: CARG1d/CARG4d == RA.
+  |  movzx CARG4d, PC_OP
+  |.else
+  |  movzx RB, PC_OP
+  |  lea RD, [BASE+RD*8]
+  |  lea RA, [BASE+RA*8]
+  |  mov ARG4, RB
+  |  mov L:RB, SAVE_L
+  |  mov ARG3, RD
+  |  mov ARG2, RA
+  |  mov ARG1, L:RB
+  |  mov L:RB->base, BASE
+  |.endif
+  |  mov SAVE_PC, PC
+  |  call extern lj_meta_comp	// (lua_State *L, TValue *o1, *o2, int op)
+  |  // 0/1 or TValue * (metamethod) returned in eax (RC).
+  |3:
+  |  mov BASE, L:RB->base
+  |  cmp RC, 1
+  |  ja ->vmeta_binop
+  |4:
+  |  lea PC, [PC+4]
+  |  jb >6
+  |5:
+  |  movzx RD, PC_RD
+  |  branchPC RD
+  |6:
+  |  ins_next
+  |
+  |->cont_condt:			// BASE = base, RC = result
+  |  add PC, 4
+  |  cmp dword [RC+4], LJ_TISTRUECOND	// Branch if result is true.
+  |  jb <5
+  |  jmp <6
+  |
+  |->cont_condf:			// BASE = base, RC = result
+  |  cmp dword [RC+4], LJ_TISTRUECOND	// Branch if result is false.
+  |  jmp <4
+  |
+  |->vmeta_equal:
+  |  sub PC, 4
+  |.if X64WIN
+  |  mov CARG3d, RD
+  |  mov CARG4d, RB
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, BASE		// Caveat: CARG2d == BASE.
+  |  mov CARG2d, RA
+  |  mov CARG1d, L:RB			// Caveat: CARG1d == RA.
+  |.elif X64
+  |  mov CARG2d, RA
+  |  mov CARG4d, RB			// Caveat: CARG4d == RA.
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, BASE		// Caveat: CARG3d == BASE.
+  |  mov CARG3d, RD
+  |  mov CARG1d, L:RB
+  |.else
+  |  mov ARG4, RB
+  |  mov L:RB, SAVE_L
+  |  mov ARG3, RD
+  |  mov ARG2, RA
+  |  mov ARG1, L:RB
+  |  mov L:RB->base, BASE
+  |.endif
+  |  mov SAVE_PC, PC
+  |  call extern lj_meta_equal	// (lua_State *L, GCobj *o1, *o2, int ne)
+  |  // 0/1 or TValue * (metamethod) returned in eax (RC).
+  |  jmp <3
+  |
+  |->vmeta_equal_cd:
+  |.if FFI
+  |  sub PC, 4
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, BASE
+  |  mov FCARG1, L:RB
+  |  mov FCARG2, dword [PC-4]
+  |  mov SAVE_PC, PC
+  |  call extern lj_meta_equal_cd@8	// (lua_State *L, BCIns ins)
+  |  // 0/1 or TValue * (metamethod) returned in eax (RC).
+  |  jmp <3
+  |.endif
+  |
+  |//-- Arithmetic metamethods ---------------------------------------------
+  |
+  |->vmeta_arith_vno:
+  |.if DUALNUM
+  |  movzx RB, PC_RB
+  |.endif
+  |->vmeta_arith_vn:
+  |  lea RC, [KBASE+RC*8]
+  |  jmp >1
+  |
+  |->vmeta_arith_nvo:
+  |.if DUALNUM
+  |  movzx RC, PC_RC
+  |.endif
+  |->vmeta_arith_nv:
+  |  lea RC, [KBASE+RC*8]
+  |  lea RB, [BASE+RB*8]
+  |  xchg RB, RC
+  |  jmp >2
+  |
+  |->vmeta_unm:
+  |  lea RC, [BASE+RD*8]
+  |  mov RB, RC
+  |  jmp >2
+  |
+  |->vmeta_arith_vvo:
+  |.if DUALNUM
+  |  movzx RB, PC_RB
+  |.endif
+  |->vmeta_arith_vv:
+  |  lea RC, [BASE+RC*8]
+  |1:
+  |  lea RB, [BASE+RB*8]
+  |2:
+  |  lea RA, [BASE+RA*8]
+  |.if X64WIN
+  |  mov CARG3d, RB
+  |  mov CARG4d, RC
+  |  movzx RC, PC_OP
+  |  mov ARG5d, RC
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, BASE		// Caveat: CARG2d == BASE.
+  |  mov CARG2d, RA
+  |  mov CARG1d, L:RB			// Caveat: CARG1d == RA.
+  |.elif X64
+  |  movzx CARG5d, PC_OP
+  |  mov CARG2d, RA
+  |  mov CARG4d, RC			// Caveat: CARG4d == RA.
+  |  mov L:CARG1d, SAVE_L
+  |  mov L:CARG1d->base, BASE		// Caveat: CARG3d == BASE.
+  |  mov CARG3d, RB
+  |  mov L:RB, L:CARG1d
+  |.else
+  |  mov ARG3, RB
+  |  mov L:RB, SAVE_L
+  |  mov ARG4, RC
+  |  movzx RC, PC_OP
+  |  mov ARG2, RA
+  |  mov ARG5, RC
+  |  mov ARG1, L:RB
+  |  mov L:RB->base, BASE
+  |.endif
+  |  mov SAVE_PC, PC
+  |  call extern lj_meta_arith	// (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+  |  // NULL (finished) or TValue * (metamethod) returned in eax (RC).
+  |  mov BASE, L:RB->base
+  |  test RC, RC
+  |  jz ->cont_nop
+  |
+  |  // Call metamethod for binary op.
+  |->vmeta_binop:
+  |  // BASE = base, RC = new base, stack = cont/func/o1/o2
+  |  mov RA, RC
+  |  sub RC, BASE
+  |  mov [RA-12], PC			// [cont|PC]
+  |  lea PC, [RC+FRAME_CONT]
+  |  mov NARGS:RD, 2+1			// 2 args for func(o1, o2).
+  |  jmp ->vm_call_dispatch
+  |
+  |->vmeta_len:
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, BASE
+  |  lea FCARG2, [BASE+RD*8]		// Caveat: FCARG2 == BASE
+  |  mov L:FCARG1, L:RB
+  |  mov SAVE_PC, PC
+  |  call extern lj_meta_len@8		// (lua_State *L, TValue *o)
+  |  // NULL (retry) or TValue * (metamethod) returned in eax (RC).
+  |  mov BASE, L:RB->base
+#if LJ_52
+  |  test RC, RC
+  |  jne ->vmeta_binop			// Binop call for compatibility.
+  |  movzx RD, PC_RD
+  |  mov TAB:FCARG1, [BASE+RD*8]
+  |  jmp ->BC_LEN_Z
+#else
+  |  jmp ->vmeta_binop			// Binop call for compatibility.
+#endif
+  |
+  |//-- Call metamethod ----------------------------------------------------
+  |
+  |->vmeta_call_ra:
+  |  lea RA, [BASE+RA*8+8]
+  |->vmeta_call:			// Resolve and call __call metamethod.
+  |  // BASE = old base, RA = new base, RC = nargs+1, PC = return
+  |  mov TMP2, RA			// Save RA, RC for us.
+  |  mov TMP1, NARGS:RD
+  |  sub RA, 8
+  |.if X64
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, BASE		// Caveat: CARG2d/CARG3d may be BASE.
+  |  mov CARG2d, RA
+  |  lea CARG3d, [RA+NARGS:RD*8]
+  |  mov CARG1d, L:RB			// Caveat: CARG1d may be RA.
+  |.else
+  |  lea RC, [RA+NARGS:RD*8]
+  |  mov L:RB, SAVE_L
+  |  mov ARG2, RA
+  |  mov ARG3, RC
+  |  mov ARG1, L:RB
+  |  mov L:RB->base, BASE		// This is the callers base!
+  |.endif
+  |  mov SAVE_PC, PC
+  |  call extern lj_meta_call	// (lua_State *L, TValue *func, TValue *top)
+  |  mov BASE, L:RB->base
+  |  mov RA, TMP2
+  |  mov NARGS:RD, TMP1
+  |  mov LFUNC:RB, [RA-8]
+  |  add NARGS:RD, 1
+  |  // This is fragile. L->base must not move, KBASE must always be defined.
+  |  cmp KBASE, BASE			// Continue with CALLT if flag set.
+  |  je ->BC_CALLT_Z
+  |  mov BASE, RA
+  |  ins_call				// Otherwise call resolved metamethod.
+  |
+  |//-- Argument coercion for 'for' statement ------------------------------
+  |
+  |->vmeta_for:
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, BASE
+  |  mov FCARG2, RA			// Caveat: FCARG2 == BASE
+  |  mov L:FCARG1, L:RB			// Caveat: FCARG1 == RA
+  |  mov SAVE_PC, PC
+  |  call extern lj_meta_for@8	// (lua_State *L, TValue *base)
+  |  mov BASE, L:RB->base
+  |  mov RC, [PC-4]
+  |  movzx RA, RCH
+  |  movzx OP, RCL
+  |  shr RC, 16
+  |.if X64
+  |  jmp aword [DISPATCH+OP*8+GG_DISP2STATIC]	// Retry FORI or JFORI.
+  |.else
+  |  jmp aword [DISPATCH+OP*4+GG_DISP2STATIC]	// Retry FORI or JFORI.
+  |.endif
+  |
+  |//-----------------------------------------------------------------------
+  |//-- Fast functions -----------------------------------------------------
+  |//-----------------------------------------------------------------------
+  |
+  |.macro .ffunc, name
+  |->ff_ .. name:
+  |.endmacro
+  |
+  |.macro .ffunc_1, name
+  |->ff_ .. name:
+  |  cmp NARGS:RD, 1+1;  jb ->fff_fallback
+  |.endmacro
+  |
+  |.macro .ffunc_2, name
+  |->ff_ .. name:
+  |  cmp NARGS:RD, 2+1;  jb ->fff_fallback
+  |.endmacro
+  |
+  |.macro .ffunc_n, name
+  |  .ffunc_1 name
+  |  cmp dword [BASE+4], LJ_TISNUM;  jae ->fff_fallback
+  |  fld qword [BASE]
+  |.endmacro
+  |
+  |.macro .ffunc_n, name, op
+  |  .ffunc_1 name
+  |  cmp dword [BASE+4], LJ_TISNUM;  jae ->fff_fallback
+  |  op
+  |  fld qword [BASE]
+  |.endmacro
+  |
+  |.macro .ffunc_nsse, name, op
+  |  .ffunc_1 name
+  |  cmp dword [BASE+4], LJ_TISNUM;  jae ->fff_fallback
+  |  op xmm0, qword [BASE]
+  |.endmacro
+  |
+  |.macro .ffunc_nsse, name
+  |  .ffunc_nsse name, movsd
+  |.endmacro
+  |
+  |.macro .ffunc_nn, name
+  |  .ffunc_2 name
+  |  cmp dword [BASE+4], LJ_TISNUM;  jae ->fff_fallback
+  |  cmp dword [BASE+12], LJ_TISNUM;  jae ->fff_fallback
+  |  fld qword [BASE]
+  |  fld qword [BASE+8]
+  |.endmacro
+  |
+  |.macro .ffunc_nnsse, name
+  |  .ffunc_2 name
+  |  cmp dword [BASE+4], LJ_TISNUM;  jae ->fff_fallback
+  |  cmp dword [BASE+12], LJ_TISNUM;  jae ->fff_fallback
+  |  movsd xmm0, qword [BASE]
+  |  movsd xmm1, qword [BASE+8]
+  |.endmacro
+  |
+  |.macro .ffunc_nnr, name
+  |  .ffunc_2 name
+  |  cmp dword [BASE+4], LJ_TISNUM;  jae ->fff_fallback
+  |  cmp dword [BASE+12], LJ_TISNUM;  jae ->fff_fallback
+  |  fld qword [BASE+8]
+  |  fld qword [BASE]
+  |.endmacro
+  |
+  |// Inlined GC threshold check. Caveat: uses label 1.
+  |.macro ffgccheck
+  |  mov RB, [DISPATCH+DISPATCH_GL(gc.total)]
+  |  cmp RB, [DISPATCH+DISPATCH_GL(gc.threshold)]
+  |  jb >1
+  |  call ->fff_gcstep
+  |1:
+  |.endmacro
+  |
+  |//-- Base library: checks -----------------------------------------------
+  |
+  |.ffunc_1 assert
+  |  mov RB, [BASE+4]
+  |  cmp RB, LJ_TISTRUECOND;  jae ->fff_fallback
+  |  mov PC, [BASE-4]
+  |  mov MULTRES, RD
+  |  mov [BASE-4], RB
+  |  mov RB, [BASE]
+  |  mov [BASE-8], RB
+  |  sub RD, 2
+  |  jz >2
+  |  mov RA, BASE
+  |1:
+  |  add RA, 8
+  |.if X64
+  |  mov RBa, [RA]
+  |  mov [RA-8], RBa
+  |.else
+  |  mov RB, [RA+4]
+  |  mov [RA-4], RB
+  |  mov RB, [RA]
+  |  mov [RA-8], RB
+  |.endif
+  |  sub RD, 1
+  |  jnz <1
+  |2:
+  |  mov RD, MULTRES
+  |  jmp ->fff_res_
+  |
+  |.ffunc_1 type
+  |  mov RB, [BASE+4]
+  |.if X64
+  |  mov RA, RB
+  |  sar RA, 15
+  |  cmp RA, -2
+  |  je >3
+  |.endif
+  |  mov RC, ~LJ_TNUMX
+  |  not RB
+  |  cmp RC, RB
+  |  cmova RC, RB
+  |2:
+  |  mov CFUNC:RB, [BASE-8]
+  |  mov STR:RC, [CFUNC:RB+RC*8+((char *)(&((GCfuncC *)0)->upvalue))]
+  |  mov PC, [BASE-4]
+  |  mov dword [BASE-4], LJ_TSTR
+  |  mov [BASE-8], STR:RC
+  |  jmp ->fff_res1
+  |.if X64
+  |3:
+  |  mov RC, ~LJ_TLIGHTUD
+  |  jmp <2
+  |.endif
+  |
+  |//-- Base library: getters and setters ---------------------------------
+  |
+  |.ffunc_1 getmetatable
+  |  mov RB, [BASE+4]
+  |  mov PC, [BASE-4]
+  |  cmp RB, LJ_TTAB;  jne >6
+  |1:  // Field metatable must be at same offset for GCtab and GCudata!
+  |  mov TAB:RB, [BASE]
+  |  mov TAB:RB, TAB:RB->metatable
+  |2:
+  |  test TAB:RB, TAB:RB
+  |  mov dword [BASE-4], LJ_TNIL
+  |  jz ->fff_res1
+  |  mov STR:RC, [DISPATCH+DISPATCH_GL(gcroot)+4*(GCROOT_MMNAME+MM_metatable)]
+  |  mov dword [BASE-4], LJ_TTAB	// Store metatable as default result.
+  |  mov [BASE-8], TAB:RB
+  |  mov RA, TAB:RB->hmask
+  |  and RA, STR:RC->hash
+  |  imul RA, #NODE
+  |  add NODE:RA, TAB:RB->node
+  |3:  // Rearranged logic, because we expect _not_ to find the key.
+  |  cmp dword NODE:RA->key.it, LJ_TSTR
+  |  jne >4
+  |  cmp dword NODE:RA->key.gcr, STR:RC
+  |  je >5
+  |4:
+  |  mov NODE:RA, NODE:RA->next
+  |  test NODE:RA, NODE:RA
+  |  jnz <3
+  |  jmp ->fff_res1			// Not found, keep default result.
+  |5:
+  |  mov RB, [RA+4]
+  |  cmp RB, LJ_TNIL;  je ->fff_res1	// Ditto for nil value.
+  |  mov RC, [RA]
+  |  mov [BASE-4], RB			// Return value of mt.__metatable.
+  |  mov [BASE-8], RC
+  |  jmp ->fff_res1
+  |
+  |6:
+  |  cmp RB, LJ_TUDATA;  je <1
+  |.if X64
+  |  cmp RB, LJ_TNUMX;  ja >8
+  |  cmp RB, LJ_TISNUM;  jbe >7
+  |  mov RB, LJ_TLIGHTUD
+  |  jmp >8
+  |7:
+  |.else
+  |  cmp RB, LJ_TISNUM;  ja >8
+  |.endif
+  |  mov RB, LJ_TNUMX
+  |8:
+  |  not RB
+  |  mov TAB:RB, [DISPATCH+RB*4+DISPATCH_GL(gcroot[GCROOT_BASEMT])]
+  |  jmp <2
+  |
+  |.ffunc_2 setmetatable
+  |  cmp dword [BASE+4], LJ_TTAB;  jne ->fff_fallback
+  |  // Fast path: no mt for table yet and not clearing the mt.
+  |  mov TAB:RB, [BASE]
+  |  cmp dword TAB:RB->metatable, 0;  jne ->fff_fallback
+  |  cmp dword [BASE+12], LJ_TTAB;  jne ->fff_fallback
+  |  mov TAB:RC, [BASE+8]
+  |  mov TAB:RB->metatable, TAB:RC
+  |  mov PC, [BASE-4]
+  |  mov dword [BASE-4], LJ_TTAB		// Return original table.
+  |  mov [BASE-8], TAB:RB
+  |  test byte TAB:RB->marked, LJ_GC_BLACK	// isblack(table)
+  |  jz >1
+  |  // Possible write barrier. Table is black, but skip iswhite(mt) check.
+  |  barrierback TAB:RB, RC
+  |1:
+  |  jmp ->fff_res1
+  |
+  |.ffunc_2 rawget
+  |  cmp dword [BASE+4], LJ_TTAB;  jne ->fff_fallback
+  |.if X64WIN
+  |  mov RB, BASE			// Save BASE.
+  |  lea CARG3d, [BASE+8]
+  |  mov CARG2d, [BASE]			// Caveat: CARG2d == BASE.
+  |  mov CARG1d, SAVE_L
+  |.elif X64
+  |  mov RB, BASE			// Save BASE.
+  |  mov CARG2d, [BASE]
+  |  lea CARG3d, [BASE+8]		// Caveat: CARG3d == BASE.
+  |  mov CARG1d, SAVE_L
+  |.else
+  |  mov TAB:RD, [BASE]
+  |  mov L:RB, SAVE_L
+  |  mov ARG2, TAB:RD
+  |  mov ARG1, L:RB
+  |  mov RB, BASE			// Save BASE.
+  |  add BASE, 8
+  |  mov ARG3, BASE
+  |.endif
+  |  call extern lj_tab_get	// (lua_State *L, GCtab *t, cTValue *key)
+  |  // cTValue * returned in eax (RD).
+  |  mov BASE, RB			// Restore BASE.
+  |  // Copy table slot.
+  |.if X64
+  |  mov RBa, [RD]
+  |  mov PC, [BASE-4]
+  |  mov [BASE-8], RBa
+  |.else
+  |  mov RB, [RD]
+  |  mov RD, [RD+4]
+  |  mov PC, [BASE-4]
+  |  mov [BASE-8], RB
+  |  mov [BASE-4], RD
+  |.endif
+  |  jmp ->fff_res1
+  |
+  |//-- Base library: conversions ------------------------------------------
+  |
+  |.ffunc tonumber
+  |  // Only handles the number case inline (without a base argument).
+  |  cmp NARGS:RD, 1+1;  jne ->fff_fallback	// Exactly one argument.
+  |  cmp dword [BASE+4], LJ_TISNUM
+  |.if DUALNUM
+  |  jne >1
+  |  mov RB, dword [BASE]; jmp ->fff_resi
+  |1:
+  |  ja ->fff_fallback
+  |.else
+  |  jae ->fff_fallback
+  |.endif
+  |.if SSE
+  |  movsd xmm0, qword [BASE]; jmp ->fff_resxmm0
+  |.else
+  |  fld qword [BASE]; jmp ->fff_resn
+  |.endif
+  |
+  |.ffunc_1 tostring
+  |  // Only handles the string or number case inline.
+  |  mov PC, [BASE-4]
+  |  cmp dword [BASE+4], LJ_TSTR;  jne >3
+  |  // A __tostring method in the string base metatable is ignored.
+  |  mov STR:RD, [BASE]
+  |2:
+  |  mov dword [BASE-4], LJ_TSTR
+  |  mov [BASE-8], STR:RD
+  |  jmp ->fff_res1
+  |3:  // Handle numbers inline, unless a number base metatable is present.
+  |  cmp dword [BASE+4], LJ_TISNUM;  ja ->fff_fallback
+  |  cmp dword [DISPATCH+DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])], 0
+  |  jne ->fff_fallback
+  |  ffgccheck				// Caveat: uses label 1.
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, BASE		// Add frame since C call can throw.
+  |  mov SAVE_PC, PC			// Redundant (but a defined value).
+  |.if X64 and not X64WIN
+  |  mov FCARG2, BASE			// Otherwise: FCARG2 == BASE
+  |.endif
+  |  mov L:FCARG1, L:RB
+  |.if DUALNUM
+  |  call extern lj_str_fromnumber@8	// (lua_State *L, cTValue *o)
+  |.else
+  |  call extern lj_str_fromnum@8	// (lua_State *L, lua_Number *np)
+  |.endif
+  |  // GCstr returned in eax (RD).
+  |  mov BASE, L:RB->base
+  |  jmp <2
+  |
+  |//-- Base library: iterators -------------------------------------------
+  |
+  |.ffunc_1 next
+  |  je >2				// Missing 2nd arg?
+  |1:
+  |  cmp dword [BASE+4], LJ_TTAB;  jne ->fff_fallback
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, BASE		// Add frame since C call can throw.
+  |  mov L:RB->top, BASE		// Dummy frame length is ok.
+  |  mov PC, [BASE-4]
+  |.if X64WIN
+  |  lea CARG3d, [BASE+8]
+  |  mov CARG2d, [BASE]			// Caveat: CARG2d == BASE.
+  |  mov CARG1d, L:RB
+  |.elif X64
+  |  mov CARG2d, [BASE]
+  |  lea CARG3d, [BASE+8]		// Caveat: CARG3d == BASE.
+  |  mov CARG1d, L:RB
+  |.else
+  |  mov TAB:RD, [BASE]
+  |  mov ARG2, TAB:RD
+  |  mov ARG1, L:RB
+  |  add BASE, 8
+  |  mov ARG3, BASE
+  |.endif
+  |  mov SAVE_PC, PC			// Needed for ITERN fallback.
+  |  call extern lj_tab_next	// (lua_State *L, GCtab *t, TValue *key)
+  |  // Flag returned in eax (RD).
+  |  mov BASE, L:RB->base
+  |  test RD, RD;  jz >3		// End of traversal?
+  |  // Copy key and value to results.
+  |.if X64
+  |  mov RBa, [BASE+8]
+  |  mov RDa, [BASE+16]
+  |  mov [BASE-8], RBa
+  |  mov [BASE], RDa
+  |.else
+  |  mov RB, [BASE+8]
+  |  mov RD, [BASE+12]
+  |  mov [BASE-8], RB
+  |  mov [BASE-4], RD
+  |  mov RB, [BASE+16]
+  |  mov RD, [BASE+20]
+  |  mov [BASE], RB
+  |  mov [BASE+4], RD
+  |.endif
+  |->fff_res2:
+  |  mov RD, 1+2
+  |  jmp ->fff_res
+  |2:  // Set missing 2nd arg to nil.
+  |  mov dword [BASE+12], LJ_TNIL
+  |  jmp <1
+  |3:  // End of traversal: return nil.
+  |  mov dword [BASE-4], LJ_TNIL
+  |  jmp ->fff_res1
+  |
+  |.ffunc_1 pairs
+  |  mov TAB:RB, [BASE]
+  |  cmp dword [BASE+4], LJ_TTAB;  jne ->fff_fallback
+#if LJ_52
+  |  cmp dword TAB:RB->metatable, 0; jne ->fff_fallback
+#endif
+  |  mov CFUNC:RB, [BASE-8]
+  |  mov CFUNC:RD, CFUNC:RB->upvalue[0]
+  |  mov PC, [BASE-4]
+  |  mov dword [BASE-4], LJ_TFUNC
+  |  mov [BASE-8], CFUNC:RD
+  |  mov dword [BASE+12], LJ_TNIL
+  |  mov RD, 1+3
+  |  jmp ->fff_res
+  |
+  |.ffunc_2 ipairs_aux
+  |  cmp dword [BASE+4], LJ_TTAB;  jne ->fff_fallback
+  |  cmp dword [BASE+12], LJ_TISNUM
+  |.if DUALNUM
+  |  jne ->fff_fallback
+  |.else
+  |  jae ->fff_fallback
+  |.endif
+  |  mov PC, [BASE-4]
+  |.if DUALNUM
+  |  mov RD, dword [BASE+8]
+  |  add RD, 1
+  |  mov dword [BASE-4], LJ_TISNUM
+  |  mov dword [BASE-8], RD
+  |.elif SSE
+  |  movsd xmm0, qword [BASE+8]
+  |  sseconst_1 xmm1, RBa
+  |  addsd xmm0, xmm1
+  |  cvtsd2si RD, xmm0
+  |  movsd qword [BASE-8], xmm0
+  |.else
+  |  fld qword [BASE+8]
+  |  fld1
+  |  faddp st1
+  |  fist ARG1
+  |  fstp qword [BASE-8]
+  |  mov RD, ARG1
+  |.endif
+  |  mov TAB:RB, [BASE]
+  |  cmp RD, TAB:RB->asize;  jae >2	// Not in array part?
+  |  shl RD, 3
+  |  add RD, TAB:RB->array
+  |1:
+  |  cmp dword [RD+4], LJ_TNIL;  je ->fff_res0
+  |  // Copy array slot.
+  |.if X64
+  |  mov RBa, [RD]
+  |  mov [BASE], RBa
+  |.else
+  |  mov RB, [RD]
+  |  mov RD, [RD+4]
+  |  mov [BASE], RB
+  |  mov [BASE+4], RD
+  |.endif
+  |  jmp ->fff_res2
+  |2:  // Check for empty hash part first. Otherwise call C function.
+  |  cmp dword TAB:RB->hmask, 0; je ->fff_res0
+  |  mov FCARG1, TAB:RB
+  |  mov RB, BASE			// Save BASE.
+  |  mov FCARG2, RD			// Caveat: FCARG2 == BASE
+  |  call extern lj_tab_getinth@8	// (GCtab *t, int32_t key)
+  |  // cTValue * or NULL returned in eax (RD).
+  |  mov BASE, RB
+  |  test RD, RD
+  |  jnz <1
+  |->fff_res0:
+  |  mov RD, 1+0
+  |  jmp ->fff_res
+  |
+  |.ffunc_1 ipairs
+  |  mov TAB:RB, [BASE]
+  |  cmp dword [BASE+4], LJ_TTAB;  jne ->fff_fallback
+#if LJ_52
+  |  cmp dword TAB:RB->metatable, 0; jne ->fff_fallback
+#endif
+  |  mov CFUNC:RB, [BASE-8]
+  |  mov CFUNC:RD, CFUNC:RB->upvalue[0]
+  |  mov PC, [BASE-4]
+  |  mov dword [BASE-4], LJ_TFUNC
+  |  mov [BASE-8], CFUNC:RD
+  |.if DUALNUM
+  |  mov dword [BASE+12], LJ_TISNUM
+  |  mov dword [BASE+8], 0
+  |.elif SSE
+  |  xorps xmm0, xmm0
+  |  movsd qword [BASE+8], xmm0
+  |.else
+  |  fldz
+  |  fstp qword [BASE+8]
+  |.endif
+  |  mov RD, 1+3
+  |  jmp ->fff_res
+  |
+  |//-- Base library: catch errors ----------------------------------------
+  |
+  |.ffunc_1 pcall
+  |  lea RA, [BASE+8]
+  |  sub NARGS:RD, 1
+  |  mov PC, 8+FRAME_PCALL
+  |1:
+  |  movzx RB, byte [DISPATCH+DISPATCH_GL(hookmask)]
+  |  shr RB, HOOK_ACTIVE_SHIFT
+  |  and RB, 1
+  |  add PC, RB				// Remember active hook before pcall.
+  |  jmp ->vm_call_dispatch
+  |
+  |.ffunc_2 xpcall
+  |  cmp dword [BASE+12], LJ_TFUNC;  jne ->fff_fallback
+  |  mov RB, [BASE+4]			// Swap function and traceback.
+  |  mov [BASE+12], RB
+  |  mov dword [BASE+4], LJ_TFUNC
+  |  mov LFUNC:RB, [BASE]
+  |  mov PC, [BASE+8]
+  |  mov [BASE+8], LFUNC:RB
+  |  mov [BASE], PC
+  |  lea RA, [BASE+16]
+  |  sub NARGS:RD, 2
+  |  mov PC, 16+FRAME_PCALL
+  |  jmp <1
+  |
+  |//-- Coroutine library --------------------------------------------------
+  |
+  |.macro coroutine_resume_wrap, resume
+  |.if resume
+  |.ffunc_1 coroutine_resume
+  |  mov L:RB, [BASE]
+  |.else
+  |.ffunc coroutine_wrap_aux
+  |  mov CFUNC:RB, [BASE-8]
+  |  mov L:RB, CFUNC:RB->upvalue[0].gcr
+  |.endif
+  |  mov PC, [BASE-4]
+  |  mov SAVE_PC, PC
+  |.if X64
+  |  mov TMP1, L:RB
+  |.else
+  |  mov ARG1, L:RB
+  |.endif
+  |.if resume
+  |  cmp dword [BASE+4], LJ_TTHREAD;  jne ->fff_fallback
+  |.endif
+  |  cmp aword L:RB->cframe, 0; jne ->fff_fallback
+  |  cmp byte L:RB->status, LUA_YIELD;  ja ->fff_fallback
+  |  mov RA, L:RB->top
+  |  je >1				// Status != LUA_YIELD (i.e. 0)?
+  |  cmp RA, L:RB->base			// Check for presence of initial func.
+  |  je ->fff_fallback
+  |1:
+  |.if resume
+  |  lea PC, [RA+NARGS:RD*8-16]		// Check stack space (-1-thread).
+  |.else
+  |  lea PC, [RA+NARGS:RD*8-8]		// Check stack space (-1).
+  |.endif
+  |  cmp PC, L:RB->maxstack; ja ->fff_fallback
+  |  mov L:RB->top, PC
+  |
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, BASE
+  |.if resume
+  |  add BASE, 8			// Keep resumed thread in stack for GC.
+  |.endif
+  |  mov L:RB->top, BASE
+  |.if resume
+  |  lea RB, [BASE+NARGS:RD*8-24]	// RB = end of source for stack move.
+  |.else
+  |  lea RB, [BASE+NARGS:RD*8-16]	// RB = end of source for stack move.
+  |.endif
+  |  sub RBa, PCa			// Relative to PC.
+  |
+  |  cmp PC, RA
+  |  je >3
+  |2:  // Move args to coroutine.
+  |.if X64
+  |  mov RCa, [PC+RB]
+  |  mov [PC-8], RCa
+  |.else
+  |  mov RC, [PC+RB+4]
+  |  mov [PC-4], RC
+  |  mov RC, [PC+RB]
+  |  mov [PC-8], RC
+  |.endif
+  |  sub PC, 8
+  |  cmp PC, RA
+  |  jne <2
+  |3:
+  |.if X64
+  |  mov CARG2d, RA
+  |  mov CARG1d, TMP1
+  |.else
+  |  mov ARG2, RA
+  |  xor RA, RA
+  |  mov ARG4, RA
+  |  mov ARG3, RA
+  |.endif
+  |  call ->vm_resume			// (lua_State *L, TValue *base, 0, 0)
+  |  set_vmstate INTERP
+  |
+  |  mov L:RB, SAVE_L
+  |.if X64
+  |  mov L:PC, TMP1
+  |.else
+  |  mov L:PC, ARG1			// The callee doesn't modify SAVE_L.
+  |.endif
+  |  mov BASE, L:RB->base
+  |  cmp eax, LUA_YIELD
+  |  ja >8
+  |4:
+  |  mov RA, L:PC->base
+  |  mov KBASE, L:PC->top
+  |  mov L:PC->top, RA			// Clear coroutine stack.
+  |  mov PC, KBASE
+  |  sub PC, RA
+  |  je >6				// No results?
+  |  lea RD, [BASE+PC]
+  |  shr PC, 3
+  |  cmp RD, L:RB->maxstack
+  |  ja >9				// Need to grow stack?
+  |
+  |  mov RB, BASE
+  |  sub RBa, RAa
+  |5:  // Move results from coroutine.
+  |.if X64
+  |  mov RDa, [RA]
+  |  mov [RA+RB], RDa
+  |.else
+  |  mov RD, [RA]
+  |  mov [RA+RB], RD
+  |  mov RD, [RA+4]
+  |  mov [RA+RB+4], RD
+  |.endif
+  |  add RA, 8
+  |  cmp RA, KBASE
+  |  jne <5
+  |6:
+  |.if resume
+  |  lea RD, [PC+2]			// nresults+1 = 1 + true + results.
+  |  mov dword [BASE-4], LJ_TTRUE	// Prepend true to results.
+  |.else
+  |  lea RD, [PC+1]			// nresults+1 = 1 + results.
+  |.endif
+  |7:
+  |  mov PC, SAVE_PC
+  |  mov MULTRES, RD
+  |.if resume
+  |  mov RAa, -8
+  |.else
+  |  xor RA, RA
+  |.endif
+  |  test PC, FRAME_TYPE
+  |  jz ->BC_RET_Z
+  |  jmp ->vm_return
+  |
+  |8:  // Coroutine returned with error (at co->top-1).
+  |.if resume
+  |  mov dword [BASE-4], LJ_TFALSE	// Prepend false to results.
+  |  mov RA, L:PC->top
+  |  sub RA, 8
+  |  mov L:PC->top, RA			// Clear error from coroutine stack.
+  |  // Copy error message.
+  |.if X64
+  |  mov RDa, [RA]
+  |  mov [BASE], RDa
+  |.else
+  |  mov RD, [RA]
+  |  mov [BASE], RD
+  |  mov RD, [RA+4]
+  |  mov [BASE+4], RD
+  |.endif
+  |  mov RD, 1+2			// nresults+1 = 1 + false + error.
+  |  jmp <7
+  |.else
+  |  mov FCARG2, L:PC
+  |  mov FCARG1, L:RB
+  |  call extern lj_ffh_coroutine_wrap_err@8  // (lua_State *L, lua_State *co)
+  |  // Error function does not return.
+  |.endif
+  |
+  |9:  // Handle stack expansion on return from yield.
+  |.if X64
+  |  mov L:RA, TMP1
+  |.else
+  |  mov L:RA, ARG1			// The callee doesn't modify SAVE_L.
+  |.endif
+  |  mov L:RA->top, KBASE		// Undo coroutine stack clearing.
+  |  mov FCARG2, PC
+  |  mov FCARG1, L:RB
+  |  call extern lj_state_growstack@8	// (lua_State *L, int n)
+  |.if X64
+  |  mov L:PC, TMP1
+  |.else
+  |  mov L:PC, ARG1
+  |.endif
+  |  mov BASE, L:RB->base
+  |  jmp <4				// Retry the stack move.
+  |.endmacro
+  |
+  |  coroutine_resume_wrap 1		// coroutine.resume
+  |  coroutine_resume_wrap 0		// coroutine.wrap
+  |
+  |.ffunc coroutine_yield
+  |  mov L:RB, SAVE_L
+  |  test aword L:RB->cframe, CFRAME_RESUME
+  |  jz ->fff_fallback
+  |  mov L:RB->base, BASE
+  |  lea RD, [BASE+NARGS:RD*8-8]
+  |  mov L:RB->top, RD
+  |  xor RD, RD
+  |  mov aword L:RB->cframe, RDa
+  |  mov al, LUA_YIELD
+  |  mov byte L:RB->status, al
+  |  jmp ->vm_leave_unw
+  |
+  |//-- Math library -------------------------------------------------------
+  |
+  |.if not DUALNUM
+  |->fff_resi:  // Dummy.
+  |.endif
+  |
+  |.if SSE
+  |->fff_resn:
+  |  mov PC, [BASE-4]
+  |  fstp qword [BASE-8]
+  |  jmp ->fff_res1
+  |.endif
+  |
+  |  .ffunc_1 math_abs
+  |.if DUALNUM
+  |  cmp dword [BASE+4], LJ_TISNUM; jne >2
+  |  mov RB, dword [BASE]
+  |  cmp RB, 0; jns ->fff_resi
+  |  neg RB; js >1
+  |->fff_resbit:
+  |->fff_resi:
+  |  mov PC, [BASE-4]
+  |  mov dword [BASE-4], LJ_TISNUM
+  |  mov dword [BASE-8], RB
+  |  jmp ->fff_res1
+  |1:
+  |  mov PC, [BASE-4]
+  |  mov dword [BASE-4], 0x41e00000  // 2^31.
+  |  mov dword [BASE-8], 0
+  |  jmp ->fff_res1
+  |2:
+  |  ja ->fff_fallback
+  |.else
+  |  cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+  |.endif
+  |
+  |.if SSE
+  |  movsd xmm0, qword [BASE]
+  |  sseconst_abs xmm1, RDa
+  |  andps xmm0, xmm1
+  |->fff_resxmm0:
+  |  mov PC, [BASE-4]
+  |  movsd qword [BASE-8], xmm0
+  |  // fallthrough
+  |.else
+  |  fld qword [BASE]
+  |  fabs
+  |  // fallthrough
+  |->fff_resxmm0:  // Dummy.
+  |->fff_resn:
+  |  mov PC, [BASE-4]
+  |  fstp qword [BASE-8]
+  |.endif
+  |
+  |->fff_res1:
+  |  mov RD, 1+1
+  |->fff_res:
+  |  mov MULTRES, RD
+  |->fff_res_:
+  |  test PC, FRAME_TYPE
+  |  jnz >7
+  |5:
+  |  cmp PC_RB, RDL			// More results expected?
+  |  ja >6
+  |  // Adjust BASE. KBASE is assumed to be set for the calling frame.
+  |  movzx RA, PC_RA
+  |  not RAa				// Note: ~RA = -(RA+1)
+  |  lea BASE, [BASE+RA*8]		// base = base - (RA+1)*8
+  |  ins_next
+  |
+  |6:  // Fill up results with nil.
+  |  mov dword [BASE+RD*8-12], LJ_TNIL
+  |  add RD, 1
+  |  jmp <5
+  |
+  |7:  // Non-standard return case.
+  |  mov RAa, -8			// Results start at BASE+RA = BASE-8.
+  |  jmp ->vm_return
+  |
+  |.macro math_round, func
+  |  .ffunc math_ .. func
+  |.if DUALNUM
+  |  cmp dword [BASE+4], LJ_TISNUM; jne >1
+  |  mov RB, dword [BASE]; jmp ->fff_resi
+  |1:
+  |  ja ->fff_fallback
+  |.else
+  |  cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+  |.endif
+  |.if SSE
+  |  movsd xmm0, qword [BASE]
+  |  call ->vm_ .. func
+  |  .if DUALNUM
+  |    cvtsd2si RB, xmm0
+  |    cmp RB, 0x80000000
+  |    jne ->fff_resi
+  |    cvtsi2sd xmm1, RB
+  |    ucomisd xmm0, xmm1
+  |    jp ->fff_resxmm0
+  |    je ->fff_resi
+  |  .endif
+  |  jmp ->fff_resxmm0
+  |.else
+  |  fld qword [BASE]
+  |  call ->vm_ .. func
+  |  .if DUALNUM
+  |    fist ARG1
+  |    mov RB, ARG1
+  |    cmp RB, 0x80000000; jne >2
+  |    fdup
+  |    fild ARG1
+  |    fcomparepp
+  |    jp ->fff_resn
+  |    jne ->fff_resn
+  |2:
+  |    fpop
+  |    jmp ->fff_resi
+  | .else
+  |    jmp ->fff_resn
+  | .endif
+  |.endif
+  |.endmacro
+  |
+  |  math_round floor
+  |  math_round ceil
+  |
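Side note on the math_round macro above: in the DUALNUM build it only returns an integer result when the rounded value really fits in an int32. cvtsd2si produces 0x80000000 both for the legitimate value -2^31 and as its "invalid" sentinel for anything out of range (or NaN), so that one value is re-checked by converting back and comparing. A rough C sketch of the same decision (the helper name is illustrative only, not part of this file):

    #include <stdint.h>

    /* Sketch only: return 1 and the int32 value when the rounded double n is
    ** exactly representable as an int32, else 0 (keep the double result).
    ** The assembly does it the other way round: convert first, then re-check
    ** the 0x80000000 sentinel against the original value with ucomisd. */
    static int rounded_fits_int32(double n, int32_t *out)
    {
      if (n >= -2147483648.0 && n < 2147483648.0) {  /* Also false for NaN. */
        *out = (int32_t)n;
        return 1;
      }
      return 0;
    }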
+  |.if SSE
+  |.ffunc_nsse math_sqrt, sqrtsd; jmp ->fff_resxmm0
+  |.else
+  |.ffunc_n math_sqrt; fsqrt; jmp ->fff_resn
+  |.endif
+  |
+  |.ffunc math_log
+  |  cmp NARGS:RD, 1+1; jne ->fff_fallback	// Exactly one argument.
+  |  cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+  |  fldln2; fld qword [BASE]; fyl2x; jmp ->fff_resn
+  |
+  |.ffunc_n math_log10, fldlg2;	fyl2x;		jmp ->fff_resn
+  |.ffunc_n math_exp;	call ->vm_exp_x87;	jmp ->fff_resn
+  |
+  |.ffunc_n math_sin;	fsin;			jmp ->fff_resn
+  |.ffunc_n math_cos;	fcos;			jmp ->fff_resn
+  |.ffunc_n math_tan;	fptan; fpop;		jmp ->fff_resn
+  |
+  |.ffunc_n math_asin
+  |  fdup; fmul st0; fld1; fsubrp st1; fsqrt; fpatan
+  |  jmp ->fff_resn
+  |.ffunc_n math_acos
+  |  fdup; fmul st0; fld1; fsubrp st1; fsqrt; fxch; fpatan
+  |  jmp ->fff_resn
+  |.ffunc_n math_atan;	fld1; fpatan;		jmp ->fff_resn
+  |
+  |.macro math_extern, func
+  |.if SSE
+  |  .ffunc_nsse math_ .. func
+  |  .if not X64
+  |    movsd FPARG1, xmm0
+  |  .endif
+  |.else
+  |  .ffunc_n math_ .. func
+  |  fstp FPARG1
+  |.endif
+  |  mov RB, BASE
+  |  call extern lj_vm_ .. func
+  |  mov BASE, RB
+  |  .if X64
+  |    jmp ->fff_resxmm0
+  |  .else
+  |    jmp ->fff_resn
+  |  .endif
+  |.endmacro
+  |
+  |  math_extern sinh
+  |  math_extern cosh
+  |  math_extern tanh
+  |
+  |->ff_math_deg:
+  |.if SSE
+  |.ffunc_nsse math_rad
+  |  mov CFUNC:RB, [BASE-8]
+  |  mulsd xmm0, qword CFUNC:RB->upvalue[0]
+  |  jmp ->fff_resxmm0
+  |.else
+  |.ffunc_n math_rad
+  |  mov CFUNC:RB, [BASE-8]
+  |  fmul qword CFUNC:RB->upvalue[0]
+  |  jmp ->fff_resn
+  |.endif
+  |
+  |.ffunc_nn math_atan2;	fpatan;		jmp ->fff_resn
+  |.ffunc_nnr math_ldexp;	fscale; fpop1;	jmp ->fff_resn
+  |
+  |.ffunc_1 math_frexp
+  |  mov RB, [BASE+4]
+  |  cmp RB, LJ_TISNUM;  jae ->fff_fallback
+  |  mov PC, [BASE-4]
+  |  mov RC, [BASE]
+  |  mov [BASE-4], RB; mov [BASE-8], RC
+  |  shl RB, 1; cmp RB, 0xffe00000; jae >3
+  |  or RC, RB; jz >3
+  |  mov RC, 1022
+  |  cmp RB, 0x00200000; jb >4
+  |1:
+  |  shr RB, 21; sub RB, RC		// Extract and unbias exponent.
+  |.if SSE
+  |  cvtsi2sd xmm0, RB
+  |.else
+  |  mov TMP1, RB; fild TMP1
+  |.endif
+  |  mov RB, [BASE-4]
+  |  and RB, 0x800fffff			// Mask off exponent.
+  |  or RB, 0x3fe00000			// Put mantissa in range [0.5,1) or 0.
+  |  mov [BASE-4], RB
+  |2:
+  |.if SSE
+  |  movsd qword [BASE], xmm0
+  |.else
+  |  fstp qword [BASE]
+  |.endif
+  |  mov RD, 1+2
+  |  jmp ->fff_res
+  |3:  // Return +-0, +-Inf, NaN unmodified and an exponent of 0.
+  |.if SSE
+  |  xorps xmm0, xmm0; jmp <2
+  |.else
+  |  fldz; jmp <2
+  |.endif
+  |4:  // Handle denormals by multiplying with 2^54 and adjusting the bias.
+  |.if SSE
+  |  movsd xmm0, qword [BASE]
+  |  sseconst_hi xmm1, RBa, 43500000  // 2^54.
+  |  mulsd xmm0, xmm1
+  |  movsd qword [BASE-8], xmm0
+  |.else
+  |  fld qword [BASE]
+  |  mov TMP1, 0x5a800000; fmul TMP1	// x = x*2^54
+  |  fstp qword [BASE-8]
+  |.endif
+  |  mov RB, [BASE-4]; mov RC, 1076; shl RB, 1; jmp <1
+  |
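The math.frexp code above works directly on the IEEE-754 encoding: the high 32 bits of a double hold the sign bit, the 11-bit biased exponent and the top 20 mantissa bits, and denormals are first rescaled by 2^54 so the same extraction applies. A portable C sketch of the same bit manipulation (function name illustrative only, not part of this file):

    #include <stdint.h>
    #include <string.h>

    static double frexp_bits(double x, int *e)
    {
      uint64_t bits; memcpy(&bits, &x, 8);
      uint32_t hi = (uint32_t)(bits >> 32), lo = (uint32_t)bits;
      int bias = 1022;
      *e = 0;
      if ((hi << 1) >= 0xffe00000u) return x;      /* +-Inf or NaN: unchanged. */
      if (((hi << 1) | lo) == 0) return x;         /* +-0: unchanged. */
      if ((hi << 1) < 0x00200000u) {               /* Denormal: rescale by 2^54. */
        x *= 18014398509481984.0;                  /* 2^54 */
        memcpy(&bits, &x, 8); hi = (uint32_t)(bits >> 32);
        bias = 1022 + 54;                          /* 1076, as in the code above. */
      }
      *e = (int)((hi << 1) >> 21) - bias;          /* Extract and unbias exponent. */
      hi = (hi & 0x800fffffu) | 0x3fe00000u;       /* Put mantissa in [0.5,1). */
      bits = ((uint64_t)hi << 32) | (bits & 0xffffffffu);
      memcpy(&x, &bits, 8);
      return x;
    }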
+  |.if SSE
+  |.ffunc_nsse math_modf
+  |.else
+  |.ffunc_n math_modf
+  |.endif
+  |  mov RB, [BASE+4]
+  |  mov PC, [BASE-4]
+  |  shl RB, 1; cmp RB, 0xffe00000; je >4	// +-Inf?
+  |.if SSE
+  |  movaps xmm4, xmm0
+  |  call ->vm_trunc
+  |  subsd xmm4, xmm0
+  |1:
+  |  movsd qword [BASE-8], xmm0
+  |  movsd qword [BASE], xmm4
+  |.else
+  |  fdup
+  |  call ->vm_trunc
+  |  fsub st1, st0
+  |1:
+  |  fstp qword [BASE-8]
+  |  fstp qword [BASE]
+  |.endif
+  |  mov RC, [BASE-4]; mov RB, [BASE+4]
+  |  xor RC, RB; js >3				// Need to adjust sign?
+  |2:
+  |  mov RD, 1+2
+  |  jmp ->fff_res
+  |3:
+  |  xor RB, 0x80000000; mov [BASE+4], RB	// Flip sign of fraction.
+  |  jmp <2
+  |4:
+  |.if SSE
+  |  xorps xmm4, xmm4; jmp <1			// Return +-Inf and +-0.
+  |.else
+  |  fldz; fxch; jmp <1				// Return +-Inf and +-0.
+  |.endif
+  |
+  |.ffunc_nnr math_fmod
+  |1: ; fprem; fnstsw ax; and ax, 0x400; jnz <1
+  |  fpop1
+  |  jmp ->fff_resn
+  |
+  |.if SSE
+  |.ffunc_nnsse math_pow;	call ->vm_pow;	jmp ->fff_resxmm0
+  |.else
+  |.ffunc_nn math_pow;		call ->vm_pow;	jmp ->fff_resn
+  |.endif
+  |
+  |.macro math_minmax, name, cmovop, fcmovop, sseop
+  |  .ffunc name
+  |  mov RA, 2
+  |  cmp dword [BASE+4], LJ_TISNUM
+  |.if DUALNUM
+  |  jne >4
+  |  mov RB, dword [BASE]
+  |1:  // Handle integers.
+  |  cmp RA, RD; jae ->fff_resi
+  |  cmp dword [BASE+RA*8-4], LJ_TISNUM; jne >3
+  |  cmp RB, dword [BASE+RA*8-8]
+  |  cmovop RB, dword [BASE+RA*8-8]
+  |  add RA, 1
+  |  jmp <1
+  |3:
+  |  ja ->fff_fallback
+  |  // Convert intermediate result to number and continue below.
+  |.if SSE
+  |  cvtsi2sd xmm0, RB
+  |.else
+  |  mov TMP1, RB
+  |  fild TMP1
+  |.endif
+  |  jmp >6
+  |4:
+  |  ja ->fff_fallback
+  |.else
+  |  jae ->fff_fallback
+  |.endif
+  |
+  |.if SSE
+  |  movsd xmm0, qword [BASE]
+  |5:  // Handle numbers or integers.
+  |  cmp RA, RD; jae ->fff_resxmm0
+  |  cmp dword [BASE+RA*8-4], LJ_TISNUM
+  |.if DUALNUM
+  |  jb >6
+  |  ja ->fff_fallback
+  |  cvtsi2sd xmm1, dword [BASE+RA*8-8]
+  |  jmp >7
+  |.else
+  |  jae ->fff_fallback
+  |.endif
+  |6:
+  |  movsd xmm1, qword [BASE+RA*8-8]
+  |7:
+  |  sseop xmm0, xmm1
+  |  add RA, 1
+  |  jmp <5
+  |.else
+  |  fld qword [BASE]
+  |5:  // Handle numbers or integers.
+  |  cmp RA, RD; jae ->fff_resn
+  |  cmp dword [BASE+RA*8-4], LJ_TISNUM
+  |.if DUALNUM
+  |  jb >6
+  |  ja >9
+  |  fild dword [BASE+RA*8-8]
+  |  jmp >7
+  |.else
+  |  jae >9
+  |.endif
+  |6:
+  |  fld qword [BASE+RA*8-8]
+  |7:
+  |  fucomi st1; fcmovop st1; fpop1
+  |  add RA, 1
+  |  jmp <5
+  |.endif
+  |.endmacro
+  |
+  |  math_minmax math_min, cmovg, fcmovnbe, minsd
+  |  math_minmax math_max, cmovl, fcmovbe, maxsd
+  |.if not SSE
+  |9:
+  |  fpop; jmp ->fff_fallback
+  |.endif
+  |
+  |//-- String library -----------------------------------------------------
+  |
+  |.ffunc_1 string_len
+  |  cmp dword [BASE+4], LJ_TSTR;  jne ->fff_fallback
+  |  mov STR:RB, [BASE]
+  |.if DUALNUM
+  |  mov RB, dword STR:RB->len; jmp ->fff_resi
+  |.elif SSE
+  |  cvtsi2sd xmm0, dword STR:RB->len; jmp ->fff_resxmm0
+  |.else
+  |  fild dword STR:RB->len; jmp ->fff_resn
+  |.endif
+  |
+  |.ffunc string_byte			// Only handle the 1-arg case here.
+  |  cmp NARGS:RD, 1+1;  jne ->fff_fallback
+  |  cmp dword [BASE+4], LJ_TSTR;  jne ->fff_fallback
+  |  mov STR:RB, [BASE]
+  |  mov PC, [BASE-4]
+  |  cmp dword STR:RB->len, 1
+  |  jb ->fff_res0			// Return no results for empty string.
+  |  movzx RB, byte STR:RB[1]
+  |.if DUALNUM
+  |  jmp ->fff_resi
+  |.elif SSE
+  |  cvtsi2sd xmm0, RB; jmp ->fff_resxmm0
+  |.else
+  |  mov TMP1, RB; fild TMP1; jmp ->fff_resn
+  |.endif
+  |
+  |.ffunc string_char			// Only handle the 1-arg case here.
+  |  ffgccheck
+  |  cmp NARGS:RD, 1+1;  jne ->fff_fallback	// *Exactly* 1 arg.
+  |  cmp dword [BASE+4], LJ_TISNUM
+  |.if DUALNUM
+  |  jne ->fff_fallback
+  |  mov RB, dword [BASE]
+  |  cmp RB, 255;  ja ->fff_fallback
+  |  mov TMP2, RB
+  |.elif SSE
+  |  jae ->fff_fallback
+  |  cvttsd2si RB, qword [BASE]
+  |  cmp RB, 255;  ja ->fff_fallback
+  |  mov TMP2, RB
+  |.else
+  |  jae ->fff_fallback
+  |  fld qword [BASE]
+  |  fistp TMP2
+  |  cmp TMP2, 255;  ja ->fff_fallback
+  |.endif
+  |.if X64
+  |  mov TMP3, 1
+  |.else
+  |  mov ARG3, 1
+  |.endif
+  |  lea RDa, TMP2			// Points to stack. Little-endian.
+  |->fff_newstr:
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, BASE
+  |.if X64
+  |  mov CARG3d, TMP3			// Zero-extended to size_t.
+  |  mov CARG2, RDa			// May be 64 bit ptr to stack.
+  |  mov CARG1d, L:RB
+  |.else
+  |  mov ARG2, RD
+  |  mov ARG1, L:RB
+  |.endif
+  |  mov SAVE_PC, PC
+  |  call extern lj_str_new		// (lua_State *L, char *str, size_t l)
+  |  // GCstr * returned in eax (RD).
+  |  mov BASE, L:RB->base
+  |  mov PC, [BASE-4]
+  |  mov dword [BASE-4], LJ_TSTR
+  |  mov [BASE-8], STR:RD
+  |  jmp ->fff_res1
+  |
+  |.ffunc string_sub
+  |  ffgccheck
+  |  mov TMP2, -1
+  |  cmp NARGS:RD, 1+2;  jb ->fff_fallback
+  |  jna >1
+  |  cmp dword [BASE+20], LJ_TISNUM
+  |.if DUALNUM
+  |  jne ->fff_fallback
+  |  mov RB, dword [BASE+16]
+  |  mov TMP2, RB
+  |.elif SSE
+  |  jae ->fff_fallback
+  |  cvttsd2si RB, qword [BASE+16]
+  |  mov TMP2, RB
+  |.else
+  |  jae ->fff_fallback
+  |  fld qword [BASE+16]
+  |  fistp TMP2
+  |.endif
+  |1:
+  |  cmp dword [BASE+4], LJ_TSTR;  jne ->fff_fallback
+  |  cmp dword [BASE+12], LJ_TISNUM
+  |.if DUALNUM
+  |  jne ->fff_fallback
+  |.else
+  |  jae ->fff_fallback
+  |.endif
+  |  mov STR:RB, [BASE]
+  |  mov TMP3, STR:RB
+  |  mov RB, STR:RB->len
+  |.if DUALNUM
+  |  mov RA, dword [BASE+8]
+  |.elif SSE
+  |  cvttsd2si RA, qword [BASE+8]
+  |.else
+  |  fld qword [BASE+8]
+  |  fistp ARG3
+  |  mov RA, ARG3
+  |.endif
+  |  mov RC, TMP2
+  |  cmp RB, RC				// len < end? (unsigned compare)
+  |  jb >5
+  |2:
+  |  test RA, RA			// start <= 0?
+  |  jle >7
+  |3:
+  |  mov STR:RB, TMP3
+  |  sub RC, RA				// start > end?
+  |  jl ->fff_emptystr
+  |  lea RB, [STR:RB+RA+#STR-1]
+  |  add RC, 1
+  |4:
+  |.if X64
+  |  mov TMP3, RC
+  |.else
+  |  mov ARG3, RC
+  |.endif
+  |  mov RD, RB
+  |  jmp ->fff_newstr
+  |
+  |5:  // Negative end or overflow.
+  |  jl >6
+  |  lea RC, [RC+RB+1]			// end = end+(len+1)
+  |  jmp <2
+  |6:  // Overflow.
+  |  mov RC, RB				// end = len
+  |  jmp <2
+  |
+  |7:  // Negative start or underflow.
+  |  je >8
+  |  add RA, RB				// start = start+(len+1)
+  |  add RA, 1
+  |  jg <3				// start > 0?
+  |8:  // Underflow.
+  |  mov RA, 1				// start = 1
+  |  jmp <3
+  |
+  |->fff_emptystr:  // Range underflow.
+  |  xor RC, RC				// Zero length. Any ptr in RB is ok.
+  |  jmp <4
+  |
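The index normalization in string_sub above implements the usual string.sub rules: negative indices count from the end of the string, out-of-range indices are clamped, and a start index past the end yields the empty string. Roughly, in C (helper name illustrative only, not part of this file):

    #include <stdint.h>

    /* Sketch only: compute the byte offset and length of s:sub(start, end)
    ** for a string of length len, mirroring the clamping done above. */
    static void sub_range(uint32_t len, int32_t start, int32_t end,
                          uint32_t *ofs, uint32_t *n)
    {
      if ((uint32_t)end > len) {                 /* Unsigned compare, as above: */
        if (end < 0) end += (int32_t)len + 1;    /* negative end counts from the end, */
        else end = (int32_t)len;                 /* end beyond the string is clamped. */
      }
      if (start <= 0) {
        if (start != 0) start += (int32_t)len + 1;  /* Negative start... */
        if (start <= 0) start = 1;                  /* ...clamped to 1. */
      }
      if (end < start) { *ofs = 0; *n = 0; return; }  /* Empty result. */
      *ofs = (uint32_t)start - 1;                /* Offset into the string data. */
      *n = (uint32_t)(end - start) + 1;          /* Length of the substring. */
    }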
+  |.ffunc string_rep			// Only handle the 1-char case inline.
+  |  ffgccheck
+  |  cmp NARGS:RD, 2+1; jne ->fff_fallback	// Exactly 2 arguments.
+  |  cmp dword [BASE+4], LJ_TSTR;  jne ->fff_fallback
+  |  cmp dword [BASE+12], LJ_TISNUM
+  |  mov STR:RB, [BASE]
+  |.if DUALNUM
+  |  jne ->fff_fallback
+  |  mov RC, dword [BASE+8]
+  |.elif SSE
+  |  jae ->fff_fallback
+  |  cvttsd2si RC, qword [BASE+8]
+  |.else
+  |  jae ->fff_fallback
+  |  fld qword [BASE+8]
+  |  fistp TMP2
+  |  mov RC, TMP2
+  |.endif
+  |  test RC, RC
+  |  jle ->fff_emptystr			// Count <= 0? (or non-int)
+  |  cmp dword STR:RB->len, 1
+  |  jb ->fff_emptystr			// Zero length string?
+  |  jne ->fff_fallback_2		// Fallback for > 1-char strings.
+  |  cmp [DISPATCH+DISPATCH_GL(tmpbuf.sz)], RC;  jb ->fff_fallback_2
+  |  movzx RA, byte STR:RB[1]
+  |  mov RB, [DISPATCH+DISPATCH_GL(tmpbuf.buf)]
+  |.if X64
+  |  mov TMP3, RC
+  |.else
+  |  mov ARG3, RC
+  |.endif
+  |1:  // Fill buffer with char. Yes, this is suboptimal code (do you care?).
+  |  mov [RB], RAL
+  |  add RB, 1
+  |  sub RC, 1
+  |  jnz <1
+  |  mov RD, [DISPATCH+DISPATCH_GL(tmpbuf.buf)]
+  |  jmp ->fff_newstr
+  |
+  |.ffunc_1 string_reverse
+  |  ffgccheck
+  |  cmp dword [BASE+4], LJ_TSTR;  jne ->fff_fallback
+  |  mov STR:RB, [BASE]
+  |  mov RC, STR:RB->len
+  |  test RC, RC
+  |  jz ->fff_emptystr			// Zero length string?
+  |  cmp [DISPATCH+DISPATCH_GL(tmpbuf.sz)], RC;  jb ->fff_fallback_1
+  |  add RB, #STR
+  |  mov TMP2, PC			// Need another temp register.
+  |.if X64
+  |  mov TMP3, RC
+  |.else
+  |  mov ARG3, RC
+  |.endif
+  |  mov PC, [DISPATCH+DISPATCH_GL(tmpbuf.buf)]
+  |1:
+  |  movzx RA, byte [RB]
+  |  add RB, 1
+  |  sub RC, 1
+  |  mov [PC+RC], RAL
+  |  jnz <1
+  |  mov RD, PC
+  |  mov PC, TMP2
+  |  jmp ->fff_newstr
+  |
+  |.macro ffstring_case, name, lo, hi
+  |  .ffunc_1 name
+  |  ffgccheck
+  |  cmp dword [BASE+4], LJ_TSTR;  jne ->fff_fallback
+  |  mov STR:RB, [BASE]
+  |  mov RC, STR:RB->len
+  |  cmp [DISPATCH+DISPATCH_GL(tmpbuf.sz)], RC;  jb ->fff_fallback_1
+  |  add RB, #STR
+  |  mov TMP2, PC			// Need another temp register.
+  |.if X64
+  |  mov TMP3, RC
+  |.else
+  |  mov ARG3, RC
+  |.endif
+  |  mov PC, [DISPATCH+DISPATCH_GL(tmpbuf.buf)]
+  |  jmp >3
+  |1:  // ASCII case conversion. Yes, this is suboptimal code (do you care?).
+  |  movzx RA, byte [RB+RC]
+  |  cmp RA, lo
+  |  jb >2
+  |  cmp RA, hi
+  |  ja >2
+  |  xor RA, 0x20
+  |2:
+  |  mov [PC+RC], RAL
+  |3:
+  |  sub RC, 1
+  |  jns <1
+  |  mov RD, PC
+  |  mov PC, TMP2
+  |  jmp ->fff_newstr
+  |.endmacro
+  |
+  |ffstring_case string_lower, 0x41, 0x5a
+  |ffstring_case string_upper, 0x61, 0x7a
+  |
+  |//-- Table library ------------------------------------------------------
+  |
+  |.ffunc_1 table_getn
+  |  cmp dword [BASE+4], LJ_TTAB;  jne ->fff_fallback
+  |  mov RB, BASE			// Save BASE.
+  |  mov TAB:FCARG1, [BASE]
+  |  call extern lj_tab_len@4		// LJ_FASTCALL (GCtab *t)
+  |  // Length of table returned in eax (RD).
+  |  mov BASE, RB			// Restore BASE.
+  |.if DUALNUM
+  |  mov RB, RD; jmp ->fff_resi
+  |.elif SSE
+  |  cvtsi2sd xmm0, RD; jmp ->fff_resxmm0
+  |.else
+  |  mov ARG1, RD; fild ARG1; jmp ->fff_resn
+  |.endif
+  |
+  |//-- Bit library --------------------------------------------------------
+  |
+  |.define TOBIT_BIAS, 0x59c00000	// 2^52 + 2^51 (float, not double!).
+  |
+  |.macro .ffunc_bit, name, kind, fdef
+  |  fdef name
+  |.if kind == 2
+  |.if SSE
+  |  sseconst_tobit xmm1, RBa
+  |.else
+  |  mov TMP1, TOBIT_BIAS
+  |.endif
+  |.endif
+  |  cmp dword [BASE+4], LJ_TISNUM
+  |.if DUALNUM
+  |  jne >1
+  |  mov RB, dword [BASE]
+  |.if kind > 0
+  |  jmp >2
+  |.else
+  |  jmp ->fff_resbit
+  |.endif
+  |1:
+  |  ja ->fff_fallback
+  |.else
+  |  jae ->fff_fallback
+  |.endif
+  |.if SSE
+  |  movsd xmm0, qword [BASE]
+  |.if kind < 2
+  |  sseconst_tobit xmm1, RBa
+  |.endif
+  |  addsd xmm0, xmm1
+  |  movd RB, xmm0
+  |.else
+  |  fld qword [BASE]
+  |.if kind < 2
+  |  mov TMP1, TOBIT_BIAS
+  |.endif
+  |  fadd TMP1
+  |  fstp FPARG1
+  |.if kind > 0
+  |  mov RB, ARG1
+  |.endif
+  |.endif
+  |2:
+  |.endmacro
+  |
+  |.macro .ffunc_bit, name, kind
+  |  .ffunc_bit name, kind, .ffunc_1
+  |.endmacro
+  |
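TOBIT_BIAS relies on a classic double-to-int trick: adding 2^52+2^51 pins the sum to a fixed binary exponent, so the low 32 bits of its IEEE-754 encoding hold the operand as a two's-complement integer modulo 2^32 (rounded to nearest for non-integral inputs), which is exactly the bit.tobit() conversion. The x87 path loads the same constant as a single-precision float (0x59c00000), hence the "float, not double!" note above. A small C illustration (function name illustrative only):

    #include <stdint.h>
    #include <string.h>

    static int32_t tobit(double x)
    {
      double biased = x + 6755399441055744.0;   /* 2^52 + 2^51 */
      uint64_t bits;
      memcpy(&bits, &biased, sizeof(bits));
      return (int32_t)(uint32_t)bits;           /* movd RB, xmm0: take the low 32 bits. */
    }

    /* tobit(1.0) == 1, tobit(-1.0) == -1, tobit(4294967296.0 + 5.0) == 5 */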
+  |.ffunc_bit bit_tobit, 0
+  |.if DUALNUM or SSE
+  |.if not SSE
+  |  mov RB, ARG1
+  |.endif
+  |  jmp ->fff_resbit
+  |.else
+  |  fild ARG1
+  |  jmp ->fff_resn
+  |.endif
+  |
+  |.macro .ffunc_bit_op, name, ins
+  |  .ffunc_bit name, 2
+  |  mov TMP2, NARGS:RD			// Save for fallback.
+  |  lea RD, [BASE+NARGS:RD*8-16]
+  |1:
+  |  cmp RD, BASE
+  |  jbe ->fff_resbit
+  |  cmp dword [RD+4], LJ_TISNUM
+  |.if DUALNUM
+  |  jne >2
+  |  ins RB, dword [RD]
+  |  sub RD, 8
+  |  jmp <1
+  |2:
+  |  ja ->fff_fallback_bit_op
+  |.else
+  |  jae ->fff_fallback_bit_op
+  |.endif
+  |.if SSE
+  |  movsd xmm0, qword [RD]
+  |  addsd xmm0, xmm1
+  |  movd RA, xmm0
+  |  ins RB, RA
+  |.else
+  |  fld qword [RD]
+  |  fadd TMP1
+  |  fstp FPARG1
+  |  ins RB, ARG1
+  |.endif
+  |  sub RD, 8
+  |  jmp <1
+  |.endmacro
+  |
+  |.ffunc_bit_op bit_band, and
+  |.ffunc_bit_op bit_bor, or
+  |.ffunc_bit_op bit_bxor, xor
+  |
+  |.ffunc_bit bit_bswap, 1
+  |  bswap RB
+  |  jmp ->fff_resbit
+  |
+  |.ffunc_bit bit_bnot, 1
+  |  not RB
+  |.if DUALNUM
+  |  jmp ->fff_resbit
+  |.elif SSE
+  |->fff_resbit:
+  |  cvtsi2sd xmm0, RB
+  |  jmp ->fff_resxmm0
+  |.else
+  |->fff_resbit:
+  |  mov ARG1, RB
+  |  fild ARG1
+  |  jmp ->fff_resn
+  |.endif
+  |
+  |->fff_fallback_bit_op:
+  |  mov NARGS:RD, TMP2			// Restore for fallback
+  |  jmp ->fff_fallback
+  |
+  |.macro .ffunc_bit_sh, name, ins
+  |.if DUALNUM
+  |  .ffunc_bit name, 1, .ffunc_2
+  |  // Note: no inline conversion from number for 2nd argument!
+  |  cmp dword [BASE+12], LJ_TISNUM; jne ->fff_fallback
+  |  mov RA, dword [BASE+8]
+  |.elif SSE
+  |  .ffunc_nnsse name
+  |  sseconst_tobit xmm2, RBa
+  |  addsd xmm0, xmm2
+  |  addsd xmm1, xmm2
+  |  movd RB, xmm0
+  |  movd RA, xmm1
+  |.else
+  |  .ffunc_nn name
+  |  mov TMP1, TOBIT_BIAS
+  |  fadd TMP1
+  |  fstp FPARG3
+  |  fadd TMP1
+  |  fstp FPARG1
+  |  mov RA, ARG3
+  |  mov RB, ARG1
+  |.endif
+  |  ins RB, cl				// Assumes RA is ecx.
+  |  jmp ->fff_resbit
+  |.endmacro
+  |
+  |.ffunc_bit_sh bit_lshift, shl
+  |.ffunc_bit_sh bit_rshift, shr
+  |.ffunc_bit_sh bit_arshift, sar
+  |.ffunc_bit_sh bit_rol, rol
+  |.ffunc_bit_sh bit_ror, ror
+  |
+  |//-----------------------------------------------------------------------
+  |
+  |->fff_fallback_2:
+  |  mov NARGS:RD, 1+2			// Other args are ignored, anyway.
+  |  jmp ->fff_fallback
+  |->fff_fallback_1:
+  |  mov NARGS:RD, 1+1			// Other args are ignored, anyway.
+  |->fff_fallback:			// Call fast function fallback handler.
+  |  // BASE = new base, RD = nargs+1
+  |  mov L:RB, SAVE_L
+  |  mov PC, [BASE-4]			// Fallback may overwrite PC.
+  |  mov SAVE_PC, PC			// Redundant (but a defined value).
+  |  mov L:RB->base, BASE
+  |  lea RD, [BASE+NARGS:RD*8-8]
+  |  lea RA, [RD+8*LUA_MINSTACK]	// Ensure enough space for handler.
+  |  mov L:RB->top, RD
+  |  mov CFUNC:RD, [BASE-8]
+  |  cmp RA, L:RB->maxstack
+  |  ja >5				// Need to grow stack.
+  |.if X64
+  |  mov CARG1d, L:RB
+  |.else
+  |  mov ARG1, L:RB
+  |.endif
+  |  call aword CFUNC:RD->f		// (lua_State *L)
+  |  mov BASE, L:RB->base
+  |  // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+  |  test RD, RD;  jg ->fff_res		// Returned nresults+1?
+  |1:
+  |  mov RA, L:RB->top
+  |  sub RA, BASE
+  |  shr RA, 3
+  |  test RD, RD
+  |  lea NARGS:RD, [RA+1]
+  |  mov LFUNC:RB, [BASE-8]
+  |  jne ->vm_call_tail			// Returned -1?
+  |  ins_callt				// Returned 0: retry fast path.
+  |
+  |// Reconstruct previous base for vmeta_call during tailcall.
+  |->vm_call_tail:
+  |  mov RA, BASE
+  |  test PC, FRAME_TYPE
+  |  jnz >3
+  |  movzx RB, PC_RA
+  |  not RBa				// Note: ~RB = -(RB+1)
+  |  lea BASE, [BASE+RB*8]		// base = base - (RB+1)*8
+  |  jmp ->vm_call_dispatch		// Resolve again for tailcall.
+  |3:
+  |  mov RB, PC
+  |  and RB, -8
+  |  sub BASE, RB
+  |  jmp ->vm_call_dispatch		// Resolve again for tailcall.
+  |
+  |5:  // Grow stack for fallback handler.
+  |  mov FCARG2, LUA_MINSTACK
+  |  mov FCARG1, L:RB
+  |  call extern lj_state_growstack@8	// (lua_State *L, int n)
+  |  mov BASE, L:RB->base
+  |  xor RD, RD				// Simulate a return 0.
+  |  jmp <1				// Dumb retry (goes through ff first).
+  |
+  |->fff_gcstep:			// Call GC step function.
+  |  // BASE = new base, RD = nargs+1
+  |  pop RBa				// Must keep stack at same level.
+  |  mov TMPa, RBa			// Save return address
+  |  mov L:RB, SAVE_L
+  |  mov SAVE_PC, PC			// Redundant (but a defined value).
+  |  mov L:RB->base, BASE
+  |  lea RD, [BASE+NARGS:RD*8-8]
+  |  mov FCARG1, L:RB
+  |  mov L:RB->top, RD
+  |  call extern lj_gc_step@4		// (lua_State *L)
+  |  mov BASE, L:RB->base
+  |  mov RD, L:RB->top
+  |  sub RD, BASE
+  |  shr RD, 3
+  |  add NARGS:RD, 1
+  |  mov RBa, TMPa
+  |  push RBa				// Restore return address.
+  |  ret
+  |
+  |//-----------------------------------------------------------------------
+  |//-- Special dispatch targets -------------------------------------------
+  |//-----------------------------------------------------------------------
+  |
+  |->vm_record:				// Dispatch target for recording phase.
+  |.if JIT
+  |  movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)]
+  |  test RDL, HOOK_VMEVENT		// No recording while in vmevent.
+  |  jnz >5
+  |  // Decrement the hookcount for consistency, but always do the call.
+  |  test RDL, HOOK_ACTIVE
+  |  jnz >1
+  |  test RDL, LUA_MASKLINE|LUA_MASKCOUNT
+  |  jz >1
+  |  dec dword [DISPATCH+DISPATCH_GL(hookcount)]
+  |  jmp >1
+  |.endif
+  |
+  |->vm_rethook:			// Dispatch target for return hooks.
+  |  movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)]
+  |  test RDL, HOOK_ACTIVE		// Hook already active?
+  |  jnz >5
+  |  jmp >1
+  |
+  |->vm_inshook:			// Dispatch target for instr/line hooks.
+  |  movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)]
+  |  test RDL, HOOK_ACTIVE		// Hook already active?
+  |  jnz >5
+  |
+  |  test RDL, LUA_MASKLINE|LUA_MASKCOUNT
+  |  jz >5
+  |  dec dword [DISPATCH+DISPATCH_GL(hookcount)]
+  |  jz >1
+  |  test RDL, LUA_MASKLINE
+  |  jz >5
+  |1:
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, BASE
+  |  mov FCARG2, PC			// Caveat: FCARG2 == BASE
+  |  mov FCARG1, L:RB
+  |  // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+  |  call extern lj_dispatch_ins@8	// (lua_State *L, BCIns *pc)
+  |3:
+  |  mov BASE, L:RB->base
+  |4:
+  |  movzx RA, PC_RA
+  |5:
+  |  movzx OP, PC_OP
+  |  movzx RD, PC_RD
+  |.if X64
+  |  jmp aword [DISPATCH+OP*8+GG_DISP2STATIC]	// Re-dispatch to static ins.
+  |.else
+  |  jmp aword [DISPATCH+OP*4+GG_DISP2STATIC]	// Re-dispatch to static ins.
+  |.endif
+  |
+  |->cont_hook:				// Continue from hook yield.
+  |  add PC, 4
+  |  mov RA, [RB-24]
+  |  mov MULTRES, RA			// Restore MULTRES for *M ins.
+  |  jmp <4
+  |
+  |->vm_hotloop:			// Hot loop counter underflow.
+  |.if JIT
+  |  mov LFUNC:RB, [BASE-8]		// Same as curr_topL(L).
+  |  mov RB, LFUNC:RB->pc
+  |  movzx RD, byte [RB+PC2PROTO(framesize)]
+  |  lea RD, [BASE+RD*8]
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, BASE
+  |  mov L:RB->top, RD
+  |  mov FCARG2, PC
+  |  lea FCARG1, [DISPATCH+GG_DISP2J]
+  |  mov aword [DISPATCH+DISPATCH_J(L)], L:RBa
+  |  mov SAVE_PC, PC
+  |  call extern lj_trace_hot@8		// (jit_State *J, const BCIns *pc)
+  |  jmp <3
+  |.endif
+  |
+  |->vm_callhook:			// Dispatch target for call hooks.
+  |  mov SAVE_PC, PC
+  |.if JIT
+  |  jmp >1
+  |.endif
+  |
+  |->vm_hotcall:			// Hot call counter underflow.
+  |.if JIT
+  |  mov SAVE_PC, PC
+  |  or PC, 1				// Marker for hot call.
+  |1:
+  |.endif
+  |  lea RD, [BASE+NARGS:RD*8-8]
+  |  mov L:RB, SAVE_L
+  |  mov L:RB->base, BASE
+  |  mov L:RB->top, RD
+  |  mov FCARG2, PC
+  |  mov FCARG1, L:RB
+  |  call extern lj_dispatch_call@8	// (lua_State *L, const BCIns *pc)
+  |  // ASMFunction returned in eax/rax (RDa).
+  |  mov SAVE_PC, 0			// Invalidate for subsequent line hook.
+  |.if JIT
+  |  and PC, -2
+  |.endif
+  |  mov BASE, L:RB->base
+  |  mov RAa, RDa
+  |  mov RD, L:RB->top
+  |  sub RD, BASE
+  |  mov RBa, RAa
+  |  movzx RA, PC_RA
+  |  shr RD, 3
+  |  add NARGS:RD, 1
+  |  jmp RBa
+  |
+  |//-----------------------------------------------------------------------
+  |//-- Trace exit handler -------------------------------------------------
+  |//-----------------------------------------------------------------------
+  |
+  |// Called from an exit stub with the exit number on the stack.
+  |// The 16 bit exit number is stored with two (sign-extended) push imm8.
+  |->vm_exit_handler:
+  |.if JIT
+  |.if X64
+  |  push r13; push r12
+  |  push r11; push r10; push r9; push r8
+  |  push rdi; push rsi; push rbp; lea rbp, [rsp+88]; push rbp
+  |  push rbx; push rdx; push rcx; push rax
+  |  movzx RC, byte [rbp-8]		// Reconstruct exit number.
+  |  mov RCH, byte [rbp-16]
+  |  mov [rbp-8], r15; mov [rbp-16], r14
+  |.else
+  |  push ebp; lea ebp, [esp+12]; push ebp
+  |  push ebx; push edx; push ecx; push eax
+  |  movzx RC, byte [ebp-4]		// Reconstruct exit number.
+  |  mov RCH, byte [ebp-8]
+  |  mov [ebp-4], edi; mov [ebp-8], esi
+  |.endif
+  |  // Caveat: DISPATCH is ebx.
+  |  mov DISPATCH, [ebp]
+  |  mov RA, [DISPATCH+DISPATCH_GL(vmstate)]	// Get trace number.
+  |  set_vmstate EXIT
+  |  mov [DISPATCH+DISPATCH_J(exitno)], RC
+  |  mov [DISPATCH+DISPATCH_J(parent)], RA
+  |.if X64
+  |.if X64WIN
+  |  sub rsp, 16*8+4*8			// Room for SSE regs + save area.
+  |.else
+  |  sub rsp, 16*8			// Room for SSE regs.
+  |.endif
+  |  add rbp, -128
+  |  movsd qword [rbp-8],   xmm15; movsd qword [rbp-16],  xmm14
+  |  movsd qword [rbp-24],  xmm13; movsd qword [rbp-32],  xmm12
+  |  movsd qword [rbp-40],  xmm11; movsd qword [rbp-48],  xmm10
+  |  movsd qword [rbp-56],  xmm9;  movsd qword [rbp-64],  xmm8
+  |  movsd qword [rbp-72],  xmm7;  movsd qword [rbp-80],  xmm6
+  |  movsd qword [rbp-88],  xmm5;  movsd qword [rbp-96],  xmm4
+  |  movsd qword [rbp-104], xmm3;  movsd qword [rbp-112], xmm2
+  |  movsd qword [rbp-120], xmm1;  movsd qword [rbp-128], xmm0
+  |.else
+  |  sub esp, 8*8+16			// Room for SSE regs + args.
+  |  movsd qword [ebp-40], xmm7; movsd qword [ebp-48], xmm6
+  |  movsd qword [ebp-56], xmm5; movsd qword [ebp-64], xmm4
+  |  movsd qword [ebp-72], xmm3; movsd qword [ebp-80], xmm2
+  |  movsd qword [ebp-88], xmm1; movsd qword [ebp-96], xmm0
+  |.endif
+  |  // Caveat: RB is ebp.
+  |  mov L:RB, [DISPATCH+DISPATCH_GL(jit_L)]
+  |  mov BASE, [DISPATCH+DISPATCH_GL(jit_base)]
+  |  mov aword [DISPATCH+DISPATCH_J(L)], L:RBa
+  |  mov dword [DISPATCH+DISPATCH_GL(jit_L)], 0
+  |  mov L:RB->base, BASE
+  |.if X64WIN
+  |  lea CARG2, [rsp+4*8]
+  |.elif X64
+  |  mov CARG2, rsp
+  |.else
+  |  lea FCARG2, [esp+16]
+  |.endif
+  |  lea FCARG1, [DISPATCH+GG_DISP2J]
+  |  call extern lj_trace_exit@8	// (jit_State *J, ExitState *ex)
+  |  // MULTRES or negated error code returned in eax (RD).
+  |  mov RAa, L:RB->cframe
+  |  and RAa, CFRAME_RAWMASK
+  |.if X64WIN
+  |  // Reposition stack later.
+  |.elif X64
+  |  mov rsp, RAa			// Reposition stack to C frame.
+  |.else
+  |  mov esp, RAa			// Reposition stack to C frame.
+  |.endif
+  |  mov [RAa+CFRAME_OFS_L], L:RB	// Set SAVE_L (on-trace resume/yield).
+  |  mov BASE, L:RB->base
+  |  mov PC, [RAa+CFRAME_OFS_PC]	// Get SAVE_PC.
+  |.if X64
+  |  jmp >1
+  |.endif
+  |.endif
+  |->vm_exit_interp:
+  |  // RD = MULTRES or negated error code, BASE, PC and DISPATCH set.
+  |.if JIT
+  |.if X64
+  |  // Restore additional callee-save registers only used in compiled code.
+  |.if X64WIN
+  |  lea RAa, [rsp+9*16+4*8]
+  |1:
+  |  movdqa xmm15, [RAa-9*16]
+  |  movdqa xmm14, [RAa-8*16]
+  |  movdqa xmm13, [RAa-7*16]
+  |  movdqa xmm12, [RAa-6*16]
+  |  movdqa xmm11, [RAa-5*16]
+  |  movdqa xmm10, [RAa-4*16]
+  |  movdqa xmm9, [RAa-3*16]
+  |  movdqa xmm8, [RAa-2*16]
+  |  movdqa xmm7, [RAa-1*16]
+  |  mov rsp, RAa			// Reposition stack to C frame.
+  |  movdqa xmm6, [RAa]
+  |  mov r15, CSAVE_3
+  |  mov r14, CSAVE_4
+  |.else
+  |  add rsp, 16			// Reposition stack to C frame.
+  |1:
+  |.endif
+  |  mov r13, TMPa
+  |  mov r12, TMPQ
+  |.endif
+  |  test RD, RD; js >3			// Check for error from exit.
+  |  mov MULTRES, RD
+  |  mov LFUNC:KBASE, [BASE-8]
+  |  mov KBASE, LFUNC:KBASE->pc
+  |  mov KBASE, [KBASE+PC2PROTO(k)]
+  |  mov dword [DISPATCH+DISPATCH_GL(jit_L)], 0
+  |  set_vmstate INTERP
+  |  // Modified copy of ins_next which handles function header dispatch, too.
+  |  mov RC, [PC]
+  |  movzx RA, RCH
+  |  movzx OP, RCL
+  |  add PC, 4
+  |  shr RC, 16
+  |  cmp OP, BC_FUNCF			// Function header?
+  |  jb >2
+  |  mov RC, MULTRES			// RC/RD holds nres+1.
+  |2:
+  |.if X64
+  |  jmp aword [DISPATCH+OP*8]
+  |.else
+  |  jmp aword [DISPATCH+OP*4]
+  |.endif
+  |
+  |3:  // Rethrow error from the right C frame.
+  |  neg RD
+  |  mov FCARG1, L:RB
+  |  mov FCARG2, RD
+  |  call extern lj_err_throw@8		// (lua_State *L, int errcode)
+  |.endif
+  |
+  |//-----------------------------------------------------------------------
+  |//-- Math helper functions ----------------------------------------------
+  |//-----------------------------------------------------------------------
+  |
+  |// FP value rounding. Called by math.floor/math.ceil fast functions
+  |// and from JIT code.
+  |
+  |// x87 variant: Arg/ret on x87 stack. No int/xmm registers modified.
+  |.macro vm_round_x87, mode1, mode2
+  |  fnstcw word [esp+4]		// Caveat: overwrites ARG1 and ARG2.
+  |  mov [esp+8], eax
+  |  mov ax, mode1
+  |  or ax, [esp+4]
+  |.if mode2 ~= 0xffff
+  |  and ax, mode2
+  |.endif
+  |  mov [esp+6], ax
+  |  fldcw word [esp+6]
+  |  frndint
+  |  fldcw word [esp+4]
+  |  mov eax, [esp+8]
+  |  ret
+  |.endmacro
+  |
+  |// SSE variant: arg/ret is xmm0. xmm0-xmm3 and RD (eax) modified.
+  |.macro vm_round_sse, mode
+  |  sseconst_abs xmm2, RDa
+  |  sseconst_2p52 xmm3, RDa
+  |  movaps xmm1, xmm0
+  |  andpd xmm1, xmm2			// |x|
+  |  ucomisd xmm3, xmm1			// No truncation if 2^52 <= |x|.
+  |  jbe >1
+  |  andnpd xmm2, xmm0			// Isolate sign bit.
+  |.if mode == 2		// trunc(x)?
+  |  movaps xmm0, xmm1
+  |  addsd xmm1, xmm3			// (|x| + 2^52) - 2^52
+  |  subsd xmm1, xmm3
+  |  sseconst_1 xmm3, RDa
+  |  cmpsd xmm0, xmm1, 1		// |x| < result?
+  |  andpd xmm0, xmm3
+  |  subsd xmm1, xmm0			// If yes, subtract -1.
+  |  orpd xmm1, xmm2			// Merge sign bit back in.
+  |.else
+  |  addsd xmm1, xmm3			// (|x| + 2^52) - 2^52
+  |  subsd xmm1, xmm3
+  |  orpd xmm1, xmm2			// Merge sign bit back in.
+  |  .if mode == 1		// ceil(x)?
+  |    sseconst_m1 xmm2, RDa		// Must subtract -1 to preserve -0.
+  |    cmpsd xmm0, xmm1, 6		// x > result?
+  |  .else			// floor(x)?
+  |    sseconst_1 xmm2, RDa
+  |    cmpsd xmm0, xmm1, 1		// x < result?
+  |  .endif
+  |  andpd xmm0, xmm2
+  |  subsd xmm1, xmm0			// If yes, subtract +-1.
+  |.endif
+  |  movaps xmm0, xmm1
+  |1:
+  |  ret
+  |.endmacro
+  |
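The SSE rounding above uses the 2^52 magic-number trick: adding and then subtracting 2^52 rounds |x| to an integer in the current (round-to-nearest) mode, a compare-and-subtract then corrects in the direction floor/ceil/trunc requires, and the original sign bit is merged back so -0.0 survives. A C sketch of the floor case under those assumptions (function name illustrative only):

    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    static double floor_sse_style(double x)
    {
      double ax = fabs(x);
      if (ax >= 4503599627370496.0) return x;   /* 2^52 <= |x|: already integral. */
      double r = (ax + 4503599627370496.0) - 4503599627370496.0;  /* Round |x|. */
      uint64_t rb, xb; memcpy(&rb, &r, 8); memcpy(&xb, &x, 8);
      rb |= xb & 0x8000000000000000ull;         /* Merge sign bit back in (orpd). */
      memcpy(&r, &rb, 8);
      if (x < r) r -= 1.0;                      /* Rounded up: step down to floor. */
      return r;
    }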
+  |.macro vm_round, name, ssemode, mode1, mode2
+  |->name:
+  |.if not SSE
+  |  vm_round_x87 mode1, mode2
+  |.endif
+  |->name .. _sse:
+  |  vm_round_sse ssemode
+  |.endmacro
+  |
+  |  vm_round vm_floor, 0, 0x0400, 0xf7ff
+  |  vm_round vm_ceil,  1, 0x0800, 0xfbff
+  |  vm_round vm_trunc, 2, 0x0c00, 0xffff
+  |
+  |// FP modulo x%y. Called by BC_MOD* and vm_arith.
+  |->vm_mod:
+  |.if SSE
+  |// Args in xmm0/xmm1, return value in xmm0.
+  |// Caveat: xmm0-xmm5 and RC (eax) modified!
+  |  movaps xmm5, xmm0
+  |  divsd xmm0, xmm1
+  |  sseconst_abs xmm2, RDa
+  |  sseconst_2p52 xmm3, RDa
+  |  movaps xmm4, xmm0
+  |  andpd xmm4, xmm2			// |x/y|
+  |  ucomisd xmm3, xmm4			// No truncation if 2^52 <= |x/y|.
+  |  jbe >1
+  |  andnpd xmm2, xmm0			// Isolate sign bit.
+  |  addsd xmm4, xmm3			// (|x/y| + 2^52) - 2^52
+  |  subsd xmm4, xmm3
+  |  orpd xmm4, xmm2			// Merge sign bit back in.
+  |  sseconst_1 xmm2, RDa
+  |  cmpsd xmm0, xmm4, 1		// x/y < result?
+  |  andpd xmm0, xmm2
+  |  subsd xmm4, xmm0			// If yes, subtract 1.0.
+  |  movaps xmm0, xmm5
+  |  mulsd xmm1, xmm4
+  |  subsd xmm0, xmm1
+  |  ret
+  |1:
+  |  mulsd xmm1, xmm0
+  |  movaps xmm0, xmm5
+  |  subsd xmm0, xmm1
+  |  ret
+  |.else
+  |// Args/ret on x87 stack (y on top). No xmm registers modified.
+  |// Caveat: needs 3 slots on x87 stack! RC (eax) modified!
+  |  fld st1
+  |  fdiv st1
+  |  fnstcw word [esp+4]
+  |  mov ax, 0x0400
+  |  or ax, [esp+4]
+  |  and ax, 0xf7ff
+  |  mov [esp+6], ax
+  |  fldcw word [esp+6]
+  |  frndint
+  |  fldcw word [esp+4]
+  |  fmulp st1
+  |  fsubp st1
+  |  ret
+  |.endif
+  |
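What ->vm_mod computes, written as plain C for reference (illustrative only): Lua's modulo is x - floor(x/y)*y, so the result takes the sign of the divisor; the code above simply inlines the same 2^52 floor trick instead of calling floor().

    #include <math.h>

    static double lua_mod(double x, double y)
    {
      return x - floor(x / y) * y;
    }

    /* lua_mod(5.0, 3.0) == 2.0, lua_mod(-5.0, 3.0) == 1.0, lua_mod(5.0, -3.0) == -1.0 */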
+  |// FP log2(x). Called by math.log(x, base).
+  |->vm_log2:
+  |.if X64WIN
+  |  movsd qword [rsp+8], xmm0		// Use scratch area.
+  |  fld1
+  |  fld qword [rsp+8]
+  |  fyl2x
+  |  fstp qword [rsp+8]
+  |  movsd xmm0, qword [rsp+8]
+  |.elif X64
+  |  movsd qword [rsp-8], xmm0		// Use red zone.
+  |  fld1
+  |  fld qword [rsp-8]
+  |  fyl2x
+  |  fstp qword [rsp-8]
+  |  movsd xmm0, qword [rsp-8]
+  |.else
+  |  fld1
+  |  fld qword [esp+4]
+  |  fyl2x
+  |.endif
+  |  ret
+  |
+  |// FP exponentiation e^x and 2^x. Called by math.exp fast function and
+  |// from JIT code. Arg/ret on x87 stack. No int/xmm regs modified.
+  |// Caveat: needs 3 slots on x87 stack!
+  |->vm_exp_x87:
+  |  fldl2e; fmulp st1				// e^x ==> 2^(x*log2(e))
+  |->vm_exp2_x87:
+  |  .if X64WIN
+  |    .define expscratch, dword [rsp+8]	// Use scratch area.
+  |  .elif X64
+  |    .define expscratch, dword [rsp-8]	// Use red zone.
+  |  .else
+  |    .define expscratch, dword [esp+4]	// Needs 4 byte scratch area.
+  |  .endif
+  |  fst expscratch				// Caveat: overwrites ARG1.
+  |  cmp expscratch, 0x7f800000; je >1		// Special case: e^+Inf = +Inf
+  |  cmp expscratch, 0xff800000; je >2		// Special case: e^-Inf = 0
+  |->vm_exp2raw:  // Entry point for vm_pow. Without +-Inf check.
+  |  fdup; frndint; fsub st1, st0; fxch		// Split into frac/int part.
+  |  f2xm1; fld1; faddp st1; fscale; fpop1	// ==> (2^frac-1 +1) << int
+  |1:
+  |  ret
+  |2:
+  |  fpop; fldz; ret
+  |
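The identity behind the x87 sequence above, written out in C for finite inputs (the code above handles +-Inf separately; the function name is illustrative only): e^x = 2^(x*log2(e)), with the exponent split into an integer part applied by fscale and a fractional part small enough for f2xm1.

    #include <math.h>

    /* Sketch only, assuming finite x and default round-to-nearest mode. */
    static double exp_by_exp2(double x)
    {
      double z = x * 1.4426950408889634;  /* x * log2(e)   (fldl2e; fmulp).  */
      double i = nearbyint(z);            /* frndint, round to nearest.      */
      double f = z - i;                   /* Fractional part, |f| <= 0.5.    */
      return ldexp(exp2(f), (int)i);      /* (2^f-1 + 1) << i, as f2xm1; fld1; faddp; fscale. */
    }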
+  |// Generic power function x^y. Called by BC_POW, math.pow fast function,
+  |// and vm_arith.
+  |// Args/ret on x87 stack (y on top). RC (eax) modified.
+  |// Caveat: needs 3 slots on x87 stack!
+  |->vm_pow:
+  |.if not SSE
+  |  fist dword [esp+4]			// Store/reload int before comparison.
+  |  fild dword [esp+4]			// Integral exponent used in vm_powi.
+  |  fucomip st1
+  |  jnz >8				// Branc

<TRUNCATED>