Posted to commits@trafficserver.apache.org by zw...@apache.org on 2015/07/29 01:39:53 UTC

[15/62] [abbrv] trafficserver git commit: TS-3783 TS-3030 Add luajit v2.0.4 as a subtree

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_mcode.c
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_mcode.c b/lib/luajit/src/lj_mcode.c
new file mode 100644
index 0000000..d95ebeb
--- /dev/null
+++ b/lib/luajit/src/lj_mcode.c
@@ -0,0 +1,386 @@
+/*
+** Machine code management.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_mcode_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#if LJ_HASJIT
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_jit.h"
+#include "lj_mcode.h"
+#include "lj_trace.h"
+#include "lj_dispatch.h"
+#endif
+#if LJ_HASJIT || LJ_HASFFI
+#include "lj_vm.h"
+#endif
+
+/* -- OS-specific functions ----------------------------------------------- */
+
+#if LJ_HASJIT || LJ_HASFFI
+
+/* Define this if you want to run LuaJIT with Valgrind. */
+#ifdef LUAJIT_USE_VALGRIND
+#include <valgrind/valgrind.h>
+#endif
+
+#if LJ_TARGET_IOS
+void sys_icache_invalidate(void *start, size_t len);
+#endif
+
+/* Synchronize data/instruction cache. */
+void lj_mcode_sync(void *start, void *end)
+{
+#ifdef LUAJIT_USE_VALGRIND
+  VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
+#endif
+#if LJ_TARGET_X86ORX64
+  UNUSED(start); UNUSED(end);
+#elif LJ_TARGET_IOS
+  sys_icache_invalidate(start, (char *)end-(char *)start);
+#elif LJ_TARGET_PPC
+  lj_vm_cachesync(start, end);
+#elif defined(__GNUC__)
+  __clear_cache(start, end);
+#else
+#error "Missing builtin to flush instruction cache"
+#endif
+}
+
+#endif
+
+#if LJ_HASJIT
+
+#if LJ_TARGET_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#define MCPROT_RW	PAGE_READWRITE
+#define MCPROT_RX	PAGE_EXECUTE_READ
+#define MCPROT_RWX	PAGE_EXECUTE_READWRITE
+
+static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
+{
+  void *p = VirtualAlloc((void *)hint, sz,
+			 MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
+  if (!p && !hint)
+    lj_trace_err(J, LJ_TRERR_MCODEAL);
+  return p;
+}
+
+static void mcode_free(jit_State *J, void *p, size_t sz)
+{
+  UNUSED(J); UNUSED(sz);
+  VirtualFree(p, 0, MEM_RELEASE);
+}
+
+static int mcode_setprot(void *p, size_t sz, DWORD prot)
+{
+  DWORD oprot;
+  return !VirtualProtect(p, sz, prot, &oprot);
+}
+
+#elif LJ_TARGET_POSIX
+
+#include <sys/mman.h>
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS	MAP_ANON
+#endif
+
+#define MCPROT_RW	(PROT_READ|PROT_WRITE)
+#define MCPROT_RX	(PROT_READ|PROT_EXEC)
+#define MCPROT_RWX	(PROT_READ|PROT_WRITE|PROT_EXEC)
+
+static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
+{
+  void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+  if (p == MAP_FAILED) {
+    if (!hint) lj_trace_err(J, LJ_TRERR_MCODEAL);
+    p = NULL;
+  }
+  return p;
+}
+
+static void mcode_free(jit_State *J, void *p, size_t sz)
+{
+  UNUSED(J);
+  munmap(p, sz);
+}
+
+static int mcode_setprot(void *p, size_t sz, int prot)
+{
+  return mprotect(p, sz, prot);
+}
+
+#elif LJ_64
+
+#error "Missing OS support for explicit placement of executable memory"
+
+#else
+
+/* Fallback allocator. This will fail if memory is not executable by default. */
+#define LUAJIT_UNPROTECT_MCODE
+#define MCPROT_RW	0
+#define MCPROT_RX	0
+#define MCPROT_RWX	0
+
+static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
+{
+  UNUSED(hint); UNUSED(prot);
+  return lj_mem_new(J->L, sz);
+}
+
+static void mcode_free(jit_State *J, void *p, size_t sz)
+{
+  lj_mem_free(J2G(J), p, sz);
+}
+
+#endif
+
+/* -- MCode area protection ----------------------------------------------- */
+
+/* Define this ONLY if page protection twiddling becomes a bottleneck. */
+#ifdef LUAJIT_UNPROTECT_MCODE
+
+/* It's generally considered to be a potential security risk to have
+** pages with simultaneous write *and* execute access in a process.
+**
+** Do not even think about using this mode for server processes or
+** apps handling untrusted external data (such as a browser).
+**
+** The security risk is not in LuaJIT itself -- but if an adversary finds
+** any *other* flaw in your C application logic, then any RWX memory page
+** simplifies writing an exploit considerably.
+*/
+#define MCPROT_GEN	MCPROT_RWX
+#define MCPROT_RUN	MCPROT_RWX
+
+static void mcode_protect(jit_State *J, int prot)
+{
+  UNUSED(J); UNUSED(prot);
+}
+
+#else
+
+/* This is the default behaviour and much safer:
+**
+** Most of the time the memory pages holding machine code are executable,
+** but NONE of them is writable.
+**
+** The current memory area is marked read-write (but NOT executable) only
+** during the short time window while the assembler generates machine code.
+*/
+#define MCPROT_GEN	MCPROT_RW
+#define MCPROT_RUN	MCPROT_RX
+
+/* Protection twiddling failed. Probably due to kernel security. */
+static LJ_NOINLINE void mcode_protfail(jit_State *J)
+{
+  lua_CFunction panic = J2G(J)->panic;
+  if (panic) {
+    lua_State *L = J->L;
+    setstrV(L, L->top++, lj_err_str(L, LJ_ERR_JITPROT));
+    panic(L);
+  }
+}
+
+/* Change protection of MCode area. */
+static void mcode_protect(jit_State *J, int prot)
+{
+  if (J->mcprot != prot) {
+    if (LJ_UNLIKELY(mcode_setprot(J->mcarea, J->szmcarea, prot)))
+      mcode_protfail(J);
+    J->mcprot = prot;
+  }
+}
+
+#endif
+
+/* -- MCode area allocation ----------------------------------------------- */
+
+#if LJ_TARGET_X64
+#define mcode_validptr(p)	((p) && (uintptr_t)(p) < (uintptr_t)1<<47)
+#else
+#define mcode_validptr(p)	((p) && (uintptr_t)(p) < 0xffff0000)
+#endif
+
+#ifdef LJ_TARGET_JUMPRANGE
+
+/* Get memory within relative jump distance of our code in 64 bit mode. */
+static void *mcode_alloc(jit_State *J, size_t sz)
+{
+  /* Target an address in the static assembler code (64K aligned).
+  ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
+  ** Use half the jump range so every address in the range can reach any other.
+  */
+#if LJ_TARGET_MIPS
+  /* Use the middle of the 256MB-aligned region. */
+  uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler & 0xf0000000u) +
+		     0x08000000u;
+#else
+  uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
+#endif
+  const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21);
+  /* First try a contiguous area below the last one. */
+  uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
+  int i;
+  for (i = 0; i < 32; i++) {  /* 32 attempts ought to be enough ... */
+    if (mcode_validptr(hint)) {
+      void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);
+
+      if (mcode_validptr(p) &&
+	  ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range))
+	return p;
+      if (p) mcode_free(J, p, sz);  /* Free badly placed area. */
+    }
+    /* Next try probing pseudo-random addresses. */
+    do {
+      hint = (0x78fb ^ LJ_PRNG_BITS(J, 15)) << 16;  /* 64K aligned. */
+    } while (!(hint + sz < range));
+    hint = target + hint - (range>>1);
+  }
+  lj_trace_err(J, LJ_TRERR_MCODEAL);  /* Give up. OS probably ignores hints? */
+  return NULL;
+}
+
+#else
+
+/* All memory addresses are reachable by relative jumps. */
+static void *mcode_alloc(jit_State *J, size_t sz)
+{
+#ifdef __OpenBSD__
+  /* Allow better executable memory allocation for OpenBSD W^X mode. */
+  void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN);
+  if (p && mcode_setprot(p, sz, MCPROT_GEN)) {
+    mcode_free(J, p, sz);
+    return NULL;
+  }
+  return p;
+#else
+  return mcode_alloc_at(J, 0, sz, MCPROT_GEN);
+#endif
+}
+
+#endif
+
+/* -- MCode area management ----------------------------------------------- */
+
+/* Linked list of MCode areas. */
+typedef struct MCLink {
+  MCode *next;		/* Next area. */
+  size_t size;		/* Size of current area. */
+} MCLink;
+
+/* Allocate a new MCode area. */
+static void mcode_allocarea(jit_State *J)
+{
+  MCode *oldarea = J->mcarea;
+  size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10;
+  sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
+  J->mcarea = (MCode *)mcode_alloc(J, sz);
+  J->szmcarea = sz;
+  J->mcprot = MCPROT_GEN;
+  J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea);
+  J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink));
+  ((MCLink *)J->mcarea)->next = oldarea;
+  ((MCLink *)J->mcarea)->size = sz;
+  J->szallmcarea += sz;
+}
+
+/* Free all MCode areas. */
+void lj_mcode_free(jit_State *J)
+{
+  MCode *mc = J->mcarea;
+  J->mcarea = NULL;
+  J->szallmcarea = 0;
+  while (mc) {
+    MCode *next = ((MCLink *)mc)->next;
+    mcode_free(J, mc, ((MCLink *)mc)->size);
+    mc = next;
+  }
+}
+
+/* -- MCode transactions -------------------------------------------------- */
+
+/* Reserve the remainder of the current MCode area. */
+MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
+{
+  if (!J->mcarea)
+    mcode_allocarea(J);
+  else
+    mcode_protect(J, MCPROT_GEN);
+  *lim = J->mcbot;
+  return J->mctop;
+}
+
+/* Commit the top part of the current MCode area. */
+void lj_mcode_commit(jit_State *J, MCode *top)
+{
+  J->mctop = top;
+  mcode_protect(J, MCPROT_RUN);
+}
+
+/* Abort the reservation. */
+void lj_mcode_abort(jit_State *J)
+{
+  if (J->mcarea)
+    mcode_protect(J, MCPROT_RUN);
+}
+
+/* Set/reset protection to allow patching of MCode areas. */
+MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
+{
+#ifdef LUAJIT_UNPROTECT_MCODE
+  UNUSED(J); UNUSED(ptr); UNUSED(finish);
+  return NULL;
+#else
+  if (finish) {
+    if (J->mcarea == ptr)
+      mcode_protect(J, MCPROT_RUN);
+    else if (LJ_UNLIKELY(mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN)))
+      mcode_protfail(J);
+    return NULL;
+  } else {
+    MCode *mc = J->mcarea;
+    /* Try current area first to use the protection cache. */
+    if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
+      mcode_protect(J, MCPROT_GEN);
+      return mc;
+    }
+    /* Otherwise search through the list of MCode areas. */
+    for (;;) {
+      mc = ((MCLink *)mc)->next;
+      lua_assert(mc != NULL);
+      if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
+	if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN)))
+	  mcode_protfail(J);
+	return mc;
+      }
+    }
+  }
+#endif
+}
+
+/* Limit of MCode reservation reached. */
+void lj_mcode_limiterr(jit_State *J, size_t need)
+{
+  size_t sizemcode, maxmcode;
+  lj_mcode_abort(J);
+  sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10;
+  sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
+  maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10;
+  if ((size_t)need > sizemcode)
+    lj_trace_err(J, LJ_TRERR_MCODEOV);  /* Too long for any area. */
+  if (J->szallmcarea + sizemcode > maxmcode)
+    lj_trace_err(J, LJ_TRERR_MCODEAL);
+  mcode_allocarea(J);
+  lj_trace_err(J, LJ_TRERR_MCODELM);  /* Retry with new area. */
+}
+
+#endif
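
The MCode transaction API above (lj_mcode_reserve(), lj_mcode_commit(), lj_mcode_abort()) is easiest to read from the caller's side: reserve flips the current area to MCPROT_GEN (read-write), the assembler emits downwards from the returned top towards *lim, and commit or abort flips the area back to MCPROT_RUN (read-execute). The following is an illustrative sketch only, not part of the committed file, and assumes the surrounding LuaJIT headers; emit_downwards_sketch() is a hypothetical stand-in for the real emitter in lj_asm.c:

  #include "lj_jit.h"
  #include "lj_mcode.h"

  /* Hypothetical emitter: writes code from top down towards lim and returns
  ** the lowest instruction address on success, or NULL on failure. */
  static MCode *emit_downwards_sketch(jit_State *J, MCode *top, MCode *lim);

  static MCode *assemble_sketch(jit_State *J)
  {
    MCode *lim;
    MCode *top = lj_mcode_reserve(J, &lim);   /* Area is now MCPROT_GEN (RW). */
    MCode *p = emit_downwards_sketch(J, top, lim);
    if (p == NULL) {
      lj_mcode_abort(J);      /* Give up: re-protect the area to RX. */
      return NULL;
    }
    lj_mcode_commit(J, p);    /* p becomes the new mctop; area flips back to RX. */
    return p;
  }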

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_mcode.h
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_mcode.h b/lib/luajit/src/lj_mcode.h
new file mode 100644
index 0000000..ee60452
--- /dev/null
+++ b/lib/luajit/src/lj_mcode.h
@@ -0,0 +1,30 @@
+/*
+** Machine code management.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_MCODE_H
+#define _LJ_MCODE_H
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT || LJ_HASFFI
+LJ_FUNC void lj_mcode_sync(void *start, void *end);
+#endif
+
+#if LJ_HASJIT
+
+#include "lj_jit.h"
+
+LJ_FUNC void lj_mcode_free(jit_State *J);
+LJ_FUNC MCode *lj_mcode_reserve(jit_State *J, MCode **lim);
+LJ_FUNC void lj_mcode_commit(jit_State *J, MCode *m);
+LJ_FUNC void lj_mcode_abort(jit_State *J);
+LJ_FUNC MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish);
+LJ_FUNC_NORET void lj_mcode_limiterr(jit_State *J, size_t need);
+
+#define lj_mcode_commitbot(J, m)	(J->mcbot = (m))
+
+#endif
+
+#endif
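
lj_mcode_patch(), declared above, pairs an "open" call (finish=0), which makes the area containing a given pointer writable and returns that area's base, with a "close" call (finish=1) that takes the returned base and restores read-execute protection. A short sketch of that pairing, again illustrative only and not part of the committed file; the single-word store stands in for whatever branch rewrite the backend really performs:

  /* Patch one machine-code word inside an already-committed area. */
  static void patch_sketch(jit_State *J, MCode *p, MCode ins)
  {
    MCode *area = lj_mcode_patch(J, p, 0);  /* Unprotect the area holding p. */
    *p = ins;                               /* Hypothetical rewrite. */
    lj_mcode_patch(J, area, 1);             /* Back to MCPROT_RUN. */
    lj_mcode_sync(p, p + 1);                /* Flush I-cache where needed (no-op on x86/x64). */
  }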

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_meta.c
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_meta.c b/lib/luajit/src/lj_meta.c
new file mode 100644
index 0000000..faaaf70
--- /dev/null
+++ b/lib/luajit/src/lj_meta.c
@@ -0,0 +1,466 @@
+/*
+** Metamethod handling.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_meta_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#include "lj_vm.h"
+#include "lj_strscan.h"
+
+/* -- Metamethod handling ------------------------------------------------- */
+
+/* String interning of metamethod names for fast indexing. */
+void lj_meta_init(lua_State *L)
+{
+#define MMNAME(name)	"__" #name
+  const char *metanames = MMDEF(MMNAME);
+#undef MMNAME
+  global_State *g = G(L);
+  const char *p, *q;
+  uint32_t mm;
+  for (mm = 0, p = metanames; *p; mm++, p = q) {
+    GCstr *s;
+    for (q = p+2; *q && *q != '_'; q++) ;
+    s = lj_str_new(L, p, (size_t)(q-p));
+    /* NOBARRIER: g->gcroot[] is a GC root. */
+    setgcref(g->gcroot[GCROOT_MMNAME+mm], obj2gco(s));
+  }
+}
+
+/* Negative caching of a few fast metamethods. See the lj_meta_fast() macro. */
+cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name)
+{
+  cTValue *mo = lj_tab_getstr(mt, name);
+  lua_assert(mm <= MM_FAST);
+  if (!mo || tvisnil(mo)) {  /* No metamethod? */
+    mt->nomm |= (uint8_t)(1u<<mm);  /* Set negative cache flag. */
+    return NULL;
+  }
+  return mo;
+}
+
+/* Lookup metamethod for object. */
+cTValue *lj_meta_lookup(lua_State *L, cTValue *o, MMS mm)
+{
+  GCtab *mt;
+  if (tvistab(o))
+    mt = tabref(tabV(o)->metatable);
+  else if (tvisudata(o))
+    mt = tabref(udataV(o)->metatable);
+  else
+    mt = tabref(basemt_obj(G(L), o));
+  if (mt) {
+    cTValue *mo = lj_tab_getstr(mt, mmname_str(G(L), mm));
+    if (mo)
+      return mo;
+  }
+  return niltv(L);
+}
+
+#if LJ_HASFFI
+/* Tailcall from C function. */
+int lj_meta_tailcall(lua_State *L, cTValue *tv)
+{
+  TValue *base = L->base;
+  TValue *top = L->top;
+  const BCIns *pc = frame_pc(base-1);  /* Preserve old PC from frame. */
+  copyTV(L, base-1, tv);  /* Replace frame with new object. */
+  top->u32.lo = LJ_CONT_TAILCALL;
+  setframe_pc(top, pc);
+  setframe_gc(top+1, obj2gco(L));  /* Dummy frame object. */
+  setframe_ftsz(top+1, (int)((char *)(top+2) - (char *)base) + FRAME_CONT);
+  L->base = L->top = top+2;
+  /*
+  ** before:   [old_mo|PC]    [... ...]
+  **                         ^base     ^top
+  ** after:    [new_mo|itype] [... ...] [NULL|PC] [dummy|delta]
+  **                                                           ^base/top
+  ** tailcall: [new_mo|PC]    [... ...]
+  **                         ^base     ^top
+  */
+  return 0;
+}
+#endif
+
+/* Setup call to metamethod to be run by Assembler VM. */
+static TValue *mmcall(lua_State *L, ASMFunction cont, cTValue *mo,
+		    cTValue *a, cTValue *b)
+{
+  /*
+  **           |-- framesize -> top       top+1       top+2 top+3
+  ** before:   [func slots ...]
+  ** mm setup: [func slots ...] [cont|?]  [mo|tmtype] [a]   [b]
+  ** in asm:   [func slots ...] [cont|PC] [mo|delta]  [a]   [b]
+  **           ^-- func base                          ^-- mm base
+  ** after mm: [func slots ...]           [result]
+  **                ^-- copy to base[PC_RA] --/     for lj_cont_ra
+  **                          istruecond + branch   for lj_cont_cond*
+  **                                       ignore   for lj_cont_nop
+  ** next PC:  [func slots ...]
+  */
+  TValue *top = L->top;
+  if (curr_funcisL(L)) top = curr_topL(L);
+  setcont(top, cont);  /* Assembler VM stores PC in upper word. */
+  copyTV(L, top+1, mo);  /* Store metamethod and two arguments. */
+  copyTV(L, top+2, a);
+  copyTV(L, top+3, b);
+  return top+2;  /* Return new base. */
+}
+
+/* -- C helpers for some instructions, called from assembler VM ----------- */
+
+/* Helper for TGET*. __index chain and metamethod. */
+cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k)
+{
+  int loop;
+  for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) {
+    cTValue *mo;
+    if (LJ_LIKELY(tvistab(o))) {
+      GCtab *t = tabV(o);
+      cTValue *tv = lj_tab_get(L, t, k);
+      if (!tvisnil(tv) ||
+	  !(mo = lj_meta_fast(L, tabref(t->metatable), MM_index)))
+	return tv;
+    } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_index))) {
+      lj_err_optype(L, o, LJ_ERR_OPINDEX);
+      return NULL;  /* unreachable */
+    }
+    if (tvisfunc(mo)) {
+      L->top = mmcall(L, lj_cont_ra, mo, o, k);
+      return NULL;  /* Trigger metamethod call. */
+    }
+    o = mo;
+  }
+  lj_err_msg(L, LJ_ERR_GETLOOP);
+  return NULL;  /* unreachable */
+}
+
+/* Helper for TSET*. __newindex chain and metamethod. */
+TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k)
+{
+  TValue tmp;
+  int loop;
+  for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) {
+    cTValue *mo;
+    if (LJ_LIKELY(tvistab(o))) {
+      GCtab *t = tabV(o);
+      cTValue *tv = lj_tab_get(L, t, k);
+      if (LJ_LIKELY(!tvisnil(tv))) {
+	t->nomm = 0;  /* Invalidate negative metamethod cache. */
+	lj_gc_anybarriert(L, t);
+	return (TValue *)tv;
+      } else if (!(mo = lj_meta_fast(L, tabref(t->metatable), MM_newindex))) {
+	t->nomm = 0;  /* Invalidate negative metamethod cache. */
+	lj_gc_anybarriert(L, t);
+	if (tv != niltv(L))
+	  return (TValue *)tv;
+	if (tvisnil(k)) lj_err_msg(L, LJ_ERR_NILIDX);
+	else if (tvisint(k)) { setnumV(&tmp, (lua_Number)intV(k)); k = &tmp; }
+	else if (tvisnum(k) && tvisnan(k)) lj_err_msg(L, LJ_ERR_NANIDX);
+	return lj_tab_newkey(L, t, k);
+      }
+    } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_newindex))) {
+      lj_err_optype(L, o, LJ_ERR_OPINDEX);
+      return NULL;  /* unreachable */
+    }
+    if (tvisfunc(mo)) {
+      L->top = mmcall(L, lj_cont_nop, mo, o, k);
+      /* L->top+2 = v filled in by caller. */
+      return NULL;  /* Trigger metamethod call. */
+    }
+    copyTV(L, &tmp, mo);
+    o = &tmp;
+  }
+  lj_err_msg(L, LJ_ERR_SETLOOP);
+  return NULL;  /* unreachable */
+}
+
+static cTValue *str2num(cTValue *o, TValue *n)
+{
+  if (tvisnum(o))
+    return o;
+  else if (tvisint(o))
+    return (setnumV(n, (lua_Number)intV(o)), n);
+  else if (tvisstr(o) && lj_strscan_num(strV(o), n))
+    return n;
+  else
+    return NULL;
+}
+
+/* Helper for arithmetic instructions. Coercion, metamethod. */
+TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb, cTValue *rc,
+		      BCReg op)
+{
+  MMS mm = bcmode_mm(op);
+  TValue tempb, tempc;
+  cTValue *b, *c;
+  if ((b = str2num(rb, &tempb)) != NULL &&
+      (c = str2num(rc, &tempc)) != NULL) {  /* Try coercion first. */
+    setnumV(ra, lj_vm_foldarith(numV(b), numV(c), (int)mm-MM_add));
+    return NULL;
+  } else {
+    cTValue *mo = lj_meta_lookup(L, rb, mm);
+    if (tvisnil(mo)) {
+      mo = lj_meta_lookup(L, rc, mm);
+      if (tvisnil(mo)) {
+	if (str2num(rb, &tempb) == NULL) rc = rb;
+	lj_err_optype(L, rc, LJ_ERR_OPARITH);
+	return NULL;  /* unreachable */
+      }
+    }
+    return mmcall(L, lj_cont_ra, mo, rb, rc);
+  }
+}
+
+/* In-place coercion of a number to a string. */
+static LJ_AINLINE int tostring(lua_State *L, TValue *o)
+{
+  if (tvisstr(o)) {
+    return 1;
+  } else if (tvisnumber(o)) {
+    setstrV(L, o, lj_str_fromnumber(L, o));
+    return 1;
+  } else {
+    return 0;
+  }
+}
+
+/* Helper for CAT. Coercion, iterative concat, __concat metamethod. */
+TValue *lj_meta_cat(lua_State *L, TValue *top, int left)
+{
+  int fromc = 0;
+  if (left < 0) { left = -left; fromc = 1; }
+  do {
+    int n = 1;
+    if (!(tvisstr(top-1) || tvisnumber(top-1)) || !tostring(L, top)) {
+      cTValue *mo = lj_meta_lookup(L, top-1, MM_concat);
+      if (tvisnil(mo)) {
+	mo = lj_meta_lookup(L, top, MM_concat);
+	if (tvisnil(mo)) {
+	  if (tvisstr(top-1) || tvisnumber(top-1)) top++;
+	  lj_err_optype(L, top-1, LJ_ERR_OPCAT);
+	  return NULL;  /* unreachable */
+	}
+      }
+      /* One of the top two elements is not a string, call __cat metamethod:
+      **
+      ** before:    [...][CAT stack .........................]
+      **                                 top-1     top         top+1 top+2
+      ** pick two:  [...][CAT stack ...] [o1]      [o2]
+      ** setup mm:  [...][CAT stack ...] [cont|?]  [mo|tmtype] [o1]  [o2]
+      ** in asm:    [...][CAT stack ...] [cont|PC] [mo|delta]  [o1]  [o2]
+      **            ^-- func base                              ^-- mm base
+      ** after mm:  [...][CAT stack ...] <--push-- [result]
+      ** next step: [...][CAT stack .............]
+      */
+      copyTV(L, top+2, top);  /* Careful with the order of stack copies! */
+      copyTV(L, top+1, top-1);
+      copyTV(L, top, mo);
+      setcont(top-1, lj_cont_cat);
+      return top+1;  /* Trigger metamethod call. */
+    } else if (strV(top)->len == 0) {  /* Shortcut. */
+      (void)tostring(L, top-1);
+    } else {
+      /* Pick as many strings as possible from the top and concatenate them:
+      **
+      ** before:    [...][CAT stack ...........................]
+      ** pick str:  [...][CAT stack ...] [...... strings ......]
+      ** concat:    [...][CAT stack ...] [result]
+      ** next step: [...][CAT stack ............]
+      */
+      MSize tlen = strV(top)->len;
+      char *buffer;
+      int i;
+      for (n = 1; n <= left && tostring(L, top-n); n++) {
+	MSize len = strV(top-n)->len;
+	if (len >= LJ_MAX_STR - tlen)
+	  lj_err_msg(L, LJ_ERR_STROV);
+	tlen += len;
+      }
+      buffer = lj_str_needbuf(L, &G(L)->tmpbuf, tlen);
+      n--;
+      tlen = 0;
+      for (i = n; i >= 0; i--) {
+	MSize len = strV(top-i)->len;
+	memcpy(buffer + tlen, strVdata(top-i), len);
+	tlen += len;
+      }
+      setstrV(L, top-n, lj_str_new(L, buffer, tlen));
+    }
+    left -= n;
+    top -= n;
+  } while (left >= 1);
+  if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) {
+    if (!fromc) L->top = curr_topL(L);
+    lj_gc_step(L);
+  }
+  return NULL;
+}
+
+/* Helper for LEN. __len metamethod. */
+TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o)
+{
+  cTValue *mo = lj_meta_lookup(L, o, MM_len);
+  if (tvisnil(mo)) {
+    if (LJ_52 && tvistab(o))
+      tabref(tabV(o)->metatable)->nomm |= (uint8_t)(1u<<MM_len);
+    else
+      lj_err_optype(L, o, LJ_ERR_OPLEN);
+    return NULL;
+  }
+  return mmcall(L, lj_cont_ra, mo, o, LJ_52 ? o : niltv(L));
+}
+
+/* Helper for equality comparisons. __eq metamethod. */
+TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne)
+{
+  /* Field metatable must be at same offset for GCtab and GCudata! */
+  cTValue *mo = lj_meta_fast(L, tabref(o1->gch.metatable), MM_eq);
+  if (mo) {
+    TValue *top;
+    uint32_t it;
+    if (tabref(o1->gch.metatable) != tabref(o2->gch.metatable)) {
+      cTValue *mo2 = lj_meta_fast(L, tabref(o2->gch.metatable), MM_eq);
+      if (mo2 == NULL || !lj_obj_equal(mo, mo2))
+	return (TValue *)(intptr_t)ne;
+    }
+    top = curr_top(L);
+    setcont(top, ne ? lj_cont_condf : lj_cont_condt);
+    copyTV(L, top+1, mo);
+    it = ~(uint32_t)o1->gch.gct;
+    setgcV(L, top+2, o1, it);
+    setgcV(L, top+3, o2, it);
+    return top+2;  /* Trigger metamethod call. */
+  }
+  return (TValue *)(intptr_t)ne;
+}
+
+#if LJ_HASFFI
+TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins)
+{
+  ASMFunction cont = (bc_op(ins) & 1) ? lj_cont_condf : lj_cont_condt;
+  int op = (int)bc_op(ins) & ~1;
+  TValue tv;
+  cTValue *mo, *o2, *o1 = &L->base[bc_a(ins)];
+  cTValue *o1mm = o1;
+  if (op == BC_ISEQV) {
+    o2 = &L->base[bc_d(ins)];
+    if (!tviscdata(o1mm)) o1mm = o2;
+  } else if (op == BC_ISEQS) {
+    setstrV(L, &tv, gco2str(proto_kgc(curr_proto(L), ~(ptrdiff_t)bc_d(ins))));
+    o2 = &tv;
+  } else if (op == BC_ISEQN) {
+    o2 = &mref(curr_proto(L)->k, cTValue)[bc_d(ins)];
+  } else {
+    lua_assert(op == BC_ISEQP);
+    setitype(&tv, ~bc_d(ins));
+    o2 = &tv;
+  }
+  mo = lj_meta_lookup(L, o1mm, MM_eq);
+  if (LJ_LIKELY(!tvisnil(mo)))
+    return mmcall(L, cont, mo, o1, o2);
+  else
+    return (TValue *)(intptr_t)(bc_op(ins) & 1);
+}
+#endif
+
+/* Helper for ordered comparisons. String compare, __lt/__le metamethods. */
+TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op)
+{
+  if (LJ_HASFFI && (tviscdata(o1) || tviscdata(o2))) {
+    ASMFunction cont = (op & 1) ? lj_cont_condf : lj_cont_condt;
+    MMS mm = (op & 2) ? MM_le : MM_lt;
+    cTValue *mo = lj_meta_lookup(L, tviscdata(o1) ? o1 : o2, mm);
+    if (LJ_UNLIKELY(tvisnil(mo))) goto err;
+    return mmcall(L, cont, mo, o1, o2);
+  } else if (LJ_52 || itype(o1) == itype(o2)) {
+    /* Never called with two numbers. */
+    if (tvisstr(o1) && tvisstr(o2)) {
+      int32_t res = lj_str_cmp(strV(o1), strV(o2));
+      return (TValue *)(intptr_t)(((op&2) ? res <= 0 : res < 0) ^ (op&1));
+    } else {
+    trymt:
+      while (1) {
+	ASMFunction cont = (op & 1) ? lj_cont_condf : lj_cont_condt;
+	MMS mm = (op & 2) ? MM_le : MM_lt;
+	cTValue *mo = lj_meta_lookup(L, o1, mm);
+#if LJ_52
+	if (tvisnil(mo) && tvisnil((mo = lj_meta_lookup(L, o2, mm))))
+#else
+	cTValue *mo2 = lj_meta_lookup(L, o2, mm);
+	if (tvisnil(mo) || !lj_obj_equal(mo, mo2))
+#endif
+	{
+	  if (op & 2) {  /* MM_le not found: retry with MM_lt. */
+	    cTValue *ot = o1; o1 = o2; o2 = ot;  /* Swap operands. */
+	    op ^= 3;  /* Use LT and flip condition. */
+	    continue;
+	  }
+	  goto err;
+	}
+	return mmcall(L, cont, mo, o1, o2);
+      }
+    }
+  } else if (tvisbool(o1) && tvisbool(o2)) {
+    goto trymt;
+  } else {
+  err:
+    lj_err_comp(L, o1, o2);
+    return NULL;
+  }
+}
+
+/* Helper for calls. __call metamethod. */
+void lj_meta_call(lua_State *L, TValue *func, TValue *top)
+{
+  cTValue *mo = lj_meta_lookup(L, func, MM_call);
+  TValue *p;
+  if (!tvisfunc(mo))
+    lj_err_optype_call(L, func);
+  for (p = top; p > func; p--) copyTV(L, p, p-1);
+  copyTV(L, func, mo);
+}
+
+/* Helper for FORI. Coercion. */
+void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o)
+{
+  if (!lj_strscan_numberobj(o)) lj_err_msg(L, LJ_ERR_FORINIT);
+  if (!lj_strscan_numberobj(o+1)) lj_err_msg(L, LJ_ERR_FORLIM);
+  if (!lj_strscan_numberobj(o+2)) lj_err_msg(L, LJ_ERR_FORSTEP);
+  if (LJ_DUALNUM) {
+    /* Ensure all slots are integers or all slots are numbers. */
+    int32_t k[3];
+    int nint = 0;
+    ptrdiff_t i;
+    for (i = 0; i <= 2; i++) {
+      if (tvisint(o+i)) {
+	k[i] = intV(o+i); nint++;
+      } else {
+	k[i] = lj_num2int(numV(o+i)); nint += ((lua_Number)k[i] == numV(o+i));
+      }
+    }
+    if (nint == 3) {  /* Narrow to integers. */
+      setintV(o, k[0]);
+      setintV(o+1, k[1]);
+      setintV(o+2, k[2]);
+    } else if (nint != 0) {  /* Widen to numbers. */
+      if (tvisint(o)) setnumV(o, (lua_Number)intV(o));
+      if (tvisint(o+1)) setnumV(o+1, (lua_Number)intV(o+1));
+      if (tvisint(o+2)) setnumV(o+2, (lua_Number)intV(o+2));
+    }
+  }
+}
+
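
The name interning in lj_meta_init() above leans on the MMDEF x-macro from lj_obj.h. For reference, this is roughly what the preprocessor produces (illustrative only, abbreviated after the __call entry, not part of the committed file):

  #define MMNAME(name)  "__" #name
  const char *metanames = MMDEF(MMNAME);
  /* ... which expands to one concatenated string literal: */
  const char *metanames_expanded =
    "__index" "__newindex" "__gc" "__mode" "__eq" "__len"
    "__lt" "__le" "__concat" "__call" /* arithmetic and library names follow */;

lj_meta_init() walks this single literal, cutting just before each following "__", so it interns one GCstr per metamethod ("__index", "__newindex", ...) and anchors it in g->gcroot[GCROOT_MMNAME+mm], which is what mmname_str() later indexes in constant time.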

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_meta.h
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_meta.h b/lib/luajit/src/lj_meta.h
new file mode 100644
index 0000000..2c1ad0d
--- /dev/null
+++ b/lib/luajit/src/lj_meta.h
@@ -0,0 +1,37 @@
+/*
+** Metamethod handling.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_META_H
+#define _LJ_META_H
+
+#include "lj_obj.h"
+
+/* Metamethod handling */
+LJ_FUNC void lj_meta_init(lua_State *L);
+LJ_FUNC cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name);
+LJ_FUNC cTValue *lj_meta_lookup(lua_State *L, cTValue *o, MMS mm);
+#if LJ_HASFFI
+LJ_FUNC int lj_meta_tailcall(lua_State *L, cTValue *tv);
+#endif
+
+#define lj_meta_fastg(g, mt, mm) \
+  ((mt) == NULL ? NULL : ((mt)->nomm & (1u<<(mm))) ? NULL : \
+   lj_meta_cache(mt, mm, mmname_str(g, mm)))
+#define lj_meta_fast(L, mt, mm)	lj_meta_fastg(G(L), mt, mm)
+
+/* C helpers for some instructions, called from assembler VM. */
+LJ_FUNCA cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k);
+LJ_FUNCA TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k);
+LJ_FUNCA TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb,
+			       cTValue *rc, BCReg op);
+LJ_FUNCA TValue *lj_meta_cat(lua_State *L, TValue *top, int left);
+LJ_FUNCA TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o);
+LJ_FUNCA TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne);
+LJ_FUNCA TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins);
+LJ_FUNCA TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op);
+LJ_FUNCA void lj_meta_call(lua_State *L, TValue *func, TValue *top);
+LJ_FUNCA void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o);
+
+#endif

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_obj.c
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_obj.c b/lib/luajit/src/lj_obj.c
new file mode 100644
index 0000000..7fab714
--- /dev/null
+++ b/lib/luajit/src/lj_obj.c
@@ -0,0 +1,35 @@
+/*
+** Miscellaneous object handling.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_obj_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+/* Object type names. */
+LJ_DATADEF const char *const lj_obj_typename[] = {  /* ORDER LUA_T */
+  "no value", "nil", "boolean", "userdata", "number", "string",
+  "table", "function", "userdata", "thread", "proto", "cdata"
+};
+
+LJ_DATADEF const char *const lj_obj_itypename[] = {  /* ORDER LJ_T */
+  "nil", "boolean", "boolean", "userdata", "string", "upval", "thread",
+  "proto", "function", "trace", "cdata", "table", "userdata", "number"
+};
+
+/* Compare two objects without calling metamethods. */
+int lj_obj_equal(cTValue *o1, cTValue *o2)
+{
+  if (itype(o1) == itype(o2)) {
+    if (tvispri(o1))
+      return 1;
+    if (!tvisnum(o1))
+      return gcrefeq(o1->gcr, o2->gcr);
+  } else if (!tvisnumber(o1) || !tvisnumber(o2)) {
+    return 0;
+  }
+  return numberVnum(o1) == numberVnum(o2);
+}
+
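
lj_obj_equal() above relies on the NaN-tagging layout documented at length in lj_obj.h below: the internal tag (itype) occupies the most significant 32 bits of the 8-byte TValue, in a bit range that FP hardware does not generate for ordinary doubles (see the NaN note below). A minimal standalone sketch of that idea, assuming a little-endian target and the 64-bit constants from lj_obj.h (LJ_TISNUM = 0xfffeffff, LJ_TNIL = ~0u); illustrative only, not part of the committed file:

  #include <stdint.h>

  union tv_sketch {
    double n;                         /* Plain number: MSW at most 0xfff80000. */
    struct { uint32_t lo, hi; } u32;  /* hi overlaps the internal tag (itype). */
    uint64_t u64;
  };

  static int sketch_isnum(const union tv_sketch *o)
  {
    return o->u32.hi < 0xfffeffffu;   /* Mirrors tvisnum() on LJ_64. */
  }

  static void sketch_setnil(union tv_sketch *o)
  {
    o->u32.hi = 0xffffffffu;          /* LJ_TNIL == ~0u; the payload is unused. */
  }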

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_obj.h
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_obj.h b/lib/luajit/src/lj_obj.h
new file mode 100644
index 0000000..6e8381c
--- /dev/null
+++ b/lib/luajit/src/lj_obj.h
@@ -0,0 +1,856 @@
+/*
+** LuaJIT VM tags, values and objects.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#ifndef _LJ_OBJ_H
+#define _LJ_OBJ_H
+
+#include "lua.h"
+#include "lj_def.h"
+#include "lj_arch.h"
+
+/* -- Memory references (32 bit address space) ---------------------------- */
+
+/* Memory size. */
+typedef uint32_t MSize;
+
+/* Memory reference */
+typedef struct MRef {
+  uint32_t ptr32;	/* Pseudo 32 bit pointer. */
+} MRef;
+
+#define mref(r, t)	((t *)(void *)(uintptr_t)(r).ptr32)
+
+#define setmref(r, p)	((r).ptr32 = (uint32_t)(uintptr_t)(void *)(p))
+#define setmrefr(r, v)	((r).ptr32 = (v).ptr32)
+
+/* -- GC object references (32 bit address space) ------------------------- */
+
+/* GCobj reference */
+typedef struct GCRef {
+  uint32_t gcptr32;	/* Pseudo 32 bit pointer. */
+} GCRef;
+
+/* Common GC header for all collectable objects. */
+#define GCHeader	GCRef nextgc; uint8_t marked; uint8_t gct
+/* This occupies 6 bytes, so use the next 2 bytes for non-32 bit fields. */
+
+#define gcref(r)	((GCobj *)(uintptr_t)(r).gcptr32)
+#define gcrefp(r, t)	((t *)(void *)(uintptr_t)(r).gcptr32)
+#define gcrefu(r)	((r).gcptr32)
+#define gcrefi(r)	((int32_t)(r).gcptr32)
+#define gcrefeq(r1, r2)	((r1).gcptr32 == (r2).gcptr32)
+#define gcnext(gc)	(gcref((gc)->gch.nextgc))
+
+#define setgcref(r, gc)	((r).gcptr32 = (uint32_t)(uintptr_t)&(gc)->gch)
+#define setgcrefi(r, i)	((r).gcptr32 = (uint32_t)(i))
+#define setgcrefp(r, p)	((r).gcptr32 = (uint32_t)(uintptr_t)(p))
+#define setgcrefnull(r)	((r).gcptr32 = 0)
+#define setgcrefr(r, v)	((r).gcptr32 = (v).gcptr32)
+
+/* IMPORTANT NOTE:
+**
+** All uses of the setgcref* macros MUST be accompanied with a write barrier.
+**
+** This is to ensure the integrity of the incremental GC. The invariant
+** to preserve is that a black object never points to a white object.
+** I.e. never store a white object into a field of a black object.
+**
+** It's ok to LEAVE OUT the write barrier ONLY in the following cases:
+** - The source is not a GC object (NULL).
+** - The target is a GC root. I.e. everything in global_State.
+** - The target is a lua_State field (threads are never black).
+** - The target is a stack slot, see setgcV et al.
+** - The target is an open upvalue, i.e. pointing to a stack slot.
+** - The target is a newly created object (i.e. marked white). But make
+**   sure nothing invokes the GC inbetween.
+** - The target and the source are the same object (self-reference).
+** - The target already contains the object (e.g. moving elements around).
+**
+** The most common case is a store to a stack slot. All other cases where
+** a barrier has been omitted are annotated with a NOBARRIER comment.
+**
+** The same logic applies for stores to table slots (array part or hash
+** part). ALL uses of lj_tab_set* require a barrier for the stored value
+** *and* the stored key, based on the above rules. In practice this means
+** a barrier is needed if *either* of the key or value are a GC object.
+**
+** It's ok to LEAVE OUT the write barrier in the following special cases:
+** - The stored value is nil. The key doesn't matter because it's either
+**   not resurrected or lj_tab_newkey() will take care of the key barrier.
+** - The key doesn't matter if the *previously* stored value is guaranteed
+**   to be non-nil (because the key is kept alive in the table).
+** - The key doesn't matter if it's guaranteed not to be part of the table,
+**   since lj_tab_newkey() takes care of the key barrier. This applies
+**   trivially to new tables, but watch out for resurrected keys. Storing
+**   a nil value leaves the key in the table!
+**
+** In case of doubt use lj_gc_anybarriert() as it's rather cheap. It's used
+** by the interpreter for all table stores.
+**
+** Note: In contrast to Lua's GC, LuaJIT's GC does *not* specially mark
+** dead keys in tables. The reference is left in, but it's guaranteed to
+** be never dereferenced as long as the value is nil. It's ok if the key is
+** freed or if any object subsequently gets the same address.
+**
+** Not destroying dead keys helps to keep key hash slots stable. This avoids
+** specialization back-off for HREFK when a value flips between nil and
+** non-nil and the GC gets in the way. It also allows safely hoisting
+** HREF/HREFK across GC steps. Dead keys are only removed if a table is
+** resized (i.e. by NEWREF) and xREF must not be CSEd across a resize.
+**
+** The trade-off is that a write barrier for tables must take the key into
+** account, too. Implicitly resurrecting the key by storing a non-nil value
+** may invalidate the incremental GC invariant.
+*/
+
+/* -- Common type definitions --------------------------------------------- */
+
+/* Types for handling bytecodes. Need this here, details in lj_bc.h. */
+typedef uint32_t BCIns;  /* Bytecode instruction. */
+typedef uint32_t BCPos;  /* Bytecode position. */
+typedef uint32_t BCReg;  /* Bytecode register. */
+typedef int32_t BCLine;  /* Bytecode line number. */
+
+/* Internal assembler functions. Never call these directly from C. */
+typedef void (*ASMFunction)(void);
+
+/* Resizable string buffer. Need this here, details in lj_str.h. */
+typedef struct SBuf {
+  char *buf;		/* String buffer base. */
+  MSize n;		/* String buffer length. */
+  MSize sz;		/* String buffer size. */
+} SBuf;
+
+/* -- Tags and values ----------------------------------------------------- */
+
+/* Frame link. */
+typedef union {
+  int32_t ftsz;		/* Frame type and size of previous frame. */
+  MRef pcr;		/* Overlaps PC for Lua frames. */
+} FrameLink;
+
+/* Tagged value. */
+typedef LJ_ALIGN(8) union TValue {
+  uint64_t u64;		/* 64 bit pattern overlaps number. */
+  lua_Number n;		/* Number object overlaps split tag/value object. */
+  struct {
+    LJ_ENDIAN_LOHI(
+      union {
+	GCRef gcr;	/* GCobj reference (if any). */
+	int32_t i;	/* Integer value. */
+      };
+    , uint32_t it;	/* Internal object tag. Must overlap MSW of number. */
+    )
+  };
+  struct {
+    LJ_ENDIAN_LOHI(
+      GCRef func;	/* Function for next frame (or dummy L). */
+    , FrameLink tp;	/* Link to previous frame. */
+    )
+  } fr;
+  struct {
+    LJ_ENDIAN_LOHI(
+      uint32_t lo;	/* Lower 32 bits of number. */
+    , uint32_t hi;	/* Upper 32 bits of number. */
+    )
+  } u32;
+} TValue;
+
+typedef const TValue cTValue;
+
+#define tvref(r)	(mref(r, TValue))
+
+/* More external and GCobj tags for internal objects. */
+#define LAST_TT		LUA_TTHREAD
+#define LUA_TPROTO	(LAST_TT+1)
+#define LUA_TCDATA	(LAST_TT+2)
+
+/* Internal object tags.
+**
+** Internal tags overlap the MSW of a number object (must be a double).
+** Interpreted as a double these are special NaNs. The FPU only generates
+** one type of NaN (0xfff8_0000_0000_0000). So MSWs > 0xfff80000 are available
+** for use as internal tags. Small negative numbers are used to shorten the
+** encoding of type comparisons (reg/mem against sign-ext. 8 bit immediate).
+**
+**                  ---MSW---.---LSW---
+** primitive types |  itype  |         |
+** lightuserdata   |  itype  |  void * |  (32 bit platforms)
+** lightuserdata   |ffff|    void *    |  (64 bit platforms, 47 bit pointers)
+** GC objects      |  itype  |  GCRef  |
+** int (LJ_DUALNUM)|  itype  |   int   |
+** number           -------double------
+**
+** ORDER LJ_T
+** Primitive types nil/false/true must be first, lightuserdata next.
+** GC objects are at the end, table/userdata must be lowest.
+** Also check lj_ir.h for similar ordering constraints.
+*/
+#define LJ_TNIL			(~0u)
+#define LJ_TFALSE		(~1u)
+#define LJ_TTRUE		(~2u)
+#define LJ_TLIGHTUD		(~3u)
+#define LJ_TSTR			(~4u)
+#define LJ_TUPVAL		(~5u)
+#define LJ_TTHREAD		(~6u)
+#define LJ_TPROTO		(~7u)
+#define LJ_TFUNC		(~8u)
+#define LJ_TTRACE		(~9u)
+#define LJ_TCDATA		(~10u)
+#define LJ_TTAB			(~11u)
+#define LJ_TUDATA		(~12u)
+/* This is just the canonical number type used in some places. */
+#define LJ_TNUMX		(~13u)
+
+/* Integers have itype == LJ_TISNUM doubles have itype < LJ_TISNUM */
+#if LJ_64
+#define LJ_TISNUM		0xfffeffffu
+#else
+#define LJ_TISNUM		LJ_TNUMX
+#endif
+#define LJ_TISTRUECOND		LJ_TFALSE
+#define LJ_TISPRI		LJ_TTRUE
+#define LJ_TISGCV		(LJ_TSTR+1)
+#define LJ_TISTABUD		LJ_TTAB
+
+/* -- String object ------------------------------------------------------- */
+
+/* String object header. String payload follows. */
+typedef struct GCstr {
+  GCHeader;
+  uint8_t reserved;	/* Used by lexer for fast lookup of reserved words. */
+  uint8_t unused;
+  MSize hash;		/* Hash of string. */
+  MSize len;		/* Size of string. */
+} GCstr;
+
+#define strref(r)	(&gcref((r))->str)
+#define strdata(s)	((const char *)((s)+1))
+#define strdatawr(s)	((char *)((s)+1))
+#define strVdata(o)	strdata(strV(o))
+#define sizestring(s)	(sizeof(struct GCstr)+(s)->len+1)
+
+/* -- Userdata object ----------------------------------------------------- */
+
+/* Userdata object. Payload follows. */
+typedef struct GCudata {
+  GCHeader;
+  uint8_t udtype;	/* Userdata type. */
+  uint8_t unused2;
+  GCRef env;		/* Should be at same offset in GCfunc. */
+  MSize len;		/* Size of payload. */
+  GCRef metatable;	/* Must be at same offset in GCtab. */
+  uint32_t align1;	/* To force 8 byte alignment of the payload. */
+} GCudata;
+
+/* Userdata types. */
+enum {
+  UDTYPE_USERDATA,	/* Regular userdata. */
+  UDTYPE_IO_FILE,	/* I/O library FILE. */
+  UDTYPE_FFI_CLIB,	/* FFI C library namespace. */
+  UDTYPE__MAX
+};
+
+#define uddata(u)	((void *)((u)+1))
+#define sizeudata(u)	(sizeof(struct GCudata)+(u)->len)
+
+/* -- C data object ------------------------------------------------------- */
+
+/* C data object. Payload follows. */
+typedef struct GCcdata {
+  GCHeader;
+  uint16_t ctypeid;	/* C type ID. */
+} GCcdata;
+
+/* Prepended to variable-sized or realigned C data objects. */
+typedef struct GCcdataVar {
+  uint16_t offset;	/* Offset to allocated memory (relative to GCcdata). */
+  uint16_t extra;	/* Extra space allocated (incl. GCcdata + GCcdatav). */
+  MSize len;		/* Size of payload. */
+} GCcdataVar;
+
+#define cdataptr(cd)	((void *)((cd)+1))
+#define cdataisv(cd)	((cd)->marked & 0x80)
+#define cdatav(cd)	((GCcdataVar *)((char *)(cd) - sizeof(GCcdataVar)))
+#define cdatavlen(cd)	check_exp(cdataisv(cd), cdatav(cd)->len)
+#define sizecdatav(cd)	(cdatavlen(cd) + cdatav(cd)->extra)
+#define memcdatav(cd)	((void *)((char *)(cd) - cdatav(cd)->offset))
+
+/* -- Prototype object ---------------------------------------------------- */
+
+#define SCALE_NUM_GCO	((int32_t)sizeof(lua_Number)/sizeof(GCRef))
+#define round_nkgc(n)	(((n) + SCALE_NUM_GCO-1) & ~(SCALE_NUM_GCO-1))
+
+typedef struct GCproto {
+  GCHeader;
+  uint8_t numparams;	/* Number of parameters. */
+  uint8_t framesize;	/* Fixed frame size. */
+  MSize sizebc;		/* Number of bytecode instructions. */
+  GCRef gclist;
+  MRef k;		/* Split constant array (points to the middle). */
+  MRef uv;		/* Upvalue list. local slot|0x8000 or parent uv idx. */
+  MSize sizekgc;	/* Number of collectable constants. */
+  MSize sizekn;		/* Number of lua_Number constants. */
+  MSize sizept;		/* Total size including colocated arrays. */
+  uint8_t sizeuv;	/* Number of upvalues. */
+  uint8_t flags;	/* Miscellaneous flags (see below). */
+  uint16_t trace;	/* Anchor for chain of root traces. */
+  /* ------ The following fields are for debugging/tracebacks only ------ */
+  GCRef chunkname;	/* Name of the chunk this function was defined in. */
+  BCLine firstline;	/* First line of the function definition. */
+  BCLine numline;	/* Number of lines for the function definition. */
+  MRef lineinfo;	/* Compressed map from bytecode ins. to source line. */
+  MRef uvinfo;		/* Upvalue names. */
+  MRef varinfo;		/* Names and compressed extents of local variables. */
+} GCproto;
+
+/* Flags for prototype. */
+#define PROTO_CHILD		0x01	/* Has child prototypes. */
+#define PROTO_VARARG		0x02	/* Vararg function. */
+#define PROTO_FFI		0x04	/* Uses BC_KCDATA for FFI datatypes. */
+#define PROTO_NOJIT		0x08	/* JIT disabled for this function. */
+#define PROTO_ILOOP		0x10	/* Patched bytecode with ILOOP etc. */
+/* Only used during parsing. */
+#define PROTO_HAS_RETURN	0x20	/* Already emitted a return. */
+#define PROTO_FIXUP_RETURN	0x40	/* Need to fixup emitted returns. */
+/* Top bits used for counting created closures. */
+#define PROTO_CLCOUNT		0x20	/* Base of saturating 3 bit counter. */
+#define PROTO_CLC_BITS		3
+#define PROTO_CLC_POLY		(3*PROTO_CLCOUNT)  /* Polymorphic threshold. */
+
+#define PROTO_UV_LOCAL		0x8000	/* Upvalue for local slot. */
+#define PROTO_UV_IMMUTABLE	0x4000	/* Immutable upvalue. */
+
+#define proto_kgc(pt, idx) \
+  check_exp((uintptr_t)(intptr_t)(idx) >= (uintptr_t)-(intptr_t)(pt)->sizekgc, \
+	    gcref(mref((pt)->k, GCRef)[(idx)]))
+#define proto_knumtv(pt, idx) \
+  check_exp((uintptr_t)(idx) < (pt)->sizekn, &mref((pt)->k, TValue)[(idx)])
+#define proto_bc(pt)		((BCIns *)((char *)(pt) + sizeof(GCproto)))
+#define proto_bcpos(pt, pc)	((BCPos)((pc) - proto_bc(pt)))
+#define proto_uv(pt)		(mref((pt)->uv, uint16_t))
+
+#define proto_chunkname(pt)	(strref((pt)->chunkname))
+#define proto_chunknamestr(pt)	(strdata(proto_chunkname((pt))))
+#define proto_lineinfo(pt)	(mref((pt)->lineinfo, const void))
+#define proto_uvinfo(pt)	(mref((pt)->uvinfo, const uint8_t))
+#define proto_varinfo(pt)	(mref((pt)->varinfo, const uint8_t))
+
+/* -- Upvalue object ------------------------------------------------------ */
+
+typedef struct GCupval {
+  GCHeader;
+  uint8_t closed;	/* Set if closed (i.e. uv->v == &uv->u.value). */
+  uint8_t immutable;	/* Immutable value. */
+  union {
+    TValue tv;		/* If closed: the value itself. */
+    struct {		/* If open: double linked list, anchored at thread. */
+      GCRef prev;
+      GCRef next;
+    };
+  };
+  MRef v;		/* Points to stack slot (open) or above (closed). */
+  uint32_t dhash;	/* Disambiguation hash: dh1 != dh2 => cannot alias. */
+} GCupval;
+
+#define uvprev(uv_)	(&gcref((uv_)->prev)->uv)
+#define uvnext(uv_)	(&gcref((uv_)->next)->uv)
+#define uvval(uv_)	(mref((uv_)->v, TValue))
+
+/* -- Function object (closures) ------------------------------------------ */
+
+/* Common header for functions. env should be at same offset in GCudata. */
+#define GCfuncHeader \
+  GCHeader; uint8_t ffid; uint8_t nupvalues; \
+  GCRef env; GCRef gclist; MRef pc
+
+typedef struct GCfuncC {
+  GCfuncHeader;
+  lua_CFunction f;	/* C function to be called. */
+  TValue upvalue[1];	/* Array of upvalues (TValue). */
+} GCfuncC;
+
+typedef struct GCfuncL {
+  GCfuncHeader;
+  GCRef uvptr[1];	/* Array of _pointers_ to upvalue objects (GCupval). */
+} GCfuncL;
+
+typedef union GCfunc {
+  GCfuncC c;
+  GCfuncL l;
+} GCfunc;
+
+#define FF_LUA		0
+#define FF_C		1
+#define isluafunc(fn)	((fn)->c.ffid == FF_LUA)
+#define iscfunc(fn)	((fn)->c.ffid == FF_C)
+#define isffunc(fn)	((fn)->c.ffid > FF_C)
+#define funcproto(fn) \
+  check_exp(isluafunc(fn), (GCproto *)(mref((fn)->l.pc, char)-sizeof(GCproto)))
+#define sizeCfunc(n)	(sizeof(GCfuncC)-sizeof(TValue)+sizeof(TValue)*(n))
+#define sizeLfunc(n)	(sizeof(GCfuncL)-sizeof(GCRef)+sizeof(GCRef)*(n))
+
+/* -- Table object -------------------------------------------------------- */
+
+/* Hash node. */
+typedef struct Node {
+  TValue val;		/* Value object. Must be first field. */
+  TValue key;		/* Key object. */
+  MRef next;		/* Hash chain. */
+  MRef freetop;		/* Top of free elements (stored in t->node[0]). */
+} Node;
+
+LJ_STATIC_ASSERT(offsetof(Node, val) == 0);
+
+typedef struct GCtab {
+  GCHeader;
+  uint8_t nomm;		/* Negative cache for fast metamethods. */
+  int8_t colo;		/* Array colocation. */
+  MRef array;		/* Array part. */
+  GCRef gclist;
+  GCRef metatable;	/* Must be at same offset in GCudata. */
+  MRef node;		/* Hash part. */
+  uint32_t asize;	/* Size of array part (keys [0, asize-1]). */
+  uint32_t hmask;	/* Hash part mask (size of hash part - 1). */
+} GCtab;
+
+#define sizetabcolo(n)	((n)*sizeof(TValue) + sizeof(GCtab))
+#define tabref(r)	(&gcref((r))->tab)
+#define noderef(r)	(mref((r), Node))
+#define nextnode(n)	(mref((n)->next, Node))
+
+/* -- State objects ------------------------------------------------------- */
+
+/* VM states. */
+enum {
+  LJ_VMST_INTERP,	/* Interpreter. */
+  LJ_VMST_C,		/* C function. */
+  LJ_VMST_GC,		/* Garbage collector. */
+  LJ_VMST_EXIT,		/* Trace exit handler. */
+  LJ_VMST_RECORD,	/* Trace recorder. */
+  LJ_VMST_OPT,		/* Optimizer. */
+  LJ_VMST_ASM,		/* Assembler. */
+  LJ_VMST__MAX
+};
+
+#define setvmstate(g, st)	((g)->vmstate = ~LJ_VMST_##st)
+
+/* Metamethods. ORDER MM */
+#ifdef LJ_HASFFI
+#define MMDEF_FFI(_) _(new)
+#else
+#define MMDEF_FFI(_)
+#endif
+
+#if LJ_52 || LJ_HASFFI
+#define MMDEF_PAIRS(_) _(pairs) _(ipairs)
+#else
+#define MMDEF_PAIRS(_)
+#define MM_pairs	255
+#define MM_ipairs	255
+#endif
+
+#define MMDEF(_) \
+  _(index) _(newindex) _(gc) _(mode) _(eq) _(len) \
+  /* Only the above (fast) metamethods are negative cached (max. 8). */ \
+  _(lt) _(le) _(concat) _(call) \
+  /* The following must be in ORDER ARITH. */ \
+  _(add) _(sub) _(mul) _(div) _(mod) _(pow) _(unm) \
+  /* The following are used in the standard libraries. */ \
+  _(metatable) _(tostring) MMDEF_FFI(_) MMDEF_PAIRS(_)
+
+typedef enum {
+#define MMENUM(name)	MM_##name,
+MMDEF(MMENUM)
+#undef MMENUM
+  MM__MAX,
+  MM____ = MM__MAX,
+  MM_FAST = MM_len
+} MMS;
+
+/* GC root IDs. */
+typedef enum {
+  GCROOT_MMNAME,	/* Metamethod names. */
+  GCROOT_MMNAME_LAST = GCROOT_MMNAME + MM__MAX-1,
+  GCROOT_BASEMT,	/* Metatables for base types. */
+  GCROOT_BASEMT_NUM = GCROOT_BASEMT + ~LJ_TNUMX,
+  GCROOT_IO_INPUT,	/* Userdata for default I/O input file. */
+  GCROOT_IO_OUTPUT,	/* Userdata for default I/O output file. */
+  GCROOT_MAX
+} GCRootID;
+
+#define basemt_it(g, it)	((g)->gcroot[GCROOT_BASEMT+~(it)])
+#define basemt_obj(g, o)	((g)->gcroot[GCROOT_BASEMT+itypemap(o)])
+#define mmname_str(g, mm)	(strref((g)->gcroot[GCROOT_MMNAME+(mm)]))
+
+typedef struct GCState {
+  MSize total;		/* Memory currently allocated. */
+  MSize threshold;	/* Memory threshold. */
+  uint8_t currentwhite;	/* Current white color. */
+  uint8_t state;	/* GC state. */
+  uint8_t nocdatafin;	/* No cdata finalizer called. */
+  uint8_t unused2;
+  MSize sweepstr;	/* Sweep position in string table. */
+  GCRef root;		/* List of all collectable objects. */
+  MRef sweep;		/* Sweep position in root list. */
+  GCRef gray;		/* List of gray objects. */
+  GCRef grayagain;	/* List of objects for atomic traversal. */
+  GCRef weak;		/* List of weak tables (to be cleared). */
+  GCRef mmudata;	/* List of userdata (to be finalized). */
+  MSize stepmul;	/* Incremental GC step granularity. */
+  MSize debt;		/* Debt (how much GC is behind schedule). */
+  MSize estimate;	/* Estimate of memory actually in use. */
+  MSize pause;		/* Pause between successive GC cycles. */
+} GCState;
+
+/* Global state, shared by all threads of a Lua universe. */
+typedef struct global_State {
+  GCRef *strhash;	/* String hash table (hash chain anchors). */
+  MSize strmask;	/* String hash mask (size of hash table - 1). */
+  MSize strnum;		/* Number of strings in hash table. */
+  lua_Alloc allocf;	/* Memory allocator. */
+  void *allocd;		/* Memory allocator data. */
+  GCState gc;		/* Garbage collector. */
+  SBuf tmpbuf;		/* Temporary buffer for string concatenation. */
+  Node nilnode;		/* Fallback 1-element hash part (nil key and value). */
+  GCstr strempty;	/* Empty string. */
+  uint8_t stremptyz;	/* Zero terminator of empty string. */
+  uint8_t hookmask;	/* Hook mask. */
+  uint8_t dispatchmode;	/* Dispatch mode. */
+  uint8_t vmevmask;	/* VM event mask. */
+  GCRef mainthref;	/* Link to main thread. */
+  TValue registrytv;	/* Anchor for registry. */
+  TValue tmptv, tmptv2;	/* Temporary TValues. */
+  GCupval uvhead;	/* Head of double-linked list of all open upvalues. */
+  int32_t hookcount;	/* Instruction hook countdown. */
+  int32_t hookcstart;	/* Start count for instruction hook counter. */
+  lua_Hook hookf;	/* Hook function. */
+  lua_CFunction wrapf;	/* Wrapper for C function calls. */
+  lua_CFunction panic;	/* Called as a last resort for errors. */
+  volatile int32_t vmstate;  /* VM state or current JIT code trace number. */
+  BCIns bc_cfunc_int;	/* Bytecode for internal C function calls. */
+  BCIns bc_cfunc_ext;	/* Bytecode for external C function calls. */
+  GCRef jit_L;		/* Current JIT code lua_State or NULL. */
+  MRef jit_base;	/* Current JIT code L->base. */
+  MRef ctype_state;	/* Pointer to C type state. */
+  GCRef gcroot[GCROOT_MAX];  /* GC roots. */
+} global_State;
+
+#define mainthread(g)	(&gcref(g->mainthref)->th)
+#define niltv(L) \
+  check_exp(tvisnil(&G(L)->nilnode.val), &G(L)->nilnode.val)
+#define niltvg(g) \
+  check_exp(tvisnil(&(g)->nilnode.val), &(g)->nilnode.val)
+
+/* Hook management. Hook event masks are defined in lua.h. */
+#define HOOK_EVENTMASK		0x0f
+#define HOOK_ACTIVE		0x10
+#define HOOK_ACTIVE_SHIFT	4
+#define HOOK_VMEVENT		0x20
+#define HOOK_GC			0x40
+#define hook_active(g)		((g)->hookmask & HOOK_ACTIVE)
+#define hook_enter(g)		((g)->hookmask |= HOOK_ACTIVE)
+#define hook_entergc(g)		((g)->hookmask |= (HOOK_ACTIVE|HOOK_GC))
+#define hook_vmevent(g)		((g)->hookmask |= (HOOK_ACTIVE|HOOK_VMEVENT))
+#define hook_leave(g)		((g)->hookmask &= ~HOOK_ACTIVE)
+#define hook_save(g)		((g)->hookmask & ~HOOK_EVENTMASK)
+#define hook_restore(g, h) \
+  ((g)->hookmask = ((g)->hookmask & HOOK_EVENTMASK) | (h))
+
+/* Per-thread state object. */
+struct lua_State {
+  GCHeader;
+  uint8_t dummy_ffid;	/* Fake FF_C for curr_funcisL() on dummy frames. */
+  uint8_t status;	/* Thread status. */
+  MRef glref;		/* Link to global state. */
+  GCRef gclist;		/* GC chain. */
+  TValue *base;		/* Base of currently executing function. */
+  TValue *top;		/* First free slot in the stack. */
+  MRef maxstack;	/* Last free slot in the stack. */
+  MRef stack;		/* Stack base. */
+  GCRef openupval;	/* List of open upvalues in the stack. */
+  GCRef env;		/* Thread environment (table of globals). */
+  void *cframe;		/* End of C stack frame chain. */
+  MSize stacksize;	/* True stack size (incl. LJ_STACK_EXTRA). */
+};
+
+#define G(L)			(mref(L->glref, global_State))
+#define registry(L)		(&G(L)->registrytv)
+
+/* Macros to access the currently executing (Lua) function. */
+#define curr_func(L)		(&gcref((L->base-1)->fr.func)->fn)
+#define curr_funcisL(L)		(isluafunc(curr_func(L)))
+#define curr_proto(L)		(funcproto(curr_func(L)))
+#define curr_topL(L)		(L->base + curr_proto(L)->framesize)
+#define curr_top(L)		(curr_funcisL(L) ? curr_topL(L) : L->top)
+
+/* -- GC object definition and conversions -------------------------------- */
+
+/* GC header for generic access to common fields of GC objects. */
+typedef struct GChead {
+  GCHeader;
+  uint8_t unused1;
+  uint8_t unused2;
+  GCRef env;
+  GCRef gclist;
+  GCRef metatable;
+} GChead;
+
+/* The env field SHOULD be at the same offset for all GC objects. */
+LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCfuncL, env));
+LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCudata, env));
+
+/* The metatable field MUST be at the same offset for all GC objects. */
+LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCtab, metatable));
+LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCudata, metatable));
+
+/* The gclist field MUST be at the same offset for all GC objects. */
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(lua_State, gclist));
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCproto, gclist));
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCfuncL, gclist));
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtab, gclist));
+
+typedef union GCobj {
+  GChead gch;
+  GCstr str;
+  GCupval uv;
+  lua_State th;
+  GCproto pt;
+  GCfunc fn;
+  GCcdata cd;
+  GCtab tab;
+  GCudata ud;
+} GCobj;
+
+/* Macros to convert a GCobj pointer into a specific value. */
+#define gco2str(o)	check_exp((o)->gch.gct == ~LJ_TSTR, &(o)->str)
+#define gco2uv(o)	check_exp((o)->gch.gct == ~LJ_TUPVAL, &(o)->uv)
+#define gco2th(o)	check_exp((o)->gch.gct == ~LJ_TTHREAD, &(o)->th)
+#define gco2pt(o)	check_exp((o)->gch.gct == ~LJ_TPROTO, &(o)->pt)
+#define gco2func(o)	check_exp((o)->gch.gct == ~LJ_TFUNC, &(o)->fn)
+#define gco2cd(o)	check_exp((o)->gch.gct == ~LJ_TCDATA, &(o)->cd)
+#define gco2tab(o)	check_exp((o)->gch.gct == ~LJ_TTAB, &(o)->tab)
+#define gco2ud(o)	check_exp((o)->gch.gct == ~LJ_TUDATA, &(o)->ud)
+
+/* Macro to convert any collectable object into a GCobj pointer. */
+#define obj2gco(v)	((GCobj *)(v))
+
+/* -- TValue getters/setters ---------------------------------------------- */
+
+#ifdef LUA_USE_ASSERT
+#include "lj_gc.h"
+#endif
+
+/* Macros to test types. */
+#define itype(o)	((o)->it)
+#define tvisnil(o)	(itype(o) == LJ_TNIL)
+#define tvisfalse(o)	(itype(o) == LJ_TFALSE)
+#define tvistrue(o)	(itype(o) == LJ_TTRUE)
+#define tvisbool(o)	(tvisfalse(o) || tvistrue(o))
+#if LJ_64
+#define tvislightud(o)	(((int32_t)itype(o) >> 15) == -2)
+#else
+#define tvislightud(o)	(itype(o) == LJ_TLIGHTUD)
+#endif
+#define tvisstr(o)	(itype(o) == LJ_TSTR)
+#define tvisfunc(o)	(itype(o) == LJ_TFUNC)
+#define tvisthread(o)	(itype(o) == LJ_TTHREAD)
+#define tvisproto(o)	(itype(o) == LJ_TPROTO)
+#define tviscdata(o)	(itype(o) == LJ_TCDATA)
+#define tvistab(o)	(itype(o) == LJ_TTAB)
+#define tvisudata(o)	(itype(o) == LJ_TUDATA)
+#define tvisnumber(o)	(itype(o) <= LJ_TISNUM)
+#define tvisint(o)	(LJ_DUALNUM && itype(o) == LJ_TISNUM)
+#define tvisnum(o)	(itype(o) < LJ_TISNUM)
+
+#define tvistruecond(o)	(itype(o) < LJ_TISTRUECOND)
+#define tvispri(o)	(itype(o) >= LJ_TISPRI)
+#define tvistabud(o)	(itype(o) <= LJ_TISTABUD)  /* && !tvisnum() */
+#define tvisgcv(o)	((itype(o) - LJ_TISGCV) > (LJ_TNUMX - LJ_TISGCV))
+
+/* Special macros to test numbers for NaN, +0, -0, +1 and raw equality. */
+#define tvisnan(o)	((o)->n != (o)->n)
+#if LJ_64
+#define tviszero(o)	(((o)->u64 << 1) == 0)
+#else
+#define tviszero(o)	(((o)->u32.lo | ((o)->u32.hi << 1)) == 0)
+#endif
+#define tvispzero(o)	((o)->u64 == 0)
+#define tvismzero(o)	((o)->u64 == U64x(80000000,00000000))
+#define tvispone(o)	((o)->u64 == U64x(3ff00000,00000000))
+#define rawnumequal(o1, o2)	((o1)->u64 == (o2)->u64)
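+/* Editor's note (illustrative, not upstream): these test raw IEEE-754 bit
+** patterns: +1.0 is 0x3ff0000000000000 (sign 0, biased exponent 1023, zero
+** mantissa), and -0 differs from +0 only in the sign bit, which is why
+** tviszero() shifts the sign bit out before comparing against zero.
+*/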
+
+/* Macros to convert type ids. */
+#if LJ_64
+#define itypemap(o) \
+  (tvisnumber(o) ? ~LJ_TNUMX : tvislightud(o) ? ~LJ_TLIGHTUD : ~itype(o))
+#else
+#define itypemap(o)	(tvisnumber(o) ? ~LJ_TNUMX : ~itype(o))
+#endif
+
+/* Macros to get tagged values. */
+#define gcval(o)	(gcref((o)->gcr))
+#define boolV(o)	check_exp(tvisbool(o), (LJ_TFALSE - (o)->it))
+#if LJ_64
+#define lightudV(o) \
+  check_exp(tvislightud(o), (void *)((o)->u64 & U64x(00007fff,ffffffff)))
+#else
+#define lightudV(o)	check_exp(tvislightud(o), gcrefp((o)->gcr, void))
+#endif
+#define gcV(o)		check_exp(tvisgcv(o), gcval(o))
+#define strV(o)		check_exp(tvisstr(o), &gcval(o)->str)
+#define funcV(o)	check_exp(tvisfunc(o), &gcval(o)->fn)
+#define threadV(o)	check_exp(tvisthread(o), &gcval(o)->th)
+#define protoV(o)	check_exp(tvisproto(o), &gcval(o)->pt)
+#define cdataV(o)	check_exp(tviscdata(o), &gcval(o)->cd)
+#define tabV(o)		check_exp(tvistab(o), &gcval(o)->tab)
+#define udataV(o)	check_exp(tvisudata(o), &gcval(o)->ud)
+#define numV(o)		check_exp(tvisnum(o), (o)->n)
+#define intV(o)		check_exp(tvisint(o), (int32_t)(o)->i)
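+/* Editor's sketch (not part of upstream LuaJIT): callers are expected to
+** test the tag first and only then use the unchecked getter, e.g.:
+**
+**   static GCstr *sketch_optstr(cTValue *o)
+**   {
+**     return tvisstr(o) ? strV(o) : NULL;
+**   }
+*/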
+
+/* Macros to set tagged values. */
+#define setitype(o, i)		((o)->it = (i))
+#define setnilV(o)		((o)->it = LJ_TNIL)
+#define setboolV(o, x)		((o)->it = LJ_TFALSE-(uint32_t)(x))
+
+static LJ_AINLINE void setlightudV(TValue *o, void *p)
+{
+#if LJ_64
+  o->u64 = (uint64_t)p | (((uint64_t)0xffff) << 48);
+#else
+  setgcrefp(o->gcr, p); setitype(o, LJ_TLIGHTUD);
+#endif
+}
+
+#if LJ_64
+#define checklightudptr(L, p) \
+  (((uint64_t)(p) >> 47) ? (lj_err_msg(L, LJ_ERR_BADLU), NULL) : (p))
+#define setcont(o, f) \
+  ((o)->u64 = (uint64_t)(void *)(f) - (uint64_t)lj_vm_asm_begin)
+#else
+#define checklightudptr(L, p)	(p)
+#define setcont(o, f)		setlightudV((o), (void *)(f))
+#endif
+
+#define tvchecklive(L, o) \
+  UNUSED(L), lua_assert(!tvisgcv(o) || \
+  ((~itype(o) == gcval(o)->gch.gct) && !isdead(G(L), gcval(o))))
+
+static LJ_AINLINE void setgcV(lua_State *L, TValue *o, GCobj *v, uint32_t itype)
+{
+  setgcref(o->gcr, v); setitype(o, itype); tvchecklive(L, o);
+}
+
+#define define_setV(name, type, tag) \
+static LJ_AINLINE void name(lua_State *L, TValue *o, type *v) \
+{ \
+  setgcV(L, o, obj2gco(v), tag); \
+}
+define_setV(setstrV, GCstr, LJ_TSTR)
+define_setV(setthreadV, lua_State, LJ_TTHREAD)
+define_setV(setprotoV, GCproto, LJ_TPROTO)
+define_setV(setfuncV, GCfunc, LJ_TFUNC)
+define_setV(setcdataV, GCcdata, LJ_TCDATA)
+define_setV(settabV, GCtab, LJ_TTAB)
+define_setV(setudataV, GCudata, LJ_TUDATA)
+
+#define setnumV(o, x)		((o)->n = (x))
+#define setnanV(o)		((o)->u64 = U64x(fff80000,00000000))
+#define setpinfV(o)		((o)->u64 = U64x(7ff00000,00000000))
+#define setminfV(o)		((o)->u64 = U64x(fff00000,00000000))
+
+static LJ_AINLINE void setintV(TValue *o, int32_t i)
+{
+#if LJ_DUALNUM
+  o->i = (uint32_t)i; setitype(o, LJ_TISNUM);
+#else
+  o->n = (lua_Number)i;
+#endif
+}
+
+static LJ_AINLINE void setint64V(TValue *o, int64_t i)
+{
+  if (LJ_DUALNUM && LJ_LIKELY(i == (int64_t)(int32_t)i))
+    setintV(o, (int32_t)i);
+  else
+    setnumV(o, (lua_Number)i);
+}
+
+#if LJ_64
+#define setintptrV(o, i)	setint64V((o), (i))
+#else
+#define setintptrV(o, i)	setintV((o), (i))
+#endif
+
+/* Copy tagged values. */
+static LJ_AINLINE void copyTV(lua_State *L, TValue *o1, const TValue *o2)
+{
+  *o1 = *o2; tvchecklive(L, o1);
+}
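+/* Editor's sketch (not part of upstream LuaJIT): the setters take the tag
+** from the function name; GC-object setters also take L for the liveness
+** assert, while number/integer setters do not. A hedged example of filling
+** a stack slot:
+**
+**   static void sketch_fill_slot(lua_State *L, TValue *slot, GCstr *s)
+**   {
+**     if (s)
+**       setstrV(L, slot, s);
+**     else
+**       setintV(slot, 0);
+**   }
+*/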
+
+/* -- Number to integer conversion ---------------------------------------- */
+
+#if LJ_SOFTFP
+LJ_ASMF int32_t lj_vm_tobit(double x);
+#endif
+
+static LJ_AINLINE int32_t lj_num2bit(lua_Number n)
+{
+#if LJ_SOFTFP
+  return lj_vm_tobit(n);
+#else
+  TValue o;
+  o.n = n + 6755399441055744.0;  /* 2^52 + 2^51 */
+  return (int32_t)o.u32.lo;
+#endif
+}
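+/* Editor's note (illustrative, not upstream): adding the bias 2^52+2^51
+** rounds the sum to an integer whose low 32 bits land in the low mantissa
+** word, so u32.lo holds n converted to int32 (mod 2^32). E.g. n = -1.0
+** yields 6755399441055743.0 = 0x4337ffffffffffff, whose low word is
+** 0xffffffff, i.e. (int32_t)-1.
+*/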
+
+#if LJ_TARGET_X86 && !defined(__SSE2__)
+#define lj_num2int(n)   lj_num2bit((n))
+#else
+#define lj_num2int(n)   ((int32_t)(n))
+#endif
+
+static LJ_AINLINE uint64_t lj_num2u64(lua_Number n)
+{
+#ifdef _MSC_VER
+  if (n >= 9223372036854775808.0)  /* MSVC mishandles (uint64_t) casts of doubles >= 2^63. */
+    return (uint64_t)(int64_t)(n - 18446744073709551616.0);
+  else
+#endif
+    return (uint64_t)n;
+}
+
+static LJ_AINLINE int32_t numberVint(cTValue *o)
+{
+  if (LJ_LIKELY(tvisint(o)))
+    return intV(o);
+  else
+    return lj_num2int(numV(o));
+}
+
+static LJ_AINLINE lua_Number numberVnum(cTValue *o)
+{
+  if (LJ_UNLIKELY(tvisint(o)))
+    return (lua_Number)intV(o);
+  else
+    return numV(o);
+}
+
+/* -- Miscellaneous object handling --------------------------------------- */
+
+/* Names and maps for internal and external object tags. */
+LJ_DATA const char *const lj_obj_typename[1+LUA_TCDATA+1];
+LJ_DATA const char *const lj_obj_itypename[~LJ_TNUMX+1];
+
+#define lj_typename(o)	(lj_obj_itypename[itypemap(o)])
+
+/* Compare two objects without calling metamethods. */
+LJ_FUNC int lj_obj_equal(cTValue *o1, cTValue *o2);
+
+#endif

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_opt_dce.c
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_opt_dce.c b/lib/luajit/src/lj_opt_dce.c
new file mode 100644
index 0000000..7f1faaf
--- /dev/null
+++ b/lib/luajit/src/lj_opt_dce.c
@@ -0,0 +1,78 @@
+/*
+** DCE: Dead Code Elimination. Pre-LOOP only -- ASM already performs DCE.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_dce_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref)		(&J->cur.ir[(ref)])
+
+/* Scan through all snapshots and mark all referenced instructions. */
+static void dce_marksnap(jit_State *J)
+{
+  SnapNo i, nsnap = J->cur.nsnap;
+  for (i = 0; i < nsnap; i++) {
+    SnapShot *snap = &J->cur.snap[i];
+    SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+    MSize n, nent = snap->nent;
+    for (n = 0; n < nent; n++) {
+      IRRef ref = snap_ref(map[n]);
+      if (ref >= REF_FIRST)
+	irt_setmark(IR(ref)->t);
+    }
+  }
+}
+
+/* Backwards propagate marks. Replace unused instructions with NOPs. */
+static void dce_propagate(jit_State *J)
+{
+  IRRef1 *pchain[IR__MAX];
+  IRRef ins;
+  uint32_t i;
+  for (i = 0; i < IR__MAX; i++) pchain[i] = &J->chain[i];
+  for (ins = J->cur.nins-1; ins >= REF_FIRST; ins--) {
+    IRIns *ir = IR(ins);
+    if (irt_ismarked(ir->t)) {
+      irt_clearmark(ir->t);
+      pchain[ir->o] = &ir->prev;
+    } else if (!ir_sideeff(ir)) {
+      *pchain[ir->o] = ir->prev;  /* Reroute original instruction chain. */
+      ir->t.irt = IRT_NIL;
+      ir->o = IR_NOP;  /* Replace instruction with NOP. */
+      ir->op1 = ir->op2 = 0;
+      ir->prev = 0;
+      continue;
+    }
+    if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t);
+    if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t);
+  }
+}
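+/* Editor's note (illustrative, not upstream): the walk is backwards, so a
+** mark set on ir->op1/op2 here is seen once the loop reaches that lower-
+** numbered operand. E.g. in
+**
+**   0001 ADD    a  b   <- referenced by nothing, no side effect
+**   0002 XSTORE p  c   <- store, ir_sideeff() is true
+**
+** 0001 is neither referenced nor held live by a snapshot, so it is never
+** marked and becomes a NOP, while 0002 is kept and its operands are marked
+** so they survive as well.
+*/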
+
+/* Dead Code Elimination.
+**
+** First backpropagate marks for all used instructions. Then replace
+** the unused ones with a NOP. Note that compressing the IR to eliminate
+** the NOPs does not pay off.
+*/
+void lj_opt_dce(jit_State *J)
+{
+  if ((J->flags & JIT_F_OPT_DCE)) {
+    dce_marksnap(J);
+    dce_propagate(J);
+    memset(J->bpropcache, 0, sizeof(J->bpropcache));  /* Invalidate cache. */
+  }
+}
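+/* Editor's note (hedged): per the header comment, this pass is only needed
+** before the LOOP optimization, since the assembler performs its own DCE
+** during code generation. The bpropcache is cleared because its cached
+** references may now point at instructions that were just turned into NOPs.
+*/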
+
+#undef IR
+
+#endif