Posted to commits@trafficserver.apache.org by zw...@apache.org on 2015/07/29 01:39:47 UTC

[09/62] [abbrv] trafficserver git commit: TS-3783 TS-3030 Add luajit v2.0.4 as a subtree

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_snap.c
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_snap.c b/lib/luajit/src/lj_snap.c
new file mode 100644
index 0000000..5c870ba
--- /dev/null
+++ b/lib/luajit/src/lj_snap.c
@@ -0,0 +1,866 @@
+/*
+** Snapshot handling.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_snap_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_gc.h"
+#include "lj_tab.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_snap.h"
+#include "lj_target.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#endif
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref)		(&J->cur.ir[(ref)])
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b)	(lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+/* Emit raw IR without passing through optimizations. */
+#define emitir_raw(ot, a, b)	(lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
+
+/* -- Snapshot buffer allocation ------------------------------------------ */
+
+/* Grow snapshot buffer. */
+void lj_snap_grow_buf_(jit_State *J, MSize need)
+{
+  MSize maxsnap = (MSize)J->param[JIT_P_maxsnap];
+  if (need > maxsnap)
+    lj_trace_err(J, LJ_TRERR_SNAPOV);
+  lj_mem_growvec(J->L, J->snapbuf, J->sizesnap, maxsnap, SnapShot);
+  J->cur.snap = J->snapbuf;
+}
+
+/* Grow snapshot map buffer. */
+void lj_snap_grow_map_(jit_State *J, MSize need)
+{
+  if (need < 2*J->sizesnapmap)
+    need = 2*J->sizesnapmap;
+  else if (need < 64)
+    need = 64;
+  J->snapmapbuf = (SnapEntry *)lj_mem_realloc(J->L, J->snapmapbuf,
+		    J->sizesnapmap*sizeof(SnapEntry), need*sizeof(SnapEntry));
+  J->cur.snapmap = J->snapmapbuf;
+  J->sizesnapmap = need;
+}
+
+/* -- Snapshot generation ------------------------------------------------- */
+
+/* Add all modified slots to the snapshot. */
+static MSize snapshot_slots(jit_State *J, SnapEntry *map, BCReg nslots)
+{
+  IRRef retf = J->chain[IR_RETF];  /* Limits SLOAD restore elimination. */
+  BCReg s;
+  MSize n = 0;
+  for (s = 0; s < nslots; s++) {
+    TRef tr = J->slot[s];
+    IRRef ref = tref_ref(tr);
+    if (ref) {
+      SnapEntry sn = SNAP_TR(s, tr);
+      IRIns *ir = IR(ref);
+      if (!(sn & (SNAP_CONT|SNAP_FRAME)) &&
+	  ir->o == IR_SLOAD && ir->op1 == s && ref > retf) {
+	/* No need to snapshot unmodified non-inherited slots. */
+	if (!(ir->op2 & IRSLOAD_INHERIT))
+	  continue;
+	/* No need to restore readonly slots and unmodified non-parent slots. */
+	if (!(LJ_DUALNUM && (ir->op2 & IRSLOAD_CONVERT)) &&
+	    (ir->op2 & (IRSLOAD_READONLY|IRSLOAD_PARENT)) != IRSLOAD_PARENT)
+	  sn |= SNAP_NORESTORE;
+      }
+      if (LJ_SOFTFP && irt_isnum(ir->t))
+	sn |= SNAP_SOFTFPNUM;
+      map[n++] = sn;
+    }
+  }
+  return n;
+}
+
+/* Add frame links at the end of the snapshot. */
+static BCReg snapshot_framelinks(jit_State *J, SnapEntry *map)
+{
+  cTValue *frame = J->L->base - 1;
+  cTValue *lim = J->L->base - J->baseslot;
+  cTValue *ftop = frame + funcproto(frame_func(frame))->framesize;
+  MSize f = 0;
+  map[f++] = SNAP_MKPC(J->pc);  /* The current PC is always the first entry. */
+  while (frame > lim) {  /* Backwards traversal of all frames above base. */
+    if (frame_islua(frame)) {
+      map[f++] = SNAP_MKPC(frame_pc(frame));
+      frame = frame_prevl(frame);
+    } else if (frame_iscont(frame)) {
+      map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
+      map[f++] = SNAP_MKPC(frame_contpc(frame));
+      frame = frame_prevd(frame);
+    } else {
+      lua_assert(!frame_isc(frame));
+      map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
+      frame = frame_prevd(frame);
+      continue;
+    }
+    if (frame + funcproto(frame_func(frame))->framesize > ftop)
+      ftop = frame + funcproto(frame_func(frame))->framesize;
+  }
+  lua_assert(f == (MSize)(1 + J->framedepth));
+  return (BCReg)(ftop - lim);
+}
+
+/* Take a snapshot of the current stack. */
+static void snapshot_stack(jit_State *J, SnapShot *snap, MSize nsnapmap)
+{
+  BCReg nslots = J->baseslot + J->maxslot;
+  MSize nent;
+  SnapEntry *p;
+  /* Conservative estimate. */
+  lj_snap_grow_map(J, nsnapmap + nslots + (MSize)J->framedepth+1);
+  p = &J->cur.snapmap[nsnapmap];
+  nent = snapshot_slots(J, p, nslots);
+  snap->topslot = (uint8_t)snapshot_framelinks(J, p + nent);
+  snap->mapofs = (uint16_t)nsnapmap;
+  snap->ref = (IRRef1)J->cur.nins;
+  snap->nent = (uint8_t)nent;
+  snap->nslots = (uint8_t)nslots;
+  snap->count = 0;
+  J->cur.nsnapmap = (uint16_t)(nsnapmap + nent + 1 + J->framedepth);
+}
+
+/* Add or merge a snapshot. */
+void lj_snap_add(jit_State *J)
+{
+  MSize nsnap = J->cur.nsnap;
+  MSize nsnapmap = J->cur.nsnapmap;
+  /* Merge if no ins. in between or if requested and no guard in between. */
+  if (J->mergesnap ? !irt_isguard(J->guardemit) :
+      (nsnap > 0 && J->cur.snap[nsnap-1].ref == J->cur.nins)) {
+    if (nsnap == 1) {  /* But preserve snap #0 PC. */
+      emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0);
+      goto nomerge;
+    }
+    nsnapmap = J->cur.snap[--nsnap].mapofs;
+  } else {
+  nomerge:
+    lj_snap_grow_buf(J, nsnap+1);
+    J->cur.nsnap = (uint16_t)(nsnap+1);
+  }
+  J->mergesnap = 0;
+  J->guardemit.irt = 0;
+  snapshot_stack(J, &J->cur.snap[nsnap], nsnapmap);
+}
+
+/* -- Snapshot modification ----------------------------------------------- */
+
+#define SNAP_USEDEF_SLOTS	(LJ_MAX_JSLOTS+LJ_STACK_EXTRA)
+
+/* Find unused slots with reaching-definitions bytecode data-flow analysis. */
+static BCReg snap_usedef(jit_State *J, uint8_t *udf,
+			 const BCIns *pc, BCReg maxslot)
+{
+  BCReg s;
+  GCobj *o;
+
+  if (maxslot == 0) return 0;
+#ifdef LUAJIT_USE_VALGRIND
+  /* Avoid errors for harmless reads beyond maxslot. */
+  memset(udf, 1, SNAP_USEDEF_SLOTS);
+#else
+  memset(udf, 1, maxslot);
+#endif
+
+  /* Treat open upvalues as used. */
+  o = gcref(J->L->openupval);
+  while (o) {
+    if (uvval(gco2uv(o)) < J->L->base) break;
+    udf[uvval(gco2uv(o)) - J->L->base] = 0;
+    o = gcref(o->gch.nextgc);
+  }
+
+#define USE_SLOT(s)		udf[(s)] &= ~1
+#define DEF_SLOT(s)		udf[(s)] *= 3
+
+  /* Scan through following bytecode and check for uses/defs. */
+  lua_assert(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc);
+  for (;;) {
+    BCIns ins = *pc++;
+    BCOp op = bc_op(ins);
+    switch (bcmode_b(op)) {
+    case BCMvar: USE_SLOT(bc_b(ins)); break;
+    default: break;
+    }
+    switch (bcmode_c(op)) {
+    case BCMvar: USE_SLOT(bc_c(ins)); break;
+    case BCMrbase:
+      lua_assert(op == BC_CAT);
+      for (s = bc_b(ins); s <= bc_c(ins); s++) USE_SLOT(s);
+      for (; s < maxslot; s++) DEF_SLOT(s);
+      break;
+    case BCMjump:
+    handle_jump: {
+      BCReg minslot = bc_a(ins);
+      if (op >= BC_FORI && op <= BC_JFORL) minslot += FORL_EXT;
+      else if (op >= BC_ITERL && op <= BC_JITERL) minslot += bc_b(pc[-2])-1;
+      else if (op == BC_UCLO) { pc += bc_j(ins); break; }
+      for (s = minslot; s < maxslot; s++) DEF_SLOT(s);
+      return minslot < maxslot ? minslot : maxslot;
+      }
+    case BCMlit:
+      if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) {
+	goto handle_jump;
+      } else if (bc_isret(op)) {
+	BCReg top = op == BC_RETM ? maxslot : (bc_a(ins) + bc_d(ins)-1);
+	for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s);
+	for (; s < top; s++) USE_SLOT(s);
+	for (; s < maxslot; s++) DEF_SLOT(s);
+	return 0;
+      }
+      break;
+    case BCMfunc: return maxslot;  /* NYI: will abort, anyway. */
+    default: break;
+    }
+    switch (bcmode_a(op)) {
+    case BCMvar: USE_SLOT(bc_a(ins)); break;
+    case BCMdst:
+       if (!(op == BC_ISTC || op == BC_ISFC)) DEF_SLOT(bc_a(ins));
+       break;
+    case BCMbase:
+      if (op >= BC_CALLM && op <= BC_VARG) {
+	BCReg top = (op == BC_CALLM || op == BC_CALLMT || bc_c(ins) == 0) ?
+		    maxslot : (bc_a(ins) + bc_c(ins));
+	s = bc_a(ins) - ((op == BC_ITERC || op == BC_ITERN) ? 3 : 0);
+	for (; s < top; s++) USE_SLOT(s);
+	for (; s < maxslot; s++) DEF_SLOT(s);
+	if (op == BC_CALLT || op == BC_CALLMT) {
+	  for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s);
+	  return 0;
+	}
+      } else if (op == BC_KNIL) {
+	for (s = bc_a(ins); s <= bc_d(ins); s++) DEF_SLOT(s);
+      } else if (op == BC_TSETM) {
+	for (s = bc_a(ins)-1; s < maxslot; s++) USE_SLOT(s);
+      }
+      break;
+    default: break;
+    }
+    lua_assert(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc);
+  }
+
+#undef USE_SLOT
+#undef DEF_SLOT
+
+  return 0;  /* unreachable */
+}
+
+/* Purge dead slots before the next snapshot. */
+void lj_snap_purge(jit_State *J)
+{
+  uint8_t udf[SNAP_USEDEF_SLOTS];
+  BCReg maxslot = J->maxslot;
+  BCReg s = snap_usedef(J, udf, J->pc, maxslot);
+  for (; s < maxslot; s++)
+    if (udf[s] != 0)
+      J->base[s] = 0;  /* Purge dead slots. */
+}
+
+/* Shrink last snapshot. */
+void lj_snap_shrink(jit_State *J)
+{
+  SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
+  SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+  MSize n, m, nlim, nent = snap->nent;
+  uint8_t udf[SNAP_USEDEF_SLOTS];
+  BCReg maxslot = J->maxslot;
+  BCReg minslot = snap_usedef(J, udf, snap_pc(map[nent]), maxslot);
+  BCReg baseslot = J->baseslot;
+  maxslot += baseslot;
+  minslot += baseslot;
+  snap->nslots = (uint8_t)maxslot;
+  for (n = m = 0; n < nent; n++) {  /* Remove unused slots from snapshot. */
+    BCReg s = snap_slot(map[n]);
+    if (s < minslot || (s < maxslot && udf[s-baseslot] == 0))
+      map[m++] = map[n];  /* Only copy used slots. */
+  }
+  snap->nent = (uint8_t)m;
+  nlim = J->cur.nsnapmap - snap->mapofs - 1;
+  while (n <= nlim) map[m++] = map[n++];  /* Move PC + frame links down. */
+  J->cur.nsnapmap = (uint16_t)(snap->mapofs + m);  /* Free up space in map. */
+}
+
+/* -- Snapshot access ----------------------------------------------------- */
+
+/* Initialize a Bloom Filter with all renamed refs.
+** There are very few renames (often none), so the filter has
+** very few bits set. This makes it suitable for negative filtering.
+*/
+static BloomFilter snap_renamefilter(GCtrace *T, SnapNo lim)
+{
+  BloomFilter rfilt = 0;
+  IRIns *ir;
+  for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--)
+    if (ir->op2 <= lim)
+      bloomset(rfilt, ir->op1);
+  return rfilt;
+}
+
+/* Process matching renames to find the original RegSP. */
+static RegSP snap_renameref(GCtrace *T, SnapNo lim, IRRef ref, RegSP rs)
+{
+  IRIns *ir;
+  for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--)
+    if (ir->op1 == ref && ir->op2 <= lim)
+      rs = ir->prev;
+  return rs;
+}
+
+/* Copy RegSP from parent snapshot to the parent links of the IR. */
+IRIns *lj_snap_regspmap(GCtrace *T, SnapNo snapno, IRIns *ir)
+{
+  SnapShot *snap = &T->snap[snapno];
+  SnapEntry *map = &T->snapmap[snap->mapofs];
+  BloomFilter rfilt = snap_renamefilter(T, snapno);
+  MSize n = 0;
+  IRRef ref = 0;
+  for ( ; ; ir++) {
+    uint32_t rs;
+    if (ir->o == IR_SLOAD) {
+      if (!(ir->op2 & IRSLOAD_PARENT)) break;
+      for ( ; ; n++) {
+	lua_assert(n < snap->nent);
+	if (snap_slot(map[n]) == ir->op1) {
+	  ref = snap_ref(map[n++]);
+	  break;
+	}
+      }
+    } else if (LJ_SOFTFP && ir->o == IR_HIOP) {
+      ref++;
+    } else if (ir->o == IR_PVAL) {
+      ref = ir->op1 + REF_BIAS;
+    } else {
+      break;
+    }
+    rs = T->ir[ref].prev;
+    if (bloomtest(rfilt, ref))
+      rs = snap_renameref(T, snapno, ref, rs);
+    ir->prev = (uint16_t)rs;
+    lua_assert(regsp_used(rs));
+  }
+  return ir;
+}
+
+/* -- Snapshot replay ----------------------------------------------------- */
+
+/* Replay constant from parent trace. */
+static TRef snap_replay_const(jit_State *J, IRIns *ir)
+{
+  /* Only have to deal with constants that can occur in stack slots. */
+  switch ((IROp)ir->o) {
+  case IR_KPRI: return TREF_PRI(irt_type(ir->t));
+  case IR_KINT: return lj_ir_kint(J, ir->i);
+  case IR_KGC: return lj_ir_kgc(J, ir_kgc(ir), irt_t(ir->t));
+  case IR_KNUM: return lj_ir_k64(J, IR_KNUM, ir_knum(ir));
+  case IR_KINT64: return lj_ir_k64(J, IR_KINT64, ir_kint64(ir));
+  case IR_KPTR: return lj_ir_kptr(J, ir_kptr(ir));  /* Continuation. */
+  default: lua_assert(0); return TREF_NIL; break;
+  }
+}
+
+/* De-duplicate parent reference. */
+static TRef snap_dedup(jit_State *J, SnapEntry *map, MSize nmax, IRRef ref)
+{
+  MSize j;
+  for (j = 0; j < nmax; j++)
+    if (snap_ref(map[j]) == ref)
+      return J->slot[snap_slot(map[j])] & ~(SNAP_CONT|SNAP_FRAME);
+  return 0;
+}
+
+/* Emit parent reference with de-duplication. */
+static TRef snap_pref(jit_State *J, GCtrace *T, SnapEntry *map, MSize nmax,
+		      BloomFilter seen, IRRef ref)
+{
+  IRIns *ir = &T->ir[ref];
+  TRef tr;
+  if (irref_isk(ref))
+    tr = snap_replay_const(J, ir);
+  else if (!regsp_used(ir->prev))
+    tr = 0;
+  else if (!bloomtest(seen, ref) || (tr = snap_dedup(J, map, nmax, ref)) == 0)
+    tr = emitir(IRT(IR_PVAL, irt_type(ir->t)), ref - REF_BIAS, 0);
+  return tr;
+}
+
+/* Check whether a sunk store corresponds to an allocation. Slow path. */
+static int snap_sunk_store2(jit_State *J, IRIns *ira, IRIns *irs)
+{
+  if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
+      irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
+    IRIns *irk = IR(irs->op1);
+    if (irk->o == IR_AREF || irk->o == IR_HREFK)
+      irk = IR(irk->op1);
+    return (IR(irk->op1) == ira);
+  }
+  return 0;
+}
+
+/* Check whether a sunk store corresponds to an allocation. Fast path. */
+static LJ_AINLINE int snap_sunk_store(jit_State *J, IRIns *ira, IRIns *irs)
+{
+  if (irs->s != 255)
+    return (ira + irs->s == irs);  /* Fast check. */
+  return snap_sunk_store2(J, ira, irs);
+}
+
+/* Replay snapshot state to setup side trace. */
+void lj_snap_replay(jit_State *J, GCtrace *T)
+{
+  SnapShot *snap = &T->snap[J->exitno];
+  SnapEntry *map = &T->snapmap[snap->mapofs];
+  MSize n, nent = snap->nent;
+  BloomFilter seen = 0;
+  int pass23 = 0;
+  J->framedepth = 0;
+  /* Emit IR for slots inherited from parent snapshot. */
+  for (n = 0; n < nent; n++) {
+    SnapEntry sn = map[n];
+    BCReg s = snap_slot(sn);
+    IRRef ref = snap_ref(sn);
+    IRIns *ir = &T->ir[ref];
+    TRef tr;
+    /* The bloom filter avoids O(nent^2) overhead for de-duping slots. */
+    if (bloomtest(seen, ref) && (tr = snap_dedup(J, map, n, ref)) != 0)
+      goto setslot;
+    bloomset(seen, ref);
+    if (irref_isk(ref)) {
+      tr = snap_replay_const(J, ir);
+    } else if (!regsp_used(ir->prev)) {
+      pass23 = 1;
+      lua_assert(s != 0);
+      tr = s;
+    } else {
+      IRType t = irt_type(ir->t);
+      uint32_t mode = IRSLOAD_INHERIT|IRSLOAD_PARENT;
+      if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) t = IRT_NUM;
+      if (ir->o == IR_SLOAD) mode |= (ir->op2 & IRSLOAD_READONLY);
+      tr = emitir_raw(IRT(IR_SLOAD, t), s, mode);
+    }
+  setslot:
+    J->slot[s] = tr | (sn&(SNAP_CONT|SNAP_FRAME));  /* Same as TREF_* flags. */
+    J->framedepth += ((sn & (SNAP_CONT|SNAP_FRAME)) && s);
+    if ((sn & SNAP_FRAME))
+      J->baseslot = s+1;
+  }
+  if (pass23) {
+    IRIns *irlast = &T->ir[snap->ref];
+    pass23 = 0;
+    /* Emit dependent PVALs. */
+    for (n = 0; n < nent; n++) {
+      SnapEntry sn = map[n];
+      IRRef refp = snap_ref(sn);
+      IRIns *ir = &T->ir[refp];
+      if (regsp_reg(ir->r) == RID_SUNK) {
+	if (J->slot[snap_slot(sn)] != snap_slot(sn)) continue;
+	pass23 = 1;
+	lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP ||
+		   ir->o == IR_CNEW || ir->o == IR_CNEWI);
+	if (ir->op1 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op1);
+	if (ir->op2 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op2);
+	if (LJ_HASFFI && ir->o == IR_CNEWI) {
+	  if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP)
+	    snap_pref(J, T, map, nent, seen, (ir+1)->op2);
+	} else {
+	  IRIns *irs;
+	  for (irs = ir+1; irs < irlast; irs++)
+	    if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+	      if (snap_pref(J, T, map, nent, seen, irs->op2) == 0)
+		snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1);
+	      else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) &&
+		       irs+1 < irlast && (irs+1)->o == IR_HIOP)
+		snap_pref(J, T, map, nent, seen, (irs+1)->op2);
+	    }
+	}
+      } else if (!irref_isk(refp) && !regsp_used(ir->prev)) {
+	lua_assert(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT);
+	J->slot[snap_slot(sn)] = snap_pref(J, T, map, nent, seen, ir->op1);
+      }
+    }
+    /* Replay sunk instructions. */
+    for (n = 0; pass23 && n < nent; n++) {
+      SnapEntry sn = map[n];
+      IRRef refp = snap_ref(sn);
+      IRIns *ir = &T->ir[refp];
+      if (regsp_reg(ir->r) == RID_SUNK) {
+	TRef op1, op2;
+	if (J->slot[snap_slot(sn)] != snap_slot(sn)) {  /* De-dup allocs. */
+	  J->slot[snap_slot(sn)] = J->slot[J->slot[snap_slot(sn)]];
+	  continue;
+	}
+	op1 = ir->op1;
+	if (op1 >= T->nk) op1 = snap_pref(J, T, map, nent, seen, op1);
+	op2 = ir->op2;
+	if (op2 >= T->nk) op2 = snap_pref(J, T, map, nent, seen, op2);
+	if (LJ_HASFFI && ir->o == IR_CNEWI) {
+	  if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP) {
+	    lj_needsplit(J);  /* Emit joining HIOP. */
+	    op2 = emitir_raw(IRT(IR_HIOP, IRT_I64), op2,
+			     snap_pref(J, T, map, nent, seen, (ir+1)->op2));
+	  }
+	  J->slot[snap_slot(sn)] = emitir(ir->ot, op1, op2);
+	} else {
+	  IRIns *irs;
+	  TRef tr = emitir(ir->ot, op1, op2);
+	  J->slot[snap_slot(sn)] = tr;
+	  for (irs = ir+1; irs < irlast; irs++)
+	    if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+	      IRIns *irr = &T->ir[irs->op1];
+	      TRef val, key = irr->op2, tmp = tr;
+	      if (irr->o != IR_FREF) {
+		IRIns *irk = &T->ir[key];
+		if (irr->o == IR_HREFK)
+		  key = lj_ir_kslot(J, snap_replay_const(J, &T->ir[irk->op1]),
+				    irk->op2);
+		else
+		  key = snap_replay_const(J, irk);
+		if (irr->o == IR_HREFK || irr->o == IR_AREF) {
+		  IRIns *irf = &T->ir[irr->op1];
+		  tmp = emitir(irf->ot, tmp, irf->op2);
+		}
+	      }
+	      tmp = emitir(irr->ot, tmp, key);
+	      val = snap_pref(J, T, map, nent, seen, irs->op2);
+	      if (val == 0) {
+		IRIns *irc = &T->ir[irs->op2];
+		lua_assert(irc->o == IR_CONV && irc->op2 == IRCONV_NUM_INT);
+		val = snap_pref(J, T, map, nent, seen, irc->op1);
+		val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT);
+	      } else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) &&
+			 irs+1 < irlast && (irs+1)->o == IR_HIOP) {
+		IRType t = IRT_I64;
+		if (LJ_SOFTFP && irt_type((irs+1)->t) == IRT_SOFTFP)
+		  t = IRT_NUM;
+		lj_needsplit(J);
+		if (irref_isk(irs->op2) && irref_isk((irs+1)->op2)) {
+		  uint64_t k = (uint32_t)T->ir[irs->op2].i +
+			       ((uint64_t)T->ir[(irs+1)->op2].i << 32);
+		  val = lj_ir_k64(J, t == IRT_I64 ? IR_KINT64 : IR_KNUM,
+				  lj_ir_k64_find(J, k));
+		} else {
+		  val = emitir_raw(IRT(IR_HIOP, t), val,
+			  snap_pref(J, T, map, nent, seen, (irs+1)->op2));
+		}
+		tmp = emitir(IRT(irs->o, t), tmp, val);
+		continue;
+	      }
+	      tmp = emitir(irs->ot, tmp, val);
+	    } else if (LJ_HASFFI && irs->o == IR_XBAR && ir->o == IR_CNEW) {
+	      emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
+	    }
+	}
+      }
+    }
+  }
+  J->base = J->slot + J->baseslot;
+  J->maxslot = snap->nslots - J->baseslot;
+  lj_snap_add(J);
+  if (pass23)  /* Need explicit GC step _after_ initial snapshot. */
+    emitir_raw(IRTG(IR_GCSTEP, IRT_NIL), 0, 0);
+}
+
+/* -- Snapshot restore ---------------------------------------------------- */
+
+static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
+			SnapNo snapno, BloomFilter rfilt,
+			IRIns *ir, TValue *o);
+
+/* Restore a value from the trace exit state. */
+static void snap_restoreval(jit_State *J, GCtrace *T, ExitState *ex,
+			    SnapNo snapno, BloomFilter rfilt,
+			    IRRef ref, TValue *o)
+{
+  IRIns *ir = &T->ir[ref];
+  IRType1 t = ir->t;
+  RegSP rs = ir->prev;
+  if (irref_isk(ref)) {  /* Restore constant slot. */
+    lj_ir_kvalue(J->L, o, ir);
+    return;
+  }
+  if (LJ_UNLIKELY(bloomtest(rfilt, ref)))
+    rs = snap_renameref(T, snapno, ref, rs);
+  if (ra_hasspill(regsp_spill(rs))) {  /* Restore from spill slot. */
+    int32_t *sps = &ex->spill[regsp_spill(rs)];
+    if (irt_isinteger(t)) {
+      setintV(o, *sps);
+#if !LJ_SOFTFP
+    } else if (irt_isnum(t)) {
+      o->u64 = *(uint64_t *)sps;
+#endif
+    } else if (LJ_64 && irt_islightud(t)) {
+      /* 64 bit lightuserdata which may escape already has the tag bits. */
+      o->u64 = *(uint64_t *)sps;
+    } else {
+      lua_assert(!irt_ispri(t));  /* PRI refs never have a spill slot. */
+      setgcrefi(o->gcr, *sps);
+      setitype(o, irt_toitype(t));
+    }
+  } else {  /* Restore from register. */
+    Reg r = regsp_reg(rs);
+    if (ra_noreg(r)) {
+      lua_assert(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT);
+      snap_restoreval(J, T, ex, snapno, rfilt, ir->op1, o);
+      if (LJ_DUALNUM) setnumV(o, (lua_Number)intV(o));
+      return;
+    } else if (irt_isinteger(t)) {
+      setintV(o, (int32_t)ex->gpr[r-RID_MIN_GPR]);
+#if !LJ_SOFTFP
+    } else if (irt_isnum(t)) {
+      setnumV(o, ex->fpr[r-RID_MIN_FPR]);
+#endif
+    } else if (LJ_64 && irt_islightud(t)) {
+      /* 64 bit lightuserdata which may escape already has the tag bits. */
+      o->u64 = ex->gpr[r-RID_MIN_GPR];
+    } else {
+      if (!irt_ispri(t))
+	setgcrefi(o->gcr, ex->gpr[r-RID_MIN_GPR]);
+      setitype(o, irt_toitype(t));
+    }
+  }
+}
+
+#if LJ_HASFFI
+/* Restore raw data from the trace exit state. */
+static void snap_restoredata(GCtrace *T, ExitState *ex,
+			     SnapNo snapno, BloomFilter rfilt,
+			     IRRef ref, void *dst, CTSize sz)
+{
+  IRIns *ir = &T->ir[ref];
+  RegSP rs = ir->prev;
+  int32_t *src;
+  uint64_t tmp;
+  if (irref_isk(ref)) {
+    if (ir->o == IR_KNUM || ir->o == IR_KINT64) {
+      src = mref(ir->ptr, int32_t);
+    } else if (sz == 8) {
+      tmp = (uint64_t)(uint32_t)ir->i;
+      src = (int32_t *)&tmp;
+    } else {
+      src = &ir->i;
+    }
+  } else {
+    if (LJ_UNLIKELY(bloomtest(rfilt, ref)))
+      rs = snap_renameref(T, snapno, ref, rs);
+    if (ra_hasspill(regsp_spill(rs))) {
+      src = &ex->spill[regsp_spill(rs)];
+      if (sz == 8 && !irt_is64(ir->t)) {
+	tmp = (uint64_t)(uint32_t)*src;
+	src = (int32_t *)&tmp;
+      }
+    } else {
+      Reg r = regsp_reg(rs);
+      if (ra_noreg(r)) {
+	/* Note: this assumes CNEWI is never used for SOFTFP split numbers. */
+	lua_assert(sz == 8 && ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT);
+	snap_restoredata(T, ex, snapno, rfilt, ir->op1, dst, 4);
+	*(lua_Number *)dst = (lua_Number)*(int32_t *)dst;
+	return;
+      }
+      src = (int32_t *)&ex->gpr[r-RID_MIN_GPR];
+#if !LJ_SOFTFP
+      if (r >= RID_MAX_GPR) {
+	src = (int32_t *)&ex->fpr[r-RID_MIN_FPR];
+#if LJ_TARGET_PPC
+	if (sz == 4) {  /* PPC FPRs are always doubles. */
+	  *(float *)dst = (float)*(double *)src;
+	  return;
+	}
+#else
+	if (LJ_BE && sz == 4) src++;
+#endif
+      }
+#endif
+    }
+  }
+  lua_assert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
+  if (sz == 4) *(int32_t *)dst = *src;
+  else if (sz == 8) *(int64_t *)dst = *(int64_t *)src;
+  else if (sz == 1) *(int8_t *)dst = (int8_t)*src;
+  else *(int16_t *)dst = (int16_t)*src;
+}
+#endif
+
+/* Unsink allocation from the trace exit state. Unsink sunk stores. */
+static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
+			SnapNo snapno, BloomFilter rfilt,
+			IRIns *ir, TValue *o)
+{
+  lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP ||
+	     ir->o == IR_CNEW || ir->o == IR_CNEWI);
+#if LJ_HASFFI
+  if (ir->o == IR_CNEW || ir->o == IR_CNEWI) {
+    CTState *cts = ctype_cts(J->L);
+    CTypeID id = (CTypeID)T->ir[ir->op1].i;
+    CTSize sz = lj_ctype_size(cts, id);
+    GCcdata *cd = lj_cdata_new(cts, id, sz);
+    setcdataV(J->L, o, cd);
+    if (ir->o == IR_CNEWI) {
+      uint8_t *p = (uint8_t *)cdataptr(cd);
+      lua_assert(sz == 4 || sz == 8);
+      if (LJ_32 && sz == 8 && ir+1 < T->ir + T->nins && (ir+1)->o == IR_HIOP) {
+	snap_restoredata(T, ex, snapno, rfilt, (ir+1)->op2, LJ_LE?p+4:p, 4);
+	if (LJ_BE) p += 4;
+	sz = 4;
+      }
+      snap_restoredata(T, ex, snapno, rfilt, ir->op2, p, sz);
+    } else {
+      IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref];
+      for (irs = ir+1; irs < irlast; irs++)
+	if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+	  IRIns *iro = &T->ir[T->ir[irs->op1].op2];
+	  uint8_t *p = (uint8_t *)cd;
+	  CTSize szs;
+	  lua_assert(irs->o == IR_XSTORE && T->ir[irs->op1].o == IR_ADD);
+	  lua_assert(iro->o == IR_KINT || iro->o == IR_KINT64);
+	  if (irt_is64(irs->t)) szs = 8;
+	  else if (irt_isi8(irs->t) || irt_isu8(irs->t)) szs = 1;
+	  else if (irt_isi16(irs->t) || irt_isu16(irs->t)) szs = 2;
+	  else szs = 4;
+	  if (LJ_64 && iro->o == IR_KINT64)
+	    p += (int64_t)ir_k64(iro)->u64;
+	  else
+	    p += iro->i;
+	  lua_assert(p >= (uint8_t *)cdataptr(cd) &&
+		     p + szs <= (uint8_t *)cdataptr(cd) + sz);
+	  if (LJ_32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) {
+	    lua_assert(szs == 4);
+	    snap_restoredata(T, ex, snapno, rfilt, (irs+1)->op2, LJ_LE?p+4:p,4);
+	    if (LJ_BE) p += 4;
+	  }
+	  snap_restoredata(T, ex, snapno, rfilt, irs->op2, p, szs);
+	}
+    }
+  } else
+#endif
+  {
+    IRIns *irs, *irlast;
+    GCtab *t = ir->o == IR_TNEW ? lj_tab_new(J->L, ir->op1, ir->op2) :
+				  lj_tab_dup(J->L, ir_ktab(&T->ir[ir->op1]));
+    settabV(J->L, o, t);
+    irlast = &T->ir[T->snap[snapno].ref];
+    for (irs = ir+1; irs < irlast; irs++)
+      if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+	IRIns *irk = &T->ir[irs->op1];
+	TValue tmp, *val;
+	lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
+		   irs->o == IR_FSTORE);
+	if (irk->o == IR_FREF) {
+	  lua_assert(irk->op2 == IRFL_TAB_META);
+	  snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, &tmp);
+	  /* NOBARRIER: The table is new (marked white). */
+	  setgcref(t->metatable, obj2gco(tabV(&tmp)));
+	} else {
+	  irk = &T->ir[irk->op2];
+	  if (irk->o == IR_KSLOT) irk = &T->ir[irk->op1];
+	  lj_ir_kvalue(J->L, &tmp, irk);
+	  val = lj_tab_set(J->L, t, &tmp);
+	  /* NOBARRIER: The table is new (marked white). */
+	  snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, val);
+	  if (LJ_SOFTFP && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) {
+	    snap_restoreval(J, T, ex, snapno, rfilt, (irs+1)->op2, &tmp);
+	    val->u32.hi = tmp.u32.lo;
+	  }
+	}
+      }
+  }
+}
+
+/* Restore interpreter state from exit state with the help of a snapshot. */
+const BCIns *lj_snap_restore(jit_State *J, void *exptr)
+{
+  ExitState *ex = (ExitState *)exptr;
+  SnapNo snapno = J->exitno;  /* For now, snapno == exitno. */
+  GCtrace *T = traceref(J, J->parent);
+  SnapShot *snap = &T->snap[snapno];
+  MSize n, nent = snap->nent;
+  SnapEntry *map = &T->snapmap[snap->mapofs];
+  SnapEntry *flinks = &T->snapmap[snap_nextofs(T, snap)-1];
+  int32_t ftsz0;
+  TValue *frame;
+  BloomFilter rfilt = snap_renamefilter(T, snapno);
+  const BCIns *pc = snap_pc(map[nent]);
+  lua_State *L = J->L;
+
+  /* Set interpreter PC to the next PC to get correct error messages. */
+  setcframe_pc(cframe_raw(L->cframe), pc+1);
+
+  /* Make sure the stack is big enough for the slots from the snapshot. */
+  if (LJ_UNLIKELY(L->base + snap->topslot >= tvref(L->maxstack))) {
+    L->top = curr_topL(L);
+    lj_state_growstack(L, snap->topslot - curr_proto(L)->framesize);
+  }
+
+  /* Fill stack slots with data from the registers and spill slots. */
+  frame = L->base-1;
+  ftsz0 = frame_ftsz(frame);  /* Preserve link to previous frame in slot #0. */
+  for (n = 0; n < nent; n++) {
+    SnapEntry sn = map[n];
+    if (!(sn & SNAP_NORESTORE)) {
+      TValue *o = &frame[snap_slot(sn)];
+      IRRef ref = snap_ref(sn);
+      IRIns *ir = &T->ir[ref];
+      if (ir->r == RID_SUNK) {
+	MSize j;
+	for (j = 0; j < n; j++)
+	  if (snap_ref(map[j]) == ref) {  /* De-duplicate sunk allocations. */
+	    copyTV(L, o, &frame[snap_slot(map[j])]);
+	    goto dupslot;
+	  }
+	snap_unsink(J, T, ex, snapno, rfilt, ir, o);
+      dupslot:
+	continue;
+      }
+      snap_restoreval(J, T, ex, snapno, rfilt, ref, o);
+      if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && tvisint(o)) {
+	TValue tmp;
+	snap_restoreval(J, T, ex, snapno, rfilt, ref+1, &tmp);
+	o->u32.hi = tmp.u32.lo;
+      } else if ((sn & (SNAP_CONT|SNAP_FRAME))) {
+	/* Overwrite tag with frame link. */
+	o->fr.tp.ftsz = snap_slot(sn) != 0 ? (int32_t)*flinks-- : ftsz0;
+	L->base = o+1;
+      }
+    }
+  }
+  lua_assert(map + nent == flinks);
+
+  /* Compute current stack top. */
+  switch (bc_op(*pc)) {
+  default:
+    if (bc_op(*pc) < BC_FUNCF) {
+      L->top = curr_topL(L);
+      break;
+    }
+    /* fallthrough */
+  case BC_CALLM: case BC_CALLMT: case BC_RETM: case BC_TSETM:
+    L->top = frame + snap->nslots;
+    break;
+  }
+  return pc;
+}
+
+#undef IR
+#undef emitir_raw
+#undef emitir
+
+#endif

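An aside on the Bloom filter built by snap_renamefilter() above: a single
machine word with one bit per key gives an exact "definitely not renamed"
answer and only a probabilistic "maybe renamed" answer, which is why the
comment calls it suitable for negative filtering. A minimal standalone
sketch of the idea (values and macros are illustrative, not LuaJIT API):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t BloomFilter;

/* One-hash Bloom filter keyed on the low 6 bits of the value. */
#define bloombit(x)     ((BloomFilter)1 << ((x) & 63))
#define bloomset(b, x)  ((b) |= bloombit(x))
#define bloomtest(b, x) ((b) & bloombit(x))

int main(void)
{
  BloomFilter rfilt = 0;
  bloomset(rfilt, 105);  /* Pretend IR refs 105 and 170 were renamed. */
  bloomset(rfilt, 170);
  /* A clear bit proves absence, so the linear scan over IR_RENAME
  ** instructions (snap_renameref) can be skipped entirely. */
  printf("ref 300: %s\n", bloomtest(rfilt, 300) ? "maybe" : "no");  /* no */
  /* A set bit can be a false positive (169 & 63 == 105 & 63), which
  ** is why callers still confirm with snap_renameref(). */
  printf("ref 169: %s\n", bloomtest(rfilt, 169) ? "maybe" : "no");  /* maybe */
  return 0;
}
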
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_snap.h
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_snap.h b/lib/luajit/src/lj_snap.h
new file mode 100644
index 0000000..9a125be
--- /dev/null
+++ b/lib/luajit/src/lj_snap.h
@@ -0,0 +1,34 @@
+/*
+** Snapshot handling.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_SNAP_H
+#define _LJ_SNAP_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+LJ_FUNC void lj_snap_add(jit_State *J);
+LJ_FUNC void lj_snap_purge(jit_State *J);
+LJ_FUNC void lj_snap_shrink(jit_State *J);
+LJ_FUNC IRIns *lj_snap_regspmap(GCtrace *T, SnapNo snapno, IRIns *ir);
+LJ_FUNC void lj_snap_replay(jit_State *J, GCtrace *T);
+LJ_FUNC const BCIns *lj_snap_restore(jit_State *J, void *exptr);
+LJ_FUNC void lj_snap_grow_buf_(jit_State *J, MSize need);
+LJ_FUNC void lj_snap_grow_map_(jit_State *J, MSize need);
+
+static LJ_AINLINE void lj_snap_grow_buf(jit_State *J, MSize need)
+{
+  if (LJ_UNLIKELY(need > J->sizesnap)) lj_snap_grow_buf_(J, need);
+}
+
+static LJ_AINLINE void lj_snap_grow_map(jit_State *J, MSize need)
+{
+  if (LJ_UNLIKELY(need > J->sizesnapmap)) lj_snap_grow_map_(J, need);
+}
+
+#endif
+
+#endif

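The two LJ_AINLINE wrappers above show a common fast/slow-path split: the
inline size check costs one compare in the hot path, and the out-of-line
lj_snap_grow_*_ functions do the (roughly doubling) reallocation. A generic
sketch of the same pattern, with invented names and simplified growth
policy; real LuaJIT raises a Lua error instead of aborting:

#include <stdint.h>
#include <stdlib.h>

typedef struct Vec { uint32_t *data; size_t size; } Vec;

/* Cold path: grow with amortized doubling, like lj_snap_grow_map_(). */
static void vec_grow_(Vec *v, size_t need)
{
  size_t sz = 2*v->size;         /* Double the current capacity... */
  if (sz < 64) sz = 64;          /* ...with a sane minimum... */
  if (sz < need) sz = need;      /* ...but never less than requested. */
  v->data = (uint32_t *)realloc(v->data, sz*sizeof(uint32_t));
  if (v->data == NULL) abort();  /* Sketch only; see note above. */
  v->size = sz;
}

/* Hot path: one compare in the common case. */
static inline void vec_grow(Vec *v, size_t need)
{
  if (need > v->size) vec_grow_(v, need);
}
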
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_state.c
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_state.c b/lib/luajit/src/lj_state.c
new file mode 100644
index 0000000..e654afa
--- /dev/null
+++ b/lib/luajit/src/lj_state.c
@@ -0,0 +1,287 @@
+/*
+** State and stack handling.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_state_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#include "lj_trace.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_lex.h"
+#include "lj_alloc.h"
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Stack sizes. */
+#define LJ_STACK_MIN	LUA_MINSTACK	/* Min. stack size. */
+#define LJ_STACK_MAX	LUAI_MAXSTACK	/* Max. stack size. */
+#define LJ_STACK_START	(2*LJ_STACK_MIN)	/* Starting stack size. */
+#define LJ_STACK_MAXEX	(LJ_STACK_MAX + 1 + LJ_STACK_EXTRA)
+
+/* Explanation of LJ_STACK_EXTRA:
+**
+** Calls to metamethods store their arguments beyond the current top
+** without checking for the stack limit. This avoids stack resizes which
+** would invalidate passed TValue pointers. The stack check is performed
+** later by the function header. This can safely resize the stack or raise
+** an error. Thus we need some extra slots beyond the current stack limit.
+**
+** Most metamethods need 4 slots above top (cont, mobj, arg1, arg2) plus
+** one extra slot if mobj is not a function. Only lj_meta_tset needs 5
+** slots above top, but then mobj is always a function. So we can get by
+** with 5 extra slots.
+*/
+
+/* Resize stack slots and adjust pointers in state. */
+static void resizestack(lua_State *L, MSize n)
+{
+  TValue *st, *oldst = tvref(L->stack);
+  ptrdiff_t delta;
+  MSize oldsize = L->stacksize;
+  MSize realsize = n + 1 + LJ_STACK_EXTRA;
+  GCobj *up;
+  lua_assert((MSize)(tvref(L->maxstack)-oldst)==L->stacksize-LJ_STACK_EXTRA-1);
+  st = (TValue *)lj_mem_realloc(L, tvref(L->stack),
+				(MSize)(L->stacksize*sizeof(TValue)),
+				(MSize)(realsize*sizeof(TValue)));
+  setmref(L->stack, st);
+  delta = (char *)st - (char *)oldst;
+  setmref(L->maxstack, st + n);
+  while (oldsize < realsize)  /* Clear new slots. */
+    setnilV(st + oldsize++);
+  L->stacksize = realsize;
+  L->base = (TValue *)((char *)L->base + delta);
+  L->top = (TValue *)((char *)L->top + delta);
+  for (up = gcref(L->openupval); up != NULL; up = gcnext(up))
+    setmref(gco2uv(up)->v, (TValue *)((char *)uvval(gco2uv(up)) + delta));
+  if (obj2gco(L) == gcref(G(L)->jit_L))
+    setmref(G(L)->jit_base, mref(G(L)->jit_base, char) + delta);
+}
+
+/* Relimit stack after error, in case the limit was overdrawn. */
+void lj_state_relimitstack(lua_State *L)
+{
+  if (L->stacksize > LJ_STACK_MAXEX && L->top-tvref(L->stack) < LJ_STACK_MAX-1)
+    resizestack(L, LJ_STACK_MAX);
+}
+
+/* Try to shrink the stack (called from GC). */
+void lj_state_shrinkstack(lua_State *L, MSize used)
+{
+  if (L->stacksize > LJ_STACK_MAXEX)
+    return;  /* Avoid stack shrinking while handling stack overflow. */
+  if (4*used < L->stacksize &&
+      2*(LJ_STACK_START+LJ_STACK_EXTRA) < L->stacksize &&
+      obj2gco(L) != gcref(G(L)->jit_L))  /* Don't shrink stack of live trace. */
+    resizestack(L, L->stacksize >> 1);
+}
+
+/* Try to grow stack. */
+void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need)
+{
+  MSize n;
+  if (L->stacksize > LJ_STACK_MAXEX)  /* Overflow while handling overflow? */
+    lj_err_throw(L, LUA_ERRERR);
+  n = L->stacksize + need;
+  if (n > LJ_STACK_MAX) {
+    n += 2*LUA_MINSTACK;
+  } else if (n < 2*L->stacksize) {
+    n = 2*L->stacksize;
+    if (n >= LJ_STACK_MAX)
+      n = LJ_STACK_MAX;
+  }
+  resizestack(L, n);
+  if (L->stacksize > LJ_STACK_MAXEX)
+    lj_err_msg(L, LJ_ERR_STKOV);
+}
+
+void LJ_FASTCALL lj_state_growstack1(lua_State *L)
+{
+  lj_state_growstack(L, 1);
+}
+
+/* Allocate basic stack for new state. */
+static void stack_init(lua_State *L1, lua_State *L)
+{
+  TValue *stend, *st = lj_mem_newvec(L, LJ_STACK_START+LJ_STACK_EXTRA, TValue);
+  setmref(L1->stack, st);
+  L1->stacksize = LJ_STACK_START + LJ_STACK_EXTRA;
+  stend = st + L1->stacksize;
+  setmref(L1->maxstack, stend - LJ_STACK_EXTRA - 1);
+  L1->base = L1->top = st+1;
+  setthreadV(L1, st, L1);  /* Needed for curr_funcisL() on empty stack. */
+  while (st < stend)  /* Clear new slots. */
+    setnilV(st++);
+}
+
+/* -- State handling ------------------------------------------------------ */
+
+/* Open parts that may cause memory-allocation errors. */
+static TValue *cpluaopen(lua_State *L, lua_CFunction dummy, void *ud)
+{
+  global_State *g = G(L);
+  UNUSED(dummy);
+  UNUSED(ud);
+  stack_init(L, L);
+  /* NOBARRIER: State initialization, all objects are white. */
+  setgcref(L->env, obj2gco(lj_tab_new(L, 0, LJ_MIN_GLOBAL)));
+  settabV(L, registry(L), lj_tab_new(L, 0, LJ_MIN_REGISTRY));
+  lj_str_resize(L, LJ_MIN_STRTAB-1);
+  lj_meta_init(L);
+  lj_lex_init(L);
+  fixstring(lj_err_str(L, LJ_ERR_ERRMEM));  /* Preallocate memory error msg. */
+  g->gc.threshold = 4*g->gc.total;
+  lj_trace_initstate(g);
+  return NULL;
+}
+
+static void close_state(lua_State *L)
+{
+  global_State *g = G(L);
+  lj_func_closeuv(L, tvref(L->stack));
+  lj_gc_freeall(g);
+  lua_assert(gcref(g->gc.root) == obj2gco(L));
+  lua_assert(g->strnum == 0);
+  lj_trace_freestate(g);
+#if LJ_HASFFI
+  lj_ctype_freestate(g);
+#endif
+  lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef);
+  lj_str_freebuf(g, &g->tmpbuf);
+  lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue);
+  lua_assert(g->gc.total == sizeof(GG_State));
+#ifndef LUAJIT_USE_SYSMALLOC
+  if (g->allocf == lj_alloc_f)
+    lj_alloc_destroy(g->allocd);
+  else
+#endif
+    g->allocf(g->allocd, G2GG(g), sizeof(GG_State), 0);
+}
+
+#if LJ_64 && !(defined(LUAJIT_USE_VALGRIND) && defined(LUAJIT_USE_SYSMALLOC))
+lua_State *lj_state_newstate(lua_Alloc f, void *ud)
+#else
+LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud)
+#endif
+{
+  GG_State *GG = (GG_State *)f(ud, NULL, 0, sizeof(GG_State));
+  lua_State *L = &GG->L;
+  global_State *g = &GG->g;
+  if (GG == NULL || !checkptr32(GG)) return NULL;
+  memset(GG, 0, sizeof(GG_State));
+  L->gct = ~LJ_TTHREAD;
+  L->marked = LJ_GC_WHITE0 | LJ_GC_FIXED | LJ_GC_SFIXED;  /* Prevent free. */
+  L->dummy_ffid = FF_C;
+  setmref(L->glref, g);
+  g->gc.currentwhite = LJ_GC_WHITE0 | LJ_GC_FIXED;
+  g->strempty.marked = LJ_GC_WHITE0;
+  g->strempty.gct = ~LJ_TSTR;
+  g->allocf = f;
+  g->allocd = ud;
+  setgcref(g->mainthref, obj2gco(L));
+  setgcref(g->uvhead.prev, obj2gco(&g->uvhead));
+  setgcref(g->uvhead.next, obj2gco(&g->uvhead));
+  g->strmask = ~(MSize)0;
+  setnilV(registry(L));
+  setnilV(&g->nilnode.val);
+  setnilV(&g->nilnode.key);
+  setmref(g->nilnode.freetop, &g->nilnode);
+  lj_str_initbuf(&g->tmpbuf);
+  g->gc.state = GCSpause;
+  setgcref(g->gc.root, obj2gco(L));
+  setmref(g->gc.sweep, &g->gc.root);
+  g->gc.total = sizeof(GG_State);
+  g->gc.pause = LUAI_GCPAUSE;
+  g->gc.stepmul = LUAI_GCMUL;
+  lj_dispatch_init((GG_State *)L);
+  L->status = LUA_ERRERR+1;  /* Avoid touching the stack upon memory error. */
+  if (lj_vm_cpcall(L, NULL, NULL, cpluaopen) != 0) {
+    /* Memory allocation error: free partial state. */
+    close_state(L);
+    return NULL;
+  }
+  L->status = 0;
+  return L;
+}
+
+static TValue *cpfinalize(lua_State *L, lua_CFunction dummy, void *ud)
+{
+  UNUSED(dummy);
+  UNUSED(ud);
+  lj_gc_finalize_cdata(L);
+  lj_gc_finalize_udata(L);
+  /* Frame pop omitted. */
+  return NULL;
+}
+
+LUA_API void lua_close(lua_State *L)
+{
+  global_State *g = G(L);
+  int i;
+  L = mainthread(g);  /* Only the main thread can be closed. */
+  lj_func_closeuv(L, tvref(L->stack));
+  lj_gc_separateudata(g, 1);  /* Separate udata which have GC metamethods. */
+#if LJ_HASJIT
+  G2J(g)->flags &= ~JIT_F_ON;
+  G2J(g)->state = LJ_TRACE_IDLE;
+  lj_dispatch_update(g);
+#endif
+  for (i = 0;;) {
+    hook_enter(g);
+    L->status = 0;
+    L->cframe = NULL;
+    L->base = L->top = tvref(L->stack) + 1;
+    if (lj_vm_cpcall(L, NULL, NULL, cpfinalize) == 0) {
+      if (++i >= 10) break;
+      lj_gc_separateudata(g, 1);  /* Separate udata again. */
+      if (gcref(g->gc.mmudata) == NULL)  /* Until nothing is left to do. */
+	break;
+    }
+  }
+  close_state(L);
+}
+
+lua_State *lj_state_new(lua_State *L)
+{
+  lua_State *L1 = lj_mem_newobj(L, lua_State);
+  L1->gct = ~LJ_TTHREAD;
+  L1->dummy_ffid = FF_C;
+  L1->status = 0;
+  L1->stacksize = 0;
+  setmref(L1->stack, NULL);
+  L1->cframe = NULL;
+  /* NOBARRIER: The lua_State is new (marked white). */
+  setgcrefnull(L1->openupval);
+  setmrefr(L1->glref, L->glref);
+  setgcrefr(L1->env, L->env);
+  stack_init(L1, L);  /* init stack */
+  lua_assert(iswhite(obj2gco(L1)));
+  return L1;
+}
+
+void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L)
+{
+  lua_assert(L != mainthread(g));
+  lj_func_closeuv(L, tvref(L->stack));
+  lua_assert(gcref(L->openupval) == NULL);
+  lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue);
+  lj_mem_freet(g, L);
+}
+

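resizestack() above moves the whole slot array with a realloc and then
rebases every interior pointer (L->base, L->top, all open upvalues, and
jit_base) by the byte distance the block moved. A reduced sketch of that
fixup idiom, with types and names invented for illustration:

#include <stddef.h>
#include <stdlib.h>

typedef struct Stack { double *st, *base, *top; size_t size; } Stack;

/* Resize the slot array, then shift interior pointers by the byte
** delta, mirroring the pointer arithmetic in resizestack(). */
static void stack_resize(Stack *s, size_t n)
{
  char *oldst = (char *)s->st;
  double *st = (double *)realloc(s->st, n*sizeof(double));
  ptrdiff_t delta;
  if (st == NULL) abort();  /* Real code raises LUA_ERRMEM. */
  delta = (char *)st - oldst;
  s->st = st;
  s->size = n;
  s->base = (double *)((char *)s->base + delta);
  s->top = (double *)((char *)s->top + delta);
}
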
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_state.h
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_state.h b/lib/luajit/src/lj_state.h
new file mode 100644
index 0000000..687889a
--- /dev/null
+++ b/lib/luajit/src/lj_state.h
@@ -0,0 +1,35 @@
+/*
+** State and stack handling.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_STATE_H
+#define _LJ_STATE_H
+
+#include "lj_obj.h"
+
+#define incr_top(L) \
+  (++L->top >= tvref(L->maxstack) && (lj_state_growstack1(L), 0))
+
+#define savestack(L, p)		((char *)(p) - mref(L->stack, char))
+#define restorestack(L, n)	((TValue *)(mref(L->stack, char) + (n)))
+
+LJ_FUNC void lj_state_relimitstack(lua_State *L);
+LJ_FUNC void lj_state_shrinkstack(lua_State *L, MSize used);
+LJ_FUNCA void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need);
+LJ_FUNC void LJ_FASTCALL lj_state_growstack1(lua_State *L);
+
+static LJ_AINLINE void lj_state_checkstack(lua_State *L, MSize need)
+{
+  if ((mref(L->maxstack, char) - (char *)L->top) <=
+      (ptrdiff_t)need*(ptrdiff_t)sizeof(TValue))
+    lj_state_growstack(L, need);
+}
+
+LJ_FUNC lua_State *lj_state_new(lua_State *L);
+LJ_FUNC void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L);
+#if LJ_64
+LJ_FUNC lua_State *lj_state_newstate(lua_Alloc f, void *ud);
+#endif
+
+#endif

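savestack()/restorestack() above exist because raw TValue pointers are
invalidated whenever lj_state_growstack() reallocates and moves the stack.
A usage sketch against the declarations in this header (the wrapper
function itself is hypothetical):

#include "lj_obj.h"
#include "lj_state.h"

static void example_reserve(lua_State *L)
{
  ptrdiff_t save = savestack(L, L->top);  /* Pointer -> stable byte offset. */
  lj_state_checkstack(L, 64);      /* May realloc and move the whole stack. */
  L->top = restorestack(L, save);  /* Offset -> valid pointer again. */
}
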
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_str.c
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_str.c b/lib/luajit/src/lj_str.c
new file mode 100644
index 0000000..ca60bcc
--- /dev/null
+++ b/lib/luajit/src/lj_str.c
@@ -0,0 +1,339 @@
+/*
+** String handling.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <stdio.h>
+
+#define lj_str_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_state.h"
+#include "lj_char.h"
+
+/* -- String interning ---------------------------------------------------- */
+
+/* Ordered compare of strings. Assumes string data is 4-byte aligned. */
+int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b)
+{
+  MSize i, n = a->len > b->len ? b->len : a->len;
+  for (i = 0; i < n; i += 4) {
+    /* Note: innocuous access up to end of string + 3. */
+    uint32_t va = *(const uint32_t *)(strdata(a)+i);
+    uint32_t vb = *(const uint32_t *)(strdata(b)+i);
+    if (va != vb) {
+#if LJ_LE
+      va = lj_bswap(va); vb = lj_bswap(vb);
+#endif
+      i -= n;
+      if ((int32_t)i >= -3) {
+	va >>= 32+(i<<3); vb >>= 32+(i<<3);
+	if (va == vb) break;
+      }
+      return va < vb ? -1 : 1;
+    }
+  }
+  return (int32_t)(a->len - b->len);
+}
+
+/* Fast string data comparison. Caveat: unaligned access to 1st string! */
+static LJ_AINLINE int str_fastcmp(const char *a, const char *b, MSize len)
+{
+  MSize i = 0;
+  lua_assert(len > 0);
+  lua_assert((((uintptr_t)a+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4);
+  do {  /* Note: innocuous access up to end of string + 3. */
+    uint32_t v = lj_getu32(a+i) ^ *(const uint32_t *)(b+i);
+    if (v) {
+      i -= len;
+#if LJ_LE
+      return (int32_t)i >= -3 ? (v << (32+(i<<3))) : 1;
+#else
+      return (int32_t)i >= -3 ? (v >> (32+(i<<3))) : 1;
+#endif
+    }
+    i += 4;
+  } while (i < len);
+  return 0;
+}
+
+/* Resize the string hash table (grow and shrink). */
+void lj_str_resize(lua_State *L, MSize newmask)
+{
+  global_State *g = G(L);
+  GCRef *newhash;
+  MSize i;
+  if (g->gc.state == GCSsweepstring || newmask >= LJ_MAX_STRTAB-1)
+    return;  /* No resizing during GC traversal or if already too big. */
+  newhash = lj_mem_newvec(L, newmask+1, GCRef);
+  memset(newhash, 0, (newmask+1)*sizeof(GCRef));
+  for (i = g->strmask; i != ~(MSize)0; i--) {  /* Rehash old table. */
+    GCobj *p = gcref(g->strhash[i]);
+    while (p) {  /* Follow each hash chain and reinsert all strings. */
+      MSize h = gco2str(p)->hash & newmask;
+      GCobj *next = gcnext(p);
+      /* NOBARRIER: The string table is a GC root. */
+      setgcrefr(p->gch.nextgc, newhash[h]);
+      setgcref(newhash[h], p);
+      p = next;
+    }
+  }
+  lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef);
+  g->strmask = newmask;
+  g->strhash = newhash;
+}
+
+/* Intern a string and return string object. */
+GCstr *lj_str_new(lua_State *L, const char *str, size_t lenx)
+{
+  global_State *g;
+  GCstr *s;
+  GCobj *o;
+  MSize len = (MSize)lenx;
+  MSize a, b, h = len;
+  if (lenx >= LJ_MAX_STR)
+    lj_err_msg(L, LJ_ERR_STROV);
+  g = G(L);
+  /* Compute string hash. Constants taken from lookup3 hash by Bob Jenkins. */
+  if (len >= 4) {  /* Caveat: unaligned access! */
+    a = lj_getu32(str);
+    h ^= lj_getu32(str+len-4);
+    b = lj_getu32(str+(len>>1)-2);
+    h ^= b; h -= lj_rol(b, 14);
+    b += lj_getu32(str+(len>>2)-1);
+  } else if (len > 0) {
+    a = *(const uint8_t *)str;
+    h ^= *(const uint8_t *)(str+len-1);
+    b = *(const uint8_t *)(str+(len>>1));
+    h ^= b; h -= lj_rol(b, 14);
+  } else {
+    return &g->strempty;
+  }
+  a ^= h; a -= lj_rol(h, 11);
+  b ^= a; b -= lj_rol(a, 25);
+  h ^= b; h -= lj_rol(b, 16);
+  /* Check if the string has already been interned. */
+  o = gcref(g->strhash[h & g->strmask]);
+  if (LJ_LIKELY((((uintptr_t)str+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4)) {
+    while (o != NULL) {
+      GCstr *sx = gco2str(o);
+      if (sx->len == len && str_fastcmp(str, strdata(sx), len) == 0) {
+	/* Resurrect if dead. Can only happen with fixstring() (keywords). */
+	if (isdead(g, o)) flipwhite(o);
+	return sx;  /* Return existing string. */
+      }
+      o = gcnext(o);
+    }
+  } else {  /* Slow path: end of string is too close to a page boundary. */
+    while (o != NULL) {
+      GCstr *sx = gco2str(o);
+      if (sx->len == len && memcmp(str, strdata(sx), len) == 0) {
+	/* Resurrect if dead. Can only happen with fixstring() (keywords). */
+	if (isdead(g, o)) flipwhite(o);
+	return sx;  /* Return existing string. */
+      }
+      o = gcnext(o);
+    }
+  }
+  /* Nope, create a new string. */
+  s = lj_mem_newt(L, sizeof(GCstr)+len+1, GCstr);
+  newwhite(g, s);
+  s->gct = ~LJ_TSTR;
+  s->len = len;
+  s->hash = h;
+  s->reserved = 0;
+  memcpy(strdatawr(s), str, len);
+  strdatawr(s)[len] = '\0';  /* Zero-terminate string. */
+  /* Add it to string hash table. */
+  h &= g->strmask;
+  s->nextgc = g->strhash[h];
+  /* NOBARRIER: The string table is a GC root. */
+  setgcref(g->strhash[h], obj2gco(s));
+  if (g->strnum++ > g->strmask)  /* Allow a 100% load factor. */
+    lj_str_resize(L, (g->strmask<<1)+1);  /* Grow string table. */
+  return s;  /* Return newly interned string. */
+}
+
+void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s)
+{
+  g->strnum--;
+  lj_mem_free(g, s, sizestring(s));
+}
+
+/* -- Type conversions ---------------------------------------------------- */
+
+/* Print number to buffer. Canonicalizes non-finite values. */
+size_t LJ_FASTCALL lj_str_bufnum(char *s, cTValue *o)
+{
+  if (LJ_LIKELY((o->u32.hi << 1) < 0xffe00000)) {  /* Finite? */
+    lua_Number n = o->n;
+#if __BIONIC__
+    if (tvismzero(o)) { s[0] = '-'; s[1] = '0'; return 2; }
+#endif
+    return (size_t)lua_number2str(s, n);
+  } else if (((o->u32.hi & 0x000fffff) | o->u32.lo) != 0) {
+    s[0] = 'n'; s[1] = 'a'; s[2] = 'n'; return 3;
+  } else if ((o->u32.hi & 0x80000000) == 0) {
+    s[0] = 'i'; s[1] = 'n'; s[2] = 'f'; return 3;
+  } else {
+    s[0] = '-'; s[1] = 'i'; s[2] = 'n'; s[3] = 'f'; return 4;
+  }
+}
+
+/* Print integer to buffer. Returns pointer to start. */
+char * LJ_FASTCALL lj_str_bufint(char *p, int32_t k)
+{
+  uint32_t u = (uint32_t)(k < 0 ? -k : k);
+  p += 1+10;
+  do { *--p = (char)('0' + u % 10); } while (u /= 10);
+  if (k < 0) *--p = '-';
+  return p;
+}
+
+/* Convert number to string. */
+GCstr * LJ_FASTCALL lj_str_fromnum(lua_State *L, const lua_Number *np)
+{
+  char buf[LJ_STR_NUMBUF];
+  size_t len = lj_str_bufnum(buf, (TValue *)np);
+  return lj_str_new(L, buf, len);
+}
+
+/* Convert integer to string. */
+GCstr * LJ_FASTCALL lj_str_fromint(lua_State *L, int32_t k)
+{
+  char s[1+10];
+  char *p = lj_str_bufint(s, k);
+  return lj_str_new(L, p, (size_t)(s+sizeof(s)-p));
+}
+
+GCstr * LJ_FASTCALL lj_str_fromnumber(lua_State *L, cTValue *o)
+{
+  return tvisint(o) ? lj_str_fromint(L, intV(o)) : lj_str_fromnum(L, &o->n);
+}
+
+/* -- String formatting --------------------------------------------------- */
+
+static void addstr(lua_State *L, SBuf *sb, const char *str, MSize len)
+{
+  char *p;
+  MSize i;
+  if (sb->n + len > sb->sz) {
+    MSize sz = sb->sz * 2;
+    while (sb->n + len > sz) sz = sz * 2;
+    lj_str_resizebuf(L, sb, sz);
+  }
+  p = sb->buf + sb->n;
+  sb->n += len;
+  for (i = 0; i < len; i++) p[i] = str[i];
+}
+
+static void addchar(lua_State *L, SBuf *sb, int c)
+{
+  if (sb->n + 1 > sb->sz) {
+    MSize sz = sb->sz * 2;
+    lj_str_resizebuf(L, sb, sz);
+  }
+  sb->buf[sb->n++] = (char)c;
+}
+
+/* Push formatted message as a string object to Lua stack. va_list variant. */
+const char *lj_str_pushvf(lua_State *L, const char *fmt, va_list argp)
+{
+  SBuf *sb = &G(L)->tmpbuf;
+  lj_str_needbuf(L, sb, (MSize)strlen(fmt));
+  lj_str_resetbuf(sb);
+  for (;;) {
+    const char *e = strchr(fmt, '%');
+    if (e == NULL) break;
+    addstr(L, sb, fmt, (MSize)(e-fmt));
+    /* This function only handles %s, %c, %d, %f and %p formats. */
+    switch (e[1]) {
+    case 's': {
+      const char *s = va_arg(argp, char *);
+      if (s == NULL) s = "(null)";
+      addstr(L, sb, s, (MSize)strlen(s));
+      break;
+      }
+    case 'c':
+      addchar(L, sb, va_arg(argp, int));
+      break;
+    case 'd': {
+      char buf[LJ_STR_INTBUF];
+      char *p = lj_str_bufint(buf, va_arg(argp, int32_t));
+      addstr(L, sb, p, (MSize)(buf+LJ_STR_INTBUF-p));
+      break;
+      }
+    case 'f': {
+      char buf[LJ_STR_NUMBUF];
+      TValue tv;
+      MSize len;
+      tv.n = (lua_Number)(va_arg(argp, LUAI_UACNUMBER));
+      len = (MSize)lj_str_bufnum(buf, &tv);
+      addstr(L, sb, buf, len);
+      break;
+      }
+    case 'p': {
+#define FMTP_CHARS	(2*sizeof(ptrdiff_t))
+      char buf[2+FMTP_CHARS];
+      ptrdiff_t p = (ptrdiff_t)(va_arg(argp, void *));
+      ptrdiff_t i, lasti = 2+FMTP_CHARS;
+      if (p == 0) {
+	addstr(L, sb, "NULL", 4);
+	break;
+      }
+#if LJ_64
+      /* Shorten output for 64 bit pointers. */
+      lasti = 2+2*4+((p >> 32) ? 2+2*(lj_fls((uint32_t)(p >> 32))>>3) : 0);
+#endif
+      buf[0] = '0';
+      buf[1] = 'x';
+      for (i = lasti-1; i >= 2; i--, p >>= 4)
+	buf[i] = "0123456789abcdef"[(p & 15)];
+      addstr(L, sb, buf, (MSize)lasti);
+      break;
+      }
+    case '%':
+      addchar(L, sb, '%');
+      break;
+    default:
+      addchar(L, sb, '%');
+      addchar(L, sb, e[1]);
+      break;
+    }
+    fmt = e+2;
+  }
+  addstr(L, sb, fmt, (MSize)strlen(fmt));
+  setstrV(L, L->top, lj_str_new(L, sb->buf, sb->n));
+  incr_top(L);
+  return strVdata(L->top - 1);
+}
+
+/* Push formatted message as a string object to Lua stack. Vararg variant. */
+const char *lj_str_pushf(lua_State *L, const char *fmt, ...)
+{
+  const char *msg;
+  va_list argp;
+  va_start(argp, fmt);
+  msg = lj_str_pushvf(L, fmt, argp);
+  va_end(argp);
+  return msg;
+}
+
+/* -- Buffer handling ----------------------------------------------------- */
+
+char *lj_str_needbuf(lua_State *L, SBuf *sb, MSize sz)
+{
+  if (sz > sb->sz) {
+    if (sz < LJ_MIN_SBUF) sz = LJ_MIN_SBUF;
+    lj_str_resizebuf(L, sb, sz);
+  }
+  return sb->buf;
+}
+

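Because lj_str_new() above interns every string, two GCstr pointers are
equal exactly when their contents are equal, which is what makes string
keys cheap throughout the VM. A usage sketch assuming an initialized
lua_State (lj_str_newlit is the literal-string macro from lj_str.h below):

#include "lj_obj.h"
#include "lj_str.h"

static int example_intern(lua_State *L)
{
  GCstr *a = lj_str_newlit(L, "status");
  GCstr *b = lj_str_new(L, "status", 6);
  return a == b;  /* Always 1: same bytes, same interned object. */
}
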
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_str.h
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_str.h b/lib/luajit/src/lj_str.h
new file mode 100644
index 0000000..9969705
--- /dev/null
+++ b/lib/luajit/src/lj_str.h
@@ -0,0 +1,50 @@
+/*
+** String handling.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_STR_H
+#define _LJ_STR_H
+
+#include <stdarg.h>
+
+#include "lj_obj.h"
+
+/* String interning. */
+LJ_FUNC int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b);
+LJ_FUNC void lj_str_resize(lua_State *L, MSize newmask);
+LJ_FUNCA GCstr *lj_str_new(lua_State *L, const char *str, size_t len);
+LJ_FUNC void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s);
+
+#define lj_str_newz(L, s)	(lj_str_new(L, s, strlen(s)))
+#define lj_str_newlit(L, s)	(lj_str_new(L, "" s, sizeof(s)-1))
+
+/* Type conversions. */
+LJ_FUNC size_t LJ_FASTCALL lj_str_bufnum(char *s, cTValue *o);
+LJ_FUNC char * LJ_FASTCALL lj_str_bufint(char *p, int32_t k);
+LJ_FUNCA GCstr * LJ_FASTCALL lj_str_fromnum(lua_State *L, const lua_Number *np);
+LJ_FUNC GCstr * LJ_FASTCALL lj_str_fromint(lua_State *L, int32_t k);
+LJ_FUNCA GCstr * LJ_FASTCALL lj_str_fromnumber(lua_State *L, cTValue *o);
+
+#define LJ_STR_INTBUF		(1+10)
+#define LJ_STR_NUMBUF		LUAI_MAXNUMBER2STR
+
+/* String formatting. */
+LJ_FUNC const char *lj_str_pushvf(lua_State *L, const char *fmt, va_list argp);
+LJ_FUNC const char *lj_str_pushf(lua_State *L, const char *fmt, ...)
+#if defined(__GNUC__)
+  __attribute__ ((format (printf, 2, 3)))
+#endif
+  ;
+
+/* Resizable string buffers. Struct definition in lj_obj.h. */
+LJ_FUNC char *lj_str_needbuf(lua_State *L, SBuf *sb, MSize sz);
+
+#define lj_str_initbuf(sb)	((sb)->buf = NULL, (sb)->sz = 0)
+#define lj_str_resetbuf(sb)	((sb)->n = 0)
+#define lj_str_resizebuf(L, sb, size) \
+  ((sb)->buf = (char *)lj_mem_realloc(L, (sb)->buf, (sb)->sz, (size)), \
+   (sb)->sz = (size))
+#define lj_str_freebuf(g, sb)	lj_mem_free(g, (void *)(sb)->buf, (sb)->sz)
+
+#endif

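A short lifecycle sketch for the SBuf macros declared above (the helper is
hypothetical and error handling is omitted; lj_gc.h is included for the
lj_mem_free() used by lj_str_freebuf):

#include "lj_obj.h"
#include "lj_gc.h"
#include "lj_str.h"

static void example_sbuf(lua_State *L)
{
  SBuf sb;
  char *p;
  lj_str_initbuf(&sb);             /* buf = NULL, sz = 0. */
  p = lj_str_needbuf(L, &sb, 16);  /* Grows to at least LJ_MIN_SBUF. */
  p[0] = 'x';                      /* Append one byte by hand... */
  sb.n = 1;                        /* ...and account for it. */
  lj_str_resetbuf(&sb);            /* n = 0; capacity is kept for reuse. */
  lj_str_freebuf(G(L), &sb);       /* Hand the memory back to the heap. */
}
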
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_strscan.c
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_strscan.c b/lib/luajit/src/lj_strscan.c
new file mode 100644
index 0000000..568f647
--- /dev/null
+++ b/lib/luajit/src/lj_strscan.c
@@ -0,0 +1,498 @@
+/*
+** String scanning.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include <math.h>
+
+#define lj_strscan_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_char.h"
+#include "lj_strscan.h"
+
+/* -- Scanning numbers ---------------------------------------------------- */
+
+/*
+** Rationale for the builtin string to number conversion library:
+**
+** It removes a dependency on libc's strtod(), which is a true portability
+** nightmare. Mainly due to the plethora of supported OS and toolchain
+** combinations. Sadly, the various implementations
+** a) are often buggy, incomplete (no hex floats) and/or imprecise,
+** b) sometimes crash or hang on certain inputs,
+** c) return non-standard NaNs that need to be filtered out, and
+** d) fail if the locale-specific decimal separator is not a dot,
+**    which can only be fixed with atrocious workarounds.
+**
+** Also, most of the strtod() implementations are hopelessly bloated,
+** which is not just an I-cache hog, but a problem for static linkage
+** on embedded systems, too.
+**
+** OTOH the builtin conversion function is very compact. Even though it
+** does a lot more, like parsing long longs, octal or imaginary numbers
+** and returning the result in different formats:
+** a) It needs less than 3 KB (!) of machine code (on x64 with -Os),
+** b) it doesn't perform any dynamic allocation and,
+** c) it needs only around 600 bytes of stack space.
+**
+** The builtin function is faster than strtod() for typical inputs, e.g.
+** "123", "1.5" or "1e6". Arguably, it's slower for very large exponents,
+** which are not very common (this could be fixed, if needed).
+**
+** And most importantly, the builtin function is equally precise on all
+** platforms. It correctly converts and rounds any input to a double.
+** If this is not the case, please send a bug report -- but PLEASE verify
+** that the implementation you're comparing to is not the culprit!
+**
+** The implementation quickly pre-scans the entire string first and
+** handles simple integers on-the-fly. Otherwise, it dispatches to the
+** base-specific parser. Hex and octal is straightforward.
+**
+** Decimal to binary conversion uses a fixed-length circular buffer in
+** base 100. Some simple cases are handled directly. For other cases, the
+** number in the buffer is up-scaled or down-scaled until the integer part
+** is in the proper range. Then the integer part is rounded and converted
+** to a double which is finally rescaled to the result. Denormals need
+** special treatment to prevent incorrect 'double rounding'.
+*/
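+
+/*
+** Illustrative call sketch (an assumption about typical use, not code from
+** this file): scanning a hex-float literal with STRSCAN_OPT_TONUM.
+**
+**   TValue o;
+**   StrScanFmt fmt = lj_strscan_scan((const uint8_t *)"0x1.8p1", &o,
+**                                    STRSCAN_OPT_TONUM);
+**   // fmt == STRSCAN_NUM and o.n == 3.0 (0x1.8 = 1.5, scaled by 2^1).
+*/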
+
+/* Definitions for circular decimal digit buffer (base 100 = 2 digits/byte). */
+#define STRSCAN_DIG	1024
+#define STRSCAN_MAXDIG	800		/* 772 + extra are sufficient. */
+#define STRSCAN_DDIG	(STRSCAN_DIG/2)
+#define STRSCAN_DMASK	(STRSCAN_DDIG-1)
+
+/* Helpers for circular buffer. */
+#define DNEXT(a)	(((a)+1) & STRSCAN_DMASK)
+#define DPREV(a)	(((a)-1) & STRSCAN_DMASK)
+#define DLEN(lo, hi)	((int32_t)(((lo)-(hi)) & STRSCAN_DMASK))
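+
+/*
+** Example layout (illustrative): the decimal digits of 123456 occupy three
+** base-100 bytes, xi[] = {12, 34, 56}; with hi = 0 and lo = 3 delimiting
+** them in the circular buffer, DLEN(lo, hi) == 3.
+*/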
+
+#define casecmp(c, k)	(((c) | 0x20) == (k))
+
+/* Final conversion to double. */
+static void strscan_double(uint64_t x, TValue *o, int32_t ex2, int32_t neg)
+{
+  double n;
+
+  /* Avoid double rounding for denormals. */
+  if (LJ_UNLIKELY(ex2 <= -1075 && x != 0)) {
+    /* NYI: all of this generates way too much code on 32 bit CPUs. */
+#if defined(__GNUC__) && LJ_64
+    int32_t b = (int32_t)(__builtin_clzll(x)^63);
+#else
+    int32_t b = (x>>32) ? 32+(int32_t)lj_fls((uint32_t)(x>>32)) :
+			  (int32_t)lj_fls((uint32_t)x);
+#endif
+    if ((int32_t)b + ex2 <= -1023 && (int32_t)b + ex2 >= -1075) {
+      uint64_t rb = (uint64_t)1 << (-1075-ex2);
+      if ((x & rb) && ((x & (rb+rb+rb-1)))) x += rb+rb;
+      x = (x & ~(rb+rb-1));
+    }
+  }
+
+  /* Convert to double using a signed int64_t conversion, then rescale. */
+  lua_assert((int64_t)x >= 0);
+  n = (double)(int64_t)x;
+  if (neg) n = -n;
+  if (ex2) n = ldexp(n, ex2);
+  o->n = n;
+}
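+
+/*
+** Worked trace of the denormal pre-rounding above (illustrative): for
+** x = 3, ex2 = -1075 (i.e. 1.5 * 2^-1074), b = 1 and rb = 1. The round
+** bit and a sticky bit are set, so x += 2 and x &= ~1 yield x = 4, and
+** ldexp(4.0, -1075) produces 2^-1073, the correctly rounded result.
+*/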
+
+/* Parse hexadecimal number. */
+static StrScanFmt strscan_hex(const uint8_t *p, TValue *o,
+			      StrScanFmt fmt, uint32_t opt,
+			      int32_t ex2, int32_t neg, uint32_t dig)
+{
+  uint64_t x = 0;
+  uint32_t i;
+
+  /* Scan hex digits. */
+  for (i = dig > 16 ? 16 : dig; i; i--, p++) {
+    uint32_t d = (*p != '.' ? *p : *++p); if (d > '9') d += 9;
+    x = (x << 4) + (d & 15);
+  }
+
+  /* Summarize rounding-effect of excess digits. */
+  for (i = 16; i < dig; i++, p++)
+    x |= ((*p != '.' ? *p : *++p) != '0'), ex2 += 4;
+
+  /* Format-specific handling. */
+  switch (fmt) {
+  case STRSCAN_INT:
+    if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg) {
+      o->i = neg ? -(int32_t)x : (int32_t)x;
+      return STRSCAN_INT;  /* Fast path for 32 bit integers. */
+    }
+    if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; break; }
+    /* fallthrough */
+  case STRSCAN_U32:
+    if (dig > 8) return STRSCAN_ERROR;
+    o->i = neg ? -(int32_t)x : (int32_t)x;
+    return STRSCAN_U32;
+  case STRSCAN_I64:
+  case STRSCAN_U64:
+    if (dig > 16) return STRSCAN_ERROR;
+    o->u64 = neg ? (uint64_t)-(int64_t)x : x;
+    return fmt;
+  default:
+    break;
+  }
+
+  /* Reduce range then convert to double. */
+  if ((x & U64x(c0000000,0000000))) { x = (x >> 2) | (x & 3); ex2 += 2; }
+  strscan_double(x, o, ex2, neg);
+  return fmt;
+}
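+
+/*
+** Example (illustrative): with STRSCAN_OPT_C, "0xdeadbeef" reaches this
+** parser as fmt = STRSCAN_INT, dig = 8, x = 0xdeadbeef. Since x does not
+** fit a signed 32 bit integer, it falls through to the U32 case and
+** returns STRSCAN_U32 with o->i = (int32_t)0xdeadbeefu.
+*/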
+
+/* Parse octal number. */
+static StrScanFmt strscan_oct(const uint8_t *p, TValue *o,
+			      StrScanFmt fmt, int32_t neg, uint32_t dig)
+{
+  uint64_t x = 0;
+
+  /* Scan octal digits. */
+  if (dig > 22 || (dig == 22 && *p > '1')) return STRSCAN_ERROR;
+  while (dig-- > 0) {
+    if (!(*p >= '0' && *p <= '7')) return STRSCAN_ERROR;
+    x = (x << 3) + (*p++ & 7);
+  }
+
+  /* Format-specific handling. */
+  switch (fmt) {
+  case STRSCAN_INT:
+    if (x >= 0x80000000u+neg) fmt = STRSCAN_U32;
+    /* fallthrough */
+  case STRSCAN_U32:
+    if ((x >> 32)) return STRSCAN_ERROR;
+    o->i = neg ? -(int32_t)x : (int32_t)x;
+    break;
+  default:
+  case STRSCAN_I64:
+  case STRSCAN_U64:
+    o->u64 = neg ? (uint64_t)-(int64_t)x : x;
+    break;
+  }
+  return fmt;
+}
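+
+/*
+** Example (illustrative): with STRSCAN_OPT_C, the input "0755" arrives
+** here with dig = 3 and p pointing at "755"; x = 7*64 + 5*8 + 5 = 493,
+** which fits int32_t, so the result is STRSCAN_INT with o->i = 493.
+*/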
+
+/* Parse decimal number. */
+static StrScanFmt strscan_dec(const uint8_t *p, TValue *o,
+			      StrScanFmt fmt, uint32_t opt,
+			      int32_t ex10, int32_t neg, uint32_t dig)
+{
+  uint8_t xi[STRSCAN_DDIG], *xip = xi;
+
+  if (dig) {
+    uint32_t i = dig;
+    if (i > STRSCAN_MAXDIG) {
+      ex10 += (int32_t)(i - STRSCAN_MAXDIG);
+      i = STRSCAN_MAXDIG;
+    }
+    /* Scan unaligned leading digit. */
+    if (((ex10^i) & 1))
+      *xip++ = ((*p != '.' ? *p : *++p) & 15), i--, p++;
+    /* Scan aligned double-digits. */
+    for ( ; i > 1; i -= 2) {
+      uint32_t d = 10 * ((*p != '.' ? *p : *++p) & 15); p++;
+      *xip++ = d + ((*p != '.' ? *p : *++p) & 15); p++;
+    }
+    /* Scan and realign trailing digit. */
+    if (i) *xip++ = 10 * ((*p != '.' ? *p : *++p) & 15), ex10--, dig++, p++;
+
+    /* Summarize rounding-effect of excess digits. */
+    if (dig > STRSCAN_MAXDIG) {
+      do {
+	if ((*p != '.' ? *p : *++p) != '0') { xip[-1] |= 1; break; }
+	p++;
+      } while (--dig > STRSCAN_MAXDIG);
+      dig = STRSCAN_MAXDIG;
+    } else {  /* Simplify exponent. */
+      while (ex10 > 0 && dig <= 18) *xip++ = 0, ex10 -= 2, dig += 2;
+    }
+  } else {  /* Only got zeros. */
+    ex10 = 0;
+    xi[0] = 0;
+  }
+
+  /* Fast path for numbers in integer format (but handles e.g. 1e6, too). */
+  if (dig <= 20 && ex10 == 0) {
+    uint8_t *xis;
+    uint64_t x = xi[0];
+    double n;
+    for (xis = xi+1; xis < xip; xis++) x = x * 100 + *xis;
+    if (!(dig == 20 && (xi[0] > 18 || (int64_t)x >= 0))) {  /* No overflow? */
+      /* Format-specific handling. */
+      switch (fmt) {
+      case STRSCAN_INT:
+	if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg) {
+	  o->i = neg ? -(int32_t)x : (int32_t)x;
+	  return STRSCAN_INT;  /* Fast path for 32 bit integers. */
+	}
+	if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; goto plainnumber; }
+	/* fallthrough */
+      case STRSCAN_U32:
+	if ((x >> 32) != 0) return STRSCAN_ERROR;
+	o->i = neg ? -(int32_t)x : (int32_t)x;
+	return STRSCAN_U32;
+      case STRSCAN_I64:
+      case STRSCAN_U64:
+	o->u64 = neg ? (uint64_t)-(int64_t)x : x;
+	return fmt;
+      default:
+      plainnumber:  /* Fast path for plain numbers < 2^63. */
+	if ((int64_t)x < 0) break;
+	n = (double)(int64_t)x;
+	if (neg) n = -n;
+	o->n = n;
+	return fmt;
+      }
+    }
+  }
+
+  /* Slow non-integer path. */
+  if (fmt == STRSCAN_INT) {
+    if ((opt & STRSCAN_OPT_C)) return STRSCAN_ERROR;
+    fmt = STRSCAN_NUM;
+  } else if (fmt > STRSCAN_INT) {
+    return STRSCAN_ERROR;
+  }
+  {
+    uint32_t hi = 0, lo = (uint32_t)(xip-xi);
+    int32_t ex2 = 0, idig = (int32_t)lo + (ex10 >> 1);
+
+    lua_assert(lo > 0 && (ex10 & 1) == 0);
+
+    /* Handle simple overflow/underflow. */
+    if (idig > 310/2) { if (neg) setminfV(o); else setpinfV(o); return fmt; }
+    else if (idig < -326/2) { o->n = neg ? -0.0 : 0.0; return fmt; }
+
+    /* Scale up until we have at least 17 or 18 integer part digits. */
+    while (idig < 9 && idig < DLEN(lo, hi)) {
+      uint32_t i, cy = 0;
+      ex2 -= 6;
+      for (i = DPREV(lo); ; i = DPREV(i)) {
+	uint32_t d = (xi[i] << 6) + cy;
+	cy = (((d >> 2) * 5243) >> 17); d = d - cy * 100;  /* Div/mod 100. */
+	xi[i] = (uint8_t)d;
+	if (i == hi) break;
+	if (d == 0 && i == DPREV(lo)) lo = i;
+      }
+      if (cy) {
+	hi = DPREV(hi);
+	if (xi[DPREV(lo)] == 0) lo = DPREV(lo);
+	else if (hi == lo) { lo = DPREV(lo); xi[DPREV(lo)] |= xi[lo]; }
+	xi[hi] = (uint8_t)cy; idig++;
+      }
+    }
+
+    /* Scale down until no more than 17 or 18 integer part digits remain. */
+    while (idig > 9) {
+      uint32_t i = hi, cy = 0;
+      ex2 += 6;
+      do {
+	cy += xi[i];
+	xi[i] = (cy >> 6);
+	cy = 100 * (cy & 0x3f);
+	if (xi[i] == 0 && i == hi) hi = DNEXT(hi), idig--;
+	i = DNEXT(i);
+      } while (i != lo);
+      while (cy) {
+	if (hi == lo) { xi[DPREV(lo)] |= 1; break; }
+	xi[lo] = (cy >> 6); lo = DNEXT(lo);
+	cy = 100 * (cy & 0x3f);
+      }
+    }
+
+    /* Collect integer part digits and convert to rescaled double. */
+    {
+      uint64_t x = xi[hi];
+      uint32_t i;
+      for (i = DNEXT(hi); --idig > 0 && i != lo; i = DNEXT(i))
+	x = x * 100 + xi[i];
+      if (i == lo) {
+	while (--idig >= 0) x = x * 100;
+      } else {  /* Gather round bit from remaining digits. */
+	x <<= 1; ex2--;
+	do {
+	  if (xi[i]) { x |= 1; break; }
+	  i = DNEXT(i);
+	} while (i != lo);
+      }
+      strscan_double(x, o, ex2, neg);
+    }
+  }
+  return fmt;
+}
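+
+/*
+** Example (illustrative): "1e400" reaches this parser with dig = 1 and
+** ex10 = 400. The integer-part digit estimate idig exceeds 310/2, so the
+** simple overflow check fires and the result is +inf (fmt STRSCAN_NUM).
+*/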
+
+/* Scan string containing a number. Returns the format; stores the value in o. */
+StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt)
+{
+  int32_t neg = 0;
+
+  /* Remove leading space, parse sign and non-numbers. */
+  if (LJ_UNLIKELY(!lj_char_isdigit(*p))) {
+    while (lj_char_isspace(*p)) p++;
+    if (*p == '+' || *p == '-') neg = (*p++ == '-');
+    if (LJ_UNLIKELY(*p >= 'A')) {  /* Parse "inf", "infinity" or "nan". */
+      TValue tmp;
+      setnanV(&tmp);
+      if (casecmp(p[0],'i') && casecmp(p[1],'n') && casecmp(p[2],'f')) {
+	if (neg) setminfV(&tmp); else setpinfV(&tmp);
+	p += 3;
+	if (casecmp(p[0],'i') && casecmp(p[1],'n') && casecmp(p[2],'i') &&
+	    casecmp(p[3],'t') && casecmp(p[4],'y')) p += 5;
+      } else if (casecmp(p[0],'n') && casecmp(p[1],'a') && casecmp(p[2],'n')) {
+	p += 3;
+      }
+      while (lj_char_isspace(*p)) p++;
+      if (*p) return STRSCAN_ERROR;
+      o->u64 = tmp.u64;
+      return STRSCAN_NUM;
+    }
+  }
+
+  /* Parse regular number. */
+  {
+    StrScanFmt fmt = STRSCAN_INT;
+    int cmask = LJ_CHAR_DIGIT;
+    int base = (opt & STRSCAN_OPT_C) && *p == '0' ? 0 : 10;
+    const uint8_t *sp, *dp = NULL;
+    uint32_t dig = 0, hasdig = 0, x = 0;
+    int32_t ex = 0;
+
+    /* Determine base and skip leading zeros. */
+    if (LJ_UNLIKELY(*p <= '0')) {
+      if (*p == '0' && casecmp(p[1], 'x'))
+	base = 16, cmask = LJ_CHAR_XDIGIT, p += 2;
+      for ( ; ; p++) {
+	if (*p == '0') {
+	  hasdig = 1;
+	} else if (*p == '.') {
+	  if (dp) return STRSCAN_ERROR;
+	  dp = p;
+	} else {
+	  break;
+	}
+      }
+    }
+
+    /* Preliminary digit and decimal point scan. */
+    for (sp = p; ; p++) {
+      if (LJ_LIKELY(lj_char_isa(*p, cmask))) {
+	x = x * 10 + (*p & 15);  /* For fast path below. */
+	dig++;
+      } else if (*p == '.') {
+	if (dp) return STRSCAN_ERROR;
+	dp = p;
+      } else {
+	break;
+      }
+    }
+    if (!(hasdig | dig)) return STRSCAN_ERROR;
+
+    /* Handle decimal point. */
+    if (dp) {
+      fmt = STRSCAN_NUM;
+      if (dig) {
+	ex = (int32_t)(dp-(p-1)); dp = p-1;
+	while (ex < 0 && *dp-- == '0') ex++, dig--;  /* Skip trailing zeros. */
+	if (base == 16) ex *= 4;
+      }
+    }
+
+    /* Parse exponent. */
+    if (casecmp(*p, (uint32_t)(base == 16 ? 'p' : 'e'))) {
+      uint32_t xx;
+      int negx = 0;
+      fmt = STRSCAN_NUM; p++;
+      if (*p == '+' || *p == '-') negx = (*p++ == '-');
+      if (!lj_char_isdigit(*p)) return STRSCAN_ERROR;
+      xx = (*p++ & 15);
+      while (lj_char_isdigit(*p)) {
+	if (xx < 65536) xx = xx * 10 + (*p & 15);
+	p++;
+      }
+      ex += negx ? -(int32_t)xx : (int32_t)xx;
+    }
+
+    /* Parse suffix. */
+    if (*p) {
+      /* I (IMAG), U (U32), LL (I64), ULL/LLU (U64), L (long), UL/LU (ulong). */
+      /* NYI: f (float). Not needed until cp_number() handles non-integers. */
+      if (casecmp(*p, 'i')) {
+	if (!(opt & STRSCAN_OPT_IMAG)) return STRSCAN_ERROR;
+	p++; fmt = STRSCAN_IMAG;
+      } else if (fmt == STRSCAN_INT) {
+	if (casecmp(*p, 'u')) p++, fmt = STRSCAN_U32;
+	if (casecmp(*p, 'l')) {
+	  p++;
+	  if (casecmp(*p, 'l')) p++, fmt += STRSCAN_I64 - STRSCAN_INT;
+	  else if (!(opt & STRSCAN_OPT_C)) return STRSCAN_ERROR;
+	  else if (sizeof(long) == 8) fmt += STRSCAN_I64 - STRSCAN_INT;
+	}
+	if (casecmp(*p, 'u') && (fmt == STRSCAN_INT || fmt == STRSCAN_I64))
+	  p++, fmt += STRSCAN_U32 - STRSCAN_INT;
+	if ((fmt == STRSCAN_U32 && !(opt & STRSCAN_OPT_C)) ||
+	    (fmt >= STRSCAN_I64 && !(opt & STRSCAN_OPT_LL)))
+	  return STRSCAN_ERROR;
+      }
+      while (lj_char_isspace(*p)) p++;
+      if (*p) return STRSCAN_ERROR;
+    }
+
+    /* Fast path for decimal 32 bit integers. */
+    if (fmt == STRSCAN_INT && base == 10 &&
+	(dig < 10 || (dig == 10 && *sp <= '2' && x < 0x80000000u+neg))) {
+      int32_t y = neg ? -(int32_t)x : (int32_t)x;
+      if ((opt & STRSCAN_OPT_TONUM)) {
+	o->n = (double)y;
+	return STRSCAN_NUM;
+      } else {
+	o->i = y;
+	return STRSCAN_INT;
+      }
+    }
+
+    /* Dispatch to base-specific parser. */
+    if (base == 0 && !(fmt == STRSCAN_NUM || fmt == STRSCAN_IMAG))
+      return strscan_oct(sp, o, fmt, neg, dig);
+    if (base == 16)
+      fmt = strscan_hex(sp, o, fmt, opt, ex, neg, dig);
+    else
+      fmt = strscan_dec(sp, o, fmt, opt, ex, neg, dig);
+
+    /* Try to convert number to integer, if requested. */
+    if (fmt == STRSCAN_NUM && (opt & STRSCAN_OPT_TOINT)) {
+      double n = o->n;
+      int32_t i = lj_num2int(n);
+      if (n == (lua_Number)i) { o->i = i; return STRSCAN_INT; }
+    }
+    return fmt;
+  }
+}
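+
+/*
+** Suffix example (illustrative): with opt = STRSCAN_OPT_C|STRSCAN_OPT_LL,
+** scanning "10ULL" first parses the digits (fmt = STRSCAN_INT), then the
+** suffix path upgrades 'U' to STRSCAN_U32 and 'LL' adds I64-INT, giving
+** STRSCAN_U64 with o->u64 = 10.
+*/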
+
+int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o)
+{
+  StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), o,
+				   STRSCAN_OPT_TONUM);
+  lua_assert(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM);
+  return (fmt != STRSCAN_ERROR);
+}
+
+#if LJ_DUALNUM
+int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o)
+{
+  StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), o,
+				   STRSCAN_OPT_TOINT);
+  lua_assert(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM || fmt == STRSCAN_INT);
+  if (fmt == STRSCAN_INT) setitype(o, LJ_TISNUM);
+  return (fmt != STRSCAN_ERROR);
+}
+#endif
+
+#undef DNEXT
+#undef DPREV
+#undef DLEN
+

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_strscan.h
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_strscan.h b/lib/luajit/src/lj_strscan.h
new file mode 100644
index 0000000..7760689
--- /dev/null
+++ b/lib/luajit/src/lj_strscan.h
@@ -0,0 +1,39 @@
+/*
+** String scanning.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_STRSCAN_H
+#define _LJ_STRSCAN_H
+
+#include "lj_obj.h"
+
+/* Options for accepted/returned formats. */
+#define STRSCAN_OPT_TOINT	0x01  /* Convert to int32_t, if possible. */
+#define STRSCAN_OPT_TONUM	0x02  /* Always convert to double. */
+#define STRSCAN_OPT_IMAG	0x04  /* Accept imaginary 'i' suffix. */
+#define STRSCAN_OPT_LL		0x08  /* Accept 'LL'/'ULL' suffixes. */
+#define STRSCAN_OPT_C		0x10  /* C syntax: octal, U/L suffixes. */
+
+/* Returned format. */
+typedef enum {
+  STRSCAN_ERROR,
+  STRSCAN_NUM, STRSCAN_IMAG,
+  STRSCAN_INT, STRSCAN_U32, STRSCAN_I64, STRSCAN_U64,
+} StrScanFmt;
+
+LJ_FUNC StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt);
+LJ_FUNC int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o);
+#if LJ_DUALNUM
+LJ_FUNC int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o);
+#else
+#define lj_strscan_number(s, o)		lj_strscan_num((s), (o))
+#endif
+
+/* Check for number or convert string to number/int in-place (!). */
+static LJ_AINLINE int lj_strscan_numberobj(TValue *o)
+{
+  return tvisnumber(o) || (tvisstr(o) && lj_strscan_number(strV(o), o));
+}
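+
+/*
+** Usage sketch (illustrative, not part of the upstream header): coerce a
+** TValue that may hold a numeric string to a number in-place.
+**
+**   TValue *o = ...;                  // number, or a GCstr like "42"
+**   if (!lj_strscan_numberobj(o)) {
+**     ...                             // not coercible: report an error
+**   }
+**   // o now holds a number (an integer under LJ_DUALNUM when possible)
+*/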
+
+#endif