You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@trafficserver.apache.org by zw...@apache.org on 2015/07/29 01:39:54 UTC
[16/62] [abbrv] trafficserver git commit: TS-3783 TS-3030 Add luajit
v2.0.4 as a subtree
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_ir.h
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_ir.h b/lib/luajit/src/lj_ir.h
new file mode 100644
index 0000000..8126482
--- /dev/null
+++ b/lib/luajit/src/lj_ir.h
@@ -0,0 +1,551 @@
+/*
+** SSA IR (Intermediate Representation) format.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_IR_H
+#define _LJ_IR_H
+
+#include "lj_obj.h"
+
+/* -- IR instructions ----------------------------------------------------- */
+
+/* IR instruction definition. Order matters, see below. ORDER IR */
+#define IRDEF(_) \
+ /* Guarded assertions. */ \
+ /* Must be properly aligned to flip opposites (^1) and (un)ordered (^4). */ \
+ _(LT, N , ref, ref) \
+ _(GE, N , ref, ref) \
+ _(LE, N , ref, ref) \
+ _(GT, N , ref, ref) \
+ \
+ _(ULT, N , ref, ref) \
+ _(UGE, N , ref, ref) \
+ _(ULE, N , ref, ref) \
+ _(UGT, N , ref, ref) \
+ \
+ _(EQ, C , ref, ref) \
+ _(NE, C , ref, ref) \
+ \
+ _(ABC, N , ref, ref) \
+ _(RETF, S , ref, ref) \
+ \
+ /* Miscellaneous ops. */ \
+ _(NOP, N , ___, ___) \
+ _(BASE, N , lit, lit) \
+ _(PVAL, N , lit, ___) \
+ _(GCSTEP, S , ___, ___) \
+ _(HIOP, S , ref, ref) \
+ _(LOOP, S , ___, ___) \
+ _(USE, S , ref, ___) \
+ _(PHI, S , ref, ref) \
+ _(RENAME, S , ref, lit) \
+ \
+ /* Constants. */ \
+ _(KPRI, N , ___, ___) \
+ _(KINT, N , cst, ___) \
+ _(KGC, N , cst, ___) \
+ _(KPTR, N , cst, ___) \
+ _(KKPTR, N , cst, ___) \
+ _(KNULL, N , cst, ___) \
+ _(KNUM, N , cst, ___) \
+ _(KINT64, N , cst, ___) \
+ _(KSLOT, N , ref, lit) \
+ \
+ /* Bit ops. */ \
+ _(BNOT, N , ref, ___) \
+ _(BSWAP, N , ref, ___) \
+ _(BAND, C , ref, ref) \
+ _(BOR, C , ref, ref) \
+ _(BXOR, C , ref, ref) \
+ _(BSHL, N , ref, ref) \
+ _(BSHR, N , ref, ref) \
+ _(BSAR, N , ref, ref) \
+ _(BROL, N , ref, ref) \
+ _(BROR, N , ref, ref) \
+ \
+ /* Arithmetic ops. ORDER ARITH */ \
+ _(ADD, C , ref, ref) \
+ _(SUB, N , ref, ref) \
+ _(MUL, C , ref, ref) \
+ _(DIV, N , ref, ref) \
+ _(MOD, N , ref, ref) \
+ _(POW, N , ref, ref) \
+ _(NEG, N , ref, ref) \
+ \
+ _(ABS, N , ref, ref) \
+ _(ATAN2, N , ref, ref) \
+ _(LDEXP, N , ref, ref) \
+ _(MIN, C , ref, ref) \
+ _(MAX, C , ref, ref) \
+ _(FPMATH, N , ref, lit) \
+ \
+ /* Overflow-checking arithmetic ops. */ \
+ _(ADDOV, CW, ref, ref) \
+ _(SUBOV, NW, ref, ref) \
+ _(MULOV, CW, ref, ref) \
+ \
+ /* Memory ops. A = array, H = hash, U = upvalue, F = field, S = stack. */ \
+ \
+ /* Memory references. */ \
+ _(AREF, R , ref, ref) \
+ _(HREFK, R , ref, ref) \
+ _(HREF, L , ref, ref) \
+ _(NEWREF, S , ref, ref) \
+ _(UREFO, LW, ref, lit) \
+ _(UREFC, LW, ref, lit) \
+ _(FREF, R , ref, lit) \
+ _(STRREF, N , ref, ref) \
+ \
+ /* Loads and Stores. These must be in the same order. */ \
+ _(ALOAD, L , ref, ___) \
+ _(HLOAD, L , ref, ___) \
+ _(ULOAD, L , ref, ___) \
+ _(FLOAD, L , ref, lit) \
+ _(XLOAD, L , ref, lit) \
+ _(SLOAD, L , lit, lit) \
+ _(VLOAD, L , ref, ___) \
+ \
+ _(ASTORE, S , ref, ref) \
+ _(HSTORE, S , ref, ref) \
+ _(USTORE, S , ref, ref) \
+ _(FSTORE, S , ref, ref) \
+ _(XSTORE, S , ref, ref) \
+ \
+ /* Allocations. */ \
+ _(SNEW, N , ref, ref) /* CSE is ok, not marked as A. */ \
+ _(XSNEW, A , ref, ref) \
+ _(TNEW, AW, lit, lit) \
+ _(TDUP, AW, ref, ___) \
+ _(CNEW, AW, ref, ref) \
+ _(CNEWI, NW, ref, ref) /* CSE is ok, not marked as A. */ \
+ \
+ /* Barriers. */ \
+ _(TBAR, S , ref, ___) \
+ _(OBAR, S , ref, ref) \
+ _(XBAR, S , ___, ___) \
+ \
+ /* Type conversions. */ \
+ _(CONV, NW, ref, lit) \
+ _(TOBIT, N , ref, ref) \
+ _(TOSTR, N , ref, ___) \
+ _(STRTO, N , ref, ___) \
+ \
+ /* Calls. */ \
+ _(CALLN, N , ref, lit) \
+ _(CALLL, L , ref, lit) \
+ _(CALLS, S , ref, lit) \
+ _(CALLXS, S , ref, ref) \
+ _(CARG, N , ref, ref) \
+ \
+ /* End of list. */
+
+/* IR opcodes (max. 256). */
+typedef enum {
+#define IRENUM(name, m, m1, m2) IR_##name,
+IRDEF(IRENUM)
+#undef IRENUM
+ IR__MAX
+} IROp;
+
+/* Stored opcode. */
+typedef uint8_t IROp1;
+
+LJ_STATIC_ASSERT(((int)IR_EQ^1) == (int)IR_NE);
+LJ_STATIC_ASSERT(((int)IR_LT^1) == (int)IR_GE);
+LJ_STATIC_ASSERT(((int)IR_LE^1) == (int)IR_GT);
+LJ_STATIC_ASSERT(((int)IR_LT^3) == (int)IR_GT);
+LJ_STATIC_ASSERT(((int)IR_LT^4) == (int)IR_ULT);
+
+/* Delta between xLOAD and xSTORE. */
+#define IRDELTA_L2S ((int)IR_ASTORE - (int)IR_ALOAD)
+
+LJ_STATIC_ASSERT((int)IR_HLOAD + IRDELTA_L2S == (int)IR_HSTORE);
+LJ_STATIC_ASSERT((int)IR_ULOAD + IRDELTA_L2S == (int)IR_USTORE);
+LJ_STATIC_ASSERT((int)IR_FLOAD + IRDELTA_L2S == (int)IR_FSTORE);
+LJ_STATIC_ASSERT((int)IR_XLOAD + IRDELTA_L2S == (int)IR_XSTORE);
+
+/* -- Named IR literals --------------------------------------------------- */
+
+/* FPMATH sub-functions. ORDER FPM. */
+#define IRFPMDEF(_) \
+ _(FLOOR) _(CEIL) _(TRUNC) /* Must be first and in this order. */ \
+ _(SQRT) _(EXP) _(EXP2) _(LOG) _(LOG2) _(LOG10) \
+ _(SIN) _(COS) _(TAN) \
+ _(OTHER)
+
+typedef enum {
+#define FPMENUM(name) IRFPM_##name,
+IRFPMDEF(FPMENUM)
+#undef FPMENUM
+ IRFPM__MAX
+} IRFPMathOp;
+
+/* FLOAD fields. */
+#define IRFLDEF(_) \
+ _(STR_LEN, offsetof(GCstr, len)) \
+ _(FUNC_ENV, offsetof(GCfunc, l.env)) \
+ _(FUNC_PC, offsetof(GCfunc, l.pc)) \
+ _(TAB_META, offsetof(GCtab, metatable)) \
+ _(TAB_ARRAY, offsetof(GCtab, array)) \
+ _(TAB_NODE, offsetof(GCtab, node)) \
+ _(TAB_ASIZE, offsetof(GCtab, asize)) \
+ _(TAB_HMASK, offsetof(GCtab, hmask)) \
+ _(TAB_NOMM, offsetof(GCtab, nomm)) \
+ _(UDATA_META, offsetof(GCudata, metatable)) \
+ _(UDATA_UDTYPE, offsetof(GCudata, udtype)) \
+ _(UDATA_FILE, sizeof(GCudata)) \
+ _(CDATA_CTYPEID, offsetof(GCcdata, ctypeid)) \
+ _(CDATA_PTR, sizeof(GCcdata)) \
+ _(CDATA_INT, sizeof(GCcdata)) \
+ _(CDATA_INT64, sizeof(GCcdata)) \
+ _(CDATA_INT64_4, sizeof(GCcdata) + 4)
+
+typedef enum {
+#define FLENUM(name, ofs) IRFL_##name,
+IRFLDEF(FLENUM)
+#undef FLENUM
+ IRFL__MAX
+} IRFieldID;
+
+/* SLOAD mode bits, stored in op2. */
+#define IRSLOAD_PARENT 0x01 /* Coalesce with parent trace. */
+#define IRSLOAD_FRAME 0x02 /* Load hiword of frame. */
+#define IRSLOAD_TYPECHECK 0x04 /* Needs type check. */
+#define IRSLOAD_CONVERT 0x08 /* Number to integer conversion. */
+#define IRSLOAD_READONLY 0x10 /* Read-only, omit slot store. */
+#define IRSLOAD_INHERIT 0x20 /* Inherited by exits/side traces. */
+
+/* XLOAD mode, stored in op2. */
+#define IRXLOAD_READONLY 1 /* Load from read-only data. */
+#define IRXLOAD_VOLATILE 2 /* Load from volatile data. */
+#define IRXLOAD_UNALIGNED 4 /* Unaligned load. */
+
+/* CONV mode, stored in op2. */
+#define IRCONV_SRCMASK 0x001f /* Source IRType. */
+#define IRCONV_DSTMASK 0x03e0 /* Dest. IRType (also in ir->t). */
+#define IRCONV_DSH 5
+#define IRCONV_NUM_INT ((IRT_NUM<<IRCONV_DSH)|IRT_INT)
+#define IRCONV_INT_NUM ((IRT_INT<<IRCONV_DSH)|IRT_NUM)
+#define IRCONV_TRUNC 0x0400 /* Truncate number to integer. */
+#define IRCONV_SEXT 0x0800 /* Sign-extend integer to integer. */
+#define IRCONV_MODEMASK 0x0fff
+#define IRCONV_CONVMASK 0xf000
+#define IRCONV_CSH 12
+/* Number to integer conversion mode. Ordered by strength of the checks. */
+#define IRCONV_TOBIT (0<<IRCONV_CSH) /* None. Cache only: TOBIT conv. */
+#define IRCONV_ANY (1<<IRCONV_CSH) /* Any FP number is ok. */
+#define IRCONV_INDEX (2<<IRCONV_CSH) /* Check + special backprop rules. */
+#define IRCONV_CHECK (3<<IRCONV_CSH) /* Number checked for integerness. */
+
+/* -- IR operands --------------------------------------------------------- */
+
+/* IR operand mode (2 bit). */
+typedef enum {
+ IRMref, /* IR reference. */
+ IRMlit, /* 16 bit unsigned literal. */
+ IRMcst, /* Constant literal: i, gcr or ptr. */
+ IRMnone /* Unused operand. */
+} IRMode;
+#define IRM___ IRMnone
+
+/* Mode bits: Commutative, {Normal/Ref, Alloc, Load, Store}, Non-weak guard. */
+#define IRM_C 0x10
+
+#define IRM_N 0x00
+#define IRM_R IRM_N
+#define IRM_A 0x20
+#define IRM_L 0x40
+#define IRM_S 0x60
+
+#define IRM_W 0x80
+
+#define IRM_NW (IRM_N|IRM_W)
+#define IRM_CW (IRM_C|IRM_W)
+#define IRM_AW (IRM_A|IRM_W)
+#define IRM_LW (IRM_L|IRM_W)
+
+#define irm_op1(m) ((IRMode)((m)&3))
+#define irm_op2(m) ((IRMode)(((m)>>2)&3))
+#define irm_iscomm(m) ((m) & IRM_C)
+#define irm_kind(m) ((m) & IRM_S)
+
+#define IRMODE(name, m, m1, m2) (((IRM##m1)|((IRM##m2)<<2)|(IRM_##m))^IRM_W),
+
+LJ_DATA const uint8_t lj_ir_mode[IR__MAX+1];
+
+/* -- IR instruction types ------------------------------------------------ */
+
+/* Map of itypes to non-negative numbers. ORDER LJ_T.
+** LJ_TUPVAL/LJ_TTRACE never appear in a TValue. Use these itypes for
+** IRT_P32 and IRT_P64, which never escape the IR.
+** The various integers are only used in the IR and can only escape to
+** a TValue after implicit or explicit conversion. Their types must be
+** contiguous and next to IRT_NUM (see the typerange macros below).
+*/
+#define IRTDEF(_) \
+ _(NIL, 4) _(FALSE, 4) _(TRUE, 4) _(LIGHTUD, LJ_64 ? 8 : 4) _(STR, 4) \
+ _(P32, 4) _(THREAD, 4) _(PROTO, 4) _(FUNC, 4) _(P64, 8) _(CDATA, 4) \
+ _(TAB, 4) _(UDATA, 4) \
+ _(FLOAT, 4) _(NUM, 8) _(I8, 1) _(U8, 1) _(I16, 2) _(U16, 2) \
+ _(INT, 4) _(U32, 4) _(I64, 8) _(U64, 8) \
+ _(SOFTFP, 4) /* There is room for 9 more types. */
+
+/* IR result type and flags (8 bit). */
+typedef enum {
+#define IRTENUM(name, size) IRT_##name,
+IRTDEF(IRTENUM)
+#undef IRTENUM
+ IRT__MAX,
+
+ /* Native pointer type and the corresponding integer type. */
+ IRT_PTR = LJ_64 ? IRT_P64 : IRT_P32,
+ IRT_INTP = LJ_64 ? IRT_I64 : IRT_INT,
+ IRT_UINTP = LJ_64 ? IRT_U64 : IRT_U32,
+
+ /* Additional flags. */
+ IRT_MARK = 0x20, /* Marker for misc. purposes. */
+ IRT_ISPHI = 0x40, /* Instruction is left or right PHI operand. */
+ IRT_GUARD = 0x80, /* Instruction is a guard. */
+
+ /* Masks. */
+ IRT_TYPE = 0x1f,
+ IRT_T = 0xff
+} IRType;
+
+#define irtype_ispri(irt) ((uint32_t)(irt) <= IRT_TRUE)
+
+/* Stored IRType. */
+typedef struct IRType1 { uint8_t irt; } IRType1;
+
+#define IRT(o, t) ((uint32_t)(((o)<<8) | (t)))
+#define IRTI(o) (IRT((o), IRT_INT))
+#define IRTN(o) (IRT((o), IRT_NUM))
+#define IRTG(o, t) (IRT((o), IRT_GUARD|(t)))
+#define IRTGI(o) (IRT((o), IRT_GUARD|IRT_INT))
+
+#define irt_t(t) ((IRType)(t).irt)
+#define irt_type(t) ((IRType)((t).irt & IRT_TYPE))
+#define irt_sametype(t1, t2) ((((t1).irt ^ (t2).irt) & IRT_TYPE) == 0)
+#define irt_typerange(t, first, last) \
+ ((uint32_t)((t).irt & IRT_TYPE) - (uint32_t)(first) <= (uint32_t)(last-first))
+
+#define irt_isnil(t) (irt_type(t) == IRT_NIL)
+#define irt_ispri(t) ((uint32_t)irt_type(t) <= IRT_TRUE)
+#define irt_islightud(t) (irt_type(t) == IRT_LIGHTUD)
+#define irt_isstr(t) (irt_type(t) == IRT_STR)
+#define irt_istab(t) (irt_type(t) == IRT_TAB)
+#define irt_iscdata(t) (irt_type(t) == IRT_CDATA)
+#define irt_isfloat(t) (irt_type(t) == IRT_FLOAT)
+#define irt_isnum(t) (irt_type(t) == IRT_NUM)
+#define irt_isint(t) (irt_type(t) == IRT_INT)
+#define irt_isi8(t) (irt_type(t) == IRT_I8)
+#define irt_isu8(t) (irt_type(t) == IRT_U8)
+#define irt_isi16(t) (irt_type(t) == IRT_I16)
+#define irt_isu16(t) (irt_type(t) == IRT_U16)
+#define irt_isu32(t) (irt_type(t) == IRT_U32)
+#define irt_isi64(t) (irt_type(t) == IRT_I64)
+#define irt_isu64(t) (irt_type(t) == IRT_U64)
+
+#define irt_isfp(t) (irt_isnum(t) || irt_isfloat(t))
+#define irt_isinteger(t) (irt_typerange((t), IRT_I8, IRT_INT))
+#define irt_isgcv(t) (irt_typerange((t), IRT_STR, IRT_UDATA))
+#define irt_isaddr(t) (irt_typerange((t), IRT_LIGHTUD, IRT_UDATA))
+#define irt_isint64(t) (irt_typerange((t), IRT_I64, IRT_U64))
+
+#if LJ_64
+#define IRT_IS64 \
+ ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64)|(1u<<IRT_P64)|(1u<<IRT_LIGHTUD))
+#else
+#define IRT_IS64 \
+ ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64))
+#endif
+
+#define irt_is64(t) ((IRT_IS64 >> irt_type(t)) & 1)
+#define irt_is64orfp(t) (((IRT_IS64|(1u<<IRT_FLOAT))>>irt_type(t)) & 1)
+
+#define irt_size(t) (lj_ir_type_size[irt_t((t))])
+
+LJ_DATA const uint8_t lj_ir_type_size[];
+
+static LJ_AINLINE IRType itype2irt(const TValue *tv)
+{
+ if (tvisint(tv))
+ return IRT_INT;
+ else if (tvisnum(tv))
+ return IRT_NUM;
+#if LJ_64
+ else if (tvislightud(tv))
+ return IRT_LIGHTUD;
+#endif
+ else
+ return (IRType)~itype(tv);
+}
+
+static LJ_AINLINE uint32_t irt_toitype_(IRType t)
+{
+ lua_assert(!LJ_64 || t != IRT_LIGHTUD);
+ if (LJ_DUALNUM && t > IRT_NUM) {
+ return LJ_TISNUM;
+ } else {
+ lua_assert(t <= IRT_NUM);
+ return ~(uint32_t)t;
+ }
+}
+
+#define irt_toitype(t) irt_toitype_(irt_type((t)))
+
+#define irt_isguard(t) ((t).irt & IRT_GUARD)
+#define irt_ismarked(t) ((t).irt & IRT_MARK)
+#define irt_setmark(t) ((t).irt |= IRT_MARK)
+#define irt_clearmark(t) ((t).irt &= ~IRT_MARK)
+#define irt_isphi(t) ((t).irt & IRT_ISPHI)
+#define irt_setphi(t) ((t).irt |= IRT_ISPHI)
+#define irt_clearphi(t) ((t).irt &= ~IRT_ISPHI)
+
+/* Stored combined IR opcode and type. */
+typedef uint16_t IROpT;
+
+/* -- IR references ------------------------------------------------------- */
+
+/* IR references. */
+typedef uint16_t IRRef1; /* One stored reference. */
+typedef uint32_t IRRef2; /* Two stored references. */
+typedef uint32_t IRRef; /* Used to pass around references. */
+
+/* Fixed references. */
+enum {
+ REF_BIAS = 0x8000,
+ REF_TRUE = REF_BIAS-3,
+ REF_FALSE = REF_BIAS-2,
+ REF_NIL = REF_BIAS-1, /* \--- Constants grow downwards. */
+ REF_BASE = REF_BIAS, /* /--- IR grows upwards. */
+ REF_FIRST = REF_BIAS+1,
+ REF_DROP = 0xffff
+};
+
+/* Note: IRMlit operands must be < REF_BIAS, too!
+** This allows for fast and uniform manipulation of all operands
+** without looking up the operand mode in lj_ir_mode:
+** - CSE calculates the maximum reference of two operands.
+** This must work with mixed reference/literal operands, too.
+** - DCE marking only checks for operand >= REF_BIAS.
+** - LOOP needs to substitute reference operands.
+** Constant references and literals must not be modified.
+*/
+
+#define IRREF2(lo, hi) ((IRRef2)(lo) | ((IRRef2)(hi) << 16))
+
+#define irref_isk(ref) ((ref) < REF_BIAS)
+
+/* Tagged IR references (32 bit).
+**
+** +-------+-------+---------------+
+** | irt | flags | ref |
+** +-------+-------+---------------+
+**
+** The tag holds a copy of the IRType and speeds up IR type checks.
+*/
+typedef uint32_t TRef;
+
+#define TREF_REFMASK 0x0000ffff
+#define TREF_FRAME 0x00010000
+#define TREF_CONT 0x00020000
+
+#define TREF(ref, t) ((TRef)((ref) + ((t)<<24)))
+
+#define tref_ref(tr) ((IRRef1)(tr))
+#define tref_t(tr) ((IRType)((tr)>>24))
+#define tref_type(tr) ((IRType)(((tr)>>24) & IRT_TYPE))
+#define tref_typerange(tr, first, last) \
+ ((((tr)>>24) & IRT_TYPE) - (TRef)(first) <= (TRef)(last-first))
+
+#define tref_istype(tr, t) (((tr) & (IRT_TYPE<<24)) == ((t)<<24))
+#define tref_isnil(tr) (tref_istype((tr), IRT_NIL))
+#define tref_isfalse(tr) (tref_istype((tr), IRT_FALSE))
+#define tref_istrue(tr) (tref_istype((tr), IRT_TRUE))
+#define tref_isstr(tr) (tref_istype((tr), IRT_STR))
+#define tref_isfunc(tr) (tref_istype((tr), IRT_FUNC))
+#define tref_iscdata(tr) (tref_istype((tr), IRT_CDATA))
+#define tref_istab(tr) (tref_istype((tr), IRT_TAB))
+#define tref_isudata(tr) (tref_istype((tr), IRT_UDATA))
+#define tref_isnum(tr) (tref_istype((tr), IRT_NUM))
+#define tref_isint(tr) (tref_istype((tr), IRT_INT))
+
+#define tref_isbool(tr) (tref_typerange((tr), IRT_FALSE, IRT_TRUE))
+#define tref_ispri(tr) (tref_typerange((tr), IRT_NIL, IRT_TRUE))
+#define tref_istruecond(tr) (!tref_typerange((tr), IRT_NIL, IRT_FALSE))
+#define tref_isinteger(tr) (tref_typerange((tr), IRT_I8, IRT_INT))
+#define tref_isnumber(tr) (tref_typerange((tr), IRT_NUM, IRT_INT))
+#define tref_isnumber_str(tr) (tref_isnumber((tr)) || tref_isstr((tr)))
+#define tref_isgcv(tr) (tref_typerange((tr), IRT_STR, IRT_UDATA))
+
+#define tref_isk(tr) (irref_isk(tref_ref((tr))))
+#define tref_isk2(tr1, tr2) (irref_isk(tref_ref((tr1) | (tr2))))
+
+#define TREF_PRI(t) (TREF(REF_NIL-(t), (t)))
+#define TREF_NIL (TREF_PRI(IRT_NIL))
+#define TREF_FALSE (TREF_PRI(IRT_FALSE))
+#define TREF_TRUE (TREF_PRI(IRT_TRUE))
+
+/* -- IR format ----------------------------------------------------------- */
+
+/* IR instruction format (64 bit).
+**
+** 16 16 8 8 8 8
+** +-------+-------+---+---+---+---+
+** | op1 | op2 | t | o | r | s |
+** +-------+-------+---+---+---+---+
+** | op12/i/gco | ot | prev | (alternative fields in union)
+** +---------------+-------+-------+
+** 32 16 16
+**
+** prev is only valid prior to register allocation and then reused for r + s.
+*/
+
+typedef union IRIns {
+ struct {
+ LJ_ENDIAN_LOHI(
+ IRRef1 op1; /* IR operand 1. */
+ , IRRef1 op2; /* IR operand 2. */
+ )
+ IROpT ot; /* IR opcode and type (overlaps t and o). */
+ IRRef1 prev; /* Previous ins in same chain (overlaps r and s). */
+ };
+ struct {
+ IRRef2 op12; /* IR operand 1 and 2 (overlaps op1 and op2). */
+ LJ_ENDIAN_LOHI(
+ IRType1 t; /* IR type. */
+ , IROp1 o; /* IR opcode. */
+ )
+ LJ_ENDIAN_LOHI(
+ uint8_t r; /* Register allocation (overlaps prev). */
+ , uint8_t s; /* Spill slot allocation (overlaps prev). */
+ )
+ };
+ int32_t i; /* 32 bit signed integer literal (overlaps op12). */
+ GCRef gcr; /* GCobj constant (overlaps op12). */
+ MRef ptr; /* Pointer constant (overlaps op12). */
+} IRIns;
+
+#define ir_kgc(ir) check_exp((ir)->o == IR_KGC, gcref((ir)->gcr))
+#define ir_kstr(ir) (gco2str(ir_kgc((ir))))
+#define ir_ktab(ir) (gco2tab(ir_kgc((ir))))
+#define ir_kfunc(ir) (gco2func(ir_kgc((ir))))
+#define ir_kcdata(ir) (gco2cd(ir_kgc((ir))))
+#define ir_knum(ir) check_exp((ir)->o == IR_KNUM, mref((ir)->ptr, cTValue))
+#define ir_kint64(ir) check_exp((ir)->o == IR_KINT64, mref((ir)->ptr,cTValue))
+#define ir_k64(ir) \
+ check_exp((ir)->o == IR_KNUM || (ir)->o == IR_KINT64, mref((ir)->ptr,cTValue))
+#define ir_kptr(ir) \
+ check_exp((ir)->o == IR_KPTR || (ir)->o == IR_KKPTR, mref((ir)->ptr, void))
+
+/* A store or any other op with a non-weak guard has a side-effect. */
+static LJ_AINLINE int ir_sideeff(IRIns *ir)
+{
+ return (((ir->t.irt | ~IRT_GUARD) & lj_ir_mode[ir->o]) >= IRM_S);
+}
+
+LJ_STATIC_ASSERT((int)IRT_GUARD == (int)IRM_W);
+
+#endif
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_ircall.h
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_ircall.h b/lib/luajit/src/lj_ircall.h
new file mode 100644
index 0000000..893dac2
--- /dev/null
+++ b/lib/luajit/src/lj_ircall.h
@@ -0,0 +1,277 @@
+/*
+** IR CALL* instruction definitions.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_IRCALL_H
+#define _LJ_IRCALL_H
+
+#include "lj_obj.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+
+/* C call info for CALL* instructions. */
+typedef struct CCallInfo {
+ ASMFunction func; /* Function pointer. */
+ uint32_t flags; /* Number of arguments and flags. */
+} CCallInfo;
+
+#define CCI_NARGS(ci) ((ci)->flags & 0xff) /* Extract # of args. */
+#define CCI_NARGS_MAX 32 /* Max. # of args. */
+
+#define CCI_OTSHIFT 16
+#define CCI_OPTYPE(ci) ((ci)->flags >> CCI_OTSHIFT) /* Get op/type. */
+#define CCI_OPSHIFT 24
+#define CCI_OP(ci) ((ci)->flags >> CCI_OPSHIFT) /* Get op. */
+
+#define CCI_CALL_N (IR_CALLN << CCI_OPSHIFT)
+#define CCI_CALL_L (IR_CALLL << CCI_OPSHIFT)
+#define CCI_CALL_S (IR_CALLS << CCI_OPSHIFT)
+#define CCI_CALL_FN (CCI_CALL_N|CCI_CC_FASTCALL)
+#define CCI_CALL_FL (CCI_CALL_L|CCI_CC_FASTCALL)
+#define CCI_CALL_FS (CCI_CALL_S|CCI_CC_FASTCALL)
+
+/* C call info flags. */
+#define CCI_L 0x0100 /* Implicit L arg. */
+#define CCI_CASTU64 0x0200 /* Cast u64 result to number. */
+#define CCI_NOFPRCLOBBER 0x0400 /* Does not clobber any FPRs. */
+#define CCI_VARARG 0x0800 /* Vararg function. */
+
+#define CCI_CC_MASK 0x3000 /* Calling convention mask. */
+#define CCI_CC_SHIFT 12
+/* ORDER CC */
+#define CCI_CC_CDECL 0x0000 /* Default cdecl calling convention. */
+#define CCI_CC_THISCALL 0x1000 /* Thiscall calling convention. */
+#define CCI_CC_FASTCALL 0x2000 /* Fastcall calling convention. */
+#define CCI_CC_STDCALL 0x3000 /* Stdcall calling convention. */
+
+/* Helpers for conditional function definitions. */
+#define IRCALLCOND_ANY(x) x
+
+#if LJ_TARGET_X86ORX64
+#define IRCALLCOND_FPMATH(x) NULL
+#else
+#define IRCALLCOND_FPMATH(x) x
+#endif
+
+#if LJ_SOFTFP
+#define IRCALLCOND_SOFTFP(x) x
+#if LJ_HASFFI
+#define IRCALLCOND_SOFTFP_FFI(x) x
+#else
+#define IRCALLCOND_SOFTFP_FFI(x) NULL
+#endif
+#else
+#define IRCALLCOND_SOFTFP(x) NULL
+#define IRCALLCOND_SOFTFP_FFI(x) NULL
+#endif
+
+#define LJ_NEED_FP64 (LJ_TARGET_ARM || LJ_TARGET_PPC || LJ_TARGET_MIPS)
+
+#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64)
+#define IRCALLCOND_FP64_FFI(x) x
+#else
+#define IRCALLCOND_FP64_FFI(x) NULL
+#endif
+
+#if LJ_HASFFI
+#define IRCALLCOND_FFI(x) x
+#if LJ_32
+#define IRCALLCOND_FFI32(x) x
+#else
+#define IRCALLCOND_FFI32(x) NULL
+#endif
+#else
+#define IRCALLCOND_FFI(x) NULL
+#define IRCALLCOND_FFI32(x) NULL
+#endif
+
+#if LJ_TARGET_X86
+#define CCI_RANDFPR 0 /* Clang on OSX/x86 is overzealous. */
+#else
+#define CCI_RANDFPR CCI_NOFPRCLOBBER
+#endif
+
+#if LJ_SOFTFP
+#define ARG1_FP 2 /* Treat as 2 32 bit arguments. */
+#else
+#define ARG1_FP 1
+#endif
+
+#if LJ_32
+#define ARG2_64 4 /* Treat as 4 32 bit arguments. */
+#else
+#define ARG2_64 2
+#endif
+
+/* Function definitions for CALL* instructions. */
+#define IRCALLDEF(_) \
+ _(ANY, lj_str_cmp, 2, FN, INT, CCI_NOFPRCLOBBER) \
+ _(ANY, lj_str_new, 3, S, STR, CCI_L) \
+ _(ANY, lj_strscan_num, 2, FN, INT, 0) \
+ _(ANY, lj_str_fromint, 2, FN, STR, CCI_L) \
+ _(ANY, lj_str_fromnum, 2, FN, STR, CCI_L) \
+ _(ANY, lj_tab_new1, 2, FS, TAB, CCI_L) \
+ _(ANY, lj_tab_dup, 2, FS, TAB, CCI_L) \
+ _(ANY, lj_tab_newkey, 3, S, P32, CCI_L) \
+ _(ANY, lj_tab_len, 1, FL, INT, 0) \
+ _(ANY, lj_gc_step_jit, 2, FS, NIL, CCI_L) \
+ _(ANY, lj_gc_barrieruv, 2, FS, NIL, 0) \
+ _(ANY, lj_mem_newgco, 2, FS, P32, CCI_L) \
+ _(ANY, lj_math_random_step, 1, FS, NUM, CCI_CASTU64|CCI_RANDFPR)\
+ _(ANY, lj_vm_modi, 2, FN, INT, 0) \
+ _(ANY, sinh, ARG1_FP, N, NUM, 0) \
+ _(ANY, cosh, ARG1_FP, N, NUM, 0) \
+ _(ANY, tanh, ARG1_FP, N, NUM, 0) \
+ _(ANY, fputc, 2, S, INT, 0) \
+ _(ANY, fwrite, 4, S, INT, 0) \
+ _(ANY, fflush, 1, S, INT, 0) \
+ /* ORDER FPM */ \
+ _(FPMATH, lj_vm_floor, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, lj_vm_ceil, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, lj_vm_trunc, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, sqrt, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, exp, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, lj_vm_exp2, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, log, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, lj_vm_log2, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, log10, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, sin, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, cos, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, tan, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, lj_vm_powi, ARG1_FP+1, N, NUM, 0) \
+ _(FPMATH, pow, ARG1_FP*2, N, NUM, 0) \
+ _(FPMATH, atan2, ARG1_FP*2, N, NUM, 0) \
+ _(FPMATH, ldexp, ARG1_FP+1, N, NUM, 0) \
+ _(SOFTFP, lj_vm_tobit, 2, N, INT, 0) \
+ _(SOFTFP, softfp_add, 4, N, NUM, 0) \
+ _(SOFTFP, softfp_sub, 4, N, NUM, 0) \
+ _(SOFTFP, softfp_mul, 4, N, NUM, 0) \
+ _(SOFTFP, softfp_div, 4, N, NUM, 0) \
+ _(SOFTFP, softfp_cmp, 4, N, NIL, 0) \
+ _(SOFTFP, softfp_i2d, 1, N, NUM, 0) \
+ _(SOFTFP, softfp_d2i, 2, N, INT, 0) \
+ _(SOFTFP_FFI, softfp_ui2d, 1, N, NUM, 0) \
+ _(SOFTFP_FFI, softfp_f2d, 1, N, NUM, 0) \
+ _(SOFTFP_FFI, softfp_d2ui, 2, N, INT, 0) \
+ _(SOFTFP_FFI, softfp_d2f, 2, N, FLOAT, 0) \
+ _(SOFTFP_FFI, softfp_i2f, 1, N, FLOAT, 0) \
+ _(SOFTFP_FFI, softfp_ui2f, 1, N, FLOAT, 0) \
+ _(SOFTFP_FFI, softfp_f2i, 1, N, INT, 0) \
+ _(SOFTFP_FFI, softfp_f2ui, 1, N, INT, 0) \
+ _(FP64_FFI, fp64_l2d, 2, N, NUM, 0) \
+ _(FP64_FFI, fp64_ul2d, 2, N, NUM, 0) \
+ _(FP64_FFI, fp64_l2f, 2, N, FLOAT, 0) \
+ _(FP64_FFI, fp64_ul2f, 2, N, FLOAT, 0) \
+ _(FP64_FFI, fp64_d2l, ARG1_FP, N, I64, 0) \
+ _(FP64_FFI, fp64_d2ul, ARG1_FP, N, U64, 0) \
+ _(FP64_FFI, fp64_f2l, 1, N, I64, 0) \
+ _(FP64_FFI, fp64_f2ul, 1, N, U64, 0) \
+ _(FFI, lj_carith_divi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_divu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_modi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_modu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_powi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_powu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_cdata_setfin, 2, FN, P32, CCI_L) \
+ _(FFI, strlen, 1, L, INTP, 0) \
+ _(FFI, memcpy, 3, S, PTR, 0) \
+ _(FFI, memset, 3, S, PTR, 0) \
+ _(FFI, lj_vm_errno, 0, S, INT, CCI_NOFPRCLOBBER) \
+ _(FFI32, lj_carith_mul64, ARG2_64, N, I64, CCI_NOFPRCLOBBER)
+ \
+ /* End of list. */
+
+typedef enum {
+#define IRCALLENUM(cond, name, nargs, kind, type, flags) IRCALL_##name,
+IRCALLDEF(IRCALLENUM)
+#undef IRCALLENUM
+ IRCALL__MAX
+} IRCallID;
+
+LJ_FUNC TRef lj_ir_call(jit_State *J, IRCallID id, ...);
+
+LJ_DATA const CCallInfo lj_ir_callinfo[IRCALL__MAX+1];
+
+/* Soft-float declarations. */
+#if LJ_SOFTFP
+#if LJ_TARGET_ARM
+#define softfp_add __aeabi_dadd
+#define softfp_sub __aeabi_dsub
+#define softfp_mul __aeabi_dmul
+#define softfp_div __aeabi_ddiv
+#define softfp_cmp __aeabi_cdcmple
+#define softfp_i2d __aeabi_i2d
+#define softfp_d2i __aeabi_d2iz
+#define softfp_ui2d __aeabi_ui2d
+#define softfp_f2d __aeabi_f2d
+#define softfp_d2ui __aeabi_d2uiz
+#define softfp_d2f __aeabi_d2f
+#define softfp_i2f __aeabi_i2f
+#define softfp_ui2f __aeabi_ui2f
+#define softfp_f2i __aeabi_f2iz
+#define softfp_f2ui __aeabi_f2uiz
+#define fp64_l2d __aeabi_l2d
+#define fp64_ul2d __aeabi_ul2d
+#define fp64_l2f __aeabi_l2f
+#define fp64_ul2f __aeabi_ul2f
+#if LJ_TARGET_IOS
+#define fp64_d2l __fixdfdi
+#define fp64_d2ul __fixunsdfdi
+#define fp64_f2l __fixsfdi
+#define fp64_f2ul __fixunssfdi
+#else
+#define fp64_d2l __aeabi_d2lz
+#define fp64_d2ul __aeabi_d2ulz
+#define fp64_f2l __aeabi_f2lz
+#define fp64_f2ul __aeabi_f2ulz
+#endif
+#else
+#error "Missing soft-float definitions for target architecture"
+#endif
+extern double softfp_add(double a, double b);
+extern double softfp_sub(double a, double b);
+extern double softfp_mul(double a, double b);
+extern double softfp_div(double a, double b);
+extern void softfp_cmp(double a, double b);
+extern double softfp_i2d(int32_t a);
+extern int32_t softfp_d2i(double a);
+#if LJ_HASFFI
+extern double softfp_ui2d(uint32_t a);
+extern double softfp_f2d(float a);
+extern uint32_t softfp_d2ui(double a);
+extern float softfp_d2f(double a);
+extern float softfp_i2f(int32_t a);
+extern float softfp_ui2f(uint32_t a);
+extern int32_t softfp_f2i(float a);
+extern uint32_t softfp_f2ui(float a);
+#endif
+#endif
+
+#if LJ_HASFFI && LJ_NEED_FP64 && !(LJ_TARGET_ARM && LJ_SOFTFP)
+#ifdef __GNUC__
+#define fp64_l2d __floatdidf
+#define fp64_ul2d __floatundidf
+#define fp64_l2f __floatdisf
+#define fp64_ul2f __floatundisf
+#define fp64_d2l __fixdfdi
+#define fp64_d2ul __fixunsdfdi
+#define fp64_f2l __fixsfdi
+#define fp64_f2ul __fixunssfdi
+#else
+#error "Missing fp64 helper definitions for this compiler"
+#endif
+#endif
+
+#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64)
+extern double fp64_l2d(int64_t a);
+extern double fp64_ul2d(uint64_t a);
+extern float fp64_l2f(int64_t a);
+extern float fp64_ul2f(uint64_t a);
+extern int64_t fp64_d2l(double a);
+extern uint64_t fp64_d2ul(double a);
+extern int64_t fp64_f2l(float a);
+extern uint64_t fp64_f2ul(float a);
+#endif
+
+#endif
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_iropt.h
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_iropt.h b/lib/luajit/src/lj_iropt.h
new file mode 100644
index 0000000..4e424e7
--- /dev/null
+++ b/lib/luajit/src/lj_iropt.h
@@ -0,0 +1,161 @@
+/*
+** Common header for IR emitter and optimizations.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_IROPT_H
+#define _LJ_IROPT_H
+
+#include <stdarg.h>
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+/* IR emitter. */
+LJ_FUNC void LJ_FASTCALL lj_ir_growtop(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_ir_emit(jit_State *J);
+
+/* Save current IR in J->fold.ins, but do not emit it (yet). */
+static LJ_AINLINE void lj_ir_set_(jit_State *J, uint16_t ot, IRRef1 a, IRRef1 b)
+{
+ J->fold.ins.ot = ot; J->fold.ins.op1 = a; J->fold.ins.op2 = b;
+}
+
+#define lj_ir_set(J, ot, a, b) \
+ lj_ir_set_(J, (uint16_t)(ot), (IRRef1)(a), (IRRef1)(b))
+
+/* Get ref of next IR instruction and optionally grow IR.
+** Note: this may invalidate all IRIns*!
+*/
+static LJ_AINLINE IRRef lj_ir_nextins(jit_State *J)
+{
+ IRRef ref = J->cur.nins;
+ if (LJ_UNLIKELY(ref >= J->irtoplim)) lj_ir_growtop(J);
+ J->cur.nins = ref + 1;
+ return ref;
+}
+
+/* Interning of constants. */
+LJ_FUNC TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k);
+LJ_FUNC void lj_ir_k64_freeall(jit_State *J);
+LJ_FUNC TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv);
+LJ_FUNC cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64);
+LJ_FUNC TRef lj_ir_knum_u64(jit_State *J, uint64_t u64);
+LJ_FUNC TRef lj_ir_knumint(jit_State *J, lua_Number n);
+LJ_FUNC TRef lj_ir_kint64(jit_State *J, uint64_t u64);
+LJ_FUNC TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t);
+LJ_FUNC TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr);
+LJ_FUNC TRef lj_ir_knull(jit_State *J, IRType t);
+LJ_FUNC TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot);
+
+#if LJ_64
+#define lj_ir_kintp(J, k) lj_ir_kint64(J, (uint64_t)(k))
+#else
+#define lj_ir_kintp(J, k) lj_ir_kint(J, (int32_t)(k))
+#endif
+
+static LJ_AINLINE TRef lj_ir_knum(jit_State *J, lua_Number n)
+{
+ TValue tv;
+ tv.n = n;
+ return lj_ir_knum_u64(J, tv.u64);
+}
+
+#define lj_ir_kstr(J, str) lj_ir_kgc(J, obj2gco((str)), IRT_STR)
+#define lj_ir_ktab(J, tab) lj_ir_kgc(J, obj2gco((tab)), IRT_TAB)
+#define lj_ir_kfunc(J, func) lj_ir_kgc(J, obj2gco((func)), IRT_FUNC)
+#define lj_ir_kptr(J, ptr) lj_ir_kptr_(J, IR_KPTR, (ptr))
+#define lj_ir_kkptr(J, ptr) lj_ir_kptr_(J, IR_KKPTR, (ptr))
+
+/* Special FP constants. */
+#define lj_ir_knum_zero(J) lj_ir_knum_u64(J, U64x(00000000,00000000))
+#define lj_ir_knum_one(J) lj_ir_knum_u64(J, U64x(3ff00000,00000000))
+#define lj_ir_knum_tobit(J) lj_ir_knum_u64(J, U64x(43380000,00000000))
+
+/* Special 128 bit SIMD constants. */
+#define lj_ir_knum_abs(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_ABS))
+#define lj_ir_knum_neg(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_NEG))
+
+/* Access to constants. */
+LJ_FUNC void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir);
+
+/* Convert IR operand types. */
+LJ_FUNC TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr);
+LJ_FUNC TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr);
+LJ_FUNC TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr);
+
+/* Miscellaneous IR ops. */
+LJ_FUNC int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op);
+LJ_FUNC int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op);
+LJ_FUNC void lj_ir_rollback(jit_State *J, IRRef ref);
+
+/* Emit IR instructions with on-the-fly optimizations. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fold(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_cse(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim);
+
+/* Special return values for the fold functions. */
+enum {
+ NEXTFOLD, /* Couldn't fold, pass on. */
+ RETRYFOLD, /* Retry fold with modified fins. */
+ KINTFOLD, /* Return ref for int constant in fins->i. */
+ FAILFOLD, /* Guard would always fail. */
+ DROPFOLD, /* Guard eliminated. */
+ MAX_FOLD
+};
+
+#define INTFOLD(k) ((J->fold.ins.i = (k)), (TRef)KINTFOLD)
+#define INT64FOLD(k) (lj_ir_kint64(J, (k)))
+#define CONDFOLD(cond) ((TRef)FAILFOLD + (TRef)(cond))
+#define LEFTFOLD (J->fold.ins.op1)
+#define RIGHTFOLD (J->fold.ins.op2)
+#define CSEFOLD (lj_opt_cse(J))
+#define EMITFOLD (lj_ir_emit(J))
+
+/* Load/store forwarding. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J);
+LJ_FUNC int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J);
+LJ_FUNC int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim);
+LJ_FUNC int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref);
+
+/* Dead-store elimination. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J);
+
+/* Narrowing. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef key);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr);
+#if LJ_HASFFI
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef key);
+#endif
+LJ_FUNC TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc,
+ TValue *vb, TValue *vc, IROp op);
+LJ_FUNC TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc);
+LJ_FUNC TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vc);
+LJ_FUNC TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vc);
+LJ_FUNC IRType lj_opt_narrow_forl(jit_State *J, cTValue *forbase);
+
+/* Optimization passes. */
+LJ_FUNC void lj_opt_dce(jit_State *J);
+LJ_FUNC int lj_opt_loop(jit_State *J);
+#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
+LJ_FUNC void lj_opt_split(jit_State *J);
+#else
+#define lj_opt_split(J) UNUSED(J)
+#endif
+LJ_FUNC void lj_opt_sink(jit_State *J);
+
+#endif
+
+#endif
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_jit.h
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_jit.h b/lib/luajit/src/lj_jit.h
new file mode 100644
index 0000000..eb76547
--- /dev/null
+++ b/lib/luajit/src/lj_jit.h
@@ -0,0 +1,417 @@
+/*
+** Common definitions for the JIT compiler.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_JIT_H
+#define _LJ_JIT_H
+
+#include "lj_obj.h"
+#include "lj_ir.h"
+
+/* JIT engine flags. */
+#define JIT_F_ON 0x00000001
+
+/* CPU-specific JIT engine flags. */
+#if LJ_TARGET_X86ORX64
+#define JIT_F_CMOV 0x00000010
+#define JIT_F_SSE2 0x00000020
+#define JIT_F_SSE3 0x00000040
+#define JIT_F_SSE4_1 0x00000080
+#define JIT_F_P4 0x00000100
+#define JIT_F_PREFER_IMUL 0x00000200
+#define JIT_F_SPLIT_XMM 0x00000400
+#define JIT_F_LEA_AGU 0x00000800
+
+/* Names for the CPU-specific flags. Must match the order above. */
+#define JIT_F_CPU_FIRST JIT_F_CMOV
+#define JIT_F_CPUSTRING "\4CMOV\4SSE2\4SSE3\6SSE4.1\2P4\3AMD\2K8\4ATOM"
+#elif LJ_TARGET_ARM
+#define JIT_F_ARMV6_ 0x00000010
+#define JIT_F_ARMV6T2_ 0x00000020
+#define JIT_F_ARMV7 0x00000040
+#define JIT_F_VFPV2 0x00000080
+#define JIT_F_VFPV3 0x00000100
+
+#define JIT_F_ARMV6 (JIT_F_ARMV6_|JIT_F_ARMV6T2_|JIT_F_ARMV7)
+#define JIT_F_ARMV6T2 (JIT_F_ARMV6T2_|JIT_F_ARMV7)
+#define JIT_F_VFP (JIT_F_VFPV2|JIT_F_VFPV3)
+
+/* Names for the CPU-specific flags. Must match the order above. */
+#define JIT_F_CPU_FIRST JIT_F_ARMV6_
+#define JIT_F_CPUSTRING "\5ARMv6\7ARMv6T2\5ARMv7\5VFPv2\5VFPv3"
+#elif LJ_TARGET_PPC
+#define JIT_F_SQRT 0x00000010
+#define JIT_F_ROUND 0x00000020
+
+/* Names for the CPU-specific flags. Must match the order above. */
+#define JIT_F_CPU_FIRST JIT_F_SQRT
+#define JIT_F_CPUSTRING "\4SQRT\5ROUND"
+#elif LJ_TARGET_MIPS
+#define JIT_F_MIPS32R2 0x00000010
+
+/* Names for the CPU-specific flags. Must match the order above. */
+#define JIT_F_CPU_FIRST JIT_F_MIPS32R2
+#define JIT_F_CPUSTRING "\010MIPS32R2"
+#else
+#define JIT_F_CPU_FIRST 0
+#define JIT_F_CPUSTRING ""
+#endif
+
+/* Optimization flags. */
+#define JIT_F_OPT_MASK 0x0fff0000
+
+#define JIT_F_OPT_FOLD 0x00010000
+#define JIT_F_OPT_CSE 0x00020000
+#define JIT_F_OPT_DCE 0x00040000
+#define JIT_F_OPT_FWD 0x00080000
+#define JIT_F_OPT_DSE 0x00100000
+#define JIT_F_OPT_NARROW 0x00200000
+#define JIT_F_OPT_LOOP 0x00400000
+#define JIT_F_OPT_ABC 0x00800000
+#define JIT_F_OPT_SINK 0x01000000
+#define JIT_F_OPT_FUSE 0x02000000
+
+/* Optimizations names for -O. Must match the order above. */
+#define JIT_F_OPT_FIRST JIT_F_OPT_FOLD
+#define JIT_F_OPTSTRING \
+ "\4fold\3cse\3dce\3fwd\3dse\6narrow\4loop\3abc\4sink\4fuse"
+
+/* Optimization levels set a fixed combination of flags. */
+#define JIT_F_OPT_0 0
+#define JIT_F_OPT_1 (JIT_F_OPT_FOLD|JIT_F_OPT_CSE|JIT_F_OPT_DCE)
+#define JIT_F_OPT_2 (JIT_F_OPT_1|JIT_F_OPT_NARROW|JIT_F_OPT_LOOP)
+#define JIT_F_OPT_3 (JIT_F_OPT_2|\
+ JIT_F_OPT_FWD|JIT_F_OPT_DSE|JIT_F_OPT_ABC|JIT_F_OPT_SINK|JIT_F_OPT_FUSE)
+#define JIT_F_OPT_DEFAULT JIT_F_OPT_3
+
+#if LJ_TARGET_WINDOWS || LJ_64
+/* See: http://blogs.msdn.com/oldnewthing/archive/2003/10/08/55239.aspx */
+#define JIT_P_sizemcode_DEFAULT 64
+#else
+/* Could go as low as 4K, but the mmap() overhead would be rather high. */
+#define JIT_P_sizemcode_DEFAULT 32
+#endif
+
+/* Optimization parameters and their defaults. Length is a char in octal! */
+#define JIT_PARAMDEF(_) \
+ _(\010, maxtrace, 1000) /* Max. # of traces in cache. */ \
+ _(\011, maxrecord, 4000) /* Max. # of recorded IR instructions. */ \
+ _(\012, maxirconst, 500) /* Max. # of IR constants of a trace. */ \
+ _(\007, maxside, 100) /* Max. # of side traces of a root trace. */ \
+ _(\007, maxsnap, 500) /* Max. # of snapshots for a trace. */ \
+ \
+ _(\007, hotloop, 56) /* # of iter. to detect a hot loop/call. */ \
+ _(\007, hotexit, 10) /* # of taken exits to start a side trace. */ \
+ _(\007, tryside, 4) /* # of attempts to compile a side trace. */ \
+ \
+ _(\012, instunroll, 4) /* Max. unroll for instable loops. */ \
+ _(\012, loopunroll, 15) /* Max. unroll for loop ops in side traces. */ \
+ _(\012, callunroll, 3) /* Max. unroll for recursive calls. */ \
+ _(\011, recunroll, 2) /* Min. unroll for true recursion. */ \
+ \
+ /* Size of each machine code area (in KBytes). */ \
+ _(\011, sizemcode, JIT_P_sizemcode_DEFAULT) \
+ /* Max. total size of all machine code areas (in KBytes). */ \
+ _(\010, maxmcode, 512) \
+ /* End of list. */
+
+enum {
+#define JIT_PARAMENUM(len, name, value) JIT_P_##name,
+JIT_PARAMDEF(JIT_PARAMENUM)
+#undef JIT_PARAMENUM
+ JIT_P__MAX
+};
+
+#define JIT_PARAMSTR(len, name, value) #len #name
+#define JIT_P_STRING JIT_PARAMDEF(JIT_PARAMSTR)
+
+/* Trace compiler state. */
+typedef enum {
+ LJ_TRACE_IDLE, /* Trace compiler idle. */
+ LJ_TRACE_ACTIVE = 0x10,
+ LJ_TRACE_RECORD, /* Bytecode recording active. */
+ LJ_TRACE_START, /* New trace started. */
+ LJ_TRACE_END, /* End of trace. */
+ LJ_TRACE_ASM, /* Assemble trace. */
+ LJ_TRACE_ERR /* Trace aborted with error. */
+} TraceState;
+
+/* Post-processing action. */
+typedef enum {
+ LJ_POST_NONE, /* No action. */
+ LJ_POST_FIXCOMP, /* Fixup comparison and emit pending guard. */
+ LJ_POST_FIXGUARD, /* Fixup and emit pending guard. */
+ LJ_POST_FIXGUARDSNAP, /* Fixup and emit pending guard and snapshot. */
+ LJ_POST_FIXBOOL, /* Fixup boolean result. */
+ LJ_POST_FIXCONST, /* Fixup constant results. */
+ LJ_POST_FFRETRY /* Suppress recording of retried fast functions. */
+} PostProc;
+
+/* Machine code type. */
+#if LJ_TARGET_X86ORX64
+typedef uint8_t MCode;
+#else
+typedef uint32_t MCode;
+#endif
+
+/* Stack snapshot header. */
+typedef struct SnapShot {
+ uint16_t mapofs; /* Offset into snapshot map. */
+ IRRef1 ref; /* First IR ref for this snapshot. */
+ uint8_t nslots; /* Number of valid slots. */
+ uint8_t topslot; /* Maximum frame extent. */
+ uint8_t nent; /* Number of compressed entries. */
+ uint8_t count; /* Count of taken exits for this snapshot. */
+} SnapShot;
+
+#define SNAPCOUNT_DONE 255 /* Already compiled and linked a side trace. */
+
+/* Compressed snapshot entry. */
+typedef uint32_t SnapEntry;
+
+#define SNAP_FRAME 0x010000 /* Frame slot. */
+#define SNAP_CONT 0x020000 /* Continuation slot. */
+#define SNAP_NORESTORE 0x040000 /* No need to restore slot. */
+#define SNAP_SOFTFPNUM 0x080000 /* Soft-float number. */
+LJ_STATIC_ASSERT(SNAP_FRAME == TREF_FRAME);
+LJ_STATIC_ASSERT(SNAP_CONT == TREF_CONT);
+
+#define SNAP(slot, flags, ref) (((SnapEntry)(slot) << 24) + (flags) + (ref))
+#define SNAP_TR(slot, tr) \
+ (((SnapEntry)(slot) << 24) + ((tr) & (TREF_CONT|TREF_FRAME|TREF_REFMASK)))
+#define SNAP_MKPC(pc) ((SnapEntry)u32ptr(pc))
+#define SNAP_MKFTSZ(ftsz) ((SnapEntry)(ftsz))
+#define snap_ref(sn) ((sn) & 0xffff)
+#define snap_slot(sn) ((BCReg)((sn) >> 24))
+#define snap_isframe(sn) ((sn) & SNAP_FRAME)
+#define snap_pc(sn) ((const BCIns *)(uintptr_t)(sn))
+#define snap_setref(sn, ref) (((sn) & (0xffff0000&~SNAP_NORESTORE)) | (ref))
+
+/* Snapshot and exit numbers. */
+typedef uint32_t SnapNo;
+typedef uint32_t ExitNo;
+
+/* Trace number. */
+typedef uint32_t TraceNo; /* Used to pass around trace numbers. */
+typedef uint16_t TraceNo1; /* Stored trace number. */
+
+/* Type of link. ORDER LJ_TRLINK */
+typedef enum {
+ LJ_TRLINK_NONE, /* Incomplete trace. No link, yet. */
+ LJ_TRLINK_ROOT, /* Link to other root trace. */
+ LJ_TRLINK_LOOP, /* Loop to same trace. */
+ LJ_TRLINK_TAILREC, /* Tail-recursion. */
+ LJ_TRLINK_UPREC, /* Up-recursion. */
+ LJ_TRLINK_DOWNREC, /* Down-recursion. */
+ LJ_TRLINK_INTERP, /* Fallback to interpreter. */
+ LJ_TRLINK_RETURN /* Return to interpreter. */
+} TraceLink;
+
+/* Trace object. */
+typedef struct GCtrace {
+ GCHeader;
+ uint8_t topslot; /* Top stack slot already checked to be allocated. */
+ uint8_t linktype; /* Type of link. */
+ IRRef nins; /* Next IR instruction. Biased with REF_BIAS. */
+ GCRef gclist;
+ IRIns *ir; /* IR instructions/constants. Biased with REF_BIAS. */
+ IRRef nk; /* Lowest IR constant. Biased with REF_BIAS. */
+ uint16_t nsnap; /* Number of snapshots. */
+ uint16_t nsnapmap; /* Number of snapshot map elements. */
+ SnapShot *snap; /* Snapshot array. */
+ SnapEntry *snapmap; /* Snapshot map. */
+ GCRef startpt; /* Starting prototype. */
+ MRef startpc; /* Bytecode PC of starting instruction. */
+ BCIns startins; /* Original bytecode of starting instruction. */
+ MSize szmcode; /* Size of machine code. */
+ MCode *mcode; /* Start of machine code. */
+ MSize mcloop; /* Offset of loop start in machine code. */
+ uint16_t nchild; /* Number of child traces (root trace only). */
+ uint16_t spadjust; /* Stack pointer adjustment (offset in bytes). */
+ TraceNo1 traceno; /* Trace number. */
+ TraceNo1 link; /* Linked trace (or self for loops). */
+ TraceNo1 root; /* Root trace of side trace (or 0 for root traces). */
+ TraceNo1 nextroot; /* Next root trace for same prototype. */
+ TraceNo1 nextside; /* Next side trace of same root trace. */
+ uint8_t sinktags; /* Trace has SINK tags. */
+ uint8_t unused1;
+#ifdef LUAJIT_USE_GDBJIT
+ void *gdbjit_entry; /* GDB JIT entry. */
+#endif
+} GCtrace;
+
+#define gco2trace(o) check_exp((o)->gch.gct == ~LJ_TTRACE, (GCtrace *)(o))
+#define traceref(J, n) \
+ check_exp((n)>0 && (MSize)(n)<J->sizetrace, (GCtrace *)gcref(J->trace[(n)]))
+
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtrace, gclist));
+
+/* Return the snapshot-map offset just past this snapshot's entries. */
+static LJ_AINLINE MSize snap_nextofs(GCtrace *T, SnapShot *snap)
+{
+  SnapShot *nextsnap = snap + 1;
+  if (nextsnap != &T->snap[T->nsnap])
+    return nextsnap->mapofs;
+  return T->nsnapmap;  /* Last snapshot extends to the end of the map. */
+}
+
+/* Round-robin penalty cache for bytecodes leading to aborted traces. */
+typedef struct HotPenalty {
+ MRef pc; /* Starting bytecode PC. */
+ uint16_t val; /* Penalty value, i.e. hotcount start. */
+ uint16_t reason; /* Abort reason (really TraceErr). */
+} HotPenalty;
+
+#define PENALTY_SLOTS 64 /* Penalty cache slot. Must be a power of 2. */
+#define PENALTY_MIN (36*2) /* Minimum penalty value. */
+#define PENALTY_MAX 60000 /* Maximum penalty value. */
+#define PENALTY_RNDBITS 4 /* # of random bits to add to penalty value. */
+
+/* Round-robin backpropagation cache for narrowing conversions. */
+typedef struct BPropEntry {
+ IRRef1 key; /* Key: original reference. */
+ IRRef1 val; /* Value: reference after conversion. */
+ IRRef mode; /* Mode for this entry (currently IRCONV_*). */
+} BPropEntry;
+
+/* Number of slots for the backpropagation cache. Must be a power of 2. */
+#define BPROP_SLOTS 16
+
+/* Scalar evolution analysis cache. */
+typedef struct ScEvEntry {
+ MRef pc; /* Bytecode PC of FORI. */
+ IRRef1 idx; /* Index reference. */
+ IRRef1 start; /* Constant start reference. */
+ IRRef1 stop; /* Constant stop reference. */
+ IRRef1 step; /* Constant step reference. */
+ IRType1 t; /* Scalar type. */
+ uint8_t dir; /* Direction. 1: +, 0: -. */
+} ScEvEntry;
+
+/* 128 bit SIMD constants. */
+enum {
+ LJ_KSIMD_ABS,
+ LJ_KSIMD_NEG,
+ LJ_KSIMD__MAX
+};
+
+/* Get 16 byte aligned pointer to SIMD constant. */
+#define LJ_KSIMD(J, n) \
+ ((TValue *)(((intptr_t)&J->ksimd[2*(n)] + 15) & ~(intptr_t)15))
+
+/* Set/reset flag to activate the SPLIT pass for the current trace. */
+#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
+#define lj_needsplit(J) (J->needsplit = 1)
+#define lj_resetsplit(J) (J->needsplit = 0)
+#else
+#define lj_needsplit(J) UNUSED(J)
+#define lj_resetsplit(J) UNUSED(J)
+#endif
+
+/* Fold state is used to fold instructions on-the-fly. */
+typedef struct FoldState {
+ IRIns ins; /* Currently emitted instruction. */
+ IRIns left; /* Instruction referenced by left operand. */
+ IRIns right; /* Instruction referenced by right operand. */
+} FoldState;
+
+/* JIT compiler state. */
+typedef struct jit_State {
+ GCtrace cur; /* Current trace. */
+
+ lua_State *L; /* Current Lua state. */
+ const BCIns *pc; /* Current PC. */
+ GCfunc *fn; /* Current function. */
+ GCproto *pt; /* Current prototype. */
+ TRef *base; /* Current frame base, points into J->slots. */
+
+ uint32_t flags; /* JIT engine flags. */
+ BCReg maxslot; /* Relative to baseslot. */
+ BCReg baseslot; /* Current frame base, offset into J->slots. */
+
+ uint8_t mergesnap; /* Allowed to merge with next snapshot. */
+ uint8_t needsnap; /* Need snapshot before recording next bytecode. */
+ IRType1 guardemit; /* Accumulated IRT_GUARD for emitted instructions. */
+ uint8_t bcskip; /* Number of bytecode instructions to skip. */
+
+ FoldState fold; /* Fold state. */
+
+ const BCIns *bc_min; /* Start of allowed bytecode range for root trace. */
+ MSize bc_extent; /* Extent of the range. */
+
+ TraceState state; /* Trace compiler state. */
+
+ int32_t instunroll; /* Unroll counter for instable loops. */
+ int32_t loopunroll; /* Unroll counter for loop ops in side traces. */
+ int32_t tailcalled; /* Number of successive tailcalls. */
+ int32_t framedepth; /* Current frame depth. */
+ int32_t retdepth; /* Return frame depth (count of RETF). */
+
+ MRef k64; /* Pointer to chained array of 64 bit constants. */
+ TValue ksimd[LJ_KSIMD__MAX*2+1]; /* 16 byte aligned SIMD constants. */
+
+ IRIns *irbuf; /* Temp. IR instruction buffer. Biased with REF_BIAS. */
+ IRRef irtoplim; /* Upper limit of instuction buffer (biased). */
+ IRRef irbotlim; /* Lower limit of instuction buffer (biased). */
+ IRRef loopref; /* Last loop reference or ref of final LOOP (or 0). */
+
+ MSize sizesnap; /* Size of temp. snapshot buffer. */
+ SnapShot *snapbuf; /* Temp. snapshot buffer. */
+ SnapEntry *snapmapbuf; /* Temp. snapshot map buffer. */
+ MSize sizesnapmap; /* Size of temp. snapshot map buffer. */
+
+ PostProc postproc; /* Required post-processing after execution. */
+#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
+ int needsplit; /* Need SPLIT pass. */
+#endif
+
+ GCRef *trace; /* Array of traces. */
+ TraceNo freetrace; /* Start of scan for next free trace. */
+ MSize sizetrace; /* Size of trace array. */
+
+ IRRef1 chain[IR__MAX]; /* IR instruction skip-list chain anchors. */
+ TRef slot[LJ_MAX_JSLOTS+LJ_STACK_EXTRA]; /* Stack slot map. */
+
+ int32_t param[JIT_P__MAX]; /* JIT engine parameters. */
+
+ MCode *exitstubgroup[LJ_MAX_EXITSTUBGR]; /* Exit stub group addresses. */
+
+ HotPenalty penalty[PENALTY_SLOTS]; /* Penalty slots. */
+ uint32_t penaltyslot; /* Round-robin index into penalty slots. */
+ uint32_t prngstate; /* PRNG state. */
+
+ BPropEntry bpropcache[BPROP_SLOTS]; /* Backpropagation cache slots. */
+ uint32_t bpropslot; /* Round-robin index into bpropcache slots. */
+
+ ScEvEntry scev; /* Scalar evolution analysis cache slots. */
+
+ const BCIns *startpc; /* Bytecode PC of starting instruction. */
+ TraceNo parent; /* Parent of current side trace (0 for root traces). */
+ ExitNo exitno; /* Exit number in parent of current side trace. */
+
+ BCIns *patchpc; /* PC for pending re-patch. */
+ BCIns patchins; /* Instruction for pending re-patch. */
+
+ int mcprot; /* Protection of current mcode area. */
+ MCode *mcarea; /* Base of current mcode area. */
+ MCode *mctop; /* Top of current mcode area. */
+ MCode *mcbot; /* Bottom of current mcode area. */
+ size_t szmcarea; /* Size of current mcode area. */
+ size_t szallmcarea; /* Total size of all allocated mcode areas. */
+
+ TValue errinfo; /* Additional info element for trace errors. */
+}
+#if LJ_TARGET_ARM
+LJ_ALIGN(16) /* For DISPATCH-relative addresses in assembler part. */
+#endif
+jit_State;
+
+/* Trivial PRNG e.g. used for penalty randomization. */
+/* Advance the trivial LCG PRNG and return its top 'bits' bits. */
+static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits)
+{
+  /* Weak by design -- only used for e.g. penalty randomization. */
+  uint32_t state = J->prngstate * 1103515245 + 12345;
+  J->prngstate = state;
+  return state >> (32 - bits);
+}
+
+#endif
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_lex.c
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_lex.c b/lib/luajit/src/lj_lex.c
new file mode 100644
index 0000000..e1dc3cd
--- /dev/null
+++ b/lib/luajit/src/lj_lex.c
@@ -0,0 +1,482 @@
+/*
+** Lexical analyzer.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_lex_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#if LJ_HASFFI
+#include "lj_tab.h"
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lualib.h"
+#endif
+#include "lj_state.h"
+#include "lj_lex.h"
+#include "lj_parse.h"
+#include "lj_char.h"
+#include "lj_strscan.h"
+
+/* Lua lexer token names. */
+static const char *const tokennames[] = {
+#define TKSTR1(name) #name,
+#define TKSTR2(name, sym) #sym,
+TKDEF(TKSTR1, TKSTR2)
+#undef TKSTR1
+#undef TKSTR2
+ NULL
+};
+
+/* -- Buffer handling ----------------------------------------------------- */
+
+#define char2int(c) ((int)(uint8_t)(c))
+#define next(ls) \
+ (ls->current = (ls->n--) > 0 ? char2int(*ls->p++) : fillbuf(ls))
+#define save_and_next(ls) (save(ls, ls->current), next(ls))
+#define currIsNewline(ls) (ls->current == '\n' || ls->current == '\r')
+#define END_OF_STREAM (-1)
+
+/* Refill the input buffer from the reader callback.
+** Returns the first character of the new chunk, or END_OF_STREAM
+** when the reader signals end of input (NULL or empty chunk).
+*/
+static int fillbuf(LexState *ls)
+{
+  size_t len;
+  const char *chunk = ls->rfunc(ls->L, ls->rdata, &len);
+  if (chunk == NULL || len == 0)
+    return END_OF_STREAM;
+  ls->p = chunk;
+  ls->n = (MSize)len - 1;  /* One character is consumed right away. */
+  return char2int(*(ls->p++));
+}
+
+/* Out-of-line slow path for save(): double the token buffer, then store c. */
+static LJ_NOINLINE void save_grow(LexState *ls, int c)
+{
+  MSize grown;
+  if (ls->sb.sz >= LJ_MAX_STR/2)
+    lj_lex_error(ls, 0, LJ_ERR_XELEM);  /* Token would exceed string limit. */
+  grown = ls->sb.sz * 2;
+  lj_str_resizebuf(ls->L, &ls->sb, grown);
+  ls->sb.buf[ls->sb.n++] = (char)c;
+}
+
+/* Append one character to the token buffer, growing it when full. */
+static LJ_AINLINE void save(LexState *ls, int c)
+{
+  if (LJ_LIKELY(ls->sb.n + 1 <= ls->sb.sz))
+    ls->sb.buf[ls->sb.n++] = (char)c;
+  else
+    save_grow(ls, c);  /* Rare: buffer is full. */
+}
+
+/* Consume a newline sequence and bump the line counter.
+** Handles all four conventions: \n, \r, \r\n and \n\r.
+*/
+static void inclinenumber(LexState *ls)
+{
+  int old = ls->current;
+  lua_assert(currIsNewline(ls));
+  next(ls);  /* skip `\n' or `\r' */
+  if (currIsNewline(ls) && ls->current != old)
+    next(ls);  /* skip `\n\r' or `\r\n' */
+  if (++ls->linenumber >= LJ_MAX_LINE)
+    lj_lex_error(ls, ls->token, LJ_ERR_XLINES);
+}
+
+/* -- Scanner for terminals ----------------------------------------------- */
+
+/* Parse a number literal into *tv. The current char is known to be a
+** digit; scans identifier chars, one radix point and signed exponents
+** ('e', or 'p' for hex literals), then hands the collected text to
+** lj_strscan_scan(). FFI int64/uint64/imaginary literals are boxed as
+** cdata and anchored via lj_parse_keepcdata().
+*/
+static void lex_number(LexState *ls, TValue *tv)
+{
+  StrScanFmt fmt;
+  int c, xp = 'e';
+  lua_assert(lj_char_isdigit(ls->current));
+  if ((c = ls->current) == '0') {
+    save_and_next(ls);
+    if ((ls->current | 0x20) == 'x') xp = 'p';  /* Hex literal: binary exponent marker. */
+  }
+  while (lj_char_isident(ls->current) || ls->current == '.' ||
+         ((ls->current == '-' || ls->current == '+') && (c | 0x20) == xp)) {
+    c = ls->current;
+    save_and_next(ls);
+  }
+  save(ls, '\0');  /* NUL-terminate for the string scanner. */
+  fmt = lj_strscan_scan((const uint8_t *)ls->sb.buf, tv,
+          (LJ_DUALNUM ? STRSCAN_OPT_TOINT : STRSCAN_OPT_TONUM) |
+          (LJ_HASFFI ? (STRSCAN_OPT_LL|STRSCAN_OPT_IMAG) : 0));
+  if (LJ_DUALNUM && fmt == STRSCAN_INT) {
+    setitype(tv, LJ_TISNUM);
+  } else if (fmt == STRSCAN_NUM) {
+    /* Already in correct format. */
+#if LJ_HASFFI
+  } else if (fmt != STRSCAN_ERROR) {
+    lua_State *L = ls->L;
+    GCcdata *cd;
+    lua_assert(fmt == STRSCAN_I64 || fmt == STRSCAN_U64 || fmt == STRSCAN_IMAG);
+    if (!ctype_ctsG(G(L))) {
+      ptrdiff_t oldtop = savestack(L, L->top);
+      luaopen_ffi(L);  /* Load FFI library on-demand. */
+      L->top = restorestack(L, oldtop);
+    }
+    if (fmt == STRSCAN_IMAG) {
+      cd = lj_cdata_new_(L, CTID_COMPLEX_DOUBLE, 2*sizeof(double));
+      ((double *)cdataptr(cd))[0] = 0;
+      ((double *)cdataptr(cd))[1] = numV(tv);
+    } else {
+      cd = lj_cdata_new_(L, fmt==STRSCAN_I64 ? CTID_INT64 : CTID_UINT64, 8);
+      *(uint64_t *)cdataptr(cd) = tv->u64;
+    }
+    lj_parse_keepcdata(ls, tv, cd);
+#endif
+  } else {
+    lua_assert(fmt == STRSCAN_ERROR);
+    lj_lex_error(ls, TK_number, LJ_ERR_XNUMBER);
+  }
+}
+
+/* Count the '='s of a long-bracket separator ("[==[" or "]==]").
+** Returns the level (>= 0) if a matching second bracket follows,
+** otherwise a negative value; -1 specifically means a lone '[' or ']'
+** with no '='s. The scanned chars are saved into the token buffer.
+*/
+static int skip_sep(LexState *ls)
+{
+  int count = 0;
+  int s = ls->current;
+  lua_assert(s == '[' || s == ']');
+  save_and_next(ls);
+  while (ls->current == '=') {
+    save_and_next(ls);
+    count++;
+  }
+  return (ls->current == s) ? count : (-count) - 1;
+}
+
+/* Read a long string "[=*[ ... ]=*]" into *tv, or skip a long comment
+** when tv is NULL. 'sep' is the separator level from skip_sep().
+*/
+static void read_long_string(LexState *ls, TValue *tv, int sep)
+{
+  save_and_next(ls);  /* skip 2nd `[' */
+  if (currIsNewline(ls))  /* string starts with a newline? */
+    inclinenumber(ls);  /* skip it */
+  for (;;) {
+    switch (ls->current) {
+    case END_OF_STREAM:
+      lj_lex_error(ls, TK_eof, tv ? LJ_ERR_XLSTR : LJ_ERR_XLCOM);
+      break;
+    case ']':
+      if (skip_sep(ls) == sep) {
+        save_and_next(ls);  /* skip 2nd `]' */
+        goto endloop;
+      }
+      break;
+    case '\n':
+    case '\r':
+      save(ls, '\n');  /* Newlines are normalized to '\n'. */
+      inclinenumber(ls);
+      if (!tv) lj_str_resetbuf(&ls->sb);  /* avoid wasting space */
+      break;
+    default:
+      if (tv) save_and_next(ls);
+      else next(ls);
+      break;
+    }
+  } endloop:
+  if (tv) {
+    /* Strip the "[=*[" / "]=*]" delimiters saved at both ends. */
+    GCstr *str = lj_parse_keepstr(ls, ls->sb.buf + (2 + (MSize)sep),
+                                  ls->sb.n - 2*(2 + (MSize)sep));
+    setstrV(ls->L, tv, str);
+  }
+}
+
+/* Read a quoted string with delimiter 'delim' into *tv, handling the
+** escape sequences \a \b \f \n \r \t \v, \xXX, \z (skip whitespace),
+** \ddd (decimal, max 255) and escaped newlines. Unterminated strings
+** and bad escapes raise a lexer error. The surrounding quotes are
+** stripped from the interned result.
+*/
+static void read_string(LexState *ls, int delim, TValue *tv)
+{
+  save_and_next(ls);
+  while (ls->current != delim) {
+    switch (ls->current) {
+    case END_OF_STREAM:
+      lj_lex_error(ls, TK_eof, LJ_ERR_XSTR);
+      continue;
+    case '\n':
+    case '\r':
+      lj_lex_error(ls, TK_string, LJ_ERR_XSTR);  /* Unescaped newline. */
+      continue;
+    case '\\': {
+      int c = next(ls);  /* Skip the '\\'. */
+      switch (c) {
+      case 'a': c = '\a'; break;
+      case 'b': c = '\b'; break;
+      case 'f': c = '\f'; break;
+      case 'n': c = '\n'; break;
+      case 'r': c = '\r'; break;
+      case 't': c = '\t'; break;
+      case 'v': c = '\v'; break;
+      case 'x':  /* Hexadecimal escape '\xXX'. */
+        c = (next(ls) & 15u) << 4;
+        if (!lj_char_isdigit(ls->current)) {
+          if (!lj_char_isxdigit(ls->current)) goto err_xesc;
+          c += 9 << 4;  /* Adjust 'a'-'f'/'A'-'F' nibble. */
+        }
+        c += (next(ls) & 15u);
+        if (!lj_char_isdigit(ls->current)) {
+          if (!lj_char_isxdigit(ls->current)) goto err_xesc;
+          c += 9;
+        }
+        break;
+      case 'z':  /* Skip whitespace. */
+        next(ls);
+        while (lj_char_isspace(ls->current))
+          if (currIsNewline(ls)) inclinenumber(ls); else next(ls);
+        continue;
+      case '\n': case '\r': save(ls, '\n'); inclinenumber(ls); continue;
+      case '\\': case '\"': case '\'': break;
+      case END_OF_STREAM: continue;
+      default:
+        if (!lj_char_isdigit(c))
+          goto err_xesc;
+        c -= '0';  /* Decimal escape '\ddd'. */
+        if (lj_char_isdigit(next(ls))) {
+          c = c*10 + (ls->current - '0');
+          if (lj_char_isdigit(next(ls))) {
+            c = c*10 + (ls->current - '0');
+            if (c > 255) {
+            err_xesc:
+              lj_lex_error(ls, TK_string, LJ_ERR_XESC);
+            }
+            next(ls);
+          }
+        }
+        save(ls, c);
+        continue;
+      }
+      save(ls, c);
+      next(ls);
+      continue;
+    }
+    default:
+      save_and_next(ls);
+      break;
+    }
+  }
+  save_and_next(ls);  /* skip delimiter */
+  setstrV(ls->L, tv, lj_parse_keepstr(ls, ls->sb.buf + 1, ls->sb.n - 2));
+}
+
+/* -- Main lexical scanner ------------------------------------------------ */
+
+/* Scan and return the next token; literal values are stored in *tv.
+** Returns a TK_* constant for multi-char tokens and reserved words,
+** or the character itself for single-char tokens.
+*/
+static int llex(LexState *ls, TValue *tv)
+{
+  lj_str_resetbuf(&ls->sb);
+  for (;;) {
+    if (lj_char_isident(ls->current)) {
+      GCstr *s;
+      if (lj_char_isdigit(ls->current)) {  /* Numeric literal. */
+        lex_number(ls, tv);
+        return TK_number;
+      }
+      /* Identifier or reserved word. */
+      do {
+        save_and_next(ls);
+      } while (lj_char_isident(ls->current));
+      s = lj_parse_keepstr(ls, ls->sb.buf, ls->sb.n);
+      setstrV(ls->L, tv, s);
+      if (s->reserved > 0)  /* Reserved word? */
+        return TK_OFS + s->reserved;
+      return TK_name;
+    }
+    switch (ls->current) {
+    case '\n':
+    case '\r':
+      inclinenumber(ls);
+      continue;
+    case ' ':
+    case '\t':
+    case '\v':
+    case '\f':
+      next(ls);  /* Skip whitespace. */
+      continue;
+    case '-':
+      next(ls);
+      if (ls->current != '-') return '-';
+      /* else is a comment */
+      next(ls);
+      if (ls->current == '[') {
+        int sep = skip_sep(ls);
+        lj_str_resetbuf(&ls->sb);  /* `skip_sep' may dirty the buffer */
+        if (sep >= 0) {
+          read_long_string(ls, NULL, sep);  /* long comment */
+          lj_str_resetbuf(&ls->sb);
+          continue;
+        }
+      }
+      /* else short comment */
+      while (!currIsNewline(ls) && ls->current != END_OF_STREAM)
+        next(ls);
+      continue;
+    case '[': {
+      int sep = skip_sep(ls);
+      if (sep >= 0) {
+        read_long_string(ls, tv, sep);
+        return TK_string;
+      } else if (sep == -1) {  /* Plain '[' without '='s. */
+        return '[';
+      } else {
+        lj_lex_error(ls, TK_string, LJ_ERR_XLDELIM);
+        continue;
+      }
+    }
+    case '=':
+      next(ls);
+      if (ls->current != '=') return '='; else { next(ls); return TK_eq; }
+    case '<':
+      next(ls);
+      if (ls->current != '=') return '<'; else { next(ls); return TK_le; }
+    case '>':
+      next(ls);
+      if (ls->current != '=') return '>'; else { next(ls); return TK_ge; }
+    case '~':
+      next(ls);
+      if (ls->current != '=') return '~'; else { next(ls); return TK_ne; }
+    case ':':
+      next(ls);
+      if (ls->current != ':') return ':'; else { next(ls); return TK_label; }
+    case '"':
+    case '\'':
+      read_string(ls, ls->current, tv);
+      return TK_string;
+    case '.':
+      save_and_next(ls);
+      if (ls->current == '.') {
+        next(ls);
+        if (ls->current == '.') {
+          next(ls);
+          return TK_dots;  /* ... */
+        }
+        return TK_concat;  /* .. */
+      } else if (!lj_char_isdigit(ls->current)) {
+        return '.';
+      } else {  /* Number starting with a radix point, e.g. ".5". */
+        lex_number(ls, tv);
+        return TK_number;
+      }
+    case END_OF_STREAM:
+      return TK_eof;
+    default: {
+      int c = ls->current;
+      next(ls);
+      return c;  /* Single-char tokens (+ - / ...). */
+    }
+    }
+  }
+}
+
+/* -- Lexer API ----------------------------------------------------------- */
+
+/* Setup lexer state. Reads ahead the first character and skips an
+** optional UTF-8 BOM and a POSIX "#!" header line. Returns 1 if the
+** input is a bytecode dump (starts with LUA_SIGNATURE), else 0 for
+** source text; bytecode preceded by a BOM/#! header is rejected.
+*/
+int lj_lex_setup(lua_State *L, LexState *ls)
+{
+  int header = 0;  /* Set if a BOM or #! line preceded the content. */
+  ls->L = L;
+  ls->fs = NULL;
+  ls->n = 0;
+  ls->p = NULL;
+  ls->vstack = NULL;
+  ls->sizevstack = 0;
+  ls->vtop = 0;
+  ls->bcstack = NULL;
+  ls->sizebcstack = 0;
+  ls->token = 0;
+  ls->lookahead = TK_eof;  /* No look-ahead token. */
+  ls->linenumber = 1;
+  ls->lastline = 1;
+  lj_str_resizebuf(ls->L, &ls->sb, LJ_MIN_SBUF);
+  next(ls);  /* Read-ahead first char. */
+  if (ls->current == 0xef && ls->n >= 2 && char2int(ls->p[0]) == 0xbb &&
+      char2int(ls->p[1]) == 0xbf) {  /* Skip UTF-8 BOM (if buffered). */
+    ls->n -= 2;
+    ls->p += 2;
+    next(ls);
+    header = 1;
+  }
+  if (ls->current == '#') {  /* Skip POSIX #! header line. */
+    do {
+      next(ls);
+      if (ls->current == END_OF_STREAM) return 0;
+    } while (!currIsNewline(ls));
+    inclinenumber(ls);
+    header = 1;
+  }
+  if (ls->current == LUA_SIGNATURE[0]) {  /* Bytecode dump. */
+    if (header) {
+      /*
+      ** Loading bytecode with an extra header is disabled for security
+      ** reasons. This may circumvent the usual check for bytecode vs.
+      ** Lua code by looking at the first char. Since this is a potential
+      ** security violation no attempt is made to echo the chunkname either.
+      */
+      setstrV(L, L->top++, lj_err_str(L, LJ_ERR_BCBAD));
+      lj_err_throw(L, LUA_ERRSYNTAX);
+    }
+    return 1;
+  }
+  return 0;
+}
+
+/* Cleanup lexer state: release all memory owned by the lexer. */
+void lj_lex_cleanup(lua_State *L, LexState *ls)
+{
+  global_State *glob = G(L);
+  lj_str_freebuf(glob, &ls->sb);  /* Token buffer. */
+  lj_mem_freevec(glob, ls->vstack, ls->sizevstack, VarInfo);
+  lj_mem_freevec(glob, ls->bcstack, ls->sizebcstack, BCInsLine);
+}
+
+/* Advance to the next token, consuming a buffered lookahead token first. */
+void lj_lex_next(LexState *ls)
+{
+  ls->lastline = ls->linenumber;
+  if (LJ_UNLIKELY(ls->lookahead != TK_eof)) {
+    /* A lookahead token is buffered: hand it out and clear the buffer. */
+    ls->token = ls->lookahead;
+    ls->tokenval = ls->lookaheadval;
+    ls->lookahead = TK_eof;
+  } else {
+    ls->token = llex(ls, &ls->tokenval);  /* Scan a fresh token. */
+  }
+}
+
+/* Scan one token ahead without consuming it. */
+LexToken lj_lex_lookahead(LexState *ls)
+{
+  LexToken tok;
+  lua_assert(ls->lookahead == TK_eof);  /* Only one token of lookahead. */
+  tok = llex(ls, &ls->lookaheadval);
+  ls->lookahead = tok;
+  return tok;
+}
+
+const char *lj_lex_token2str(LexState *ls, LexToken token)
+{
+ if (token > TK_OFS)
+ return tokennames[token-TK_OFS-1];
+ else if (!lj_char_iscntrl(token))
+ return lj_str_pushf(ls->L, "%c", token);
+ else
+ return lj_str_pushf(ls->L, "char(%d)", token);
+}
+
+/* Raise a lexer/parser error. 'token' selects the offending token text
+** (0 = no token shown; TK_name/TK_string/TK_number use the current
+** buffer contents). Formats 'em' with the varargs and throws via
+** lj_err_lex(); declared LJ_FUNC_NORET, so it does not return.
+*/
+void lj_lex_error(LexState *ls, LexToken token, ErrMsg em, ...)
+{
+  const char *tok;
+  va_list argp;
+  if (token == 0) {
+    tok = NULL;
+  } else if (token == TK_name || token == TK_string || token == TK_number) {
+    save(ls, '\0');  /* NUL-terminate the buffered token text. */
+    tok = ls->sb.buf;
+  } else {
+    tok = lj_lex_token2str(ls, token);
+  }
+  va_start(argp, em);
+  lj_err_lex(ls->L, ls->chunkname, tok, ls->linenumber, em, argp);
+  va_end(argp);
+}
+
+/* Intern all reserved words and mark them as such. */
+void lj_lex_init(lua_State *L)
+{
+  uint32_t idx;
+  for (idx = 0; idx < TK_RESERVED; idx++) {
+    GCstr *word = lj_str_newz(L, tokennames[idx]);
+    word->reserved = (uint8_t)(idx+1);  /* Nonzero id marks a reserved word. */
+    fixstring(word);  /* Reserved words are never collected. */
+  }
+}
+
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_lex.h
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_lex.h b/lib/luajit/src/lj_lex.h
new file mode 100644
index 0000000..fe01768
--- /dev/null
+++ b/lib/luajit/src/lj_lex.h
@@ -0,0 +1,85 @@
+/*
+** Lexical analyzer.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_LEX_H
+#define _LJ_LEX_H
+
+#include <stdarg.h>
+
+#include "lj_obj.h"
+#include "lj_err.h"
+
+/* Lua lexer tokens. */
+#define TKDEF(_, __) \
+ _(and) _(break) _(do) _(else) _(elseif) _(end) _(false) \
+ _(for) _(function) _(goto) _(if) _(in) _(local) _(nil) _(not) _(or) \
+ _(repeat) _(return) _(then) _(true) _(until) _(while) \
+ __(concat, ..) __(dots, ...) __(eq, ==) __(ge, >=) __(le, <=) __(ne, ~=) \
+ __(label, ::) __(number, <number>) __(name, <name>) __(string, <string>) \
+ __(eof, <eof>)
+
+enum {
+ TK_OFS = 256,
+#define TKENUM1(name) TK_##name,
+#define TKENUM2(name, sym) TK_##name,
+TKDEF(TKENUM1, TKENUM2)
+#undef TKENUM1
+#undef TKENUM2
+ TK_RESERVED = TK_while - TK_OFS
+};
+
+typedef int LexToken;
+
+/* Combined bytecode ins/line. Only used during bytecode generation. */
+typedef struct BCInsLine {
+ BCIns ins; /* Bytecode instruction. */
+ BCLine line; /* Line number for this bytecode. */
+} BCInsLine;
+
+/* Info for local variables. Only used during bytecode generation. */
+typedef struct VarInfo {
+ GCRef name; /* Local variable name or goto/label name. */
+ BCPos startpc; /* First point where the local variable is active. */
+ BCPos endpc; /* First point where the local variable is dead. */
+ uint8_t slot; /* Variable slot. */
+ uint8_t info; /* Variable/goto/label info. */
+} VarInfo;
+
+/* Lua lexer state. */
+typedef struct LexState {
+ struct FuncState *fs; /* Current FuncState. Defined in lj_parse.c. */
+ struct lua_State *L; /* Lua state. */
+ TValue tokenval; /* Current token value. */
+ TValue lookaheadval; /* Lookahead token value. */
+ int current; /* Current character (charint). */
+ LexToken token; /* Current token. */
+ LexToken lookahead; /* Lookahead token. */
+ MSize n; /* Bytes left in input buffer. */
+ const char *p; /* Current position in input buffer. */
+ SBuf sb; /* String buffer for tokens. */
+ lua_Reader rfunc; /* Reader callback. */
+ void *rdata; /* Reader callback data. */
+ BCLine linenumber; /* Input line counter. */
+ BCLine lastline; /* Line of last token. */
+ GCstr *chunkname; /* Current chunk name (interned string). */
+ const char *chunkarg; /* Chunk name argument. */
+ const char *mode; /* Allow loading bytecode (b) and/or source text (t). */
+ VarInfo *vstack; /* Stack for names and extents of local variables. */
+ MSize sizevstack; /* Size of variable stack. */
+ MSize vtop; /* Top of variable stack. */
+ BCInsLine *bcstack; /* Stack for bytecode instructions/line numbers. */
+ MSize sizebcstack; /* Size of bytecode stack. */
+ uint32_t level; /* Syntactical nesting level. */
+} LexState;
+
+LJ_FUNC int lj_lex_setup(lua_State *L, LexState *ls);
+LJ_FUNC void lj_lex_cleanup(lua_State *L, LexState *ls);
+LJ_FUNC void lj_lex_next(LexState *ls);
+LJ_FUNC LexToken lj_lex_lookahead(LexState *ls);
+LJ_FUNC const char *lj_lex_token2str(LexState *ls, LexToken token);
+LJ_FUNC_NORET void lj_lex_error(LexState *ls, LexToken token, ErrMsg em, ...);
+LJ_FUNC void lj_lex_init(lua_State *L);
+
+#endif
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_lib.c
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_lib.c b/lib/luajit/src/lj_lib.c
new file mode 100644
index 0000000..856685e
--- /dev/null
+++ b/lib/luajit/src/lj_lib.c
@@ -0,0 +1,258 @@
+/*
+** Library function support.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_lib_c
+#define LUA_CORE
+
+#include "lauxlib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_bc.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_strscan.h"
+#include "lj_lib.h"
+
+/* -- Library initialization ---------------------------------------------- */
+
+/*
+** Create or look up the table a library registers into. With a libname,
+** reuse _LOADED[libname] when it is already a table; otherwise find or
+** create the global of that name and cache it in _LOADED. Without a
+** libname, create a fresh table. The table is left on top of the stack.
+*/
+static GCtab *lib_create_table(lua_State *L, const char *libname, int hsize)
+{
+ if (libname) {
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16);
+ lua_getfield(L, -1, libname);
+ if (!tvistab(L->top-1)) {
+ L->top--; /* Drop non-table _LOADED[libname]. */
+ if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, hsize) != NULL)
+ lj_err_callerv(L, LJ_ERR_BADMODN, libname);
+ settabV(L, L->top, tabV(L->top-1)); /* Duplicate table reference. */
+ L->top++;
+ lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */
+ }
+ L->top--;
+ settabV(L, L->top-1, tabV(L->top)); /* Overwrite _LOADED slot with table. */
+ } else {
+ lua_createtable(L, 0, hsize);
+ }
+ return tabV(L->top-1);
+}
+
+/*
+** Register a library. p is a buildvm-generated init byte stream and cf the
+** parallel array of C functions/handlers it consumes. Stream prefix:
+** fast-function id, ASM bytecode offset, table hash size; then a sequence
+** of tagged entries (LIBINIT_* tags) terminated by LIBINIT_END.
+*/
+void lj_lib_register(lua_State *L, const char *libname,
+ const uint8_t *p, const lua_CFunction *cf)
+{
+ GCtab *env = tabref(L->env);
+ GCfunc *ofn = NULL; /* Previously created function (for ASM_/LASTCL). */
+ int ffid = *p++; /* First fast-function id of this library. */
+ BCIns *bcff = &L2GG(L)->bcff[*p++]; /* First ASM fallback bytecode slot. */
+ GCtab *tab = lib_create_table(L, libname, *p++);
+ ptrdiff_t tpos = L->top - L->base; /* Stack position of the library table. */
+
+ /* Avoid barriers further down. */
+ lj_gc_anybarriert(L, tab);
+ tab->nomm = 0;
+
+ for (;;) {
+ uint32_t tag = *p++;
+ MSize len = tag & LIBINIT_LENMASK;
+ tag &= LIBINIT_TAGMASK;
+ if (tag != LIBINIT_STRING) {
+ /* Function entry: consume pushed values above tpos as upvalues. */
+ const char *name;
+ MSize nuv = (MSize)(L->top - L->base - tpos);
+ GCfunc *fn = lj_func_newC(L, nuv, env);
+ if (nuv) {
+ L->top = L->base + tpos;
+ memcpy(fn->c.upvalue, L->top, sizeof(TValue)*nuv);
+ }
+ fn->c.ffid = (uint8_t)(ffid++);
+ name = (const char *)p;
+ p += len;
+ if (tag == LIBINIT_CF)
+ setmref(fn->c.pc, &G(L)->bc_cfunc_int);
+ else
+ setmref(fn->c.pc, bcff++); /* ASM function: own fallback bytecode. */
+ if (tag == LIBINIT_ASM_)
+ fn->c.f = ofn->c.f; /* Copy handler from previous function. */
+ else
+ fn->c.f = *cf++; /* Get cf or handler from C function table. */
+ if (len) {
+ /* NOBARRIER: See above for common barrier. */
+ setfuncV(L, lj_tab_setstr(L, tab, lj_str_new(L, name, len)), fn);
+ }
+ ofn = fn;
+ } else {
+ /* Non-function entry, dispatched on the full tag byte. */
+ switch (tag | len) {
+ case LIBINIT_SET:
+ L->top -= 2; /* Pop key and value pushed by earlier entries. */
+ if (tvisstr(L->top+1) && strV(L->top+1)->len == 0)
+ env = tabV(L->top); /* Empty-string key: set new function env. */
+ else /* NOBARRIER: See above for common barrier. */
+ copyTV(L, lj_tab_set(L, tab, L->top+1), L->top);
+ break;
+ case LIBINIT_NUMBER:
+ memcpy(&L->top->n, p, sizeof(double)); /* Inline double constant. */
+ L->top++;
+ p += sizeof(double);
+ break;
+ case LIBINIT_COPY:
+ copyTV(L, L->top, L->top - *p++); /* Duplicate an earlier stack slot. */
+ L->top++;
+ break;
+ case LIBINIT_LASTCL:
+ setfuncV(L, L->top++, ofn); /* Push previously created function. */
+ break;
+ case LIBINIT_FFID:
+ ffid++; /* Skip a fast-function id without creating a function. */
+ break;
+ case LIBINIT_END:
+ return;
+ default: /* Interned string constant of length len. */
+ setstrV(L, L->top++, lj_str_new(L, (const char *)p, len));
+ p += len;
+ break;
+ }
+ }
+ }
+}
+
+/* -- Type checks --------------------------------------------------------- */
+
+TValue *lj_lib_checkany(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (o >= L->top)
+ lj_err_arg(L, narg, LJ_ERR_NOVAL);
+ return o;
+}
+
+/*
+** Check that argument narg is a string, coercing a number argument to a
+** string in place on the stack. Throws a type error otherwise.
+*/
+GCstr *lj_lib_checkstr(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (o < L->top) {
+ if (LJ_LIKELY(tvisstr(o))) {
+ return strV(o);
+ } else if (tvisnumber(o)) {
+ GCstr *s = lj_str_fromnumber(L, o);
+ setstrV(L, o, s); /* Replace the number with the coerced string. */
+ return s;
+ }
+ }
+ lj_err_argt(L, narg, LUA_TSTRING);
+ return NULL; /* unreachable */
+}
+
+/* Optional string argument: NULL when absent or nil, else checked string. */
+GCstr *lj_lib_optstr(lua_State *L, int narg)
+{
+  TValue *slot = L->base + (narg - 1);
+  if (slot >= L->top || tvisnil(slot))
+    return NULL;
+  return lj_lib_checkstr(L, narg);
+}
+
+#if LJ_DUALNUM
+/* Check that argument narg is (coercible to) a number; no result needed. */
+void lj_lib_checknumber(lua_State *L, int narg)
+{
+  TValue *slot = L->base + (narg - 1);
+  if (slot >= L->top || !lj_strscan_numberobj(slot))
+    lj_err_argt(L, narg, LUA_TNUMBER);
+}
+#endif
+
+/*
+** Check argument narg and return it as a lua_Number. A numeric string is
+** parsed and replaced in place; an integer slot is widened to a number
+** in place (relevant for LJ_DUALNUM builds).
+*/
+lua_Number lj_lib_checknum(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top &&
+ (tvisnumber(o) || (tvisstr(o) && lj_strscan_num(strV(o), o)))))
+ lj_err_argt(L, narg, LUA_TNUMBER);
+ if (LJ_UNLIKELY(tvisint(o))) {
+ lua_Number n = (lua_Number)intV(o);
+ setnumV(o, n); /* Canonicalize the slot to a number. */
+ return n;
+ } else {
+ return numV(o);
+ }
+}
+
+/* Check argument narg and return it as an int32_t (number truncated). */
+int32_t lj_lib_checkint(lua_State *L, int narg)
+{
+  TValue *slot = L->base + (narg - 1);
+  int32_t i;
+  if (slot >= L->top || !lj_strscan_numberobj(slot))
+    lj_err_argt(L, narg, LUA_TNUMBER);
+  if (LJ_LIKELY(tvisint(slot)))
+    return intV(slot);
+  i = lj_num2int(numV(slot));
+  if (LJ_DUALNUM) setintV(slot, i);  /* Canonicalize slot to integer. */
+  return i;
+}
+
+/* Optional integer argument: def when absent or nil, else checked int. */
+int32_t lj_lib_optint(lua_State *L, int narg, int32_t def)
+{
+  TValue *slot = L->base + (narg - 1);
+  if (slot >= L->top || tvisnil(slot))
+    return def;
+  return lj_lib_checkint(L, narg);
+}
+
+/* Check argument narg and return it as an int32_t for bit operations. */
+int32_t lj_lib_checkbit(lua_State *L, int narg)
+{
+  TValue *slot = L->base + (narg - 1);
+  int32_t i;
+  if (slot >= L->top || !lj_strscan_numberobj(slot))
+    lj_err_argt(L, narg, LUA_TNUMBER);
+  if (LJ_LIKELY(tvisint(slot)))
+    return intV(slot);
+  i = lj_num2bit(numV(slot));
+  if (LJ_DUALNUM) setintV(slot, i);  /* Canonicalize slot to integer. */
+  return i;
+}
+
+/* Check that argument narg is a function and return it. */
+GCfunc *lj_lib_checkfunc(lua_State *L, int narg)
+{
+  TValue *slot = L->base + (narg - 1);
+  if (slot >= L->top || !tvisfunc(slot))
+    lj_err_argt(L, narg, LUA_TFUNCTION);
+  return funcV(slot);
+}
+
+/* Check that argument narg is a table and return it. */
+GCtab *lj_lib_checktab(lua_State *L, int narg)
+{
+  TValue *slot = L->base + (narg - 1);
+  if (slot >= L->top || !tvistab(slot))
+    lj_err_argt(L, narg, LUA_TTABLE);
+  return tabV(slot);
+}
+
+/* Check that argument narg is a table or nil; nil yields NULL. */
+GCtab *lj_lib_checktabornil(lua_State *L, int narg)
+{
+  TValue *slot = L->base + (narg - 1);
+  if (slot < L->top) {
+    if (tvisnil(slot))
+      return NULL;
+    if (tvistab(slot))
+      return tabV(slot);
+  }
+  lj_err_arg(L, narg, LJ_ERR_NOTABN);
+  return NULL;  /* unreachable */
+}
+
+/*
+** Match a string option argument against lst, a concatenation of
+** length-prefixed strings ("\3foo\3bar..."), terminated by a zero length
+** byte. Returns the 0-based index of the match. With def >= 0 the
+** argument is optional and def is returned when it is absent/nil;
+** otherwise a missing argument or no match throws an error.
+*/
+int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst)
+{
+ GCstr *s = def >= 0 ? lj_lib_optstr(L, narg) : lj_lib_checkstr(L, narg);
+ if (s) {
+ const char *opt = strdata(s);
+ MSize len = s->len;
+ int i;
+ for (i = 0; *(const uint8_t *)lst; i++) {
+ if (*(const uint8_t *)lst == len && memcmp(opt, lst+1, len) == 0)
+ return i;
+ lst += 1+*(const uint8_t *)lst; /* Skip length byte plus string. */
+ }
+ lj_err_argv(L, narg, LJ_ERR_INVOPTM, opt);
+ }
+ return def;
+}
+
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_lib.h
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_lib.h b/lib/luajit/src/lj_lib.h
new file mode 100644
index 0000000..9320f34
--- /dev/null
+++ b/lib/luajit/src/lj_lib.h
@@ -0,0 +1,112 @@
+/*
+** Library function support.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_LIB_H
+#define _LJ_LIB_H
+
+#include "lj_obj.h"
+
+/*
+** A fallback handler is called by the assembler VM if the fast path fails:
+**
+** - too few arguments: unrecoverable.
+** - wrong argument type: recoverable, if coercion succeeds.
+** - bad argument value: unrecoverable.
+** - stack overflow: recoverable, if stack reallocation succeeds.
+** - extra handling: recoverable.
+**
+** The unrecoverable cases throw an error with lj_err_arg(), lj_err_argtype(),
+** lj_err_caller() or lj_err_callermsg().
+** The recoverable cases return 0 or the number of results + 1.
+** The assembler VM retries the fast path only if 0 is returned.
+** This time the fallback must not be called again or it gets stuck in a loop.
+*/
+
+/* Return values from fallback handler. */
+#define FFH_RETRY 0
+#define FFH_UNREACHABLE FFH_RETRY
+#define FFH_RES(n) ((n)+1)
+#define FFH_TAILCALL (-1)
+
+LJ_FUNC TValue *lj_lib_checkany(lua_State *L, int narg);
+LJ_FUNC GCstr *lj_lib_checkstr(lua_State *L, int narg);
+LJ_FUNC GCstr *lj_lib_optstr(lua_State *L, int narg);
+#if LJ_DUALNUM
+LJ_FUNC void lj_lib_checknumber(lua_State *L, int narg);
+#else
+#define lj_lib_checknumber(L, narg) lj_lib_checknum((L), (narg))
+#endif
+LJ_FUNC lua_Number lj_lib_checknum(lua_State *L, int narg);
+LJ_FUNC int32_t lj_lib_checkint(lua_State *L, int narg);
+LJ_FUNC int32_t lj_lib_optint(lua_State *L, int narg, int32_t def);
+LJ_FUNC int32_t lj_lib_checkbit(lua_State *L, int narg);
+LJ_FUNC GCfunc *lj_lib_checkfunc(lua_State *L, int narg);
+LJ_FUNC GCtab *lj_lib_checktab(lua_State *L, int narg);
+LJ_FUNC GCtab *lj_lib_checktabornil(lua_State *L, int narg);
+LJ_FUNC int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst);
+
+/* Avoid including lj_frame.h. */
+#define lj_lib_upvalue(L, n) \
+ (&gcref((L->base-1)->fr.func)->fn.c.upvalue[(n)-1])
+
+#if LJ_TARGET_WINDOWS
+#define lj_lib_checkfpu(L) \
+ do { setnumV(L->top++, (lua_Number)1437217655); \
+ if (lua_tointeger(L, -1) != 1437217655) lj_err_caller(L, LJ_ERR_BADFPU); \
+ L->top--; } while (0)
+#else
+#define lj_lib_checkfpu(L) UNUSED(L)
+#endif
+
+/* Push internal function on the stack. */
+/*
+** Push an internal C closure with n upvalues, then patch its fast-function
+** id and pc so it dispatches as an internal function.
+*/
+static LJ_AINLINE void lj_lib_pushcc(lua_State *L, lua_CFunction f,
+ int id, int n)
+{
+ GCfunc *fn;
+ lua_pushcclosure(L, f, n);
+ fn = funcV(L->top-1); /* The closure just pushed. */
+ fn->c.ffid = (uint8_t)id;
+ setmref(fn->c.pc, &G(L)->bc_cfunc_int);
+}
+
+#define lj_lib_pushcf(L, fn, id) (lj_lib_pushcc(L, (fn), (id), 0))
+
+/* Library function declarations. Scanned by buildvm. */
+#define LJLIB_CF(name) static int lj_cf_##name(lua_State *L)
+#define LJLIB_ASM(name) static int lj_ffh_##name(lua_State *L)
+#define LJLIB_ASM_(name)
+#define LJLIB_SET(name)
+#define LJLIB_PUSH(arg)
+#define LJLIB_REC(handler)
+#define LJLIB_NOREGUV
+#define LJLIB_NOREG
+
+#define LJ_LIB_REG(L, regname, name) \
+ lj_lib_register(L, regname, lj_lib_init_##name, lj_lib_cf_##name)
+
+LJ_FUNC void lj_lib_register(lua_State *L, const char *libname,
+ const uint8_t *init, const lua_CFunction *cf);
+
+/* Library init data tags. */
+#define LIBINIT_LENMASK 0x3f
+#define LIBINIT_TAGMASK 0xc0
+#define LIBINIT_CF 0x00
+#define LIBINIT_ASM 0x40
+#define LIBINIT_ASM_ 0x80
+#define LIBINIT_STRING 0xc0
+#define LIBINIT_MAXSTR 0x39
+#define LIBINIT_SET 0xfa
+#define LIBINIT_NUMBER 0xfb
+#define LIBINIT_COPY 0xfc
+#define LIBINIT_LASTCL 0xfd
+#define LIBINIT_FFID 0xfe
+#define LIBINIT_END 0xff
+
+/* Exported library functions. */
+
+typedef struct RandomState RandomState;
+LJ_FUNC uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs);
+
+#endif
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_load.c
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_load.c b/lib/luajit/src/lj_load.c
new file mode 100644
index 0000000..ff7b851
--- /dev/null
+++ b/lib/luajit/src/lj_load.c
@@ -0,0 +1,168 @@
+/*
+** Load and dump code.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include <errno.h>
+#include <stdio.h>
+
+#define lj_load_c
+#define LUA_CORE
+
+#include "lua.h"
+#include "lauxlib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_func.h"
+#include "lj_frame.h"
+#include "lj_vm.h"
+#include "lj_lex.h"
+#include "lj_bcdump.h"
+#include "lj_parse.h"
+
+/* -- Load Lua source code and bytecode ----------------------------------- */
+
+/*
+** Protected parser body, run via lj_vm_cpcall from lua_loadx. Reads the
+** chunk as bytecode or source (per ls->mode) and leaves the resulting
+** closure on top of the stack. Errors are thrown to the protected caller.
+*/
+static TValue *cpparser(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ LexState *ls = (LexState *)ud;
+ GCproto *pt;
+ GCfunc *fn;
+ int bc;
+ UNUSED(dummy);
+ cframe_errfunc(L->cframe) = -1; /* Inherit error function. */
+ bc = lj_lex_setup(L, ls); /* Nonzero if input is precompiled bytecode. */
+ if (ls->mode && !strchr(ls->mode, bc ? 'b' : 't')) {
+ /* Input format not permitted by the mode string. */
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_XMODE));
+ lj_err_throw(L, LUA_ERRSYNTAX);
+ }
+ pt = bc ? lj_bcread(ls) : lj_parse(ls);
+ fn = lj_func_newL_empty(L, pt, tabref(L->env));
+ /* Don't combine above/below into one statement. */
+ setfuncV(L, L->top++, fn);
+ return NULL;
+}
+
+/* Load a chunk from a reader callback, restricted by mode ('b'/'t'). */
+LUA_API int lua_loadx(lua_State *L, lua_Reader reader, void *data,
+                      const char *chunkname, const char *mode)
+{
+  LexState ls;
+  int status;
+  ls.rdata = data;
+  ls.rfunc = reader;
+  ls.mode = mode;
+  ls.chunkarg = chunkname != NULL ? chunkname : "?";
+  lj_str_initbuf(&ls.sb);
+  /* Parse inside a protected call; errors end up on the stack. */
+  status = lj_vm_cpcall(L, NULL, &ls, cpparser);
+  lj_lex_cleanup(L, &ls);
+  lj_gc_check(L);
+  return status;
+}
+
+/* Load a chunk with no restriction on its format (bytecode or text). */
+LUA_API int lua_load(lua_State *L, lua_Reader reader, void *data,
+                     const char *chunkname)
+{
+  return lua_loadx(L, reader, data, chunkname, NULL);
+}
+
+typedef struct FileReaderCtx {
+ FILE *fp;
+ char buf[LUAL_BUFFERSIZE];
+} FileReaderCtx;
+
+/* lua_Reader callback: deliver the next buffer-sized chunk of a file. */
+static const char *reader_file(lua_State *L, void *ud, size_t *size)
+{
+  FileReaderCtx *ctx = (FileReaderCtx *)ud;
+  size_t nread;
+  UNUSED(L);
+  if (feof(ctx->fp))
+    return NULL;
+  nread = fread(ctx->buf, 1, sizeof(ctx->buf), ctx->fp);
+  *size = nread;
+  return nread > 0 ? ctx->buf : NULL;
+}
+
+/*
+** Load a chunk from a file (or stdin when filename is NULL). On success
+** the loaded closure is on top of the stack; on failure an error message
+** is pushed and LUA_ERRFILE (or the load status) is returned.
+*/
+LUALIB_API int luaL_loadfilex(lua_State *L, const char *filename,
+ const char *mode)
+{
+ FileReaderCtx ctx;
+ int status;
+ const char *chunkname;
+ if (filename) {
+ ctx.fp = fopen(filename, "rb");
+ if (ctx.fp == NULL) {
+ lua_pushfstring(L, "cannot open %s: %s", filename, strerror(errno));
+ return LUA_ERRFILE;
+ }
+ chunkname = lua_pushfstring(L, "@%s", filename); /* Stays on the stack. */
+ } else {
+ ctx.fp = stdin;
+ chunkname = "=stdin";
+ }
+ status = lua_loadx(L, reader_file, &ctx, chunkname, mode);
+ if (ferror(ctx.fp)) {
+ /* Drop load result and (for files) the chunkname string. */
+ L->top -= filename ? 2 : 1;
+ lua_pushfstring(L, "cannot read %s: %s", chunkname+1, strerror(errno));
+ if (filename)
+ fclose(ctx.fp);
+ return LUA_ERRFILE;
+ }
+ if (filename) {
+ /* Remove the chunkname string below the load result. */
+ L->top--;
+ copyTV(L, L->top-1, L->top);
+ fclose(ctx.fp);
+ }
+ return status;
+}
+
+/* Load a file chunk with no format restriction. */
+LUALIB_API int luaL_loadfile(lua_State *L, const char *filename)
+{
+  return luaL_loadfilex(L, filename, NULL);
+}
+
+typedef struct StringReaderCtx {
+ const char *str;
+ size_t size;
+} StringReaderCtx;
+
+/* lua_Reader callback: deliver the whole string in a single chunk. */
+static const char *reader_string(lua_State *L, void *ud, size_t *size)
+{
+  StringReaderCtx *ctx = (StringReaderCtx *)ud;
+  size_t left = ctx->size;
+  UNUSED(L);
+  if (left == 0)
+    return NULL;
+  ctx->size = 0;  /* Next call signals end of input. */
+  *size = left;
+  return ctx->str;
+}
+
+/* Load a chunk from a memory buffer, restricted by mode ('b'/'t'). */
+LUALIB_API int luaL_loadbufferx(lua_State *L, const char *buf, size_t size,
+                                const char *name, const char *mode)
+{
+  StringReaderCtx ctx;
+  ctx.size = size;
+  ctx.str = buf;
+  return lua_loadx(L, reader_string, &ctx, name, mode);
+}
+
+/* Load a buffer chunk with no format restriction. */
+LUALIB_API int luaL_loadbuffer(lua_State *L, const char *buf, size_t size,
+                               const char *name)
+{
+  return luaL_loadbufferx(L, buf, size, name, NULL);
+}
+
+/* Load a NUL-terminated string; the string itself serves as chunk name. */
+LUALIB_API int luaL_loadstring(lua_State *L, const char *s)
+{
+  size_t len = strlen(s);
+  return luaL_loadbuffer(L, s, len, s);
+}
+
+/* -- Dump bytecode ------------------------------------------------------- */
+
+/*
+** Dump the Lua function on top of the stack as bytecode via writer.
+** Returns the writer status, or 1 when the value is not a Lua function
+** (C functions cannot be dumped).
+*/
+LUA_API int lua_dump(lua_State *L, lua_Writer writer, void *data)
+{
+ cTValue *o = L->top-1;
+ api_check(L, L->top > L->base);
+ if (tvisfunc(o) && isluafunc(funcV(o)))
+ return lj_bcwrite(L, funcproto(funcV(o)), writer, data, 0);
+ else
+ return 1;
+}
+