Posted to commits@trafficserver.apache.org by zw...@apache.org on 2015/07/29 01:39:51 UTC

[13/62] [abbrv] trafficserver git commit: TS-3783 TS-3030 Add luajit v2.0.4 as a subtree

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_opt_mem.c
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_opt_mem.c b/lib/luajit/src/lj_opt_mem.c
new file mode 100644
index 0000000..a4d96fc
--- /dev/null
+++ b/lib/luajit/src/lj_opt_mem.c
@@ -0,0 +1,916 @@
+/*
+** Memory access optimizations.
+** AA: Alias Analysis using high-level semantic disambiguation.
+** FWD: Load Forwarding (L2L) + Store Forwarding (S2L).
+** DSE: Dead-Store Elimination.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_mem_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_tab.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref)		(&J->cur.ir[(ref)])
+#define fins		(&J->fold.ins)
+#define fleft		(&J->fold.left)
+#define fright		(&J->fold.right)
+
+/*
+** Caveat #1: return value is not always a TRef -- only use with tref_ref().
+** Caveat #2: FWD relies on active CSE for xREF operands -- see lj_opt_fold().
+*/
+
+/* Return values from alias analysis. */
+typedef enum {
+  ALIAS_NO,	/* The two refs CANNOT alias (exact). */
+  ALIAS_MAY,	/* The two refs MAY alias (inexact). */
+  ALIAS_MUST	/* The two refs MUST alias (exact). */
+} AliasRet;
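+
+/* In terms of Lua table accesses: loads from t[1] and t[2] use different
+** constant keys and get ALIAS_NO, t[i] and t[j] with unrelated variable
+** keys into the same table get ALIAS_MAY, and two accesses through the
+** very same xREF get ALIAS_MUST, which enables store-to-load forwarding.
+*/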
+
+/* -- ALOAD/HLOAD forwarding and ASTORE/HSTORE elimination ---------------- */
+
+/* Simplified escape analysis: check for intervening stores. */
+static AliasRet aa_escape(jit_State *J, IRIns *ir, IRIns *stop)
+{
+  IRRef ref = (IRRef)(ir - J->cur.ir);  /* The ref that might be stored. */
+  for (ir++; ir < stop; ir++)
+    if (ir->op2 == ref &&
+	(ir->o == IR_ASTORE || ir->o == IR_HSTORE ||
+	 ir->o == IR_USTORE || ir->o == IR_FSTORE))
+      return ALIAS_MAY;  /* Reference was stored and might alias. */
+  return ALIAS_NO;  /* Reference was not stored. */
+}
+
+/* Alias analysis for two different table references. */
+static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb)
+{
+  IRIns *taba = IR(ta), *tabb = IR(tb);
+  int newa, newb;
+  lua_assert(ta != tb);
+  lua_assert(irt_istab(taba->t) && irt_istab(tabb->t));
+  /* Disambiguate new allocations. */
+  newa = (taba->o == IR_TNEW || taba->o == IR_TDUP);
+  newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP);
+  if (newa && newb)
+    return ALIAS_NO;  /* Two different allocations never alias. */
+  if (newb) {  /* At least one allocation? */
+    IRIns *tmp = taba; taba = tabb; tabb = tmp;
+  } else if (!newa) {
+    return ALIAS_MAY;  /* Anything else: we just don't know. */
+  }
+  return aa_escape(J, taba, tabb);
+}
+
+/* Alias analysis for array and hash access using key-based disambiguation. */
+static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb)
+{
+  IRRef ka = refa->op2;
+  IRRef kb = refb->op2;
+  IRIns *keya, *keyb;
+  IRRef ta, tb;
+  if (refa == refb)
+    return ALIAS_MUST;  /* Shortcut for same refs. */
+  keya = IR(ka);
+  if (keya->o == IR_KSLOT) { ka = keya->op1; keya = IR(ka); }
+  keyb = IR(kb);
+  if (keyb->o == IR_KSLOT) { kb = keyb->op1; keyb = IR(kb); }
+  ta = (refa->o==IR_HREFK || refa->o==IR_AREF) ? IR(refa->op1)->op1 : refa->op1;
+  tb = (refb->o==IR_HREFK || refb->o==IR_AREF) ? IR(refb->op1)->op1 : refb->op1;
+  if (ka == kb) {
+    /* Same key. Check for same table with different ref (NEWREF vs. HREF). */
+    if (ta == tb)
+      return ALIAS_MUST;  /* Same key, same table. */
+    else
+      return aa_table(J, ta, tb);  /* Same key, possibly different table. */
+  }
+  if (irref_isk(ka) && irref_isk(kb))
+    return ALIAS_NO;  /* Different constant keys. */
+  if (refa->o == IR_AREF) {
+    /* Disambiguate array references based on index arithmetic. */
+    int32_t ofsa = 0, ofsb = 0;
+    IRRef basea = ka, baseb = kb;
+    lua_assert(refb->o == IR_AREF);
+    /* Gather base and offset from t[base] or t[base+-ofs]. */
+    if (keya->o == IR_ADD && irref_isk(keya->op2)) {
+      basea = keya->op1;
+      ofsa = IR(keya->op2)->i;
+      if (basea == kb && ofsa != 0)
+	return ALIAS_NO;  /* t[base+-ofs] vs. t[base]. */
+    }
+    if (keyb->o == IR_ADD && irref_isk(keyb->op2)) {
+      baseb = keyb->op1;
+      ofsb = IR(keyb->op2)->i;
+      if (ka == baseb && ofsb != 0)
+	return ALIAS_NO;  /* t[base] vs. t[base+-ofs]. */
+    }
+    if (basea == baseb && ofsa != ofsb)
+      return ALIAS_NO;  /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */
+  } else {
+    /* Disambiguate hash references based on the type of their keys. */
+    lua_assert((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) &&
+	       (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF));
+    if (!irt_sametype(keya->t, keyb->t))
+      return ALIAS_NO;  /* Different key types. */
+  }
+  if (ta == tb)
+    return ALIAS_MAY;  /* Same table, cannot disambiguate keys. */
+  else
+    return aa_table(J, ta, tb);  /* Try to disambiguate tables. */
+}
+
+/* Array and hash load forwarding. */
+static TRef fwd_ahload(jit_State *J, IRRef xref)
+{
+  IRIns *xr = IR(xref);
+  IRRef lim = xref;  /* Search limit. */
+  IRRef ref;
+
+  /* Search for conflicting stores. */
+  ref = J->chain[fins->o+IRDELTA_L2S];
+  while (ref > xref) {
+    IRIns *store = IR(ref);
+    switch (aa_ahref(J, xr, IR(store->op1))) {
+    case ALIAS_NO:   break;  /* Continue searching. */
+    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
+    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
+    }
+    ref = store->prev;
+  }
+
+  /* No conflicting store (yet): const-fold loads from allocations. */
+  {
+    IRIns *ir = (xr->o == IR_HREFK || xr->o == IR_AREF) ? IR(xr->op1) : xr;
+    IRRef tab = ir->op1;
+    ir = IR(tab);
+    if (ir->o == IR_TNEW || (ir->o == IR_TDUP && irref_isk(xr->op2))) {
+      /* A NEWREF with a number key may end up pointing to the array part.
+      ** But it's referenced from HSTORE and not found in the ASTORE chain.
+      ** For now simply consider this a conflict without forwarding anything.
+      */
+      if (xr->o == IR_AREF) {
+	IRRef ref2 = J->chain[IR_NEWREF];
+	while (ref2 > tab) {
+	  IRIns *newref = IR(ref2);
+	  if (irt_isnum(IR(newref->op2)->t))
+	    goto cselim;
+	  ref2 = newref->prev;
+	}
+      }
+      /* NEWREF inhibits CSE for HREF, and dependent FLOADs from HREFK/AREF.
+      ** But the above search for conflicting stores was limited by xref.
+      ** So continue searching, limited by the TNEW/TDUP. Store forwarding
+      ** is ok, too. A conflict does NOT limit the search for a matching load.
+      */
+      while (ref > tab) {
+	IRIns *store = IR(ref);
+	switch (aa_ahref(J, xr, IR(store->op1))) {
+	case ALIAS_NO:   break;  /* Continue searching. */
+	case ALIAS_MAY:  goto cselim;  /* Conflicting store. */
+	case ALIAS_MUST: return store->op2;  /* Store forwarding. */
+	}
+	ref = store->prev;
+      }
+      lua_assert(ir->o != IR_TNEW || irt_isnil(fins->t));
+      if (irt_ispri(fins->t)) {
+	return TREF_PRI(irt_type(fins->t));
+      } else if (irt_isnum(fins->t) || (LJ_DUALNUM && irt_isint(fins->t)) ||
+		 irt_isstr(fins->t)) {
+	TValue keyv;
+	cTValue *tv;
+	IRIns *key = IR(xr->op2);
+	if (key->o == IR_KSLOT) key = IR(key->op1);
+	lj_ir_kvalue(J->L, &keyv, key);
+	tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv);
+	lua_assert(itype2irt(tv) == irt_type(fins->t));
+	if (irt_isnum(fins->t))
+	  return lj_ir_knum_u64(J, tv->u64);
+	else if (LJ_DUALNUM && irt_isint(fins->t))
+	  return lj_ir_kint(J, intV(tv));
+	else
+	  return lj_ir_kstr(J, strV(tv));
+      }
+      /* Otherwise: don't intern as a constant. */
+    }
+  }
+
+cselim:
+  /* Try to find a matching load. Below the conflicting store, if any. */
+  ref = J->chain[fins->o];
+  while (ref > lim) {
+    IRIns *load = IR(ref);
+    if (load->op1 == xref)
+      return ref;  /* Load forwarding. */
+    ref = load->prev;
+  }
+  return 0;  /* Conflict or no match. */
+}
+
+/* Reassociate ALOAD across PHIs to handle t[i-1] forwarding case. */
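+/* For example, when 'i' is a narrowed induction variable carried through
+** a PHI, the key of t[i-1] is recorded as ((i+1)-1); the constants cancel,
+** so the load can be forwarded from an earlier AREF on the plain key i.
+*/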
+static TRef fwd_aload_reassoc(jit_State *J)
+{
+  IRIns *irx = IR(fins->op1);
+  IRIns *key = IR(irx->op2);
+  if (key->o == IR_ADD && irref_isk(key->op2)) {
+    IRIns *add2 = IR(key->op1);
+    if (add2->o == IR_ADD && irref_isk(add2->op2) &&
+	IR(key->op2)->i == -IR(add2->op2)->i) {
+      IRRef ref = J->chain[IR_AREF];
+      IRRef lim = add2->op1;
+      if (irx->op1 > lim) lim = irx->op1;
+      while (ref > lim) {
+	IRIns *ir = IR(ref);
+	if (ir->op1 == irx->op1 && ir->op2 == add2->op1)
+	  return fwd_ahload(J, ref);
+	ref = ir->prev;
+      }
+    }
+  }
+  return 0;
+}
+
+/* ALOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J)
+{
+  IRRef ref;
+  if ((ref = fwd_ahload(J, fins->op1)) ||
+      (ref = fwd_aload_reassoc(J)))
+    return ref;
+  return EMITFOLD;
+}
+
+/* HLOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J)
+{
+  IRRef ref = fwd_ahload(J, fins->op1);
+  if (ref)
+    return ref;
+  return EMITFOLD;
+}
+
+/* HREFK forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J)
+{
+  IRRef tab = fleft->op1;
+  IRRef ref = J->chain[IR_NEWREF];
+  while (ref > tab) {
+    IRIns *newref = IR(ref);
+    if (tab == newref->op1) {
+      if (fright->op1 == newref->op2)
+	return ref;  /* Forward from NEWREF. */
+      else
+	goto docse;
+    } else if (aa_table(J, tab, newref->op1) != ALIAS_NO) {
+      goto docse;
+    }
+    ref = newref->prev;
+  }
+  /* No conflicting NEWREF: key location unchanged for HREFK of TDUP. */
+  if (IR(tab)->o == IR_TDUP)
+    fins->t.irt &= ~IRT_GUARD;  /* Drop HREFK guard. */
+docse:
+  return CSEFOLD;
+}
+
+/* Check whether HREF of TNEW/TDUP can be folded to niltv. */
+int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J)
+{
+  IRRef lim = fins->op1;  /* Search limit. */
+  IRRef ref;
+
+  /* The key for an ASTORE may end up in the hash part after a NEWREF. */
+  if (irt_isnum(fright->t) && J->chain[IR_NEWREF] > lim) {
+    ref = J->chain[IR_ASTORE];
+    while (ref > lim) {
+      if (ref < J->chain[IR_NEWREF])
+	return 0;  /* Conflict. */
+      ref = IR(ref)->prev;
+    }
+  }
+
+  /* Search for conflicting stores. */
+  ref = J->chain[IR_HSTORE];
+  while (ref > lim) {
+    IRIns *store = IR(ref);
+    if (aa_ahref(J, fins, IR(store->op1)) != ALIAS_NO)
+      return 0;  /* Conflict. */
+    ref = store->prev;
+  }
+
+  return 1;  /* No conflict. Can fold to niltv. */
+}
+
+/* Check whether there's no aliasing NEWREF for the left operand. */
+int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim)
+{
+  IRRef ta = fins->op1;
+  IRRef ref = J->chain[IR_NEWREF];
+  while (ref > lim) {
+    IRIns *newref = IR(ref);
+    if (ta == newref->op1 || aa_table(J, ta, newref->op1) != ALIAS_NO)
+      return 0;  /* Conflict. */
+    ref = newref->prev;
+  }
+  return 1;  /* No conflict. Can safely FOLD/CSE. */
+}
+
+/* ASTORE/HSTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J)
+{
+  IRRef xref = fins->op1;  /* xREF reference. */
+  IRRef val = fins->op2;  /* Stored value reference. */
+  IRIns *xr = IR(xref);
+  IRRef1 *refp = &J->chain[fins->o];
+  IRRef ref = *refp;
+  while (ref > xref) {  /* Search for redundant or conflicting stores. */
+    IRIns *store = IR(ref);
+    switch (aa_ahref(J, xr, IR(store->op1))) {
+    case ALIAS_NO:
+      break;  /* Continue searching. */
+    case ALIAS_MAY:	/* Store to MAYBE the same location. */
+      if (store->op2 != val)  /* Conflict if the value is different. */
+	goto doemit;
+      break;  /* Otherwise continue searching. */
+    case ALIAS_MUST:	/* Store to the same location. */
+      if (store->op2 == val)  /* Same value: drop the new store. */
+	return DROPFOLD;
+      /* Different value: try to eliminate the redundant store. */
+      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
+	IRIns *ir;
+	/* Check for any intervening guards (includes conflicting loads). */
+	for (ir = IR(J->cur.nins-1); ir > store; ir--)
+	  if (irt_isguard(ir->t) || ir->o == IR_CALLL)
+	    goto doemit;  /* No elimination possible. */
+	/* Remove redundant store from chain and replace with NOP. */
+	*refp = store->prev;
+	store->o = IR_NOP;
+	store->t.irt = IRT_NIL;
+	store->op1 = store->op2 = 0;
+	store->prev = 0;
+	/* Now emit the new store instead. */
+      }
+      goto doemit;
+    }
+    ref = *(refp = &store->prev);
+  }
+doemit:
+  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
+}
+
+/* -- ULOAD forwarding ---------------------------------------------------- */
+
+/* The current alias analysis for upvalues is very simplistic. It only
+** disambiguates between the unique upvalues of the same function.
+** This is good enough for now, since most upvalues are read-only.
+**
+** A more precise analysis would be feasible with the help of the parser:
+** generate a unique key for every upvalue, even across all prototypes.
+** Lacking a realistic use-case, it's unclear whether this is beneficial.
+*/
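+/* Concretely: two UREFs on the same function with different upvalue
+** indexes get ALIAS_NO, while UREFs on different functions can only be
+** disambiguated when the 8 bit hashes in the low byte of op2 differ.
+*/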
+static AliasRet aa_uref(IRIns *refa, IRIns *refb)
+{
+  if (refa->o != refb->o)
+    return ALIAS_NO;  /* Different UREFx type. */
+  if (refa->op1 == refb->op1) {  /* Same function. */
+    if (refa->op2 == refb->op2)
+      return ALIAS_MUST;  /* Same function, same upvalue idx. */
+    else
+      return ALIAS_NO;  /* Same function, different upvalue idx. */
+  } else {  /* Different functions, check disambiguation hash values. */
+    if (((refa->op2 ^ refb->op2) & 0xff))
+      return ALIAS_NO;  /* Upvalues with different hash values cannot alias. */
+    else
+      return ALIAS_MAY;  /* No conclusion can be drawn for same hash value. */
+  }
+}
+
+/* ULOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J)
+{
+  IRRef uref = fins->op1;
+  IRRef lim = REF_BASE;  /* Search limit. */
+  IRIns *xr = IR(uref);
+  IRRef ref;
+
+  /* Search for conflicting stores. */
+  ref = J->chain[IR_USTORE];
+  while (ref > lim) {
+    IRIns *store = IR(ref);
+    switch (aa_uref(xr, IR(store->op1))) {
+    case ALIAS_NO:   break;  /* Continue searching. */
+    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
+    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
+    }
+    ref = store->prev;
+  }
+
+cselim:
+  /* Try to find a matching load. Below the conflicting store, if any. */
+
+  ref = J->chain[IR_ULOAD];
+  while (ref > lim) {
+    IRIns *ir = IR(ref);
+    if (ir->op1 == uref ||
+	(IR(ir->op1)->op12 == IR(uref)->op12 && IR(ir->op1)->o == IR(uref)->o))
+      return ref;  /* Match for identical or equal UREFx (non-CSEable UREFO). */
+    ref = ir->prev;
+  }
+  return lj_ir_emit(J);
+}
+
+/* USTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J)
+{
+  IRRef xref = fins->op1;  /* xREF reference. */
+  IRRef val = fins->op2;  /* Stored value reference. */
+  IRIns *xr = IR(xref);
+  IRRef1 *refp = &J->chain[IR_USTORE];
+  IRRef ref = *refp;
+  while (ref > xref) {  /* Search for redundant or conflicting stores. */
+    IRIns *store = IR(ref);
+    switch (aa_uref(xr, IR(store->op1))) {
+    case ALIAS_NO:
+      break;  /* Continue searching. */
+    case ALIAS_MAY:	/* Store to MAYBE the same location. */
+      if (store->op2 != val)  /* Conflict if the value is different. */
+	goto doemit;
+      break;  /* Otherwise continue searching. */
+    case ALIAS_MUST:	/* Store to the same location. */
+      if (store->op2 == val)  /* Same value: drop the new store. */
+	return DROPFOLD;
+      /* Different value: try to eliminate the redundant store. */
+      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
+	IRIns *ir;
+	/* Check for any intervening guards (includes conflicting loads). */
+	for (ir = IR(J->cur.nins-1); ir > store; ir--)
+	  if (irt_isguard(ir->t))
+	    goto doemit;  /* No elimination possible. */
+	/* Remove redundant store from chain and replace with NOP. */
+	*refp = store->prev;
+	store->o = IR_NOP;
+	store->t.irt = IRT_NIL;
+	store->op1 = store->op2 = 0;
+	store->prev = 0;
+	if (ref+1 < J->cur.nins &&
+	    store[1].o == IR_OBAR && store[1].op1 == xref) {
+	  IRRef1 *bp = &J->chain[IR_OBAR];
+	  IRIns *obar;
+	  for (obar = IR(*bp); *bp > ref+1; obar = IR(*bp))
+	    bp = &obar->prev;
+	  /* Remove OBAR, too. */
+	  *bp = obar->prev;
+	  obar->o = IR_NOP;
+	  obar->t.irt = IRT_NIL;
+	  obar->op1 = obar->op2 = 0;
+	  obar->prev = 0;
+	}
+	/* Now emit the new store instead. */
+      }
+      goto doemit;
+    }
+    ref = *(refp = &store->prev);
+  }
+doemit:
+  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
+}
+
+/* -- FLOAD forwarding and FSTORE elimination ----------------------------- */
+
+/* Alias analysis for field access.
+** Field loads are cheap and field stores are rare.
+** Simple disambiguation based on field types is good enough.
+*/
+static AliasRet aa_fref(jit_State *J, IRIns *refa, IRIns *refb)
+{
+  if (refa->op2 != refb->op2)
+    return ALIAS_NO;  /* Different fields. */
+  if (refa->op1 == refb->op1)
+    return ALIAS_MUST;  /* Same field, same object. */
+  else if (refa->op2 >= IRFL_TAB_META && refa->op2 <= IRFL_TAB_NOMM)
+    return aa_table(J, refa->op1, refb->op1);  /* Disambiguate tables. */
+  else
+    return ALIAS_MAY;  /* Same field, possibly different object. */
+}
+
+/* Only the loads for mutable fields end up here (see FOLD). */
+TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J)
+{
+  IRRef oref = fins->op1;  /* Object reference. */
+  IRRef fid = fins->op2;  /* Field ID. */
+  IRRef lim = oref;  /* Search limit. */
+  IRRef ref;
+
+  /* Search for conflicting stores. */
+  ref = J->chain[IR_FSTORE];
+  while (ref > oref) {
+    IRIns *store = IR(ref);
+    switch (aa_fref(J, fins, IR(store->op1))) {
+    case ALIAS_NO:   break;  /* Continue searching. */
+    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
+    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
+    }
+    ref = store->prev;
+  }
+
+  /* No conflicting store: const-fold field loads from allocations. */
+  if (fid == IRFL_TAB_META) {
+    IRIns *ir = IR(oref);
+    if (ir->o == IR_TNEW || ir->o == IR_TDUP)
+      return lj_ir_knull(J, IRT_TAB);
+  }
+
+cselim:
+  /* Try to find a matching load. Below the conflicting store, if any. */
+  return lj_opt_cselim(J, lim);
+}
+
+/* FSTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J)
+{
+  IRRef fref = fins->op1;  /* FREF reference. */
+  IRRef val = fins->op2;  /* Stored value reference. */
+  IRIns *xr = IR(fref);
+  IRRef1 *refp = &J->chain[IR_FSTORE];
+  IRRef ref = *refp;
+  while (ref > fref) {  /* Search for redundant or conflicting stores. */
+    IRIns *store = IR(ref);
+    switch (aa_fref(J, xr, IR(store->op1))) {
+    case ALIAS_NO:
+      break;  /* Continue searching. */
+    case ALIAS_MAY:
+      if (store->op2 != val)  /* Conflict if the value is different. */
+	goto doemit;
+      break;  /* Otherwise continue searching. */
+    case ALIAS_MUST:
+      if (store->op2 == val)  /* Same value: drop the new store. */
+	return DROPFOLD;
+      /* Different value: try to eliminate the redundant store. */
+      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
+	IRIns *ir;
+	/* Check for any intervening guards or conflicting loads. */
+	for (ir = IR(J->cur.nins-1); ir > store; ir--)
+	  if (irt_isguard(ir->t) || (ir->o == IR_FLOAD && ir->op2 == xr->op2))
+	    goto doemit;  /* No elimination possible. */
+	/* Remove redundant store from chain and replace with NOP. */
+	*refp = store->prev;
+	store->o = IR_NOP;
+	store->t.irt = IRT_NIL;
+	store->op1 = store->op2 = 0;
+	store->prev = 0;
+	/* Now emit the new store instead. */
+      }
+      goto doemit;
+    }
+    ref = *(refp = &store->prev);
+  }
+doemit:
+  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
+}
+
+/* -- XLOAD forwarding and XSTORE elimination ----------------------------- */
+
+/* Find cdata allocation for a reference (if any). */
+static IRIns *aa_findcnew(jit_State *J, IRIns *ir)
+{
+  while (ir->o == IR_ADD) {
+    if (!irref_isk(ir->op1)) {
+      IRIns *ir1 = aa_findcnew(J, IR(ir->op1));  /* Left-recursion. */
+      if (ir1) return ir1;
+    }
+    if (irref_isk(ir->op2)) return NULL;
+    ir = IR(ir->op2);  /* Flatten right-recursion. */
+  }
+  return ir->o == IR_CNEW ? ir : NULL;
+}
+
+/* Alias analysis for two cdata allocations. */
+static AliasRet aa_cnew(jit_State *J, IRIns *refa, IRIns *refb)
+{
+  IRIns *cnewa = aa_findcnew(J, refa);
+  IRIns *cnewb = aa_findcnew(J, refb);
+  if (cnewa == cnewb)
+    return ALIAS_MAY;  /* Same allocation or neither is an allocation. */
+  if (cnewa && cnewb)
+    return ALIAS_NO;  /* Two different allocations never alias. */
+  if (cnewb) { cnewa = cnewb; refb = refa; }
+  return aa_escape(J, cnewa, refb);
+}
+
+/* Alias analysis for XLOAD/XSTORE. */
+static AliasRet aa_xref(jit_State *J, IRIns *refa, IRIns *xa, IRIns *xb)
+{
+  ptrdiff_t ofsa = 0, ofsb = 0;
+  IRIns *refb = IR(xb->op1);
+  IRIns *basea = refa, *baseb = refb;
+  if (refa == refb && irt_sametype(xa->t, xb->t))
+    return ALIAS_MUST;  /* Shortcut for same refs with identical type. */
+  /* Offset-based disambiguation. */
+  if (refa->o == IR_ADD && irref_isk(refa->op2)) {
+    IRIns *irk = IR(refa->op2);
+    basea = IR(refa->op1);
+    ofsa = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
+					    (ptrdiff_t)irk->i;
+  }
+  if (refb->o == IR_ADD && irref_isk(refb->op2)) {
+    IRIns *irk = IR(refb->op2);
+    baseb = IR(refb->op1);
+    ofsb = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
+					    (ptrdiff_t)irk->i;
+  }
+  /* Treat constified pointers like base vs. base+offset. */
+  if (basea->o == IR_KPTR && baseb->o == IR_KPTR) {
+    ofsb += (char *)ir_kptr(baseb) - (char *)ir_kptr(basea);
+    baseb = basea;
+  }
+  /* This implements (very) strict aliasing rules.
+  ** Different types do NOT alias, except for differences in signedness.
+  ** Type punning through unions is allowed (but forces a reload).
+  */
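+  /* E.g. with different base pointers an int32_t access never aliases a
+  ** float access, but it may still alias a uint32_t access, since only
+  ** the signedness differs.
+  */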
+  if (basea == baseb) {
+    ptrdiff_t sza = irt_size(xa->t), szb = irt_size(xb->t);
+    if (ofsa == ofsb) {
+      if (sza == szb && irt_isfp(xa->t) == irt_isfp(xb->t))
+	return ALIAS_MUST;  /* Same-sized, same-kind. May need to convert. */
+    } else if (ofsa + sza <= ofsb || ofsb + szb <= ofsa) {
+      return ALIAS_NO;  /* Non-overlapping base+-o1 vs. base+-o2. */
+    }
+    /* NYI: extract, extend or reinterpret bits (int <-> fp). */
+    return ALIAS_MAY;  /* Overlapping or type punning: force reload. */
+  }
+  if (!irt_sametype(xa->t, xb->t) &&
+      !(irt_typerange(xa->t, IRT_I8, IRT_U64) &&
+	((xa->t.irt - IRT_I8) ^ (xb->t.irt - IRT_I8)) == 1))
+    return ALIAS_NO;
+  /* NYI: structural disambiguation. */
+  return aa_cnew(J, basea, baseb);  /* Try to disambiguate allocations. */
+}
+
+/* Return CSEd reference or 0. Caveat: swaps lower ref to the right! */
+static IRRef reassoc_trycse(jit_State *J, IROp op, IRRef op1, IRRef op2)
+{
+  IRRef ref = J->chain[op];
+  IRRef lim = op1;
+  if (op2 > lim) { lim = op2; op2 = op1; op1 = lim; }
+  while (ref > lim) {
+    IRIns *ir = IR(ref);
+    if (ir->op1 == op1 && ir->op2 == op2)
+      return ref;
+    ref = ir->prev;
+  }
+  return 0;
+}
+
+/* Reassociate index references. */
+static IRRef reassoc_xref(jit_State *J, IRIns *ir)
+{
+  ptrdiff_t ofs = 0;
+  if (ir->o == IR_ADD && irref_isk(ir->op2)) {  /* Get constant offset. */
+    IRIns *irk = IR(ir->op2);
+    ofs = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
+					   (ptrdiff_t)irk->i;
+    ir = IR(ir->op1);
+  }
+  if (ir->o == IR_ADD) {  /* Add of base + index. */
+    /* Index ref > base ref for loop-carried dependences. Only check op1. */
+    IRIns *ir2, *ir1 = IR(ir->op1);
+    int32_t shift = 0;
+    IRRef idxref;
+    /* Determine index shifts. Don't bother with IR_MUL here. */
+    if (ir1->o == IR_BSHL && irref_isk(ir1->op2))
+      shift = IR(ir1->op2)->i;
+    else if (ir1->o == IR_ADD && ir1->op1 == ir1->op2)
+      shift = 1;
+    else
+      ir1 = ir;
+    ir2 = IR(ir1->op1);
+    /* A non-reassociated add. Must be a loop-carried dependence. */
+    if (ir2->o == IR_ADD && irt_isint(ir2->t) && irref_isk(ir2->op2))
+      ofs += (ptrdiff_t)IR(ir2->op2)->i << shift;
+    else
+      return 0;
+    idxref = ir2->op1;
+    /* Try to CSE the reassociated chain. Give up if not found. */
+    if (ir1 != ir &&
+	!(idxref = reassoc_trycse(J, ir1->o, idxref,
+				  ir1->o == IR_BSHL ? ir1->op2 : idxref)))
+      return 0;
+    if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, ir->op2)))
+      return 0;
+    if (ofs != 0) {
+      IRRef refk = tref_ref(lj_ir_kintp(J, ofs));
+      if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, refk)))
+	return 0;
+    }
+    return idxref;  /* Success, found a reassociated index reference. Phew. */
+  }
+  return 0;  /* Failure. */
+}
+
+/* XLOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J)
+{
+  IRRef xref = fins->op1;
+  IRIns *xr = IR(xref);
+  IRRef lim = xref;  /* Search limit. */
+  IRRef ref;
+
+  if ((fins->op2 & IRXLOAD_READONLY))
+    goto cselim;
+  if ((fins->op2 & IRXLOAD_VOLATILE))
+    goto doemit;
+
+  /* Search for conflicting stores. */
+  ref = J->chain[IR_XSTORE];
+retry:
+  if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
+  if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
+  while (ref > lim) {
+    IRIns *store = IR(ref);
+    switch (aa_xref(J, xr, fins, store)) {
+    case ALIAS_NO:   break;  /* Continue searching. */
+    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
+    case ALIAS_MUST:
+      /* Emit conversion if the loaded type doesn't match the forwarded type. */
+      if (!irt_sametype(fins->t, IR(store->op2)->t)) {
+	IRType dt = irt_type(fins->t), st = irt_type(IR(store->op2)->t);
+	if (dt == IRT_I8 || dt == IRT_I16) {  /* Trunc + sign-extend. */
+	  st = dt | IRCONV_SEXT;
+	  dt = IRT_INT;
+	} else if (dt == IRT_U8 || dt == IRT_U16) {  /* Trunc + zero-extend. */
+	  st = dt;
+	  dt = IRT_INT;
+	}
+	fins->ot = IRT(IR_CONV, dt);
+	fins->op1 = store->op2;
+	fins->op2 = (dt<<5)|st;
+	return RETRYFOLD;
+      }
+      return store->op2;  /* Store forwarding. */
+    }
+    ref = store->prev;
+  }
+
+cselim:
+  /* Try to find a matching load. Below the conflicting store, if any. */
+  ref = J->chain[IR_XLOAD];
+  while (ref > lim) {
+    /* CSE for XLOAD depends on the type, but not on the IRXLOAD_* flags. */
+    if (IR(ref)->op1 == xref && irt_sametype(IR(ref)->t, fins->t))
+      return ref;
+    ref = IR(ref)->prev;
+  }
+
+  /* Reassociate XLOAD across PHIs to handle a[i-1] forwarding case. */
+  if (!(fins->op2 & IRXLOAD_READONLY) && J->chain[IR_LOOP] &&
+      xref == fins->op1 && (xref = reassoc_xref(J, xr)) != 0) {
+    ref = J->chain[IR_XSTORE];
+    while (ref > lim)  /* Skip stores that have already been checked. */
+      ref = IR(ref)->prev;
+    lim = xref;
+    xr = IR(xref);
+    goto retry;  /* Retry with the reassociated reference. */
+  }
+doemit:
+  return EMITFOLD;
+}
+
+/* XSTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J)
+{
+  IRRef xref = fins->op1;
+  IRIns *xr = IR(xref);
+  IRRef lim = xref;  /* Search limit. */
+  IRRef val = fins->op2;  /* Stored value reference. */
+  IRRef1 *refp = &J->chain[IR_XSTORE];
+  IRRef ref = *refp;
+  if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
+  if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
+  if (J->chain[IR_XSNEW] > lim) lim = J->chain[IR_XSNEW];
+  while (ref > lim) {  /* Search for redundant or conflicting stores. */
+    IRIns *store = IR(ref);
+    switch (aa_xref(J, xr, fins, store)) {
+    case ALIAS_NO:
+      break;  /* Continue searching. */
+    case ALIAS_MAY:
+      if (store->op2 != val)  /* Conflict if the value is different. */
+	goto doemit;
+      break;  /* Otherwise continue searching. */
+    case ALIAS_MUST:
+      if (store->op2 == val)  /* Same value: drop the new store. */
+	return DROPFOLD;
+      /* Different value: try to eliminate the redundant store. */
+      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
+	IRIns *ir;
+	/* Check for any intervening guards or any XLOADs (no AA performed). */
+	for (ir = IR(J->cur.nins-1); ir > store; ir--)
+	  if (irt_isguard(ir->t) || ir->o == IR_XLOAD)
+	    goto doemit;  /* No elimination possible. */
+	/* Remove redundant store from chain and replace with NOP. */
+	*refp = store->prev;
+	store->o = IR_NOP;
+	store->t.irt = IRT_NIL;
+	store->op1 = store->op2 = 0;
+	store->prev = 0;
+	/* Now emit the new store instead. */
+      }
+      goto doemit;
+    }
+    ref = *(refp = &store->prev);
+  }
+doemit:
+  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
+}
+
+/* -- Forwarding of lj_tab_len -------------------------------------------- */
+
+/* This is rather simplistic right now, but better than nothing. */
+TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J)
+{
+  IRRef tab = fins->op1;  /* Table reference. */
+  IRRef lim = tab;  /* Search limit. */
+  IRRef ref;
+
+  /* Any ASTORE is a conflict and limits the search. */
+  if (J->chain[IR_ASTORE] > lim) lim = J->chain[IR_ASTORE];
+
+  /* Search for conflicting HSTORE with numeric key. */
+  ref = J->chain[IR_HSTORE];
+  while (ref > lim) {
+    IRIns *store = IR(ref);
+    IRIns *href = IR(store->op1);
+    IRIns *key = IR(href->op2);
+    if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) {
+      lim = ref;  /* Conflicting store found, limits search for TLEN. */
+      break;
+    }
+    ref = store->prev;
+  }
+
+  /* Try to find a matching load. Below the conflicting store, if any. */
+  return lj_opt_cselim(J, lim);
+}
+
+/* -- ASTORE/HSTORE previous type analysis -------------------------------- */
+
+/* Check whether the previous value for a table store is non-nil.
+** This can be derived either from a previous store or from a previous
+** load (because all loads from tables perform a type check).
+**
+** The result of the analysis can be used to avoid the metatable check
+** and the guard against HREF returning niltv. Both of these are cheap,
+** so let's not spend too much effort on the analysis.
+**
+** A result of 1 is exact: previous value CANNOT be nil.
+** A result of 0 is inexact: previous value MAY be nil.
+*/
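+/* Example: if the trace stores 't.f = 1' and later 't.f = x', the query
+** for the second store finds the earlier non-nil HSTORE to the same xREF
+** and returns 1, so the metatable check for that store can be elided.
+*/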
+int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref)
+{
+  /* First check stores. */
+  IRRef ref = J->chain[loadop+IRDELTA_L2S];
+  while (ref > xref) {
+    IRIns *store = IR(ref);
+    if (store->op1 == xref) {  /* Same xREF. */
+      /* A nil store MAY alias, but a non-nil store MUST alias. */
+      return !irt_isnil(store->t);
+    } else if (irt_isnil(store->t)) {  /* Must check any nil store. */
+      IRRef skref = IR(store->op1)->op2;
+      IRRef xkref = IR(xref)->op2;
+      /* Same key type MAY alias. Need ALOAD check due to multiple int types. */
+      if (loadop == IR_ALOAD || irt_sametype(IR(skref)->t, IR(xkref)->t)) {
+	if (skref == xkref || !irref_isk(skref) || !irref_isk(xkref))
+	  return 0;  /* A nil store with same const key or var key MAY alias. */
+	/* Different const keys CANNOT alias. */
+      }  /* Different key types CANNOT alias. */
+    }  /* Other non-nil stores MAY alias. */
+    ref = store->prev;
+  }
+
+  /* Check loads since nothing could be derived from stores. */
+  ref = J->chain[loadop];
+  while (ref > xref) {
+    IRIns *load = IR(ref);
+    if (load->op1 == xref) {  /* Same xREF. */
+      /* A nil load MAY alias, but a non-nil load MUST alias. */
+      return !irt_isnil(load->t);
+    }  /* Other non-nil loads MAY alias. */
+    ref = load->prev;
+  }
+  return 0;  /* Nothing derived at all, previous value MAY be nil. */
+}
+
+/* ------------------------------------------------------------------------ */
+
+#undef IR
+#undef fins
+#undef fleft
+#undef fright
+
+#endif

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_opt_narrow.c
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_opt_narrow.c b/lib/luajit/src/lj_opt_narrow.c
new file mode 100644
index 0000000..58b3763
--- /dev/null
+++ b/lib/luajit/src/lj_opt_narrow.c
@@ -0,0 +1,656 @@
+/*
+** NARROW: Narrowing of numbers to integers (double to int32_t).
+** STRIPOV: Stripping of overflow checks.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_narrow_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_bc.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_vm.h"
+#include "lj_strscan.h"
+
+/* Rationale for narrowing optimizations:
+**
+** Lua has only a single number type and this is a FP double by default.
+** Narrowing doubles to integers does not pay off for the interpreter on a
+** current-generation x86/x64 machine. Most FP operations need the same
+** amount of execution resources as their integer counterparts, except
+** with slightly longer latencies. Longer latencies are a non-issue for
+** the interpreter, since they are usually hidden by other overhead.
+**
+** The total CPU execution bandwidth is the sum of the bandwidth of the FP
+** and the integer units, because they execute in parallel. The FP units
+** have an equal or higher bandwidth than the integer units. Not using
+** them means losing execution bandwidth. Moving work away from them to
+** the already quite busy integer units is a losing proposition.
+**
+** The situation for JIT-compiled code is a bit different: the higher code
+** density makes the extra latencies much more visible. Tight loops expose
+** the latencies for updating the induction variables. Array indexing
+** requires narrowing conversions with high latencies and additional
+** guards (to check that the index is really an integer). And many common
+** optimizations only work on integers.
+**
+** One solution would be speculative, eager narrowing of all number loads.
+** This causes many problems, like losing -0 or the need to resolve type
+** mismatches between traces. It also effectively forces the integer type
+** to have overflow-checking semantics. This impedes many basic
+** optimizations and requires adding overflow checks to all integer
+** arithmetic operations (whereas FP arithmetic can do without).
+**
+** Always replacing an FP op with an integer op plus an overflow check is
+** counter-productive on a current-generation super-scalar CPU. Although
+** the overflow check branches are highly predictable, they will clog the
+** execution port for the branch unit and tie up reorder buffers. This is
+** turning a pure data-flow dependency into a different data-flow
+** dependency (with slightly lower latency) *plus* a control dependency.
+** In general, you don't want to do this since latencies due to data-flow
+** dependencies can be well hidden by out-of-order execution.
+**
+** A better solution is to keep all numbers as FP values and only narrow
+** when it's beneficial to do so. LuaJIT uses predictive narrowing for
+** induction variables and demand-driven narrowing for index expressions,
+** integer arguments and bit operations. Additionally it can eliminate or
+** hoist most of the resulting overflow checks. Regular arithmetic
+** computations are never narrowed to integers.
+**
+** The integer type in the IR has convenient wrap-around semantics and
+** ignores overflow. Extra operations have been added for
+** overflow-checking arithmetic (ADDOV/SUBOV) instead of an extra type.
+** Apart from reducing overall complexity of the compiler, this also
+** nicely solves the problem where you want to apply algebraic
+** simplifications to ADD, but not to ADDOV. And the x86/x64 assembler can
+** use lea instead of an add for integer ADD, but not for ADDOV (lea does
+** not affect the flags, but it helps to avoid register moves).
+**
+**
+** All of the above has to be reconsidered for architectures with slow FP
+** operations or without a hardware FPU. The dual-number mode of LuaJIT
+** addresses this issue. Arithmetic operations are performed on integers
+** as far as possible and overflow checks are added as needed.
+**
+** This implies that narrowing for integer arguments and bit operations
+** should also strip overflow checks, e.g. replace ADDOV with ADD. The
+** original overflow guards are weak and can be eliminated by DCE, if
+** there's no other use.
+**
+** A slight twist is that it's usually beneficial to use overflow-checked
+** integer arithmetic if all inputs are already integers. This is the only
+** change that affects the single-number mode, too.
+*/
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref)			(&J->cur.ir[(ref)])
+#define fins			(&J->fold.ins)
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b)	(lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+#define emitir_raw(ot, a, b)	(lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
+
+/* -- Elimination of narrowing type conversions --------------------------- */
+
+/* Narrowing of index expressions and bit operations is demand-driven. The
+** trace recorder emits a narrowing type conversion (CONV.int.num or TOBIT)
+** in all of these cases (e.g. array indexing or string indexing). FOLD
+** already takes care of eliminating simple redundant conversions like
+** CONV.int.num(CONV.num.int(x)) ==> x.
+**
+** But the surrounding code is FP-heavy and arithmetic operations are
+** performed on FP numbers (for the single-number mode). Consider a common
+** example such as 'x=t[i+1]', with 'i' already an integer (due to induction
+** variable narrowing). The index expression would be recorded as
+**   CONV.int.num(ADD(CONV.num.int(i), 1))
+** which is clearly suboptimal.
+**
+** One can do better by recursively backpropagating the narrowing type
+** conversion across FP arithmetic operations. This turns FP ops into
+** their corresponding integer counterparts. Depending on the semantics of
+** the conversion they also need to check for overflow. Currently only ADD
+** and SUB are supported.
+**
+** The above example can be rewritten as
+**   ADDOV(CONV.int.num(CONV.num.int(i)), 1)
+** and then into ADDOV(i, 1) after folding of the conversions. The original
+** FP ops remain in the IR and are eliminated by DCE since all references to
+** them are gone.
+**
+** [In dual-number mode the trace recorder already emits ADDOV etc., but
+** this can be further reduced. See below.]
+**
+** Special care has to be taken to avoid narrowing across an operation
+** which is potentially operating on non-integral operands. One obvious
+** case is when an expression contains a non-integral constant, but ends
+** up as an integer index at runtime (like t[x+1.5] with x=0.5).
+**
+** Operations with two non-constant operands illustrate a similar problem
+** (like t[a+b] with a=1.5 and b=2.5). Backpropagation has to stop there,
+** unless it can be proven that either operand is integral (e.g. by CSEing
+** a previous conversion). As a not-so-obvious corollary this logic also
+** applies for a whole expression tree (e.g. t[(a+1)+(b+1)]).
+**
+** Correctness of the transformation is guaranteed by avoiding to expand
+** the tree by adding more conversions than the one we would need to emit
+** if not backpropagating. TOBIT employs a more optimistic rule, because
+** the conversion has special semantics, designed to make the life of the
+** compiler writer easier. ;-)
+**
+** Using on-the-fly backpropagation of an expression tree doesn't work
+** because it's unknown whether the transform is correct until the end.
+** This either requires IR rollback and cache invalidation for every
+** subtree or a two-pass algorithm. The former didn't work out too well,
+** so the code now combines a recursive collector with a stack-based
+** emitter.
+**
+** [A recursive backpropagation algorithm with backtracking, employing
+** skip-list lookup and round-robin caching, emitting stack operations
+** on-the-fly for a stack-based interpreter -- and all of that in a meager
+** kilobyte? Yep, compilers are a great treasure chest. Throw away your
+** textbooks and read the codebase of a compiler today!]
+**
+** There's another optimization opportunity for array indexing: it's
+** always accompanied by an array bounds-check. The outermost overflow
+** check may be delegated to the ABC operation. This works because ABC is
+** an unsigned comparison and wrap-around due to overflow creates negative
+** numbers.
+**
+** But this optimization is only valid for constants that cannot overflow
+** an int32_t into the range of valid array indexes [0..2^27+1). A check
+** for +-2^30 is safe since -2^31 - 2^30 wraps to 2^30 and 2^31-1 + 2^30
+** wraps to -2^30-1.
+**
+** It's also good enough in practice, since e.g. t[i+1] or t[i-10] are
+** quite common. So the above example finally ends up as ADD(i, 1)!
+**
+** Later on, the assembler is able to fuse the whole array reference and
+** the ADD into the memory operands of loads and other instructions. This
+** is why LuaJIT is able to generate very pretty (and fast) machine code
+** for array indexing. And that, my dear, concludes another story about
+** one of the hidden secrets of LuaJIT ...
+*/
+
+/* Maximum backpropagation depth and maximum stack size. */
+#define NARROW_MAX_BACKPROP	100
+#define NARROW_MAX_STACK	256
+
+/* The stack machine has a 32 bit instruction format: [IROpT | IRRef1]
+** The lower 16 bits hold a reference (or 0). The upper 16 bits hold
+** the IR opcode + type or one of the following special opcodes:
+*/
+enum {
+  NARROW_REF,		/* Push ref. */
+  NARROW_CONV,		/* Push conversion of ref. */
+  NARROW_SEXT,		/* Push sign-extension of ref. */
+  NARROW_INT		/* Push KINT ref. The next code holds an int32_t. */
+};
+
+typedef uint32_t NarrowIns;
+
+#define NARROWINS(op, ref)	(((op) << 16) + (ref))
+#define narrow_op(ins)		((IROpT)((ins) >> 16))
+#define narrow_ref(ins)		((IRRef1)(ins))
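+
+/* For the index expression in 'x=t[i+1]' (see the comments above) the
+** collector emits [NARROW_REF(i), NARROW_INT, 1, IRT(IR_ADD, IRT_INT)],
+** which the stack machine in narrow_conv_emit() turns into ADD(i, 1),
+** since the overflow check for the small constant offset is delegated
+** to the array bounds check (otherwise it would emit ADDOV).
+*/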
+
+/* Context used for narrowing of type conversions. */
+typedef struct NarrowConv {
+  jit_State *J;		/* JIT compiler state. */
+  NarrowIns *sp;	/* Current stack pointer. */
+  NarrowIns *maxsp;	/* Maximum stack pointer minus redzone. */
+  int lim;		/* Limit on the number of emitted conversions. */
+  IRRef mode;		/* Conversion mode (IRCONV_*). */
+  IRType t;		/* Destination type: IRT_INT or IRT_I64. */
+  NarrowIns stack[NARROW_MAX_STACK];  /* Stack holding stack-machine code. */
+} NarrowConv;
+
+/* Lookup a reference in the backpropagation cache. */
+static BPropEntry *narrow_bpc_get(jit_State *J, IRRef1 key, IRRef mode)
+{
+  ptrdiff_t i;
+  for (i = 0; i < BPROP_SLOTS; i++) {
+    BPropEntry *bp = &J->bpropcache[i];
+    /* Stronger checks are ok, too. */
+    if (bp->key == key && bp->mode >= mode &&
+	((bp->mode ^ mode) & IRCONV_MODEMASK) == 0)
+      return bp;
+  }
+  return NULL;
+}
+
+/* Add an entry to the backpropagation cache. */
+static void narrow_bpc_set(jit_State *J, IRRef1 key, IRRef1 val, IRRef mode)
+{
+  uint32_t slot = J->bpropslot;
+  BPropEntry *bp = &J->bpropcache[slot];
+  J->bpropslot = (slot + 1) & (BPROP_SLOTS-1);
+  bp->key = key;
+  bp->val = val;
+  bp->mode = mode;
+}
+
+/* Backpropagate overflow stripping. */
+static void narrow_stripov_backprop(NarrowConv *nc, IRRef ref, int depth)
+{
+  jit_State *J = nc->J;
+  IRIns *ir = IR(ref);
+  if (ir->o == IR_ADDOV || ir->o == IR_SUBOV ||
+      (ir->o == IR_MULOV && (nc->mode & IRCONV_CONVMASK) == IRCONV_ANY)) {
+    BPropEntry *bp = narrow_bpc_get(nc->J, ref, IRCONV_TOBIT);
+    if (bp) {
+      ref = bp->val;
+    } else if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) {
+      NarrowIns *savesp = nc->sp;
+      narrow_stripov_backprop(nc, ir->op1, depth);
+      if (nc->sp < nc->maxsp) {
+	narrow_stripov_backprop(nc, ir->op2, depth);
+	if (nc->sp < nc->maxsp) {
+	  *nc->sp++ = NARROWINS(IRT(ir->o - IR_ADDOV + IR_ADD, IRT_INT), ref);
+	  return;
+	}
+      }
+      nc->sp = savesp;  /* Path too deep, need to backtrack. */
+    }
+  }
+  *nc->sp++ = NARROWINS(NARROW_REF, ref);
+}
+
+/* Backpropagate narrowing conversion. Return number of needed conversions. */
+static int narrow_conv_backprop(NarrowConv *nc, IRRef ref, int depth)
+{
+  jit_State *J = nc->J;
+  IRIns *ir = IR(ref);
+  IRRef cref;
+
+  if (nc->sp >= nc->maxsp) return 10;  /* Path too deep. */
+
+  /* Check the easy cases first. */
+  if (ir->o == IR_CONV && (ir->op2 & IRCONV_SRCMASK) == IRT_INT) {
+    if ((nc->mode & IRCONV_CONVMASK) <= IRCONV_ANY)
+      narrow_stripov_backprop(nc, ir->op1, depth+1);
+    else
+      *nc->sp++ = NARROWINS(NARROW_REF, ir->op1);  /* Undo conversion. */
+    if (nc->t == IRT_I64)
+      *nc->sp++ = NARROWINS(NARROW_SEXT, 0);  /* Sign-extend integer. */
+    return 0;
+  } else if (ir->o == IR_KNUM) {  /* Narrow FP constant. */
+    lua_Number n = ir_knum(ir)->n;
+    if ((nc->mode & IRCONV_CONVMASK) == IRCONV_TOBIT) {
+      /* Allows a wider range of constants. */
+      int64_t k64 = (int64_t)n;
+      if (n == (lua_Number)k64) {  /* Only if const doesn't lose precision. */
+	*nc->sp++ = NARROWINS(NARROW_INT, 0);
+	*nc->sp++ = (NarrowIns)k64;  /* But always truncate to 32 bits. */
+	return 0;
+      }
+    } else {
+      int32_t k = lj_num2int(n);
+      /* Only if constant is a small integer. */
+      if (checki16(k) && n == (lua_Number)k) {
+	*nc->sp++ = NARROWINS(NARROW_INT, 0);
+	*nc->sp++ = (NarrowIns)k;
+	return 0;
+      }
+    }
+    return 10;  /* Never narrow other FP constants (this is rare). */
+  }
+
+  /* Try to CSE the conversion. Stronger checks are ok, too. */
+  cref = J->chain[fins->o];
+  while (cref > ref) {
+    IRIns *cr = IR(cref);
+    if (cr->op1 == ref &&
+	(fins->o == IR_TOBIT ||
+	 ((cr->op2 & IRCONV_MODEMASK) == (nc->mode & IRCONV_MODEMASK) &&
+	  irt_isguard(cr->t) >= irt_isguard(fins->t)))) {
+      *nc->sp++ = NARROWINS(NARROW_REF, cref);
+      return 0;  /* Already there, no additional conversion needed. */
+    }
+    cref = cr->prev;
+  }
+
+  /* Backpropagate across ADD/SUB. */
+  if (ir->o == IR_ADD || ir->o == IR_SUB) {
+    /* Try cache lookup first. */
+    IRRef mode = nc->mode;
+    BPropEntry *bp;
+    /* Inner conversions need a stronger check. */
+    if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX && depth > 0)
+      mode += IRCONV_CHECK-IRCONV_INDEX;
+    bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode);
+    if (bp) {
+      *nc->sp++ = NARROWINS(NARROW_REF, bp->val);
+      return 0;
+    } else if (nc->t == IRT_I64) {
+      /* Try sign-extending from an existing (checked) conversion to int. */
+      mode = (IRT_INT<<5)|IRT_NUM|IRCONV_INDEX;
+      bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode);
+      if (bp) {
+	*nc->sp++ = NARROWINS(NARROW_REF, bp->val);
+	*nc->sp++ = NARROWINS(NARROW_SEXT, 0);
+	return 0;
+      }
+    }
+    if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) {
+      NarrowIns *savesp = nc->sp;
+      int count = narrow_conv_backprop(nc, ir->op1, depth);
+      count += narrow_conv_backprop(nc, ir->op2, depth);
+      if (count <= nc->lim) {  /* Limit total number of conversions. */
+	*nc->sp++ = NARROWINS(IRT(ir->o, nc->t), ref);
+	return count;
+      }
+      nc->sp = savesp;  /* Too many conversions, need to backtrack. */
+    }
+  }
+
+  /* Otherwise add a conversion. */
+  *nc->sp++ = NARROWINS(NARROW_CONV, ref);
+  return 1;
+}
+
+/* Emit the conversions collected during backpropagation. */
+static IRRef narrow_conv_emit(jit_State *J, NarrowConv *nc)
+{
+  /* The fins fields must be saved now -- emitir() overwrites them. */
+  IROpT guardot = irt_isguard(fins->t) ? IRTG(IR_ADDOV-IR_ADD, 0) : 0;
+  IROpT convot = fins->ot;
+  IRRef1 convop2 = fins->op2;
+  NarrowIns *next = nc->stack;  /* List of instructions from backpropagation. */
+  NarrowIns *last = nc->sp;
+  NarrowIns *sp = nc->stack;  /* Recycle the stack to store operands. */
+  while (next < last) {  /* Simple stack machine to process the ins. list. */
+    NarrowIns ref = *next++;
+    IROpT op = narrow_op(ref);
+    if (op == NARROW_REF) {
+      *sp++ = ref;
+    } else if (op == NARROW_CONV) {
+      *sp++ = emitir_raw(convot, ref, convop2);  /* Raw emit avoids a loop. */
+    } else if (op == NARROW_SEXT) {
+      lua_assert(sp >= nc->stack+1);
+      sp[-1] = emitir(IRT(IR_CONV, IRT_I64), sp[-1],
+		      (IRT_I64<<5)|IRT_INT|IRCONV_SEXT);
+    } else if (op == NARROW_INT) {
+      lua_assert(next < last);
+      *sp++ = nc->t == IRT_I64 ?
+	      lj_ir_kint64(J, (int64_t)(int32_t)*next++) :
+	      lj_ir_kint(J, *next++);
+    } else {  /* Regular IROpT. Pops two operands and pushes one result. */
+      IRRef mode = nc->mode;
+      lua_assert(sp >= nc->stack+2);
+      sp--;
+      /* Omit some overflow checks for array indexing. See comments above. */
+      if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX) {
+	if (next == last && irref_isk(narrow_ref(sp[0])) &&
+	  (uint32_t)IR(narrow_ref(sp[0]))->i + 0x40000000u < 0x80000000u)
+	  guardot = 0;
+	else  /* Otherwise cache a stronger check. */
+	  mode += IRCONV_CHECK-IRCONV_INDEX;
+      }
+      sp[-1] = emitir(op+guardot, sp[-1], sp[0]);
+      /* Add to cache. */
+      if (narrow_ref(ref))
+	narrow_bpc_set(J, narrow_ref(ref), narrow_ref(sp[-1]), mode);
+    }
+  }
+  lua_assert(sp == nc->stack+1);
+  return nc->stack[0];
+}
+
+/* Narrow a type conversion of an arithmetic operation. */
+TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J)
+{
+  if ((J->flags & JIT_F_OPT_NARROW)) {
+    NarrowConv nc;
+    nc.J = J;
+    nc.sp = nc.stack;
+    nc.maxsp = &nc.stack[NARROW_MAX_STACK-4];
+    nc.t = irt_type(fins->t);
+    if (fins->o == IR_TOBIT) {
+      nc.mode = IRCONV_TOBIT;  /* Used only in the backpropagation cache. */
+      nc.lim = 2;  /* TOBIT can use a more optimistic rule. */
+    } else {
+      nc.mode = fins->op2;
+      nc.lim = 1;
+    }
+    if (narrow_conv_backprop(&nc, fins->op1, 0) <= nc.lim)
+      return narrow_conv_emit(J, &nc);
+  }
+  return NEXTFOLD;
+}
+
+/* -- Narrowing of implicit conversions ----------------------------------- */
+
+/* Recursively strip overflow checks. */
+static TRef narrow_stripov(jit_State *J, TRef tr, int lastop, IRRef mode)
+{
+  IRRef ref = tref_ref(tr);
+  IRIns *ir = IR(ref);
+  int op = ir->o;
+  if (op >= IR_ADDOV && op <= lastop) {
+    BPropEntry *bp = narrow_bpc_get(J, ref, mode);
+    if (bp) {
+      return TREF(bp->val, irt_t(IR(bp->val)->t));
+    } else {
+      IRRef op1 = ir->op1, op2 = ir->op2;  /* The IR may be reallocated. */
+      op1 = narrow_stripov(J, op1, lastop, mode);
+      op2 = narrow_stripov(J, op2, lastop, mode);
+      tr = emitir(IRT(op - IR_ADDOV + IR_ADD,
+		      ((mode & IRCONV_DSTMASK) >> IRCONV_DSH)), op1, op2);
+      narrow_bpc_set(J, ref, tref_ref(tr), mode);
+    }
+  } else if (LJ_64 && (mode & IRCONV_SEXT) && !irt_is64(ir->t)) {
+    tr = emitir(IRT(IR_CONV, IRT_INTP), tr, mode);
+  }
+  return tr;
+}
+
+/* Narrow array index. */
+TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef tr)
+{
+  IRIns *ir;
+  lua_assert(tref_isnumber(tr));
+  if (tref_isnum(tr))  /* Conversion may be narrowed, too. See above. */
+    return emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_INDEX);
+  /* Omit some overflow checks for array indexing. See comments above. */
+  ir = IR(tref_ref(tr));
+  if ((ir->o == IR_ADDOV || ir->o == IR_SUBOV) && irref_isk(ir->op2) &&
+      (uint32_t)IR(ir->op2)->i + 0x40000000u < 0x80000000u)
+    return emitir(IRTI(ir->o - IR_ADDOV + IR_ADD), ir->op1, ir->op2);
+  return tr;
+}
+
+/* Narrow conversion to integer operand (overflow undefined). */
+TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr)
+{
+  if (tref_isstr(tr))
+    tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
+  if (tref_isnum(tr))  /* Conversion may be narrowed, too. See above. */
+    return emitir(IRTI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_ANY);
+  if (!tref_isinteger(tr))
+    lj_trace_err(J, LJ_TRERR_BADTYPE);
+  /*
+  ** Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV.
+  ** Use IRCONV_TOBIT for the cache entries, since the semantics are the same.
+  */
+  return narrow_stripov(J, tr, IR_MULOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT);
+}
+
+/* Narrow conversion to bitop operand (overflow wrapped). */
+TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr)
+{
+  if (tref_isstr(tr))
+    tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
+  if (tref_isnum(tr))  /* Conversion may be narrowed, too. See above. */
+    return emitir(IRTI(IR_TOBIT), tr, lj_ir_knum_tobit(J));
+  if (!tref_isinteger(tr))
+    lj_trace_err(J, LJ_TRERR_BADTYPE);
+  /*
+  ** Wrapped overflow semantics allow stripping of ADDOV and SUBOV.
+  ** MULOV cannot be stripped due to precision widening.
+  */
+  return narrow_stripov(J, tr, IR_SUBOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT);
+}
+
+#if LJ_HASFFI
+/* Narrow C array index (overflow undefined). */
+TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef tr)
+{
+  lua_assert(tref_isnumber(tr));
+  if (tref_isnum(tr))
+    return emitir(IRT(IR_CONV, IRT_INTP), tr,
+		  (IRT_INTP<<5)|IRT_NUM|IRCONV_TRUNC|IRCONV_ANY);
+  /* Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV. */
+  return narrow_stripov(J, tr, IR_MULOV,
+			LJ_64 ? ((IRT_INTP<<5)|IRT_INT|IRCONV_SEXT) :
+				((IRT_INTP<<5)|IRT_INT|IRCONV_TOBIT));
+}
+#endif
+
+/* -- Narrowing of arithmetic operators ----------------------------------- */
+
+/* Check whether a number fits into an int32_t (-0 is ok, too). */
+static int numisint(lua_Number n)
+{
+  return (n == (lua_Number)lj_num2int(n));
+}
+
+/* Narrowing of arithmetic operations. */
+TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc,
+			 TValue *vb, TValue *vc, IROp op)
+{
+  if (tref_isstr(rb)) {
+    rb = emitir(IRTG(IR_STRTO, IRT_NUM), rb, 0);
+    lj_strscan_num(strV(vb), vb);
+  }
+  if (tref_isstr(rc)) {
+    rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0);
+    lj_strscan_num(strV(vc), vc);
+  }
+  /* Must not narrow MUL in non-DUALNUM variant, because it loses -0. */
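+  /* (e.g. 0*-5 is -0 as an FP number, but narrowing would turn it into +0). */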
+  if ((op >= IR_ADD && op <= (LJ_DUALNUM ? IR_MUL : IR_SUB)) &&
+      tref_isinteger(rb) && tref_isinteger(rc) &&
+      numisint(lj_vm_foldarith(numberVnum(vb), numberVnum(vc),
+			       (int)op - (int)IR_ADD)))
+    return emitir(IRTGI((int)op - (int)IR_ADD + (int)IR_ADDOV), rb, rc);
+  if (!tref_isnum(rb)) rb = emitir(IRTN(IR_CONV), rb, IRCONV_NUM_INT);
+  if (!tref_isnum(rc)) rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
+  return emitir(IRTN(op), rb, rc);
+}
+
+/* Narrowing of unary minus operator. */
+TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc)
+{
+  if (tref_isstr(rc)) {
+    rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0);
+    lj_strscan_num(strV(vc), vc);
+  }
+  if (tref_isinteger(rc)) {
+    if ((uint32_t)numberVint(vc) != 0x80000000u)
+      return emitir(IRTGI(IR_SUBOV), lj_ir_kint(J, 0), rc);
+    rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
+  }
+  return emitir(IRTN(IR_NEG), rc, lj_ir_knum_neg(J));
+}
+
+/* Narrowing of modulo operator. */
+TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vc)
+{
+  TRef tmp;
+  if (tvisstr(vc) && !lj_strscan_num(strV(vc), vc))
+    lj_trace_err(J, LJ_TRERR_BADTYPE);
+  if ((LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) &&
+      tref_isinteger(rb) && tref_isinteger(rc) &&
+      (tvisint(vc) ? intV(vc) != 0 : !tviszero(vc))) {
+    emitir(IRTGI(IR_NE), rc, lj_ir_kint(J, 0));
+    return emitir(IRTI(IR_MOD), rb, rc);
+  }
+  /* b % c ==> b - floor(b/c)*c */
+  rb = lj_ir_tonum(J, rb);
+  rc = lj_ir_tonum(J, rc);
+  tmp = emitir(IRTN(IR_DIV), rb, rc);
+  tmp = emitir(IRTN(IR_FPMATH), tmp, IRFPM_FLOOR);
+  tmp = emitir(IRTN(IR_MUL), tmp, rc);
+  return emitir(IRTN(IR_SUB), rb, tmp);
+}
+
+/* Narrowing of power operator or math.pow. */
+TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vc)
+{
+  if (tvisstr(vc) && !lj_strscan_num(strV(vc), vc))
+    lj_trace_err(J, LJ_TRERR_BADTYPE);
+  /* Narrowing must be unconditional to preserve (-x)^i semantics. */
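+  /* E.g. (-2)^3 must be -8, but the split exp2(3*log2(-2)) would give NaN. */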
+  if (tvisint(vc) || numisint(numV(vc))) {
+    int checkrange = 0;
+    /* Split pow is faster for bigger exponents. But do this only for (+k)^i. */
+    if (tref_isk(rb) && (int32_t)ir_knum(IR(tref_ref(rb)))->u32.hi >= 0) {
+      int32_t k = numberVint(vc);
+      if (!(k >= -65536 && k <= 65536)) goto split_pow;
+      checkrange = 1;
+    }
+    if (!tref_isinteger(rc)) {
+      if (tref_isstr(rc))
+	rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0);
+      /* Guarded conversion to integer! */
+      rc = emitir(IRTGI(IR_CONV), rc, IRCONV_INT_NUM|IRCONV_CHECK);
+    }
+    if (checkrange && !tref_isk(rc)) {  /* Range guard: -65536 <= i <= 65536 */
+      TRef tmp = emitir(IRTI(IR_ADD), rc, lj_ir_kint(J, 65536));
+      emitir(IRTGI(IR_ULE), tmp, lj_ir_kint(J, 2*65536));
+    }
+    return emitir(IRTN(IR_POW), rb, rc);
+  }
+split_pow:
+  /* FOLD covers most cases, but some are easier to do here. */
+  if (tref_isk(rb) && tvispone(ir_knum(IR(tref_ref(rb)))))
+    return rb;  /* 1 ^ x ==> 1 */
+  rc = lj_ir_tonum(J, rc);
+  if (tref_isk(rc) && ir_knum(IR(tref_ref(rc)))->n == 0.5)
+    return emitir(IRTN(IR_FPMATH), rb, IRFPM_SQRT);  /* x ^ 0.5 ==> sqrt(x) */
+  /* Split up b^c into exp2(c*log2(b)). Assembler may rejoin later. */
+  rb = emitir(IRTN(IR_FPMATH), rb, IRFPM_LOG2);
+  rc = emitir(IRTN(IR_MUL), rb, rc);
+  return emitir(IRTN(IR_FPMATH), rc, IRFPM_EXP2);
+}
+
+/* -- Predictive narrowing of induction variables ------------------------- */
+
+/* Narrow a single runtime value. */
+static int narrow_forl(jit_State *J, cTValue *o)
+{
+  if (tvisint(o)) return 1;
+  if (LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) return numisint(numV(o));
+  return 0;
+}
+
+/* Narrow the FORL index type by looking at the runtime values. */
+IRType lj_opt_narrow_forl(jit_State *J, cTValue *tv)
+{
+  lua_assert(tvisnumber(&tv[FORL_IDX]) &&
+	     tvisnumber(&tv[FORL_STOP]) &&
+	     tvisnumber(&tv[FORL_STEP]));
+  /* Narrow only if the runtime values of start/stop/step are all integers. */
+  if (narrow_forl(J, &tv[FORL_IDX]) &&
+      narrow_forl(J, &tv[FORL_STOP]) &&
+      narrow_forl(J, &tv[FORL_STEP])) {
+    /* And if the loop index can't possibly overflow. */
+    lua_Number step = numberVnum(&tv[FORL_STEP]);
+    lua_Number sum = numberVnum(&tv[FORL_STOP]) + step;
+    if (0 <= step ? (sum <= 2147483647.0) : (sum >= -2147483648.0))
+      return IRT_INT;
+  }
+  return IRT_NUM;
+}
+
+#undef IR
+#undef fins
+#undef emitir
+#undef emitir_raw
+
+#endif
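
The fallback paths in lj_opt_narrow_mod and lj_opt_narrow_pow above lean on two numeric identities that the IR sequences encode: Lua's floored modulo, b % c == b - floor(b/c)*c, and the generic power split b^c == exp2(c*log2(b)) (valid for b > 0). A minimal standalone C sketch of both identities follows, using plain doubles instead of IR emission; the demo_* names are illustrative only and are not part of LuaJIT.

    #include <math.h>

    /* Floored modulo as in Lua: the result takes the sign of the divisor. */
    static double demo_floormod(double b, double c)
    {
      return b - floor(b / c) * c;  /* e.g. demo_floormod(5, -3) == -1 */
    }

    /* Power split used on the generic path (b > 0 assumed),
    ** matching the FPMATH LOG2 / MUL / FPMATH EXP2 sequence. */
    static double demo_powsplit(double b, double c)
    {
      return exp2(c * log2(b));
    }

These functions only restate what the emitted DIV/FLOOR/MUL/SUB and LOG2/MUL/EXP2 instruction chains compute at runtime; the actual optimization work is in choosing when to emit the narrowed integer forms instead.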

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1f27b840/lib/luajit/src/lj_opt_sink.c
----------------------------------------------------------------------
diff --git a/lib/luajit/src/lj_opt_sink.c b/lib/luajit/src/lj_opt_sink.c
new file mode 100644
index 0000000..a98e9df
--- /dev/null
+++ b/lib/luajit/src/lj_opt_sink.c
@@ -0,0 +1,245 @@
+/*
+** SINK: Allocation Sinking and Store Sinking.
+** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_sink_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_target.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref)		(&J->cur.ir[(ref)])
+
+/* Check whether the store ref points to an eligible allocation. */
+static IRIns *sink_checkalloc(jit_State *J, IRIns *irs)
+{
+  IRIns *ir = IR(irs->op1);
+  if (!irref_isk(ir->op2))
+    return NULL;  /* Non-constant key. */
+  if (ir->o == IR_HREFK || ir->o == IR_AREF)
+    ir = IR(ir->op1);
+  else if (!(ir->o == IR_HREF || ir->o == IR_NEWREF ||
+	     ir->o == IR_FREF || ir->o == IR_ADD))
+    return NULL;  /* Unhandled reference type (for XSTORE). */
+  ir = IR(ir->op1);
+  if (!(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW))
+    return NULL;  /* Not an allocation. */
+  return ir;  /* Return allocation. */
+}
+
+/* Recursively check whether a value depends on a PHI. */
+static int sink_phidep(jit_State *J, IRRef ref)
+{
+  IRIns *ir = IR(ref);
+  if (irt_isphi(ir->t)) return 1;
+  if (ir->op1 >= REF_FIRST && sink_phidep(J, ir->op1)) return 1;
+  if (ir->op2 >= REF_FIRST && sink_phidep(J, ir->op2)) return 1;
+  return 0;
+}
+
+/* Check whether a value is a sinkable PHI or loop-invariant. */
+static int sink_checkphi(jit_State *J, IRIns *ira, IRRef ref)
+{
+  if (ref >= REF_FIRST) {
+    IRIns *ir = IR(ref);
+    if (irt_isphi(ir->t) || (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT &&
+			     irt_isphi(IR(ir->op1)->t))) {
+      ira->prev++;
+      return 1;  /* Sinkable PHI. */
+    }
+    /* Otherwise the value must be loop-invariant. */
+    return ref < J->loopref && !sink_phidep(J, ref);
+  }
+  return 1;  /* Constant (non-PHI). */
+}
+
+/* Mark non-sinkable allocations using single-pass backward propagation.
+**
+** Roots for the marking process are:
+** - Some PHIs or snapshots (see below).
+** - Non-PHI, non-constant values stored to PHI allocations.
+** - All guards.
+** - Any remaining loads not eliminated by store-to-load forwarding.
+** - Stores with non-constant keys.
+** - All stored values.
+*/
+static void sink_mark_ins(jit_State *J)
+{
+  IRIns *ir, *irlast = IR(J->cur.nins-1);
+  for (ir = irlast ; ; ir--) {
+    switch (ir->o) {
+    case IR_BASE:
+      return;  /* Finished. */
+    case IR_CALLL:  /* IRCALL_lj_tab_len */
+    case IR_ALOAD: case IR_HLOAD: case IR_XLOAD: case IR_TBAR:
+      irt_setmark(IR(ir->op1)->t);  /* Mark ref for remaining loads. */
+      break;
+    case IR_FLOAD:
+      if (irt_ismarked(ir->t) || ir->op2 == IRFL_TAB_META)
+	irt_setmark(IR(ir->op1)->t);  /* Mark table for remaining loads. */
+      break;
+    case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: {
+      IRIns *ira = sink_checkalloc(J, ir);
+      if (!ira || (irt_isphi(ira->t) && !sink_checkphi(J, ira, ir->op2)))
+	irt_setmark(IR(ir->op1)->t);  /* Mark ineligible ref. */
+      irt_setmark(IR(ir->op2)->t);  /* Mark stored value. */
+      break;
+      }
+#if LJ_HASFFI
+    case IR_CNEWI:
+      if (irt_isphi(ir->t) &&
+	  (!sink_checkphi(J, ir, ir->op2) ||
+	   (LJ_32 && ir+1 < irlast && (ir+1)->o == IR_HIOP &&
+	    !sink_checkphi(J, ir, (ir+1)->op2))))
+	irt_setmark(ir->t);  /* Mark ineligible allocation. */
+      /* fallthrough */
+#endif
+    case IR_USTORE:
+      irt_setmark(IR(ir->op2)->t);  /* Mark stored value. */
+      break;
+#if LJ_HASFFI
+    case IR_CALLXS:
+#endif
+    case IR_CALLS:
+      irt_setmark(IR(ir->op1)->t);  /* Mark (potentially) stored values. */
+      break;
+    case IR_PHI: {
+      IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+      irl->prev = irr->prev = 0;  /* Clear PHI value counts. */
+      if (irl->o == irr->o &&
+	  (irl->o == IR_TNEW || irl->o == IR_TDUP ||
+	   (LJ_HASFFI && (irl->o == IR_CNEW || irl->o == IR_CNEWI))))
+	break;
+      irt_setmark(irl->t);
+      irt_setmark(irr->t);
+      break;
+      }
+    default:
+      if (irt_ismarked(ir->t) || irt_isguard(ir->t)) {  /* Propagate mark. */
+	if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t);
+	if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t);
+      }
+      break;
+    }
+  }
+}
+
+/* Mark all instructions referenced by a snapshot. */
+static void sink_mark_snap(jit_State *J, SnapShot *snap)
+{
+  SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+  MSize n, nent = snap->nent;
+  for (n = 0; n < nent; n++) {
+    IRRef ref = snap_ref(map[n]);
+    if (!irref_isk(ref))
+      irt_setmark(IR(ref)->t);
+  }
+}
+
+/* Iteratively remark PHI refs with differing marks or PHI value counts. */
+static void sink_remark_phi(jit_State *J)
+{
+  IRIns *ir;
+  int remark;
+  do {
+    remark = 0;
+    for (ir = IR(J->cur.nins-1); ir->o == IR_PHI; ir--) {
+      IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+      if (((irl->t.irt ^ irr->t.irt) & IRT_MARK))
+	remark = 1;
+      else if (irl->prev == irr->prev)
+	continue;
+      irt_setmark(IR(ir->op1)->t);
+      irt_setmark(IR(ir->op2)->t);
+    }
+  } while (remark);
+}
+
+/* Sweep instructions and tag sunken allocations and stores. */
+static void sink_sweep_ins(jit_State *J)
+{
+  IRIns *ir, *irfirst = IR(J->cur.nk);
+  for (ir = IR(J->cur.nins-1) ; ir >= irfirst; ir--) {
+    switch (ir->o) {
+    case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: {
+      IRIns *ira = sink_checkalloc(J, ir);
+      if (ira && !irt_ismarked(ira->t)) {
+	int delta = (int)(ir - ira);
+	ir->prev = REGSP(RID_SINK, delta > 255 ? 255 : delta);
+      } else {
+	ir->prev = REGSP_INIT;
+      }
+      break;
+      }
+    case IR_NEWREF:
+      if (!irt_ismarked(IR(ir->op1)->t)) {
+	ir->prev = REGSP(RID_SINK, 0);
+      } else {
+	irt_clearmark(ir->t);
+	ir->prev = REGSP_INIT;
+      }
+      break;
+#if LJ_HASFFI
+    case IR_CNEW: case IR_CNEWI:
+#endif
+    case IR_TNEW: case IR_TDUP:
+      if (!irt_ismarked(ir->t)) {
+	ir->t.irt &= ~IRT_GUARD;
+	ir->prev = REGSP(RID_SINK, 0);
+	J->cur.sinktags = 1;  /* Signal present SINK tags to assembler. */
+      } else {
+	irt_clearmark(ir->t);
+	ir->prev = REGSP_INIT;
+      }
+      break;
+    case IR_PHI: {
+      IRIns *ira = IR(ir->op2);
+      if (!irt_ismarked(ira->t) &&
+	  (ira->o == IR_TNEW || ira->o == IR_TDUP ||
+	   (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI)))) {
+	ir->prev = REGSP(RID_SINK, 0);
+      } else {
+	ir->prev = REGSP_INIT;
+      }
+      break;
+      }
+    default:
+      irt_clearmark(ir->t);
+      ir->prev = REGSP_INIT;
+      break;
+    }
+  }
+}
+
+/* Allocation sinking and store sinking.
+**
+** 1. Mark all non-sinkable allocations.
+** 2. Then sink all remaining allocations and the related stores.
+*/
+void lj_opt_sink(jit_State *J)
+{
+  const uint32_t need = (JIT_F_OPT_SINK|JIT_F_OPT_FWD|
+			 JIT_F_OPT_DCE|JIT_F_OPT_CSE|JIT_F_OPT_FOLD);
+  if ((J->flags & need) == need &&
+      (J->chain[IR_TNEW] || J->chain[IR_TDUP] ||
+       (LJ_HASFFI && (J->chain[IR_CNEW] || J->chain[IR_CNEWI])))) {
+    if (!J->loopref)
+      sink_mark_snap(J, &J->cur.snap[J->cur.nsnap-1]);
+    sink_mark_ins(J);
+    if (J->loopref)
+      sink_remark_phi(J);
+    sink_sweep_ins(J);
+  }
+}
+
+#undef IR
+
+#endif
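
The core of sink_mark_ins above is the default case: because IR operands always precede their uses, a single reverse scan sees every user of an instruction before the instruction itself, so a marked (escaping) value can push its mark down to its operands in one pass. The following is a toy sketch of that backward propagation over a simplified, hypothetical instruction array; the ToyIns type and toy_mark_backward name are invented for illustration and are not LuaJIT API.

    #include <stddef.h>

    typedef struct ToyIns {
      int op1, op2;   /* Operand indices into the same array, or -1 if unused. */
      int marked;     /* Nonzero = value escapes and must be materialized. */
    } ToyIns;

    /* Reverse scan: when index i is visited, every user of i has already been
    ** seen, so i's final mark is known and can be propagated to its operands.
    */
    static void toy_mark_backward(ToyIns *ins, size_t n)
    {
      size_t i = n;
      while (i-- > 0) {
        if (ins[i].marked) {
          if (ins[i].op1 >= 0) ins[ins[i].op1].marked = 1;
          if (ins[i].op2 >= 0) ins[ins[i].op2].marked = 1;
        }
      }
    }

Allocations whose mark is still clear after this pass (and after sink_remark_phi converges for looping traces) are the ones sink_sweep_ins tags with RID_SINK so the assembler can materialize them lazily on trace exit.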