Posted to common-commits@hadoop.apache.org by su...@apache.org on 2018/08/10 23:32:01 UTC

[01/25] hadoop git commit: YARN-8568. Replace the deprecated zk-address property in the HA config example in ResourceManagerHA.md (bsteinbach via rkanter)

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12943 cc6f80f46 -> 71a358131


YARN-8568. Replace the deprecated zk-address property in the HA config example in ResourceManagerHA.md (bsteinbach via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8478732b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8478732b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8478732b

Branch: refs/heads/HDFS-12943
Commit: 8478732bb28e9e71061d6b4a043a3a1b5c688902
Parents: 3214cd7
Author: Robert Kanter <rk...@apache.org>
Authored: Wed Aug 8 15:08:55 2018 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Wed Aug 8 15:08:55 2018 -0700

----------------------------------------------------------------------
 .../hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md        | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8478732b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
index da9f5a0..ff97328 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
@@ -111,7 +111,7 @@ Here is the sample of minimal setup for RM failover.
   <value>master2:8088</value>
 </property>
 <property>
-  <name>yarn.resourcemanager.zk-address</name>
+  <name>hadoop.zk.address</name>
   <value>zk1:2181,zk2:2181,zk3:2181</value>
 </property>
 ```

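For readers following along, here is a hedged sketch of how the minimal RM failover snippet in ResourceManagerHA.md reads after this patch. Only the hadoop.zk.address property (and the master2:8088 context line) comes from the diff above; the surrounding properties and hostnames (master1, master2, cluster1) follow the standard minimal HA example in that document and are illustrative:

```
<property>
  <name>yarn.resourcemanager.ha.enabled</name>
  <value>true</value>
</property>
<property>
  <name>yarn.resourcemanager.cluster-id</name>
  <value>cluster1</value>
</property>
<property>
  <name>yarn.resourcemanager.ha.rm-ids</name>
  <value>rm1,rm2</value>
</property>
<property>
  <name>yarn.resourcemanager.hostname.rm1</name>
  <value>master1</value>
</property>
<property>
  <name>yarn.resourcemanager.hostname.rm2</name>
  <value>master2</value>
</property>
<property>
  <name>yarn.resourcemanager.webapp.address.rm1</name>
  <value>master1:8088</value>
</property>
<property>
  <name>yarn.resourcemanager.webapp.address.rm2</name>
  <value>master2:8088</value>
</property>
<property>
  <!-- Replaces the deprecated yarn.resourcemanager.zk-address -->
  <name>hadoop.zk.address</name>
  <value>zk1:2181,zk2:2181,zk3:2181</value>
</property>
```

The old yarn.resourcemanager.zk-address name generally still resolves through Hadoop's configuration deprecation mapping (with a warning), but new configurations should use hadoop.zk.address, which is what this doc fix reflects.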

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[07/25] hadoop git commit: YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 upgrade. Contributed by Akhil PB.

Posted by su...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
new file mode 100644
index 0000000..85dd817
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
@@ -0,0 +1,160 @@
+/*! DataTables 1.10.7
+ * ©2008-2015 SpryMedia Ltd - datatables.net/license
+ */
+(function(Ea,Q,k){var P=function(h){function W(a){var b,c,e={};h.each(a,function(d){if((b=d.match(/^([^A-Z]+?)([A-Z])/))&&-1!=="a aa ai ao as b fn i m o s ".indexOf(b[1]+" "))c=d.replace(b[0],b[2].toLowerCase()),e[c]=d,"o"===b[1]&&W(a[d])});a._hungarianMap=e}function H(a,b,c){a._hungarianMap||W(a);var e;h.each(b,function(d){e=a._hungarianMap[d];if(e!==k&&(c||b[e]===k))"o"===e.charAt(0)?(b[e]||(b[e]={}),h.extend(!0,b[e],b[d]),H(a[e],b[e],c)):b[e]=b[d]})}function P(a){var b=m.defaults.oLanguage,c=a.sZeroRecords;
+!a.sEmptyTable&&(c&&"No data available in table"===b.sEmptyTable)&&E(a,a,"sZeroRecords","sEmptyTable");!a.sLoadingRecords&&(c&&"Loading..."===b.sLoadingRecords)&&E(a,a,"sZeroRecords","sLoadingRecords");a.sInfoThousands&&(a.sThousands=a.sInfoThousands);(a=a.sDecimal)&&db(a)}function eb(a){A(a,"ordering","bSort");A(a,"orderMulti","bSortMulti");A(a,"orderClasses","bSortClasses");A(a,"orderCellsTop","bSortCellsTop");A(a,"order","aaSorting");A(a,"orderFixed","aaSortingFixed");A(a,"paging","bPaginate");
+A(a,"pagingType","sPaginationType");A(a,"pageLength","iDisplayLength");A(a,"searching","bFilter");if(a=a.aoSearchCols)for(var b=0,c=a.length;b<c;b++)a[b]&&H(m.models.oSearch,a[b])}function fb(a){A(a,"orderable","bSortable");A(a,"orderData","aDataSort");A(a,"orderSequence","asSorting");A(a,"orderDataType","sortDataType");var b=a.aDataSort;b&&!h.isArray(b)&&(a.aDataSort=[b])}function gb(a){var a=a.oBrowser,b=h("<div/>").css({position:"absolute",top:0,left:0,height:1,width:1,overflow:"hidden"}).append(h("<div/>").css({position:"absolute",
+top:1,left:1,width:100,overflow:"scroll"}).append(h('<div class="test"/>').css({width:"100%",height:10}))).appendTo("body"),c=b.find(".test");a.bScrollOversize=100===c[0].offsetWidth;a.bScrollbarLeft=1!==Math.round(c.offset().left);b.remove()}function hb(a,b,c,e,d,f){var g,j=!1;c!==k&&(g=c,j=!0);for(;e!==d;)a.hasOwnProperty(e)&&(g=j?b(g,a[e],e,a):a[e],j=!0,e+=f);return g}function Fa(a,b){var c=m.defaults.column,e=a.aoColumns.length,c=h.extend({},m.models.oColumn,c,{nTh:b?b:Q.createElement("th"),sTitle:c.sTitle?
+c.sTitle:b?b.innerHTML:"",aDataSort:c.aDataSort?c.aDataSort:[e],mData:c.mData?c.mData:e,idx:e});a.aoColumns.push(c);c=a.aoPreSearchCols;c[e]=h.extend({},m.models.oSearch,c[e]);ka(a,e,h(b).data())}function ka(a,b,c){var b=a.aoColumns[b],e=a.oClasses,d=h(b.nTh);if(!b.sWidthOrig){b.sWidthOrig=d.attr("width")||null;var f=(d.attr("style")||"").match(/width:\s*(\d+[pxem%]+)/);f&&(b.sWidthOrig=f[1])}c!==k&&null!==c&&(fb(c),H(m.defaults.column,c),c.mDataProp!==k&&!c.mData&&(c.mData=c.mDataProp),c.sType&&
+(b._sManualType=c.sType),c.className&&!c.sClass&&(c.sClass=c.className),h.extend(b,c),E(b,c,"sWidth","sWidthOrig"),c.iDataSort!==k&&(b.aDataSort=[c.iDataSort]),E(b,c,"aDataSort"));var g=b.mData,j=R(g),i=b.mRender?R(b.mRender):null,c=function(a){return"string"===typeof a&&-1!==a.indexOf("@")};b._bAttrSrc=h.isPlainObject(g)&&(c(g.sort)||c(g.type)||c(g.filter));b.fnGetData=function(a,b,c){var e=j(a,b,k,c);return i&&b?i(e,b,a,c):e};b.fnSetData=function(a,b,c){return S(g)(a,b,c)};"number"!==typeof g&&
+(a._rowReadObject=!0);a.oFeatures.bSort||(b.bSortable=!1,d.addClass(e.sSortableNone));a=-1!==h.inArray("asc",b.asSorting);c=-1!==h.inArray("desc",b.asSorting);!b.bSortable||!a&&!c?(b.sSortingClass=e.sSortableNone,b.sSortingClassJUI=""):a&&!c?(b.sSortingClass=e.sSortableAsc,b.sSortingClassJUI=e.sSortJUIAscAllowed):!a&&c?(b.sSortingClass=e.sSortableDesc,b.sSortingClassJUI=e.sSortJUIDescAllowed):(b.sSortingClass=e.sSortable,b.sSortingClassJUI=e.sSortJUI)}function X(a){if(!1!==a.oFeatures.bAutoWidth){var b=
+a.aoColumns;Ga(a);for(var c=0,e=b.length;c<e;c++)b[c].nTh.style.width=b[c].sWidth}b=a.oScroll;(""!==b.sY||""!==b.sX)&&Y(a);w(a,null,"column-sizing",[a])}function la(a,b){var c=Z(a,"bVisible");return"number"===typeof c[b]?c[b]:null}function $(a,b){var c=Z(a,"bVisible"),c=h.inArray(b,c);return-1!==c?c:null}function aa(a){return Z(a,"bVisible").length}function Z(a,b){var c=[];h.map(a.aoColumns,function(a,d){a[b]&&c.push(d)});return c}function Ha(a){var b=a.aoColumns,c=a.aoData,e=m.ext.type.detect,d,
+f,g,j,i,h,l,q,n;d=0;for(f=b.length;d<f;d++)if(l=b[d],n=[],!l.sType&&l._sManualType)l.sType=l._sManualType;else if(!l.sType){g=0;for(j=e.length;g<j;g++){i=0;for(h=c.length;i<h;i++){n[i]===k&&(n[i]=x(a,i,d,"type"));q=e[g](n[i],a);if(!q&&g!==e.length-1)break;if("html"===q)break}if(q){l.sType=q;break}}l.sType||(l.sType="string")}}function ib(a,b,c,e){var d,f,g,j,i,o,l=a.aoColumns;if(b)for(d=b.length-1;0<=d;d--){o=b[d];var q=o.targets!==k?o.targets:o.aTargets;h.isArray(q)||(q=[q]);f=0;for(g=q.length;f<
+g;f++)if("number"===typeof q[f]&&0<=q[f]){for(;l.length<=q[f];)Fa(a);e(q[f],o)}else if("number"===typeof q[f]&&0>q[f])e(l.length+q[f],o);else if("string"===typeof q[f]){j=0;for(i=l.length;j<i;j++)("_all"==q[f]||h(l[j].nTh).hasClass(q[f]))&&e(j,o)}}if(c){d=0;for(a=c.length;d<a;d++)e(d,c[d])}}function K(a,b,c,e){var d=a.aoData.length,f=h.extend(!0,{},m.models.oRow,{src:c?"dom":"data"});f._aData=b;a.aoData.push(f);for(var b=a.aoColumns,f=0,g=b.length;f<g;f++)c&&Ia(a,d,f,x(a,d,f)),b[f].sType=null;a.aiDisplayMaster.push(d);
+(c||!a.oFeatures.bDeferRender)&&Ja(a,d,c,e);return d}function ma(a,b){var c;b instanceof h||(b=h(b));return b.map(function(b,d){c=na(a,d);return K(a,c.data,d,c.cells)})}function x(a,b,c,e){var d=a.iDraw,f=a.aoColumns[c],g=a.aoData[b]._aData,j=f.sDefaultContent,c=f.fnGetData(g,e,{settings:a,row:b,col:c});if(c===k)return a.iDrawError!=d&&null===j&&(I(a,0,"Requested unknown parameter "+("function"==typeof f.mData?"{function}":"'"+f.mData+"'")+" for row "+b,4),a.iDrawError=d),j;if((c===g||null===c)&&
+null!==j)c=j;else if("function"===typeof c)return c.call(g);return null===c&&"display"==e?"":c}function Ia(a,b,c,e){a.aoColumns[c].fnSetData(a.aoData[b]._aData,e,{settings:a,row:b,col:c})}function Ka(a){return h.map(a.match(/(\\.|[^\.])+/g),function(a){return a.replace(/\\./g,".")})}function R(a){if(h.isPlainObject(a)){var b={};h.each(a,function(a,c){c&&(b[a]=R(c))});return function(a,c,f,g){var j=b[c]||b._;return j!==k?j(a,c,f,g):a}}if(null===a)return function(a){return a};if("function"===typeof a)return function(b,
+c,f,g){return a(b,c,f,g)};if("string"===typeof a&&(-1!==a.indexOf(".")||-1!==a.indexOf("[")||-1!==a.indexOf("("))){var c=function(a,b,f){var g,j;if(""!==f){j=Ka(f);for(var i=0,h=j.length;i<h;i++){f=j[i].match(ba);g=j[i].match(T);if(f){j[i]=j[i].replace(ba,"");""!==j[i]&&(a=a[j[i]]);g=[];j.splice(0,i+1);j=j.join(".");i=0;for(h=a.length;i<h;i++)g.push(c(a[i],b,j));a=f[0].substring(1,f[0].length-1);a=""===a?g:g.join(a);break}else if(g){j[i]=j[i].replace(T,"");a=a[j[i]]();continue}if(null===a||a[j[i]]===
+k)return k;a=a[j[i]]}}return a};return function(b,d){return c(b,d,a)}}return function(b){return b[a]}}function S(a){if(h.isPlainObject(a))return S(a._);if(null===a)return function(){};if("function"===typeof a)return function(b,e,d){a(b,"set",e,d)};if("string"===typeof a&&(-1!==a.indexOf(".")||-1!==a.indexOf("[")||-1!==a.indexOf("("))){var b=function(a,e,d){var d=Ka(d),f;f=d[d.length-1];for(var g,j,i=0,h=d.length-1;i<h;i++){g=d[i].match(ba);j=d[i].match(T);if(g){d[i]=d[i].replace(ba,"");a[d[i]]=[];
+f=d.slice();f.splice(0,i+1);g=f.join(".");j=0;for(h=e.length;j<h;j++)f={},b(f,e[j],g),a[d[i]].push(f);return}j&&(d[i]=d[i].replace(T,""),a=a[d[i]](e));if(null===a[d[i]]||a[d[i]]===k)a[d[i]]={};a=a[d[i]]}if(f.match(T))a[f.replace(T,"")](e);else a[f.replace(ba,"")]=e};return function(c,e){return b(c,e,a)}}return function(b,e){b[a]=e}}function La(a){return D(a.aoData,"_aData")}function oa(a){a.aoData.length=0;a.aiDisplayMaster.length=0;a.aiDisplay.length=0}function pa(a,b,c){for(var e=-1,d=0,f=a.length;d<
+f;d++)a[d]==b?e=d:a[d]>b&&a[d]--; -1!=e&&c===k&&a.splice(e,1)}function ca(a,b,c,e){var d=a.aoData[b],f,g=function(c,f){for(;c.childNodes.length;)c.removeChild(c.firstChild);c.innerHTML=x(a,b,f,"display")};if("dom"===c||(!c||"auto"===c)&&"dom"===d.src)d._aData=na(a,d,e,e===k?k:d._aData).data;else{var j=d.anCells;if(j)if(e!==k)g(j[e],e);else{c=0;for(f=j.length;c<f;c++)g(j[c],c)}}d._aSortData=null;d._aFilterData=null;g=a.aoColumns;if(e!==k)g[e].sType=null;else{c=0;for(f=g.length;c<f;c++)g[c].sType=null;
+Ma(d)}}function na(a,b,c,e){var d=[],f=b.firstChild,g,j=0,i,o=a.aoColumns,l=a._rowReadObject,e=e||l?{}:[],q=function(a,b){if("string"===typeof a){var c=a.indexOf("@");-1!==c&&(c=a.substring(c+1),S(a)(e,b.getAttribute(c)))}},a=function(a){if(c===k||c===j)g=o[j],i=h.trim(a.innerHTML),g&&g._bAttrSrc?(S(g.mData._)(e,i),q(g.mData.sort,a),q(g.mData.type,a),q(g.mData.filter,a)):l?(g._setter||(g._setter=S(g.mData)),g._setter(e,i)):e[j]=i;j++};if(f)for(;f;){b=f.nodeName.toUpperCase();if("TD"==b||"TH"==b)a(f),
+d.push(f);f=f.nextSibling}else{d=b.anCells;f=0;for(b=d.length;f<b;f++)a(d[f])}return{data:e,cells:d}}function Ja(a,b,c,e){var d=a.aoData[b],f=d._aData,g=[],j,i,h,l,q;if(null===d.nTr){j=c||Q.createElement("tr");d.nTr=j;d.anCells=g;j._DT_RowIndex=b;Ma(d);l=0;for(q=a.aoColumns.length;l<q;l++){h=a.aoColumns[l];i=c?e[l]:Q.createElement(h.sCellType);g.push(i);if(!c||h.mRender||h.mData!==l)i.innerHTML=x(a,b,l,"display");h.sClass&&(i.className+=" "+h.sClass);h.bVisible&&!c?j.appendChild(i):!h.bVisible&&c&&
+i.parentNode.removeChild(i);h.fnCreatedCell&&h.fnCreatedCell.call(a.oInstance,i,x(a,b,l),f,b,l)}w(a,"aoRowCreatedCallback",null,[j,f,b])}d.nTr.setAttribute("role","row")}function Ma(a){var b=a.nTr,c=a._aData;if(b){c.DT_RowId&&(b.id=c.DT_RowId);if(c.DT_RowClass){var e=c.DT_RowClass.split(" ");a.__rowc=a.__rowc?Na(a.__rowc.concat(e)):e;h(b).removeClass(a.__rowc.join(" ")).addClass(c.DT_RowClass)}c.DT_RowAttr&&h(b).attr(c.DT_RowAttr);c.DT_RowData&&h(b).data(c.DT_RowData)}}function jb(a){var b,c,e,d,
+f,g=a.nTHead,j=a.nTFoot,i=0===h("th, td",g).length,o=a.oClasses,l=a.aoColumns;i&&(d=h("<tr/>").appendTo(g));b=0;for(c=l.length;b<c;b++)f=l[b],e=h(f.nTh).addClass(f.sClass),i&&e.appendTo(d),a.oFeatures.bSort&&(e.addClass(f.sSortingClass),!1!==f.bSortable&&(e.attr("tabindex",a.iTabIndex).attr("aria-controls",a.sTableId),Oa(a,f.nTh,b))),f.sTitle!=e.html()&&e.html(f.sTitle),Pa(a,"header")(a,e,f,o);i&&da(a.aoHeader,g);h(g).find(">tr").attr("role","row");h(g).find(">tr>th, >tr>td").addClass(o.sHeaderTH);
+h(j).find(">tr>th, >tr>td").addClass(o.sFooterTH);if(null!==j){a=a.aoFooter[0];b=0;for(c=a.length;b<c;b++)f=l[b],f.nTf=a[b].cell,f.sClass&&h(f.nTf).addClass(f.sClass)}}function ea(a,b,c){var e,d,f,g=[],j=[],i=a.aoColumns.length,o;if(b){c===k&&(c=!1);e=0;for(d=b.length;e<d;e++){g[e]=b[e].slice();g[e].nTr=b[e].nTr;for(f=i-1;0<=f;f--)!a.aoColumns[f].bVisible&&!c&&g[e].splice(f,1);j.push([])}e=0;for(d=g.length;e<d;e++){if(a=g[e].nTr)for(;f=a.firstChild;)a.removeChild(f);f=0;for(b=g[e].length;f<b;f++)if(o=
+i=1,j[e][f]===k){a.appendChild(g[e][f].cell);for(j[e][f]=1;g[e+i]!==k&&g[e][f].cell==g[e+i][f].cell;)j[e+i][f]=1,i++;for(;g[e][f+o]!==k&&g[e][f].cell==g[e][f+o].cell;){for(c=0;c<i;c++)j[e+c][f+o]=1;o++}h(g[e][f].cell).attr("rowspan",i).attr("colspan",o)}}}}function M(a){var b=w(a,"aoPreDrawCallback","preDraw",[a]);if(-1!==h.inArray(!1,b))C(a,!1);else{var b=[],c=0,e=a.asStripeClasses,d=e.length,f=a.oLanguage,g=a.iInitDisplayStart,j="ssp"==B(a),i=a.aiDisplay;a.bDrawing=!0;g!==k&&-1!==g&&(a._iDisplayStart=
+j?g:g>=a.fnRecordsDisplay()?0:g,a.iInitDisplayStart=-1);var g=a._iDisplayStart,o=a.fnDisplayEnd();if(a.bDeferLoading)a.bDeferLoading=!1,a.iDraw++,C(a,!1);else if(j){if(!a.bDestroying&&!kb(a))return}else a.iDraw++;if(0!==i.length){f=j?a.aoData.length:o;for(j=j?0:g;j<f;j++){var l=i[j],q=a.aoData[l];null===q.nTr&&Ja(a,l);l=q.nTr;if(0!==d){var n=e[c%d];q._sRowStripe!=n&&(h(l).removeClass(q._sRowStripe).addClass(n),q._sRowStripe=n)}w(a,"aoRowCallback",null,[l,q._aData,c,j]);b.push(l);c++}}else c=f.sZeroRecords,
+1==a.iDraw&&"ajax"==B(a)?c=f.sLoadingRecords:f.sEmptyTable&&0===a.fnRecordsTotal()&&(c=f.sEmptyTable),b[0]=h("<tr/>",{"class":d?e[0]:""}).append(h("<td />",{valign:"top",colSpan:aa(a),"class":a.oClasses.sRowEmpty}).html(c))[0];w(a,"aoHeaderCallback","header",[h(a.nTHead).children("tr")[0],La(a),g,o,i]);w(a,"aoFooterCallback","footer",[h(a.nTFoot).children("tr")[0],La(a),g,o,i]);e=h(a.nTBody);e.children().detach();e.append(h(b));w(a,"aoDrawCallback","draw",[a]);a.bSorted=!1;a.bFiltered=!1;a.bDrawing=
+!1}}function N(a,b){var c=a.oFeatures,e=c.bFilter;c.bSort&&lb(a);e?fa(a,a.oPreviousSearch):a.aiDisplay=a.aiDisplayMaster.slice();!0!==b&&(a._iDisplayStart=0);a._drawHold=b;M(a);a._drawHold=!1}function mb(a){var b=a.oClasses,c=h(a.nTable),c=h("<div/>").insertBefore(c),e=a.oFeatures,d=h("<div/>",{id:a.sTableId+"_wrapper","class":b.sWrapper+(a.nTFoot?"":" "+b.sNoFooter)});a.nHolding=c[0];a.nTableWrapper=d[0];a.nTableReinsertBefore=a.nTable.nextSibling;for(var f=a.sDom.split(""),g,j,i,o,l,q,n=0;n<f.length;n++){g=
+null;j=f[n];if("<"==j){i=h("<div/>")[0];o=f[n+1];if("'"==o||'"'==o){l="";for(q=2;f[n+q]!=o;)l+=f[n+q],q++;"H"==l?l=b.sJUIHeader:"F"==l&&(l=b.sJUIFooter);-1!=l.indexOf(".")?(o=l.split("."),i.id=o[0].substr(1,o[0].length-1),i.className=o[1]):"#"==l.charAt(0)?i.id=l.substr(1,l.length-1):i.className=l;n+=q}d.append(i);d=h(i)}else if(">"==j)d=d.parent();else if("l"==j&&e.bPaginate&&e.bLengthChange)g=nb(a);else if("f"==j&&e.bFilter)g=ob(a);else if("r"==j&&e.bProcessing)g=pb(a);else if("t"==j)g=qb(a);else if("i"==
+j&&e.bInfo)g=rb(a);else if("p"==j&&e.bPaginate)g=sb(a);else if(0!==m.ext.feature.length){i=m.ext.feature;q=0;for(o=i.length;q<o;q++)if(j==i[q].cFeature){g=i[q].fnInit(a);break}}g&&(i=a.aanFeatures,i[j]||(i[j]=[]),i[j].push(g),d.append(g))}c.replaceWith(d)}function da(a,b){var c=h(b).children("tr"),e,d,f,g,j,i,o,l,q,n;a.splice(0,a.length);f=0;for(i=c.length;f<i;f++)a.push([]);f=0;for(i=c.length;f<i;f++){e=c[f];for(d=e.firstChild;d;){if("TD"==d.nodeName.toUpperCase()||"TH"==d.nodeName.toUpperCase()){l=
+1*d.getAttribute("colspan");q=1*d.getAttribute("rowspan");l=!l||0===l||1===l?1:l;q=!q||0===q||1===q?1:q;g=0;for(j=a[f];j[g];)g++;o=g;n=1===l?!0:!1;for(j=0;j<l;j++)for(g=0;g<q;g++)a[f+g][o+j]={cell:d,unique:n},a[f+g].nTr=e}d=d.nextSibling}}}function qa(a,b,c){var e=[];c||(c=a.aoHeader,b&&(c=[],da(c,b)));for(var b=0,d=c.length;b<d;b++)for(var f=0,g=c[b].length;f<g;f++)if(c[b][f].unique&&(!e[f]||!a.bSortCellsTop))e[f]=c[b][f].cell;return e}function ra(a,b,c){w(a,"aoServerParams","serverParams",[b]);
+if(b&&h.isArray(b)){var e={},d=/(.*?)\[\]$/;h.each(b,function(a,b){var c=b.name.match(d);c?(c=c[0],e[c]||(e[c]=[]),e[c].push(b.value)):e[b.name]=b.value});b=e}var f,g=a.ajax,j=a.oInstance,i=function(b){w(a,null,"xhr",[a,b,a.jqXHR]);c(b)};if(h.isPlainObject(g)&&g.data){f=g.data;var o=h.isFunction(f)?f(b,a):f,b=h.isFunction(f)&&o?o:h.extend(!0,b,o);delete g.data}o={data:b,success:function(b){var c=b.error||b.sError;c&&I(a,0,c);a.json=b;i(b)},dataType:"json",cache:!1,type:a.sServerMethod,error:function(b,
+c){var f=w(a,null,"xhr",[a,null,a.jqXHR]);-1===h.inArray(!0,f)&&("parsererror"==c?I(a,0,"Invalid JSON response",1):4===b.readyState&&I(a,0,"Ajax error",7));C(a,!1)}};a.oAjaxData=b;w(a,null,"preXhr",[a,b]);a.fnServerData?a.fnServerData.call(j,a.sAjaxSource,h.map(b,function(a,b){return{name:b,value:a}}),i,a):a.sAjaxSource||"string"===typeof g?a.jqXHR=h.ajax(h.extend(o,{url:g||a.sAjaxSource})):h.isFunction(g)?a.jqXHR=g.call(j,b,i,a):(a.jqXHR=h.ajax(h.extend(o,g)),g.data=f)}function kb(a){return a.bAjaxDataGet?
+(a.iDraw++,C(a,!0),ra(a,tb(a),function(b){ub(a,b)}),!1):!0}function tb(a){var b=a.aoColumns,c=b.length,e=a.oFeatures,d=a.oPreviousSearch,f=a.aoPreSearchCols,g,j=[],i,o,l,q=U(a);g=a._iDisplayStart;i=!1!==e.bPaginate?a._iDisplayLength:-1;var n=function(a,b){j.push({name:a,value:b})};n("sEcho",a.iDraw);n("iColumns",c);n("sColumns",D(b,"sName").join(","));n("iDisplayStart",g);n("iDisplayLength",i);var k={draw:a.iDraw,columns:[],order:[],start:g,length:i,search:{value:d.sSearch,regex:d.bRegex}};for(g=
+0;g<c;g++)o=b[g],l=f[g],i="function"==typeof o.mData?"function":o.mData,k.columns.push({data:i,name:o.sName,searchable:o.bSearchable,orderable:o.bSortable,search:{value:l.sSearch,regex:l.bRegex}}),n("mDataProp_"+g,i),e.bFilter&&(n("sSearch_"+g,l.sSearch),n("bRegex_"+g,l.bRegex),n("bSearchable_"+g,o.bSearchable)),e.bSort&&n("bSortable_"+g,o.bSortable);e.bFilter&&(n("sSearch",d.sSearch),n("bRegex",d.bRegex));e.bSort&&(h.each(q,function(a,b){k.order.push({column:b.col,dir:b.dir});n("iSortCol_"+a,b.col);
+n("sSortDir_"+a,b.dir)}),n("iSortingCols",q.length));b=m.ext.legacy.ajax;return null===b?a.sAjaxSource?j:k:b?j:k}function ub(a,b){var c=sa(a,b),e=b.sEcho!==k?b.sEcho:b.draw,d=b.iTotalRecords!==k?b.iTotalRecords:b.recordsTotal,f=b.iTotalDisplayRecords!==k?b.iTotalDisplayRecords:b.recordsFiltered;if(e){if(1*e<a.iDraw)return;a.iDraw=1*e}oa(a);a._iRecordsTotal=parseInt(d,10);a._iRecordsDisplay=parseInt(f,10);e=0;for(d=c.length;e<d;e++)K(a,c[e]);a.aiDisplay=a.aiDisplayMaster.slice();a.bAjaxDataGet=!1;
+M(a);a._bInitComplete||ta(a,b);a.bAjaxDataGet=!0;C(a,!1)}function sa(a,b){var c=h.isPlainObject(a.ajax)&&a.ajax.dataSrc!==k?a.ajax.dataSrc:a.sAjaxDataProp;return"data"===c?b.aaData||b[c]:""!==c?R(c)(b):b}function ob(a){var b=a.oClasses,c=a.sTableId,e=a.oLanguage,d=a.oPreviousSearch,f=a.aanFeatures,g='<input type="search" class="'+b.sFilterInput+'"/>',j=e.sSearch,j=j.match(/_INPUT_/)?j.replace("_INPUT_",g):j+g,b=h("<div/>",{id:!f.f?c+"_filter":null,"class":b.sFilter}).append(h("<label/>").append(j)),
+f=function(){var b=!this.value?"":this.value;b!=d.sSearch&&(fa(a,{sSearch:b,bRegex:d.bRegex,bSmart:d.bSmart,bCaseInsensitive:d.bCaseInsensitive}),a._iDisplayStart=0,M(a))},g=null!==a.searchDelay?a.searchDelay:"ssp"===B(a)?400:0,i=h("input",b).val(d.sSearch).attr("placeholder",e.sSearchPlaceholder).bind("keyup.DT search.DT input.DT paste.DT cut.DT",g?ua(f,g):f).bind("keypress.DT",function(a){if(13==a.keyCode)return!1}).attr("aria-controls",c);h(a.nTable).on("search.dt.DT",function(b,c){if(a===c)try{i[0]!==
+Q.activeElement&&i.val(d.sSearch)}catch(f){}});return b[0]}function fa(a,b,c){var e=a.oPreviousSearch,d=a.aoPreSearchCols,f=function(a){e.sSearch=a.sSearch;e.bRegex=a.bRegex;e.bSmart=a.bSmart;e.bCaseInsensitive=a.bCaseInsensitive};Ha(a);if("ssp"!=B(a)){vb(a,b.sSearch,c,b.bEscapeRegex!==k?!b.bEscapeRegex:b.bRegex,b.bSmart,b.bCaseInsensitive);f(b);for(b=0;b<d.length;b++)wb(a,d[b].sSearch,b,d[b].bEscapeRegex!==k?!d[b].bEscapeRegex:d[b].bRegex,d[b].bSmart,d[b].bCaseInsensitive);xb(a)}else f(b);a.bFiltered=
+!0;w(a,null,"search",[a])}function xb(a){for(var b=m.ext.search,c=a.aiDisplay,e,d,f=0,g=b.length;f<g;f++){for(var j=[],i=0,h=c.length;i<h;i++)d=c[i],e=a.aoData[d],b[f](a,e._aFilterData,d,e._aData,i)&&j.push(d);c.length=0;c.push.apply(c,j)}}function wb(a,b,c,e,d,f){if(""!==b)for(var g=a.aiDisplay,e=Qa(b,e,d,f),d=g.length-1;0<=d;d--)b=a.aoData[g[d]]._aFilterData[c],e.test(b)||g.splice(d,1)}function vb(a,b,c,e,d,f){var e=Qa(b,e,d,f),d=a.oPreviousSearch.sSearch,f=a.aiDisplayMaster,g;0!==m.ext.search.length&&
+(c=!0);g=yb(a);if(0>=b.length)a.aiDisplay=f.slice();else{if(g||c||d.length>b.length||0!==b.indexOf(d)||a.bSorted)a.aiDisplay=f.slice();b=a.aiDisplay;for(c=b.length-1;0<=c;c--)e.test(a.aoData[b[c]]._sFilterRow)||b.splice(c,1)}}function Qa(a,b,c,e){a=b?a:va(a);c&&(a="^(?=.*?"+h.map(a.match(/"[^"]+"|[^ ]+/g)||[""],function(a){if('"'===a.charAt(0))var b=a.match(/^"(.*)"$/),a=b?b[1]:a;return a.replace('"',"")}).join(")(?=.*?")+").*$");return RegExp(a,e?"i":"")}function va(a){return a.replace(Yb,"\\$1")}
+function yb(a){var b=a.aoColumns,c,e,d,f,g,j,i,h,l=m.ext.type.search;c=!1;e=0;for(f=a.aoData.length;e<f;e++)if(h=a.aoData[e],!h._aFilterData){j=[];d=0;for(g=b.length;d<g;d++)c=b[d],c.bSearchable?(i=x(a,e,d,"filter"),l[c.sType]&&(i=l[c.sType](i)),null===i&&(i=""),"string"!==typeof i&&i.toString&&(i=i.toString())):i="",i.indexOf&&-1!==i.indexOf("&")&&(wa.innerHTML=i,i=Zb?wa.textContent:wa.innerText),i.replace&&(i=i.replace(/[\r\n]/g,"")),j.push(i);h._aFilterData=j;h._sFilterRow=j.join("  ");c=!0}return c}
+function zb(a){return{search:a.sSearch,smart:a.bSmart,regex:a.bRegex,caseInsensitive:a.bCaseInsensitive}}function Ab(a){return{sSearch:a.search,bSmart:a.smart,bRegex:a.regex,bCaseInsensitive:a.caseInsensitive}}function rb(a){var b=a.sTableId,c=a.aanFeatures.i,e=h("<div/>",{"class":a.oClasses.sInfo,id:!c?b+"_info":null});c||(a.aoDrawCallback.push({fn:Bb,sName:"information"}),e.attr("role","status").attr("aria-live","polite"),h(a.nTable).attr("aria-describedby",b+"_info"));return e[0]}function Bb(a){var b=
+a.aanFeatures.i;if(0!==b.length){var c=a.oLanguage,e=a._iDisplayStart+1,d=a.fnDisplayEnd(),f=a.fnRecordsTotal(),g=a.fnRecordsDisplay(),j=g?c.sInfo:c.sInfoEmpty;g!==f&&(j+=" "+c.sInfoFiltered);j+=c.sInfoPostFix;j=Cb(a,j);c=c.fnInfoCallback;null!==c&&(j=c.call(a.oInstance,a,e,d,f,g,j));h(b).html(j)}}function Cb(a,b){var c=a.fnFormatNumber,e=a._iDisplayStart+1,d=a._iDisplayLength,f=a.fnRecordsDisplay(),g=-1===d;return b.replace(/_START_/g,c.call(a,e)).replace(/_END_/g,c.call(a,a.fnDisplayEnd())).replace(/_MAX_/g,
+c.call(a,a.fnRecordsTotal())).replace(/_TOTAL_/g,c.call(a,f)).replace(/_PAGE_/g,c.call(a,g?1:Math.ceil(e/d))).replace(/_PAGES_/g,c.call(a,g?1:Math.ceil(f/d)))}function ga(a){var b,c,e=a.iInitDisplayStart,d=a.aoColumns,f;c=a.oFeatures;if(a.bInitialised){mb(a);jb(a);ea(a,a.aoHeader);ea(a,a.aoFooter);C(a,!0);c.bAutoWidth&&Ga(a);b=0;for(c=d.length;b<c;b++)f=d[b],f.sWidth&&(f.nTh.style.width=s(f.sWidth));N(a);d=B(a);"ssp"!=d&&("ajax"==d?ra(a,[],function(c){var f=sa(a,c);for(b=0;b<f.length;b++)K(a,f[b]);
+a.iInitDisplayStart=e;N(a);C(a,!1);ta(a,c)},a):(C(a,!1),ta(a)))}else setTimeout(function(){ga(a)},200)}function ta(a,b){a._bInitComplete=!0;b&&X(a);w(a,"aoInitComplete","init",[a,b])}function Ra(a,b){var c=parseInt(b,10);a._iDisplayLength=c;Sa(a);w(a,null,"length",[a,c])}function nb(a){for(var b=a.oClasses,c=a.sTableId,e=a.aLengthMenu,d=h.isArray(e[0]),f=d?e[0]:e,e=d?e[1]:e,d=h("<select/>",{name:c+"_length","aria-controls":c,"class":b.sLengthSelect}),g=0,j=f.length;g<j;g++)d[0][g]=new Option(e[g],
+f[g]);var i=h("<div><label/></div>").addClass(b.sLength);a.aanFeatures.l||(i[0].id=c+"_length");i.children().append(a.oLanguage.sLengthMenu.replace("_MENU_",d[0].outerHTML));h("select",i).val(a._iDisplayLength).bind("change.DT",function(){Ra(a,h(this).val());M(a)});h(a.nTable).bind("length.dt.DT",function(b,c,f){a===c&&h("select",i).val(f)});return i[0]}function sb(a){var b=a.sPaginationType,c=m.ext.pager[b],e="function"===typeof c,d=function(a){M(a)},b=h("<div/>").addClass(a.oClasses.sPaging+b)[0],
+f=a.aanFeatures;e||c.fnInit(a,b,d);f.p||(b.id=a.sTableId+"_paginate",a.aoDrawCallback.push({fn:function(a){if(e){var b=a._iDisplayStart,i=a._iDisplayLength,h=a.fnRecordsDisplay(),l=-1===i,b=l?0:Math.ceil(b/i),i=l?1:Math.ceil(h/i),h=c(b,i),q,l=0;for(q=f.p.length;l<q;l++)Pa(a,"pageButton")(a,f.p[l],l,h,b,i)}else c.fnUpdate(a,d)},sName:"pagination"}));return b}function Ta(a,b,c){var e=a._iDisplayStart,d=a._iDisplayLength,f=a.fnRecordsDisplay();0===f||-1===d?e=0:"number"===typeof b?(e=b*d,e>f&&(e=0)):
+"first"==b?e=0:"previous"==b?(e=0<=d?e-d:0,0>e&&(e=0)):"next"==b?e+d<f&&(e+=d):"last"==b?e=Math.floor((f-1)/d)*d:I(a,0,"Unknown paging action: "+b,5);b=a._iDisplayStart!==e;a._iDisplayStart=e;b&&(w(a,null,"page",[a]),c&&M(a));return b}function pb(a){return h("<div/>",{id:!a.aanFeatures.r?a.sTableId+"_processing":null,"class":a.oClasses.sProcessing}).html(a.oLanguage.sProcessing).insertBefore(a.nTable)[0]}function C(a,b){a.oFeatures.bProcessing&&h(a.aanFeatures.r).css("display",b?"block":"none");w(a,
+null,"processing",[a,b])}function qb(a){var b=h(a.nTable);b.attr("role","grid");var c=a.oScroll;if(""===c.sX&&""===c.sY)return a.nTable;var e=c.sX,d=c.sY,f=a.oClasses,g=b.children("caption"),j=g.length?g[0]._captionSide:null,i=h(b[0].cloneNode(!1)),o=h(b[0].cloneNode(!1)),l=b.children("tfoot");c.sX&&"100%"===b.attr("width")&&b.removeAttr("width");l.length||(l=null);c=h("<div/>",{"class":f.sScrollWrapper}).append(h("<div/>",{"class":f.sScrollHead}).css({overflow:"hidden",position:"relative",border:0,
+width:e?!e?null:s(e):"100%"}).append(h("<div/>",{"class":f.sScrollHeadInner}).css({"box-sizing":"content-box",width:c.sXInner||"100%"}).append(i.removeAttr("id").css("margin-left",0).append("top"===j?g:null).append(b.children("thead"))))).append(h("<div/>",{"class":f.sScrollBody}).css({overflow:"auto",height:!d?null:s(d),width:!e?null:s(e)}).append(b));l&&c.append(h("<div/>",{"class":f.sScrollFoot}).css({overflow:"hidden",border:0,width:e?!e?null:s(e):"100%"}).append(h("<div/>",{"class":f.sScrollFootInner}).append(o.removeAttr("id").css("margin-left",
+0).append("bottom"===j?g:null).append(b.children("tfoot")))));var b=c.children(),q=b[0],f=b[1],n=l?b[2]:null;if(e)h(f).on("scroll.DT",function(){var a=this.scrollLeft;q.scrollLeft=a;l&&(n.scrollLeft=a)});a.nScrollHead=q;a.nScrollBody=f;a.nScrollFoot=n;a.aoDrawCallback.push({fn:Y,sName:"scrolling"});return c[0]}function Y(a){var b=a.oScroll,c=b.sX,e=b.sXInner,d=b.sY,f=b.iBarWidth,g=h(a.nScrollHead),j=g[0].style,i=g.children("div"),o=i[0].style,l=i.children("table"),i=a.nScrollBody,q=h(i),n=i.style,
+k=h(a.nScrollFoot).children("div"),p=k.children("table"),m=h(a.nTHead),r=h(a.nTable),t=r[0],O=t.style,L=a.nTFoot?h(a.nTFoot):null,ha=a.oBrowser,w=ha.bScrollOversize,v,u,y,x,z,A=[],B=[],C=[],D,E=function(a){a=a.style;a.paddingTop="0";a.paddingBottom="0";a.borderTopWidth="0";a.borderBottomWidth="0";a.height=0};r.children("thead, tfoot").remove();z=m.clone().prependTo(r);v=m.find("tr");y=z.find("tr");z.find("th, td").removeAttr("tabindex");L&&(x=L.clone().prependTo(r),u=L.find("tr"),x=x.find("tr"));
+c||(n.width="100%",g[0].style.width="100%");h.each(qa(a,z),function(b,c){D=la(a,b);c.style.width=a.aoColumns[D].sWidth});L&&G(function(a){a.style.width=""},x);b.bCollapse&&""!==d&&(n.height=q[0].offsetHeight+m[0].offsetHeight+"px");g=r.outerWidth();if(""===c){if(O.width="100%",w&&(r.find("tbody").height()>i.offsetHeight||"scroll"==q.css("overflow-y")))O.width=s(r.outerWidth()-f)}else""!==e?O.width=s(e):g==q.width()&&q.height()<r.height()?(O.width=s(g-f),r.outerWidth()>g-f&&(O.width=s(g))):O.width=
+s(g);g=r.outerWidth();G(E,y);G(function(a){C.push(a.innerHTML);A.push(s(h(a).css("width")))},y);G(function(a,b){a.style.width=A[b]},v);h(y).height(0);L&&(G(E,x),G(function(a){B.push(s(h(a).css("width")))},x),G(function(a,b){a.style.width=B[b]},u),h(x).height(0));G(function(a,b){a.innerHTML='<div class="dataTables_sizing" style="height:0;overflow:hidden;">'+C[b]+"</div>";a.style.width=A[b]},y);L&&G(function(a,b){a.innerHTML="";a.style.width=B[b]},x);if(r.outerWidth()<g){u=i.scrollHeight>i.offsetHeight||
+"scroll"==q.css("overflow-y")?g+f:g;if(w&&(i.scrollHeight>i.offsetHeight||"scroll"==q.css("overflow-y")))O.width=s(u-f);(""===c||""!==e)&&I(a,1,"Possible column misalignment",6)}else u="100%";n.width=s(u);j.width=s(u);L&&(a.nScrollFoot.style.width=s(u));!d&&w&&(n.height=s(t.offsetHeight+f));d&&b.bCollapse&&(n.height=s(d),b=c&&t.offsetWidth>i.offsetWidth?f:0,t.offsetHeight<i.offsetHeight&&(n.height=s(t.offsetHeight+b)));b=r.outerWidth();l[0].style.width=s(b);o.width=s(b);l=r.height()>i.clientHeight||
+"scroll"==q.css("overflow-y");ha="padding"+(ha.bScrollbarLeft?"Left":"Right");o[ha]=l?f+"px":"0px";L&&(p[0].style.width=s(b),k[0].style.width=s(b),k[0].style[ha]=l?f+"px":"0px");q.scroll();if((a.bSorted||a.bFiltered)&&!a._drawHold)i.scrollTop=0}function G(a,b,c){for(var e=0,d=0,f=b.length,g,j;d<f;){g=b[d].firstChild;for(j=c?c[d].firstChild:null;g;)1===g.nodeType&&(c?a(g,j,e):a(g,e),e++),g=g.nextSibling,j=c?j.nextSibling:null;d++}}function Ga(a){var b=a.nTable,c=a.aoColumns,e=a.oScroll,d=e.sY,f=e.sX,
+g=e.sXInner,j=c.length,e=Z(a,"bVisible"),i=h("th",a.nTHead),o=b.getAttribute("width"),l=b.parentNode,k=!1,n,m;(n=b.style.width)&&-1!==n.indexOf("%")&&(o=n);for(n=0;n<e.length;n++)m=c[e[n]],null!==m.sWidth&&(m.sWidth=Db(m.sWidthOrig,l),k=!0);if(!k&&!f&&!d&&j==aa(a)&&j==i.length)for(n=0;n<j;n++)c[n].sWidth=s(i.eq(n).width());else{j=h(b).clone().css("visibility","hidden").removeAttr("id");j.find("tbody tr").remove();var p=h("<tr/>").appendTo(j.find("tbody"));j.find("tfoot th, tfoot td").css("width",
+"");i=qa(a,j.find("thead")[0]);for(n=0;n<e.length;n++)m=c[e[n]],i[n].style.width=null!==m.sWidthOrig&&""!==m.sWidthOrig?s(m.sWidthOrig):"";if(a.aoData.length)for(n=0;n<e.length;n++)k=e[n],m=c[k],h(Eb(a,k)).clone(!1).append(m.sContentPadding).appendTo(p);j.appendTo(l);f&&g?j.width(g):f?(j.css("width","auto"),j.width()<l.offsetWidth&&j.width(l.offsetWidth)):d?j.width(l.offsetWidth):o&&j.width(o);Fb(a,j[0]);if(f){for(n=g=0;n<e.length;n++)m=c[e[n]],d=h(i[n]).outerWidth(),g+=null===m.sWidthOrig?d:parseInt(m.sWidth,
+10)+d-h(i[n]).width();j.width(s(g));b.style.width=s(g)}for(n=0;n<e.length;n++)if(m=c[e[n]],d=h(i[n]).width())m.sWidth=s(d);b.style.width=s(j.css("width"));j.remove()}o&&(b.style.width=s(o));if((o||f)&&!a._reszEvt)b=function(){h(Ea).bind("resize.DT-"+a.sInstance,ua(function(){X(a)}))},a.oBrowser.bScrollOversize?setTimeout(b,1E3):b(),a._reszEvt=!0}function ua(a,b){var c=b!==k?b:200,e,d;return function(){var b=this,g=+new Date,j=arguments;e&&g<e+c?(clearTimeout(d),d=setTimeout(function(){e=k;a.apply(b,
+j)},c)):(e=g,a.apply(b,j))}}function Db(a,b){if(!a)return 0;var c=h("<div/>").css("width",s(a)).appendTo(b||Q.body),e=c[0].offsetWidth;c.remove();return e}function Fb(a,b){var c=a.oScroll;if(c.sX||c.sY)c=!c.sX?c.iBarWidth:0,b.style.width=s(h(b).outerWidth()-c)}function Eb(a,b){var c=Gb(a,b);if(0>c)return null;var e=a.aoData[c];return!e.nTr?h("<td/>").html(x(a,c,b,"display"))[0]:e.anCells[b]}function Gb(a,b){for(var c,e=-1,d=-1,f=0,g=a.aoData.length;f<g;f++)c=x(a,f,b,"display")+"",c=c.replace($b,""),
+c.length>e&&(e=c.length,d=f);return d}function s(a){return null===a?"0px":"number"==typeof a?0>a?"0px":a+"px":a.match(/\d$/)?a+"px":a}function Hb(){var a=m.__scrollbarWidth;if(a===k){var b=h("<p/>").css({position:"absolute",top:0,left:0,width:"100%",height:150,padding:0,overflow:"scroll",visibility:"hidden"}).appendTo("body"),a=b[0].offsetWidth-b[0].clientWidth;m.__scrollbarWidth=a;b.remove()}return a}function U(a){var b,c,e=[],d=a.aoColumns,f,g,j,i;b=a.aaSortingFixed;c=h.isPlainObject(b);var o=[];
+f=function(a){a.length&&!h.isArray(a[0])?o.push(a):o.push.apply(o,a)};h.isArray(b)&&f(b);c&&b.pre&&f(b.pre);f(a.aaSorting);c&&b.post&&f(b.post);for(a=0;a<o.length;a++){i=o[a][0];f=d[i].aDataSort;b=0;for(c=f.length;b<c;b++)g=f[b],j=d[g].sType||"string",o[a]._idx===k&&(o[a]._idx=h.inArray(o[a][1],d[g].asSorting)),e.push({src:i,col:g,dir:o[a][1],index:o[a]._idx,type:j,formatter:m.ext.type.order[j+"-pre"]})}return e}function lb(a){var b,c,e=[],d=m.ext.type.order,f=a.aoData,g=0,j,i=a.aiDisplayMaster,h;
+Ha(a);h=U(a);b=0;for(c=h.length;b<c;b++)j=h[b],j.formatter&&g++,Ib(a,j.col);if("ssp"!=B(a)&&0!==h.length){b=0;for(c=i.length;b<c;b++)e[i[b]]=b;g===h.length?i.sort(function(a,b){var c,d,g,j,i=h.length,k=f[a]._aSortData,m=f[b]._aSortData;for(g=0;g<i;g++)if(j=h[g],c=k[j.col],d=m[j.col],c=c<d?-1:c>d?1:0,0!==c)return"asc"===j.dir?c:-c;c=e[a];d=e[b];return c<d?-1:c>d?1:0}):i.sort(function(a,b){var c,g,j,i,k=h.length,m=f[a]._aSortData,r=f[b]._aSortData;for(j=0;j<k;j++)if(i=h[j],c=m[i.col],g=r[i.col],i=d[i.type+
+"-"+i.dir]||d["string-"+i.dir],c=i(c,g),0!==c)return c;c=e[a];g=e[b];return c<g?-1:c>g?1:0})}a.bSorted=!0}function Jb(a){for(var b,c,e=a.aoColumns,d=U(a),a=a.oLanguage.oAria,f=0,g=e.length;f<g;f++){c=e[f];var j=c.asSorting;b=c.sTitle.replace(/<.*?>/g,"");var i=c.nTh;i.removeAttribute("aria-sort");c.bSortable&&(0<d.length&&d[0].col==f?(i.setAttribute("aria-sort","asc"==d[0].dir?"ascending":"descending"),c=j[d[0].index+1]||j[0]):c=j[0],b+="asc"===c?a.sSortAscending:a.sSortDescending);i.setAttribute("aria-label",
+b)}}function Ua(a,b,c,e){var d=a.aaSorting,f=a.aoColumns[b].asSorting,g=function(a,b){var c=a._idx;c===k&&(c=h.inArray(a[1],f));return c+1<f.length?c+1:b?null:0};"number"===typeof d[0]&&(d=a.aaSorting=[d]);c&&a.oFeatures.bSortMulti?(c=h.inArray(b,D(d,"0")),-1!==c?(b=g(d[c],!0),null===b&&1===d.length&&(b=0),null===b?d.splice(c,1):(d[c][1]=f[b],d[c]._idx=b)):(d.push([b,f[0],0]),d[d.length-1]._idx=0)):d.length&&d[0][0]==b?(b=g(d[0]),d.length=1,d[0][1]=f[b],d[0]._idx=b):(d.length=0,d.push([b,f[0]]),d[0]._idx=
+0);N(a);"function"==typeof e&&e(a)}function Oa(a,b,c,e){var d=a.aoColumns[c];Va(b,{},function(b){!1!==d.bSortable&&(a.oFeatures.bProcessing?(C(a,!0),setTimeout(function(){Ua(a,c,b.shiftKey,e);"ssp"!==B(a)&&C(a,!1)},0)):Ua(a,c,b.shiftKey,e))})}function xa(a){var b=a.aLastSort,c=a.oClasses.sSortColumn,e=U(a),d=a.oFeatures,f,g;if(d.bSort&&d.bSortClasses){d=0;for(f=b.length;d<f;d++)g=b[d].src,h(D(a.aoData,"anCells",g)).removeClass(c+(2>d?d+1:3));d=0;for(f=e.length;d<f;d++)g=e[d].src,h(D(a.aoData,"anCells",
+g)).addClass(c+(2>d?d+1:3))}a.aLastSort=e}function Ib(a,b){var c=a.aoColumns[b],e=m.ext.order[c.sSortDataType],d;e&&(d=e.call(a.oInstance,a,b,$(a,b)));for(var f,g=m.ext.type.order[c.sType+"-pre"],j=0,i=a.aoData.length;j<i;j++)if(c=a.aoData[j],c._aSortData||(c._aSortData=[]),!c._aSortData[b]||e)f=e?d[j]:x(a,j,b,"sort"),c._aSortData[b]=g?g(f):f}function ya(a){if(a.oFeatures.bStateSave&&!a.bDestroying){var b={time:+new Date,start:a._iDisplayStart,length:a._iDisplayLength,order:h.extend(!0,[],a.aaSorting),
+search:zb(a.oPreviousSearch),columns:h.map(a.aoColumns,function(b,e){return{visible:b.bVisible,search:zb(a.aoPreSearchCols[e])}})};w(a,"aoStateSaveParams","stateSaveParams",[a,b]);a.oSavedState=b;a.fnStateSaveCallback.call(a.oInstance,a,b)}}function Kb(a){var b,c,e=a.aoColumns;if(a.oFeatures.bStateSave){var d=a.fnStateLoadCallback.call(a.oInstance,a);if(d&&d.time&&(b=w(a,"aoStateLoadParams","stateLoadParams",[a,d]),-1===h.inArray(!1,b)&&(b=a.iStateDuration,!(0<b&&d.time<+new Date-1E3*b)&&e.length===
+d.columns.length))){a.oLoadedState=h.extend(!0,{},d);d.start!==k&&(a._iDisplayStart=d.start,a.iInitDisplayStart=d.start);d.length!==k&&(a._iDisplayLength=d.length);d.order!==k&&(a.aaSorting=[],h.each(d.order,function(b,c){a.aaSorting.push(c[0]>=e.length?[0,c[1]]:c)}));d.search!==k&&h.extend(a.oPreviousSearch,Ab(d.search));b=0;for(c=d.columns.length;b<c;b++){var f=d.columns[b];f.visible!==k&&(e[b].bVisible=f.visible);f.search!==k&&h.extend(a.aoPreSearchCols[b],Ab(f.search))}w(a,"aoStateLoaded","stateLoaded",
+[a,d])}}}function za(a){var b=m.settings,a=h.inArray(a,D(b,"nTable"));return-1!==a?b[a]:null}function I(a,b,c,e){c="DataTables warning: "+(null!==a?"table id="+a.sTableId+" - ":"")+c;e&&(c+=". For more information about this error, please see http://datatables.net/tn/"+e);if(b)Ea.console&&console.log&&console.log(c);else if(b=m.ext,b=b.sErrMode||b.errMode,w(a,null,"error",[a,e,c]),"alert"==b)alert(c);else{if("throw"==b)throw Error(c);"function"==typeof b&&b(a,e,c)}}function E(a,b,c,e){h.isArray(c)?
+h.each(c,function(c,f){h.isArray(f)?E(a,b,f[0],f[1]):E(a,b,f)}):(e===k&&(e=c),b[c]!==k&&(a[e]=b[c]))}function Lb(a,b,c){var e,d;for(d in b)b.hasOwnProperty(d)&&(e=b[d],h.isPlainObject(e)?(h.isPlainObject(a[d])||(a[d]={}),h.extend(!0,a[d],e)):a[d]=c&&"data"!==d&&"aaData"!==d&&h.isArray(e)?e.slice():e);return a}function Va(a,b,c){h(a).bind("click.DT",b,function(b){a.blur();c(b)}).bind("keypress.DT",b,function(a){13===a.which&&(a.preventDefault(),c(a))}).bind("selectstart.DT",function(){return!1})}function z(a,
+b,c,e){c&&a[b].push({fn:c,sName:e})}function w(a,b,c,e){var d=[];b&&(d=h.map(a[b].slice().reverse(),function(b){return b.fn.apply(a.oInstance,e)}));null!==c&&(b=h.Event(c+".dt"),h(a.nTable).trigger(b,e),d.push(b.result));return d}function Sa(a){var b=a._iDisplayStart,c=a.fnDisplayEnd(),e=a._iDisplayLength;b>=c&&(b=c-e);b-=b%e;if(-1===e||0>b)b=0;a._iDisplayStart=b}function Pa(a,b){var c=a.renderer,e=m.ext.renderer[b];return h.isPlainObject(c)&&c[b]?e[c[b]]||e._:"string"===typeof c?e[c]||e._:e._}function B(a){return a.oFeatures.bServerSide?
+"ssp":a.ajax||a.sAjaxSource?"ajax":"dom"}function Wa(a,b){var c=[],c=Mb.numbers_length,e=Math.floor(c/2);b<=c?c=V(0,b):a<=e?(c=V(0,c-2),c.push("ellipsis"),c.push(b-1)):(a>=b-1-e?c=V(b-(c-2),b):(c=V(a-e+2,a+e-1),c.push("ellipsis"),c.push(b-1)),c.splice(0,0,"ellipsis"),c.splice(0,0,0));c.DT_el="span";return c}function db(a){h.each({num:function(b){return Aa(b,a)},"num-fmt":function(b){return Aa(b,a,Xa)},"html-num":function(b){return Aa(b,a,Ba)},"html-num-fmt":function(b){return Aa(b,a,Ba,Xa)}},function(b,
+c){u.type.order[b+a+"-pre"]=c;b.match(/^html\-/)&&(u.type.search[b+a]=u.type.search.html)})}function Nb(a){return function(){var b=[za(this[m.ext.iApiIndex])].concat(Array.prototype.slice.call(arguments));return m.ext.internal[a].apply(this,b)}}var m,u,t,r,v,Ya={},Ob=/[\r\n]/g,Ba=/<.*?>/g,ac=/^[\w\+\-]/,bc=/[\w\+\-]$/,Yb=RegExp("(\\/|\\.|\\*|\\+|\\?|\\||\\(|\\)|\\[|\\]|\\{|\\}|\\\\|\\$|\\^|\\-)","g"),Xa=/[',$\u00a3\u20ac\u00a5%\u2009\u202F\u20BD\u20a9\u20BArfk]/gi,J=function(a){return!a||!0===a||
+"-"===a?!0:!1},Pb=function(a){var b=parseInt(a,10);return!isNaN(b)&&isFinite(a)?b:null},Qb=function(a,b){Ya[b]||(Ya[b]=RegExp(va(b),"g"));return"string"===typeof a&&"."!==b?a.replace(/\./g,"").replace(Ya[b],"."):a},Za=function(a,b,c){var e="string"===typeof a;if(J(a))return!0;b&&e&&(a=Qb(a,b));c&&e&&(a=a.replace(Xa,""));return!isNaN(parseFloat(a))&&isFinite(a)},Rb=function(a,b,c){return J(a)?!0:!(J(a)||"string"===typeof a)?null:Za(a.replace(Ba,""),b,c)?!0:null},D=function(a,b,c){var e=[],d=0,f=a.length;
+if(c!==k)for(;d<f;d++)a[d]&&a[d][b]&&e.push(a[d][b][c]);else for(;d<f;d++)a[d]&&e.push(a[d][b]);return e},ia=function(a,b,c,e){var d=[],f=0,g=b.length;if(e!==k)for(;f<g;f++)a[b[f]][c]&&d.push(a[b[f]][c][e]);else for(;f<g;f++)d.push(a[b[f]][c]);return d},V=function(a,b){var c=[],e;b===k?(b=0,e=a):(e=b,b=a);for(var d=b;d<e;d++)c.push(d);return c},Sb=function(a){for(var b=[],c=0,e=a.length;c<e;c++)a[c]&&b.push(a[c]);return b},Na=function(a){var b=[],c,e,d=a.length,f,g=0;e=0;a:for(;e<d;e++){c=a[e];for(f=
+0;f<g;f++)if(b[f]===c)continue a;b.push(c);g++}return b},A=function(a,b,c){a[b]!==k&&(a[c]=a[b])},ba=/\[.*?\]$/,T=/\(\)$/,wa=h("<div>")[0],Zb=wa.textContent!==k,$b=/<.*?>/g;m=function(a){this.$=function(a,b){return this.api(!0).$(a,b)};this._=function(a,b){return this.api(!0).rows(a,b).data()};this.api=function(a){return a?new t(za(this[u.iApiIndex])):new t(this)};this.fnAddData=function(a,b){var c=this.api(!0),e=h.isArray(a)&&(h.isArray(a[0])||h.isPlainObject(a[0]))?c.rows.add(a):c.row.add(a);(b===
+k||b)&&c.draw();return e.flatten().toArray()};this.fnAdjustColumnSizing=function(a){var b=this.api(!0).columns.adjust(),c=b.settings()[0],e=c.oScroll;a===k||a?b.draw(!1):(""!==e.sX||""!==e.sY)&&Y(c)};this.fnClearTable=function(a){var b=this.api(!0).clear();(a===k||a)&&b.draw()};this.fnClose=function(a){this.api(!0).row(a).child.hide()};this.fnDeleteRow=function(a,b,c){var e=this.api(!0),a=e.rows(a),d=a.settings()[0],h=d.aoData[a[0][0]];a.remove();b&&b.call(this,d,h);(c===k||c)&&e.draw();return h};
+this.fnDestroy=function(a){this.api(!0).destroy(a)};this.fnDraw=function(a){this.api(!0).draw(a)};this.fnFilter=function(a,b,c,e,d,h){d=this.api(!0);null===b||b===k?d.search(a,c,e,h):d.column(b).search(a,c,e,h);d.draw()};this.fnGetData=function(a,b){var c=this.api(!0);if(a!==k){var e=a.nodeName?a.nodeName.toLowerCase():"";return b!==k||"td"==e||"th"==e?c.cell(a,b).data():c.row(a).data()||null}return c.data().toArray()};this.fnGetNodes=function(a){var b=this.api(!0);return a!==k?b.row(a).node():b.rows().nodes().flatten().toArray()};
+this.fnGetPosition=function(a){var b=this.api(!0),c=a.nodeName.toUpperCase();return"TR"==c?b.row(a).index():"TD"==c||"TH"==c?(a=b.cell(a).index(),[a.row,a.columnVisible,a.column]):null};this.fnIsOpen=function(a){return this.api(!0).row(a).child.isShown()};this.fnOpen=function(a,b,c){return this.api(!0).row(a).child(b,c).show().child()[0]};this.fnPageChange=function(a,b){var c=this.api(!0).page(a);(b===k||b)&&c.draw(!1)};this.fnSetColumnVis=function(a,b,c){a=this.api(!0).column(a).visible(b);(c===
+k||c)&&a.columns.adjust().draw()};this.fnSettings=function(){return za(this[u.iApiIndex])};this.fnSort=function(a){this.api(!0).order(a).draw()};this.fnSortListener=function(a,b,c){this.api(!0).order.listener(a,b,c)};this.fnUpdate=function(a,b,c,e,d){var h=this.api(!0);c===k||null===c?h.row(b).data(a):h.cell(b,c).data(a);(d===k||d)&&h.columns.adjust();(e===k||e)&&h.draw();return 0};this.fnVersionCheck=u.fnVersionCheck;var b=this,c=a===k,e=this.length;c&&(a={});this.oApi=this.internal=u.internal;for(var d in m.ext.internal)d&&
+(this[d]=Nb(d));this.each(function(){var d={},d=1<e?Lb(d,a,!0):a,g=0,j,i=this.getAttribute("id"),o=!1,l=m.defaults,q=h(this);if("table"!=this.nodeName.toLowerCase())I(null,0,"Non-table node initialisation ("+this.nodeName+")",2);else{eb(l);fb(l.column);H(l,l,!0);H(l.column,l.column,!0);H(l,h.extend(d,q.data()));var n=m.settings,g=0;for(j=n.length;g<j;g++){var r=n[g];if(r.nTable==this||r.nTHead.parentNode==this||r.nTFoot&&r.nTFoot.parentNode==this){g=d.bRetrieve!==k?d.bRetrieve:l.bRetrieve;if(c||g)return r.oInstance;
+if(d.bDestroy!==k?d.bDestroy:l.bDestroy){r.oInstance.fnDestroy();break}else{I(r,0,"Cannot reinitialise DataTable",3);return}}if(r.sTableId==this.id){n.splice(g,1);break}}if(null===i||""===i)this.id=i="DataTables_Table_"+m.ext._unique++;var p=h.extend(!0,{},m.models.oSettings,{sDestroyWidth:q[0].style.width,sInstance:i,sTableId:i});p.nTable=this;p.oApi=b.internal;p.oInit=d;n.push(p);p.oInstance=1===b.length?b:q.dataTable();eb(d);d.oLanguage&&P(d.oLanguage);d.aLengthMenu&&!d.iDisplayLength&&(d.iDisplayLength=
+h.isArray(d.aLengthMenu[0])?d.aLengthMenu[0][0]:d.aLengthMenu[0]);d=Lb(h.extend(!0,{},l),d);E(p.oFeatures,d,"bPaginate bLengthChange bFilter bSort bSortMulti bInfo bProcessing bAutoWidth bSortClasses bServerSide bDeferRender".split(" "));E(p,d,["asStripeClasses","ajax","fnServerData","fnFormatNumber","sServerMethod","aaSorting","aaSortingFixed","aLengthMenu","sPaginationType","sAjaxSource","sAjaxDataProp","iStateDuration","sDom","bSortCellsTop","iTabIndex","fnStateLoadCallback","fnStateSaveCallback",
+"renderer","searchDelay",["iCookieDuration","iStateDuration"],["oSearch","oPreviousSearch"],["aoSearchCols","aoPreSearchCols"],["iDisplayLength","_iDisplayLength"],["bJQueryUI","bJUI"]]);E(p.oScroll,d,[["sScrollX","sX"],["sScrollXInner","sXInner"],["sScrollY","sY"],["bScrollCollapse","bCollapse"]]);E(p.oLanguage,d,"fnInfoCallback");z(p,"aoDrawCallback",d.fnDrawCallback,"user");z(p,"aoServerParams",d.fnServerParams,"user");z(p,"aoStateSaveParams",d.fnStateSaveParams,"user");z(p,"aoStateLoadParams",
+d.fnStateLoadParams,"user");z(p,"aoStateLoaded",d.fnStateLoaded,"user");z(p,"aoRowCallback",d.fnRowCallback,"user");z(p,"aoRowCreatedCallback",d.fnCreatedRow,"user");z(p,"aoHeaderCallback",d.fnHeaderCallback,"user");z(p,"aoFooterCallback",d.fnFooterCallback,"user");z(p,"aoInitComplete",d.fnInitComplete,"user");z(p,"aoPreDrawCallback",d.fnPreDrawCallback,"user");i=p.oClasses;d.bJQueryUI?(h.extend(i,m.ext.oJUIClasses,d.oClasses),d.sDom===l.sDom&&"lfrtip"===l.sDom&&(p.sDom='<"H"lfr>t<"F"ip>'),p.renderer)?
+h.isPlainObject(p.renderer)&&!p.renderer.header&&(p.renderer.header="jqueryui"):p.renderer="jqueryui":h.extend(i,m.ext.classes,d.oClasses);q.addClass(i.sTable);if(""!==p.oScroll.sX||""!==p.oScroll.sY)p.oScroll.iBarWidth=Hb();!0===p.oScroll.sX&&(p.oScroll.sX="100%");p.iInitDisplayStart===k&&(p.iInitDisplayStart=d.iDisplayStart,p._iDisplayStart=d.iDisplayStart);null!==d.iDeferLoading&&(p.bDeferLoading=!0,g=h.isArray(d.iDeferLoading),p._iRecordsDisplay=g?d.iDeferLoading[0]:d.iDeferLoading,p._iRecordsTotal=
+g?d.iDeferLoading[1]:d.iDeferLoading);var t=p.oLanguage;h.extend(!0,t,d.oLanguage);""!==t.sUrl&&(h.ajax({dataType:"json",url:t.sUrl,success:function(a){P(a);H(l.oLanguage,a);h.extend(true,t,a);ga(p)},error:function(){ga(p)}}),o=!0);null===d.asStripeClasses&&(p.asStripeClasses=[i.sStripeOdd,i.sStripeEven]);var g=p.asStripeClasses,s=q.children("tbody").find("tr").eq(0);-1!==h.inArray(!0,h.map(g,function(a){return s.hasClass(a)}))&&(h("tbody tr",this).removeClass(g.join(" ")),p.asDestroyStripes=g.slice());
+n=[];g=this.getElementsByTagName("thead");0!==g.length&&(da(p.aoHeader,g[0]),n=qa(p));if(null===d.aoColumns){r=[];g=0;for(j=n.length;g<j;g++)r.push(null)}else r=d.aoColumns;g=0;for(j=r.length;g<j;g++)Fa(p,n?n[g]:null);ib(p,d.aoColumnDefs,r,function(a,b){ka(p,a,b)});if(s.length){var u=function(a,b){return a.getAttribute("data-"+b)!==null?b:null};h.each(na(p,s[0]).cells,function(a,b){var c=p.aoColumns[a];if(c.mData===a){var d=u(b,"sort")||u(b,"order"),e=u(b,"filter")||u(b,"search");if(d!==null||e!==
+null){c.mData={_:a+".display",sort:d!==null?a+".@data-"+d:k,type:d!==null?a+".@data-"+d:k,filter:e!==null?a+".@data-"+e:k};ka(p,a)}}})}var v=p.oFeatures;d.bStateSave&&(v.bStateSave=!0,Kb(p,d),z(p,"aoDrawCallback",ya,"state_save"));if(d.aaSorting===k){n=p.aaSorting;g=0;for(j=n.length;g<j;g++)n[g][1]=p.aoColumns[g].asSorting[0]}xa(p);v.bSort&&z(p,"aoDrawCallback",function(){if(p.bSorted){var a=U(p),b={};h.each(a,function(a,c){b[c.src]=c.dir});w(p,null,"order",[p,a,b]);Jb(p)}});z(p,"aoDrawCallback",
+function(){(p.bSorted||B(p)==="ssp"||v.bDeferRender)&&xa(p)},"sc");gb(p);g=q.children("caption").each(function(){this._captionSide=q.css("caption-side")});j=q.children("thead");0===j.length&&(j=h("<thead/>").appendTo(this));p.nTHead=j[0];j=q.children("tbody");0===j.length&&(j=h("<tbody/>").appendTo(this));p.nTBody=j[0];j=q.children("tfoot");if(0===j.length&&0<g.length&&(""!==p.oScroll.sX||""!==p.oScroll.sY))j=h("<tfoot/>").appendTo(this);0===j.length||0===j.children().length?q.addClass(i.sNoFooter):
+0<j.length&&(p.nTFoot=j[0],da(p.aoFooter,p.nTFoot));if(d.aaData)for(g=0;g<d.aaData.length;g++)K(p,d.aaData[g]);else(p.bDeferLoading||"dom"==B(p))&&ma(p,h(p.nTBody).children("tr"));p.aiDisplay=p.aiDisplayMaster.slice();p.bInitialised=!0;!1===o&&ga(p)}});b=null;return this};var Tb=[],y=Array.prototype,cc=function(a){var b,c,e=m.settings,d=h.map(e,function(a){return a.nTable});if(a){if(a.nTable&&a.oApi)return[a];if(a.nodeName&&"table"===a.nodeName.toLowerCase())return b=h.inArray(a,d),-1!==b?[e[b]]:
+null;if(a&&"function"===typeof a.settings)return a.settings().toArray();"string"===typeof a?c=h(a):a instanceof h&&(c=a)}else return[];if(c)return c.map(function(){b=h.inArray(this,d);return-1!==b?e[b]:null}).toArray()};t=function(a,b){if(!(this instanceof t))return new t(a,b);var c=[],e=function(a){(a=cc(a))&&c.push.apply(c,a)};if(h.isArray(a))for(var d=0,f=a.length;d<f;d++)e(a[d]);else e(a);this.context=Na(c);b&&this.push.apply(this,b.toArray?b.toArray():b);this.selector={rows:null,cols:null,opts:null};
+t.extend(this,this,Tb)};m.Api=t;t.prototype={any:function(){return 0!==this.flatten().length},concat:y.concat,context:[],each:function(a){for(var b=0,c=this.length;b<c;b++)a.call(this,this[b],b,this);return this},eq:function(a){var b=this.context;return b.length>a?new t(b[a],this[a]):null},filter:function(a){var b=[];if(y.filter)b=y.filter.call(this,a,this);else for(var c=0,e=this.length;c<e;c++)a.call(this,this[c],c,this)&&b.push(this[c]);return new t(this.context,b)},flatten:function(){var a=[];
+return new t(this.context,a.concat.apply(a,this.toArray()))},join:y.join,indexOf:y.indexOf||function(a,b){for(var c=b||0,e=this.length;c<e;c++)if(this[c]===a)return c;return-1},iterator:function(a,b,c,e){var d=[],f,g,h,i,o,l=this.context,q,n,m=this.selector;"string"===typeof a&&(e=c,c=b,b=a,a=!1);g=0;for(h=l.length;g<h;g++){var p=new t(l[g]);if("table"===b)f=c.call(p,l[g],g),f!==k&&d.push(f);else if("columns"===b||"rows"===b)f=c.call(p,l[g],this[g],g),f!==k&&d.push(f);else if("column"===b||"column-rows"===
+b||"row"===b||"cell"===b){n=this[g];"column-rows"===b&&(q=Ca(l[g],m.opts));i=0;for(o=n.length;i<o;i++)f=n[i],f="cell"===b?c.call(p,l[g],f.row,f.column,g,i):c.call(p,l[g],f,g,i,q),f!==k&&d.push(f)}}return d.length||e?(a=new t(l,a?d.concat.apply([],d):d),b=a.selector,b.rows=m.rows,b.cols=m.cols,b.opts=m.opts,a):this},lastIndexOf:y.lastIndexOf||function(a,b){return this.indexOf.apply(this.toArray.reverse(),arguments)},length:0,map:function(a){var b=[];if(y.map)b=y.map.call(this,a,this);else for(var c=
+0,e=this.length;c<e;c++)b.push(a.call(this,this[c],c));return new t(this.context,b)},pluck:function(a){return this.map(function(b){return b[a]})},pop:y.pop,push:y.push,reduce:y.reduce||function(a,b){return hb(this,a,b,0,this.length,1)},reduceRight:y.reduceRight||function(a,b){return hb(this,a,b,this.length-1,-1,-1)},reverse:y.reverse,selector:null,shift:y.shift,sort:y.sort,splice:y.splice,toArray:function(){return y.slice.call(this)},to$:function(){return h(this)},toJQuery:function(){return h(this)},
+unique:function(){return new t(this.context,Na(this))},unshift:y.unshift};t.extend=function(a,b,c){if(c.length&&b&&(b instanceof t||b.__dt_wrapper)){var e,d,f,g=function(a,b,c){return function(){var d=b.apply(a,arguments);t.extend(d,d,c.methodExt);return d}};e=0;for(d=c.length;e<d;e++)f=c[e],b[f.name]="function"===typeof f.val?g(a,f.val,f):h.isPlainObject(f.val)?{}:f.val,b[f.name].__dt_wrapper=!0,t.extend(a,b[f.name],f.propExt)}};t.register=r=function(a,b){if(h.isArray(a))for(var c=0,e=a.length;c<
+e;c++)t.register(a[c],b);else for(var d=a.split("."),f=Tb,g,j,c=0,e=d.length;c<e;c++){g=(j=-1!==d[c].indexOf("()"))?d[c].replace("()",""):d[c];var i;a:{i=0;for(var o=f.length;i<o;i++)if(f[i].name===g){i=f[i];break a}i=null}i||(i={name:g,val:{},methodExt:[],propExt:[]},f.push(i));c===e-1?i.val=b:f=j?i.methodExt:i.propExt}};t.registerPlural=v=function(a,b,c){t.register(a,c);t.register(b,function(){var a=c.apply(this,arguments);return a===this?this:a instanceof t?a.length?h.isArray(a[0])?new t(a.context,
+a[0]):a[0]:k:a})};r("tables()",function(a){var b;if(a){b=t;var c=this.context;if("number"===typeof a)a=[c[a]];else var e=h.map(c,function(a){return a.nTable}),a=h(e).filter(a).map(function(){var a=h.inArray(this,e);return c[a]}).toArray();b=new b(a)}else b=this;return b});r("table()",function(a){var a=this.tables(a),b=a.context;return b.length?new t(b[0]):a});v("tables().nodes()","table().node()",function(){return this.iterator("table",function(a){return a.nTable},1)});v("tables().body()","table().body()",
+function(){return this.iterator("table",function(a){return a.nTBody},1)});v("tables().header()","table().header()",function(){return this.iterator("table",function(a){return a.nTHead},1)});v("tables().footer()","table().footer()",function(){return this.iterator("table",function(a){return a.nTFoot},1)});v("tables().containers()","table().container()",function(){return this.iterator("table",function(a){return a.nTableWrapper},1)});r("draw()",function(a){return this.iterator("table",function(b){N(b,
+!1===a)})});r("page()",function(a){return a===k?this.page.info().page:this.iterator("table",function(b){Ta(b,a)})});r("page.info()",function(){if(0===this.context.length)return k;var a=this.context[0],b=a._iDisplayStart,c=a._iDisplayLength,e=a.fnRecordsDisplay(),d=-1===c;return{page:d?0:Math.floor(b/c),pages:d?1:Math.ceil(e/c),start:b,end:a.fnDisplayEnd(),length:c,recordsTotal:a.fnRecordsTotal(),recordsDisplay:e}});r("page.len()",function(a){return a===k?0!==this.context.length?this.context[0]._iDisplayLength:
+k:this.iterator("table",function(b){Ra(b,a)})});var Ub=function(a,b,c){if(c){var e=new t(a);e.one("draw",function(){c(e.ajax.json())})}"ssp"==B(a)?N(a,b):(C(a,!0),ra(a,[],function(c){oa(a);for(var c=sa(a,c),e=0,g=c.length;e<g;e++)K(a,c[e]);N(a,b);C(a,!1)}))};r("ajax.json()",function(){var a=this.context;if(0<a.length)return a[0].json});r("ajax.params()",function(){var a=this.context;if(0<a.length)return a[0].oAjaxData});r("ajax.reload()",function(a,b){return this.iterator("table",function(c){Ub(c,
+!1===b,a)})});r("ajax.url()",function(a){var b=this.context;if(a===k){if(0===b.length)return k;b=b[0];return b.ajax?h.isPlainObject(b.ajax)?b.ajax.url:b.ajax:b.sAjaxSource}return this.iterator("table",function(b){h.isPlainObject(b.ajax)?b.ajax.url=a:b.ajax=a})});r("ajax.url().load()",function(a,b){return this.iterator("table",function(c){Ub(c,!1===b,a)})});var $a=function(a,b,c,e,d){var f=[],g,j,i,o,l,q;i=typeof b;if(!b||"string"===i||"function"===i||b.length===k)b=[b];i=0;for(o=b.length;i<o;i++){j=
+b[i]&&b[i].split?b[i].split(","):[b[i]];l=0;for(q=j.length;l<q;l++)(g=c("string"===typeof j[l]?h.trim(j[l]):j[l]))&&g.length&&f.push.apply(f,g)}a=u.selector[a];if(a.length){i=0;for(o=a.length;i<o;i++)f=a[i](e,d,f)}return f},ab=function(a){a||(a={});a.filter&&a.search===k&&(a.search=a.filter);return h.extend({search:"none",order:"current",page:"all"},a)},bb=function(a){for(var b=0,c=a.length;b<c;b++)if(0<a[b].length)return a[0]=a[b],a[0].length=1,a.length=1,a.context=[a.context[b]],a;a.length=0;return a},
+Ca=function(a,b){var c,e,d,f=[],g=a.aiDisplay;c=a.aiDisplayMaster;var j=b.search;e=b.order;d=b.page;if("ssp"==B(a))return"removed"===j?[]:V(0,c.length);if("current"==d){c=a._iDisplayStart;for(e=a.fnDisplayEnd();c<e;c++)f.push(g[c])}else if("current"==e||"applied"==e)f="none"==j?c.slice():"applied"==j?g.slice():h.map(c,function(a){return-1===h.inArray(a,g)?a:null});else if("index"==e||"original"==e){c=0;for(e=a.aoData.length;c<e;c++)"none"==j?f.push(c):(d=h.inArray(c,g),(-1===d&&"removed"==j||0<=d&&
+"applied"==j)&&f.push(c))}return f};r("rows()",function(a,b){a===k?a="":h.isPlainObject(a)&&(b=a,a="");var b=ab(b),c=this.iterator("table",function(c){var d=b;return $a("row",a,function(a){var b=Pb(a);if(b!==null&&!d)return[b];var j=Ca(c,d);if(b!==null&&h.inArray(b,j)!==-1)return[b];if(!a)return j;if(typeof a==="function")return h.map(j,function(b){var d=c.aoData[b];return a(b,d._aData,d.nTr)?b:null});b=Sb(ia(c.aoData,j,"nTr"));return a.nodeName&&h.inArray(a,b)!==-1?[a._DT_RowIndex]:h(b).filter(a).map(function(){return this._DT_RowIndex}).toArray()},
+c,d)},1);c.selector.rows=a;c.selector.opts=b;return c});r("rows().nodes()",function(){return this.iterator("row",function(a,b){return a.aoData[b].nTr||k},1)});r("rows().data()",function(){return this.iterator(!0,"rows",function(a,b){return ia(a.aoData,b,"_aData")},1)});v("rows().cache()","row().cache()",function(a){return this.iterator("row",function(b,c){var e=b.aoData[c];return"search"===a?e._aFilterData:e._aSortData},1)});v("rows().invalidate()","row().invalidate()",function(a){return this.iterator("row",
+function(b,c){ca(b,c,a)})});v("rows().indexes()","row().index()",function(){return this.iterator("row",function(a,b){return b},1)});v("rows().remove()","row().remove()",function(){var a=this;return this.iterator("row",function(b,c,e){var d=b.aoData;d.splice(c,1);for(var f=0,g=d.length;f<g;f++)null!==d[f].nTr&&(d[f].nTr._DT_RowIndex=f);h.inArray(c,b.aiDisplay);pa(b.aiDisplayMaster,c);pa(b.aiDisplay,c);pa(a[e],c,!1);Sa(b)})});r("rows.add()",function(a){var b=this.iterator("table",function(b){var c,
+f,g,h=[];f=0;for(g=a.length;f<g;f++)c=a[f],c.nodeName&&"TR"===c.nodeName.toUpperCase()?h.push(ma(b,c)[0]):h.push(K(b,c));return h},1),c=this.rows(-1);c.pop();c.push.apply(c,b.toArray());return c});r("row()",function(a,b){return bb(this.rows(a,b))});r("row().data()",function(a){var b=this.context;if(a===k)return b.length&&this.length?b[0].aoData[this[0]]._aData:k;b[0].aoData[this[0]]._aData=a;ca(b[0],this[0],"data");return this});r("row().node()",function(){var a=this.context;return a.length&&this.length?
+a[0].aoData[this[0]].nTr||null:null});r("row.add()",function(a){a instanceof h&&a.length&&(a=a[0]);var b=this.iterator("table",function(b){return a.nodeName&&"TR"===a.nodeName.toUpperCase()?ma(b,a)[0]:K(b,a)});return this.row(b[0])});var cb=function(a,b){var c=a.context;c.length&&(c=c[0].aoData[b!==k?b:a[0]],c._details&&(c._details.remove(),c._detailsShow=k,c._details=k))},Vb=function(a,b){var c=a.context;if(c.length&&a.length){var e=c[0].aoData[a[0]];if(e._details){(e._detailsShow=b)?e._details.insertAfter(e.nTr):
+e._details.detach();var d=c[0],f=new t(d),g=d.aoData;f.off("draw.dt.DT_details column-visibility.dt.DT_details destroy.dt.DT_details");0<D(g,"_details").length&&(f.on("draw.dt.DT_details",function(a,b){d===b&&f.rows({page:"current"}).eq(0).each(function(a){a=g[a];a._detailsShow&&a._details.insertAfter(a.nTr)})}),f.on("column-visibility.dt.DT_details",function(a,b){if(d===b)for(var c,e=aa(b),f=0,h=g.length;f<h;f++)c=g[f],c._details&&c._details.children("td[colspan]").attr("colspan",e)}),f.on("destroy.dt.DT_details",
+function(a,b){if(d===b)for(var c=0,e=g.length;c<e;c++)g[c]._details&&cb(f,c)}))}}};r("row().child()",function(a,b){var c=this.context;if(a===k)return c.length&&this.length?c[0].aoData[this[0]]._details:k;if(!0===a)this.child.show();else if(!1===a)cb(this);else if(c.length&&this.length){var e=c[0],c=c[0].aoData[this[0]],d=[],f=function(a,b){if(h.isArray(a)||a instanceof h)for(var c=0,k=a.length;c<k;c++)f(a[c],b);else a.nodeName&&"tr"===a.nodeName.toLowerCase()?d.push(a):(c=h("<tr><td/></tr>").addClass(b),
+h("td",c).addClass(b).html(a)[0].colSpan=aa(e),d.push(c[0]))};f(a,b);c._details&&c._details.remove();c._details=h(d);c._detailsShow&&c._details.insertAfter(c.nTr)}return this});r(["row().child.show()","row().child().show()"],function(){Vb(this,!0);return this});r(["row().child.hide()","row().child().hide()"],function(){Vb(this,!1);return this});r(["row().child.remove()","row().child().remove()"],function(){cb(this);return this});r("row().child.isShown()",function(){var a=this.context;return a.length&&
+this.length?a[0].aoData[this[0]]._detailsShow||!1:!1});var dc=/^(.+):(name|visIdx|visible)$/,Wb=function(a,b,c,e,d){for(var c=[],e=0,f=d.length;e<f;e++)c.push(x(a,d[e],b));return c};r("columns()",function(a,b){a===k?a="":h.isPlainObject(a)&&(b=a,a="");var b=ab(b),c=this.iterator("table",function(c){var d=a,f=b,g=c.aoColumns,j=D(g,"sName"),i=D(g,"nTh");return $a("column",d,function(a){var b=Pb(a);if(a==="")return V(g.length);if(b!==null)return[b>=0?b:g.length+b];if(typeof a==="function"){var d=Ca(c,
+f);return h.map(g,function(b,f){return a(f,Wb(c,f,0,0,d),i[f])?f:null})}var k=typeof a==="string"?a.match(dc):"";if(k)switch(k[2]){case "visIdx":case "visible":b=parseInt(k[1],10);if(b<0){var m=h.map(g,function(a,b){return a.bVisible?b:null});return[m[m.length+b]]}return[la(c,b)];case "name":return h.map(j,function(a,b){return a===k[1]?b:null})}else return h(i).filter(a).map(function(){return h.inArray(this,i)}).toArray()},c,f)},1);c.selector.cols=a;c.selector.opts=b;return c});v("columns().header()",
+"column().header()",function(){return this.iterator("column",function(a,b){return a.aoColumns[b].nTh},1)});v("columns().footer()","column().footer()",function(){return this.iterator("column",function(a,b){return a.aoColumns[b].nTf},1)});v("columns().data()","column().data()",function(){return this.iterator("column-rows",Wb,1)});v("columns().dataSrc()","column().dataSrc()",function(){return this.iterator("column",function(a,b){return a.aoColumns[b].mData},1)});v("columns().cache()","column().cache()",
+function(a){return this.iterator("column-rows",function(b,c,e,d,f){return ia(b.aoData,f,"search"===a?"_aFilterData":"_aSortData",c)},1)});v("columns().nodes()","column().nodes()",function(){return this.iterator("column-rows",function(a,b,c,e,d){return ia(a.aoData,d,"anCells",b)},1)});v("columns().visible()","column().visible()",function(a,b){return this.iterator("column",function(c,e){if(a===k)return c.aoColumns[e].bVisible;var d=c.aoColumns,f=d[e],g=c.aoData,j,i,m;if(a!==k&&f.bVisible!==a){if(a){var l=
+h.inArray(!0,D(d,"bVisible"),e+1);j=0;for(i=g.length;j<i;j++)m=g[j].nTr,d=g[j].anCells,m&&m.insertBefore(d[e],d[l]||null)}else h(D(c.aoData,"anCells",e)).detach();f.bVisible=a;ea(c,c.aoHeader);ea(c,c.aoFooter);if(b===k||b)X(c),(c.oScroll.sX||c.oScroll.sY)&&Y(c);w(c,null,"column-visibility",[c,e,a]);ya(c)}})});v("columns().indexes()","column().index()",function(a){return this.iterator("column",function(b,c){return"visible"===a?$(b,c):c},1)});r("columns.adjust()",function(){return this.iterator("table",
+function(a){X(a)},1)});r("column.index()",function(a,b){if(0!==this.context.length){var c=this.context[0];if("fromVisible"===a||"toData"===a)return la(c,b);if("fromData"===a||"toVisible"===a)return $(c,b)}});r("column()",function(a,b){return bb(this.columns(a,b))});r("cells()",function(a,b,c){h.isPlainObject(a)&&(a.row===k?(c=a,a=null):(c=b,b=null));h.isPlainObject(b)&&(c=b,b=null);if(null===b||b===k)return this.iterator("table",function(b){var d=a,e=ab(c),f=b.aoData,g=Ca(b,e),i=Sb(ia(f,g,"anCells")),
+j=h([].concat.apply([],i)),l,m=b.aoColumns.length,o,r,t,s,u,v;return $a("cell",d,function(a){var c=typeof a==="function";if(a===null||a===k||c){o=[];r=0;for(t=g.length;r<t;r++){l=g[r];for(s=0;s<m;s++){u={row:l,column:s};if(c){v=b.aoData[l];a(u,x(b,l,s),v.anCells?v.anCells[s]:null)&&o.push(u)}else o.push(u)}}return o}return h.isPlainObject(a)?[a]:j.filter(a).map(function(a,b){l=b.parentNode._DT_RowIndex;return{row:l,column:h.inArray(b,f[l].anCells)}}).toArray()},b,e)});var e=this.columns(b,c),d=this.rows(a,
+c),f,g,j,i,m,l=this.iterator("table",function(a,b){f=[];g=0;for(j=d[b].length;g<j;g++){i=0;for(m=e[b].length;i<m;i++)f.push({row:d[b][g],column:e[b][i]})}return f},1);h.extend(l.selector,{cols:b,rows:a,opts:c});return l});v("cells().nodes()","cell().node()",function(){return this.iterator("cell",function(a,b,c){return(a=a.aoData[b].anCells)?a[c]:k},1)});r("cells().data()",function(){return this.iterator("cell",function(a,b,c){return x(a,b,c)},1)});v("cells().cache()","cell().cache()",function(a){a=
+"search"===a?"_aFilterData":"_aSortData";return this.iterator("cell",function(b,c,e){return b.aoData[c][a][e]},1)});v("cells().render()","cell().render()",function(a){return this.iterator("cell",function(b,c,e){return x(b,c,e,a)},1)});v("cells().indexes()","cell().index()",function(){return this.iterator("cell",function(a,b,c){return{row:b,column:c,columnVisible:$(a,c)}},1)});v("cells().invalidate()","cell().invalidate()",function(a){return this.iterator("cell",function(b,c,e){ca(b,c,a,e)})});r("cell()",
+function(a,b,c){return bb(this.cells(a,b,c))});r("cell().data()",function(a){var b=this.context,c=this[0];if(a===k)return b.length&&c.length?x(b[0],c[0].row,c[0].column):k;Ia(b[0],c[0].row,c[0].column,a);ca(b[0],c[0].row,"data",c[0].column);return this});r("order()",function(a,b){var c=this.context;if(a===k)return 0!==c.length?c[0].aaSorting:k;"number"===typeof a?a=[[a,b]]:h.isArray(a[0])||(a=Array.prototype.slice.call(arguments));return this.iterator("table",function(b){b.aaSorting=a.slice()})});
+r("order.listener()",function(a,b,c){return this.iterator("table",function(e){Oa(e,a,b,c)})});r(["columns().order()","column().order()"],function(a){var b=this;return this.iterator("table",function(c,e){var d=[];h.each(b[e],function(b,c){d.push([c,a])});c.aaSorting=d})});r("search()",function(a,b,c,e){var d=this.context;return a===k?0!==d.length?d[0].oPreviousSearch.sSearch:k:this.iterator("table",function(d){d.oFeatures.bFilter&&fa(d,h.extend({},d.oPreviousSearch,{sSearch:a+"",bRegex:null===b?!1:
+b,bSmart:null===c?!0:c,bCaseInsensitive:null===e?!0:e}),1)})});v("columns().search()","column().search()",function(a,b,c,e){return this.iterator("column",function(d,f){var g=d.aoPreSearchCols;if(a===k)return g[f].sSearch;d.oFeatures.bFilter&&(h.extend(g[f],{sSearch:a+"",bRegex:null===b?!1:b,bSmart:null===c?!0:c,bCaseInsensitive:null===e?!0:e}),fa(d,d.oPreviousSearch,1))})});r("state()",function(){return this.context.length?this.context[0].oSavedState:null});r("state.clear()",function(){return this.iterator("table",
+function(a){a.fnStateSaveCallback.call(a.oInstance,a,{})})});r("state.loaded()",function(){return this.context.length?this.context[0].oLoadedState:null});r("state.save()",function(){return this.iterator("table",function(a){ya(a)})});m.versionCheck=m.fnVersionCheck=function(a){for(var b=m.version.split("."),a=a.split("."),c,e,d=0,f=a.length;d<f;d++)if(c=parseInt(b[d],10)||0,e=parseInt(a[d],10)||0,c!==e)return c>e;return!0};m.isDataTable=m.fnIsDataTable=function(a){var b=h(a).get(0),c=!1;h.each(m.settings,
+function(a,d){var f=d.nScrollHead?h("table",d.nScrollHead)[0]:null,g=d.nScrollFoot?h("table",d.nScrollFoot)[0]:null;if(d.nTable===b||f===b||g===b)c=!0});return c};m.tables=m.fnTables=function(a){return h.map(m.settings,function(b){if(!a||a&&h(b.nTable).is(":visible"))return b.nTable})};m.util={throttle:ua,escapeRegex:va};m.camelToHungarian=H;r("$()",function(a,b){var c=this.rows(b).nodes(),c=h(c);return h([].concat(c.filter(a).toArray(),c.find(a).toArray()))});h.each(["on","one","off"],function(a,
+b){r(b+"()",function(){var a=Array.prototype.slice.call(arguments);a[0].match(/\.dt\b/)||(a[0]+=".dt");var e=h(this.tables().nodes());e[b].apply(e,a);return this})});r("clear()",function(){return this.iterator("table",function(a){oa(a)})});r("settings()",function(){return new t(this.context,this.context)});r("init()",function(){var a=this.context;return a.length?a[0].oInit:null});r("data()",function(){return this.iterator("table",function(a){return D(a.aoData,"_aData")}).flatten()});r("destroy()",
+function(a){a=a||!1;return this.iterator("table",function(b){var c=b.nTableWrapper.parentNode,e=b.oClasses,d=b.nTable,f=b.nTBody,g=b.nTHead,j=b.nTFoot,i=h(d),f=h(f),k=h(b.nTableWrapper),l=h.map(b.aoData,function(a){return a.nTr}),q;b.bDestroying=!0;w(b,"aoDestroyCallback","destroy",[b]);a||(new t(b)).columns().visible(!0);k.unbind(".DT").find(":not(tbody *)").unbind(".DT");h(Ea).unbind(".DT-"+b.sInstance);d!=g.parentNode&&(i.children("thead").detach(),i.append(g));j&&d!=j.parentNode&&(i.children("tfoot").detach(),
+i.append(j));i.detach();k.detach();b.aaSorting=[];b.aaSortingFixed=[];xa(b);h(l).removeClass(b.asStripeClasses.join(" "));h("th, td",g).removeClass(e.sSortable+" "+e.sSortableAsc+" "+e.sSortableDesc+" "+e.sSortableNone);b.bJUI&&(h("th span."+e.sSortIcon+", td span."+e.sSortIcon,g).detach(),h("th, td",g).each(function(){var a=h("div."+e.sSortJUIWrapper,this);h(this).append(a.contents());a.detach()}));!a&&c&&c.insertBefore(d,b.nTableReinsertBefore);f.children().detach();f.append(l);i.css("width",b.sDestroyWidth).removeClass(e.sTable);
+(q=b.asDestroyStripes.length)&&f.children().each(function(a){h(this).addClass(b.asDestroyStripes[a%q])});c=h.inArray(b,m.settings);-1!==c&&m.settings.splice(c,1)})});h.each(["column","row","cell"],function(a,b){r(b+"s().every()",function(a){return this.iterator(b,function(e,d,f){a.call((new t(e))[b](d,f))})})});r("i18n()",function(a,b,c){var e=this.context[0],a=R(a)(e.oLanguage);a===k&&(a=b);c!==k&&h.isPlainObject(a)&&(a=a[c]!==k?a[c]:a._);return a.replace("%d",c)});m.version="1.10.7";m.settings=
+[];m.models={};m.models.oSearch={bCaseInsensitive:!0,sSearch:"",bRegex:!1,bSmart:!0};m.models.oRow={nTr:null,anCells:null,_aData:[],_aSortData:null,_aFilterData:null,_sFilterRow:null,_sRowStripe:"",src:null};m.models.oColumn={idx:null,aDataSort:null,asSorting:null,bSearchable:null,bSortable:null,bVisible:null,_sManualType:null,_bAttrSrc:!1,fnCreatedCell:null,fnGetData:null,fnSetData:null,mData:null,mRender:null,nTh:null,nTf:null,sClass:null,sContentPadding:null,sDefaultContent:null,sName:null,sSortDataType:"std",
+sSortingClass:null,sSortingClassJUI:null,sTitle:null,sType:null,sWidth:null,sWidthOrig:null};m.defaults={aaData:null,aaSorting:[[0,"asc"]],aaSortingFixed:[],ajax:null,aLengthMenu:[10,25,50,100],aoColumns:null,aoColumnDefs:null,aoSearchCols:[],asStripeClasses:null,bAutoWidth:!0,bDeferRender:!1,bDestroy:!1,bFilter:!0,bInfo:!0,bJQueryUI:!1,bLengthChange:!0,bPaginate:!0,bProcessing:!1,bRetrieve:!1,bScrollCollapse:!1,bServerSide:!1,bSort:!0,bSortMulti:!0,bSortCellsTop:!1,bSortClasses:!0,bStateSave:!1,
+fnCreatedRow:null,fnDrawCallback:null,fnFooterCallback:null,fnFormatNumber:function(a){return a.toString().replace(/\B(?=(\d{3})+(?!\d))/g,this.oLanguage.sThousands)},fnHeaderCallback:null,fnInfoCallback:null,fnInitComplete:null,fnPreDrawCallback:null,fnRowCallback:null,fnServerData:null,fnServerParams:null,fnStateLoadCallback:function(a){try{return JSON.parse((-1===a.iStateDuration?sessionStorage:localStorage).getItem("DataTables_"+a.sInstance+"_"+location.pathname))}catch(b){}},fnStateLoadParams:null,
+fnStateLoaded:null,fnStateSaveCallback:function(a,b){try{(-1===a.iStateDuration?sessionStorage:localStorage).setItem("DataTables_"+a.sInstance+"_"+location.pathname,JSON.stringify(b))}catch(c){}},fnStateSaveParams:null,iStateDuration:7200,iDeferLoading:null,iDisplayLength:10,iDisplayStart:0,iTabIndex:0,oClasses:{},oLanguage:{oAria:{sSortAscending:": activate to sort column ascending",sSortDescending:": activate to sort column descending"},oPaginate:{sFirst:"First",sLast:"Last",sNext:"Next",sPrevious:"Previous"},
+sEmptyTable:"No data available in table",sInfo:"Showing _START_ to _END_ of _TOTAL_ entries",sInfoEmpty:"Showing 0 to 0 of 0 entries",sInfoFiltered:"(filtered from _MAX_ total entries)",sInfoPostFix:"",sDecimal:"",sThousands:",",sLengthMenu:"Show _MENU_ entries",sLoadingRecords:"Loading...",sProcessing:"Processing...",sSearch:"Search:",sSearchPlaceholder:"",sUrl:"",sZeroRecords:"No matching records found"},oSearch:h.extend({},m.models.oSearch),sAjaxDataProp:"data",sAjaxSource:null,sDom:"lfrtip",searchDelay:null,
+sPaginationType:"simple_numbers",sScrollX:"",sScrollXInner:"",sScrollY:"",sServerMethod:"GET",renderer:null};W(m.defaults);m.defaults.column={aDataSort:null,iDataSort:-1,asSorting:["asc","desc"],bSearchable:!0,bSortable:!0,bVisible:!0,fnCreatedCell:null,mData:null,mRender:null,sCellType:"td",sClass:"",sContentPadding:"",sDefaultContent:null,sName:"",sSortDataType:"std",sTitle:null,sType:null,sWidth:null};W(m.defaults.column);m.models.oSettings={oFeatures:{bAutoWidth:null,bDeferRender:null,bFilter:null,
+bInfo:null,bLengthChange:null,bPaginate:null,bProcessing:null,bServerSide:null,bSort:null,bSortMulti:null,bSortClasses:null,bStateSave:null},oScroll:{bCollapse:null,iBarWidth:0,sX:null,sXInner:null,sY:null},oLanguage:{fnInfoCallback:null},oBrowser:{bScrollOversize:!1,bScrollbarLeft:!1},ajax:null,aanFeatures:[],aoData:[],aiDisplay:[],aiDisplayMaster:[],aoColumns:[],aoHeader:[],aoFooter:[],oPreviousSearch:{},aoPreSearchCols:[],aaSorting:null,aaSortingFixed:[],asStripeClasses:null,asDestroyStripes:[],
+sDestroyWidth:0,aoRowCallback:[],aoHeaderCallback:[],aoFooterCallback:[],aoDrawCallback:[],aoRowCreatedCallback:[],aoPreDrawCallback:[],aoInitComplete:[],aoStateSaveParams:[],aoStateLoadParams:[],aoStateLoaded:[],sTableId:"",nTable:null,nTHead:null,nTFoot:null,nTBody:null,nTableWrapper:null,bDeferLoading:!1,bInitialised:!1,aoOpenRows:[],sDom:null,searchDelay:null,sPaginationType:"two_button",iStateDuration:0,aoStateSave:[],aoStateLoad:[],oSavedState:null,oLoadedState:null,sAjaxSource:null,sAjaxDataProp:null,
+bAjaxDataGet:!0,jqXHR:null,json:k,oAjaxData:k,fnServerData:null,aoServerParams:[],sServerMethod:null,fnFormatNumber:null,aLengthMenu:null,iDraw:0,bDrawing:!1,iDrawError:-1,_iDisplayLength:10,_iDisplayStart:0,_iRecordsTotal:0,_iRecordsDisplay:0,bJUI:null,oClasses:{},bFiltered:!1,bSorted:!1,bSortCellsTop:null,oInit:null,aoDestroyCallback:[],fnRecordsTotal:function(){return"ssp"==B(this)?1*this._iRecordsTotal:this.aiDisplayMaster.length},fnRecordsDisplay:function(){return"ssp"==B(this)?1*this._iRecordsDisplay:
+this.aiDisplay.length},fnDisplayEnd:function(){var a=this._iDisplayLength,b=this._iDisplayStart,c=b+a,e=this.aiDisplay.length,d=this.oFeatures,f=d.bPaginate;return d.bServerSide?!1===f||-1===a?b+e:Math.min(b+a,this._iRecordsDisplay):!f||c>e||-1===a?e:c},oInstance:null,sInstance:null,iTabIndex:0,nScrollHead:null,nScrollFoot:null,aLastSort:[],oPlugins:{}};m.ext=u={buttons:{},classes:{},errMode:"alert",feature:[],search:[],selector:{cell:[],column:[],row:[]},internal:{},legacy:{ajax:null},pager:{},renderer:{pageButton:{},
+header:{}},order:{},type:{detect:[],search:{},order:{}},_unique:0,fnVersionCheck:m.fnVersionCheck,iApiIndex:0,oJUIClasses:{},sVersion:m.version};h.extend(u,{afnFiltering:u.search,aTypes:u.type.detect,ofnSearch:u.type.search,oSort:u.type.order,afnSortData:u.order,aoFeatures:u.feature,oApi:u.internal,oStdClasses:u.classes,oPagination:u.pager});h.extend(m.ext.classes,{sTable:"dataTable",sNoFooter:"no-footer",sPageButton:"paginate_button",sPageButtonActive:"current",sPageButtonDisabled:"disabled",sStripeOdd:"odd",
+sStripeEven:"even",sRowEmpty:"dataTables_empty",sWrapper:"dataTables_wrapper",sFilter:"dataTables_filter",sInfo:"dataTables_info",sPaging:"dataTables_paginate paging_",sLength:"dataTables_length",sProcessing:"dataTables_processing",sSortAsc:"sorting_asc",sSortDesc:"sorting_desc",sSortable:"sorting",sSortableAsc:"sorting_asc_disabled",sSortableDesc:"sorting_desc_disabled",sSortableNone:"sorting_disabled",sSortColumn:"sorting_",sFilterInput:"",sLengthSelect:"",sScrollWrapper:"dataTables_scroll",sScrollHead:"dataTables_scrollHead",
+sScrollHeadInner:"dataTables_scrollHeadInner",sScrollBody:"dataTables_scrollBody",sScrollFoot:"dataTables_scrollFoot",sScrollFootInner:"dataTables_scrollFootInner",sHeaderTH:"",sFooterTH:"",sSortJUIAsc:"",sSortJUIDesc:"",sSortJUI:"",sSortJUIAscAllowed:"",sSortJUIDescAllowed:"",sSortJUIWrapper:"",sSortIcon:"",sJUIHeader:"",sJUIFooter:""});var Da="",Da="",F=Da+"ui-state-default",ja=Da+"css_right ui-icon ui-icon-",Xb=Da+"fg-toolbar ui-toolbar ui-widget-header ui-helper-clearfix";h.extend(m.ext.oJUIClasses,
+m.ext.classes,{sPageButton:"fg-button ui-button "+F,sPageButtonActive:"ui-state-disabled",sPageButtonDisabled:"ui-state-disabled",sPaging:"dataTables_paginate fg-buttonset ui-buttonset fg-buttonset-multi ui-buttonset-multi paging_",sSortAsc:F+" sorting_asc",sSortDesc:F+" sorting_desc",sSortable:F+" sorting",sSortableAsc:F+" sorting_asc_disabled",sSortableDesc:F+" sorting_desc_disabled",sSortableNone:F+" sorting_disabled",sSortJUIAsc:ja+"triangle-1-n",sSortJUIDesc:ja+"triangle-1-s",sSortJUI:ja+"carat-2-n-s",
+sSortJUIAscAllowed:ja+"carat-1-n",sSortJUIDescAllowed:ja+"carat-1-s",sSortJUIWrapper:"DataTables_sort_wrapper",sSortIcon:"DataTables_sort_icon",sScrollHead:"dataTables_scrollHead "+F,sScrollFoot:"dataTables_scrollFoot "+F,sHeaderTH:F,sFooterTH:F,sJUIHeader:Xb+" ui-corner-tl ui-corner-tr",sJUIFooter:Xb+" ui-corner-bl ui-corner-br"});var Mb=m.ext.pager;h.extend(Mb,{simple:function(){return["previous","next"]},full:function(){return["first","previous","next","last"]},simple_numbers:function(a,b){return["previous",
+Wa(a,b),"next"]},full_numbers:function(a,b){return["first","previous",Wa(a,b),"next","last"]},_numbers:Wa,numbers_length:7});h.extend(!0,m.ext.renderer,{pageButton:{_:function(a,b,c,e,d,f){var g=a.oClasses,j=a.oLanguage.oPaginate,i,k,l=0,m=function(b,e){var n,r,t,s,u=function(b){Ta(a,b.data.action,true)};n=0;for(r=e.length;n<r;n++){s=e[n];if(h.isArray(s)){t=h("<"+(s.DT_el||"div")+"/>").appendTo(b);m(t,s)}else{k=i="";switch(s){case "ellipsis":b.append('<span class="ellipsis">&#x2026;</span>');break;
+case "first":i=j.sFirst;k=s+(d>0?"":" "+g.sPageButtonDisabled);break;case "previous":i=j.sPrevious;k=s+(d>0?"":" "+g.sPageButtonDisabled);break;case "next":i=j.sNext;k=s+(d<f-1?"":" "+g.sPageButtonDisabled);break;case "last":i=j.sLast;k=s+(d<f-1?"":" "+g.sPageButtonDisabled);break;default:i=s+1;k=d===s?g.sPageButtonActive:""}if(i){t=h("<a>",{"class":g.sPageButton+" "+k,"aria-controls":a.sTableId,"data-dt-idx":l,tabindex:a.iTabIndex,id:c===0&&typeof s==="string"?a.sTableId+"_"+s:null}).html(i).appendTo(b);
+Va(t,{action:s},u);l++}}}},n;try{n=h(Q.activeElement).data("dt-idx")}catch(r){}m(h(b).empty(),e);n&&h(b).find("[data-dt-idx="+n+"]").focus()}}});h.extend(m.ext.type.detect,[function(a,b){var c=b.oLanguage.sDecimal;return Za(a,c)?"num"+c:null},function(a){if(a&&!(a instanceof Date)&&(!ac.test(a)||!bc.test(a)))return null;var b=Date.parse(a);return null!==b&&!isNaN(b)||J(a)?"date":null},function(a,b){var c=b.oLanguage.sDecimal;return Za(a,c,!0)?"num-fmt"+c:null},function(a,b){var c=b.oLanguage.sDecimal;
+return Rb(a,c)?"html-num"+c:null},function(a,b){var c=b.oLanguage.sDecimal;return Rb(a,c,!0)?"html-num-fmt"+c:null},function(a){return J(a)||"string"===typeof a&&-1!==a.indexOf("<")?"html":null}]);h.extend(m.ext.type.search,{html:function(a){return J(a)?a:"string"===typeof a?a.replace(Ob," ").replace(Ba,""):""},string:function(a){return J(a)?a:"string"===typeof a?a.replace(Ob," "):a}});var Aa=function(a,b,c,e){if(0!==a&&(!a||"-"===a))return-Infinity;b&&(a=Qb(a,b));a.replace&&(c&&(a=a.replace(c,"")),
+e&&(a=a.replace(e,"")));return 1*a};h.extend(u.type.order,{"date-pre":function(a){return Date.parse(a)||0},"html-pre":function(a){return J(a)?"":a.replace?a.replace(/<.*?>/g,"").toLowerCase():a+""},"string-pre":function(a){return J(a)?"":"string"===typeof a?a.toLowerCase():!a.toString?"":a.toString()},"string-asc":function(a,b){return a<b?-1:a>b?1:0},"string-desc":function(a,b){return a<b?1:a>b?-1:0}});db("");h.extend(!0,m.ext.renderer,{header:{_:function(a,b,c,e){h(a.nTable).on("order.dt.DT",function(d,
+f,g,h){if(a===f){d=c.idx;b.removeClass(c.sSortingClass+" "+e.sSortAsc+" "+e.sSortDesc).addClass(h[d]=="asc"?e.sSortAsc:h[d]=="desc"?e.sSortDesc:c.sSortingClass)}})},jqueryui:function(a,b,c,e){h("<div/>").addClass(e.sSortJUIWrapper).append(b.contents()).append(h("<span/>").addClass(e.sSortIcon+" "+c.sSortingClassJUI)).appendTo(b);h(a.nTable).on("order.dt.DT",function(d,f,g,h){if(a===f){d=c.idx;b.removeClass(e.sSortAsc+" "+e.sSortDesc).addClass(h[d]=="asc"?e.sSortAsc:h[d]=="desc"?e.sSortDesc:c.sSortingClass);
+b.find("span."+e.sSortIcon).removeClass(e.sSortJUIAsc+" "+e.sSortJUIDesc+" "+e.sSortJUI+" "+e.sSortJUIAscAllowed+" "+e.sSortJUIDescAllowed).addClass(h[d]=="asc"?e.sSortJUIAsc:h[d]=="desc"?e.sSortJUIDesc:c.sSortingClassJUI)}})}}});m.render={number:function(a,b,c,e){return{display:function(d){if("number"!==typeof d&&"string"!==typeof d)return d;var f=0>d?"-":"",d=Math.abs(parseFloat(d)),g=parseInt(d,10),d=c?b+(d-g).toFixed(c).substring(2):"";return f+(e||"")+g.toString().replace(/\B(?=(\d{3})+(?!\d))/g,
+a)+d}}}};h.extend(m.ext.internal,{_fnExternApiFunc:Nb,_fnBuildAjax:ra,_fnAjaxUpdate:kb,_fnAjaxParameters:tb,_fnAjaxUpdateDraw:ub,_fnAjaxDataSrc:sa,_fnAddColumn:Fa,_fnColumnOptions:ka,_fnAdjustColumnSizing:X,_fnVisibleToColumnIndex:la,_fnColumnIndexToVisible:$,_fnVisbleColumns:aa,_fnGetColumns:Z,_fnColumnTypes:Ha,_fnApplyColumnDefs:ib,_fnHungarianMap:W,_fnCamelToHungarian:H,_fnLanguageCompat:P,_fnBrowserDetect:gb,_fnAddData:K,_fnAddTr:ma,_fnNodeToDataIndex:function(a,b){return b._DT_RowIndex!==k?b._DT_RowIndex:
+null},_fnNodeToColumnIndex:function(a,b,c){return h.inArray(c,a.aoData[b].anCells)},_fnGetCellData:x,_fnSetCellData:Ia,_fnSplitObjNotation:Ka,_fnGetObjectDataFn:R,_fnSetObjectDataFn:S,_fnGetDataMaster:La,_fnClearTable:oa,_fnDeleteIndex:pa,_fnInvalidate:ca,_fnGetRowElements:na,_fnCreateTr:Ja,_fnBuildHead:jb,_fnDrawHead:ea,_fnDraw:M,_fnReDraw:N,_fnAddOptionsHtml:mb,_fnDetectHeader:da,_fnGetUniqueThs:qa,_fnFeatureHtmlFilter:ob,_fnFilterComplete:fa,_fnFilterCustom:xb,_fnFilterColumn:wb,_fnFilter:vb,_fnFilterCreateSearch:Qa,
+_fnEscapeRegex:va,_fnFilterData:yb,_fnFeatureHtmlInfo:rb,_fnUpdateInfo:Bb,_fnInfoMacros:Cb,_fnInitialise:ga,_fnInitComplete:ta,_fnLengthChange:Ra,_fnFeatureHtmlLength:nb,_fnFeatureHtmlPaginate:sb,_fnPageChange:Ta,_fnFeatureHtmlProcessing:pb,_fnProcessingDisplay:C,_fnFeatureHtmlTable:qb,_fnScrollDraw:Y,_fnApplyToChildren:G,_fnCalculateColumnWidths:Ga,_fnThrottle:ua,_fnConvertToWidth:Db,_fnScrollingWidthAdjust:Fb,_fnGetWidestNode:Eb,_fnGetMaxLenString:Gb,_fnStringToCss:s,_fnScrollBarWidth:Hb,_fnSortFlatten:U,
+_fnSort:lb,_fnSortAria:Jb,_fnSortListener:Ua,_fnSortAttachListener:Oa,_fnSortingClasses:xa,_fnSortData:Ib,_fnSaveState:ya,_fnLoadState:Kb,_fnSettingsFromNode:za,_fnLog:I,_fnMap:E,_fnBindAction:Va,_fnCallbackReg:z,_fnCallbackFire:w,_fnLengthOverflow:Sa,_fnRenderer:Pa,_fnDataSource:B,_fnRowAttributes:Ma,_fnCalculateEnd:function(){}});h.fn.dataTable=m;h.fn.dataTableSettings=m.settings;h.fn.dataTableExt=m.ext;h.fn.DataTable=function(a){return h(this).dataTable(a).api()};h.each(m,function(a,b){h.fn.DataTable[a]=
+b});return h.fn.dataTable};"function"===typeof define&&define.amd?define("datatables",["jquery"],P):"object"===typeof exports?module.exports=P(require("jquery")):jQuery&&!jQuery.fn.dataTable&&P(jQuery)})(window,document);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_page.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_page.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_page.css
deleted file mode 100644
index b60ee7d..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_page.css
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * General page setup
- */
-#dt_example {
-	font: 80%/1.45em "Lucida Grande", Verdana, Arial, Helvetica, sans-serif;
-	margin: 0;
-	padding: 0;
-	color: #333;
-	background-color: #fff;
-}
-
-
-#dt_example #container {
-	width: 800px;
-	margin: 30px auto;
-	padding: 0;
-}
-
-
-#dt_example #footer {
-	margin: 50px auto 0 auto;
-	padding: 0;
-}
-
-#dt_example #demo {
-	margin: 30px auto 0 auto;
-}
-
-#dt_example .demo_jui {
-	margin: 30px auto 0 auto;
-}
-
-#dt_example .big {
-	font-size: 1.3em;
-	font-weight: bold;
-	line-height: 1.6em;
-	color: #4E6CA3;
-}
-
-#dt_example .spacer {
-	height: 20px;
-	clear: both;
-}
-
-#dt_example .clear {
-	clear: both;
-}
-
-#dt_example pre {
-	padding: 15px;
-	background-color: #F5F5F5;
-	border: 1px solid #CCCCCC;
-}
-
-#dt_example h1 {
-	margin-top: 2em;
-	font-size: 1.3em;
-	font-weight: normal;
-	line-height: 1.6em;
-	color: #4E6CA3;
-	border-bottom: 1px solid #B0BED9;
-	clear: both;
-}
-
-#dt_example h2 {
-	font-size: 1.2em;
-	font-weight: normal;
-	line-height: 1.6em;
-	color: #4E6CA3;
-	clear: both;
-}
-
-#dt_example a {
-	color: #0063DC;
-	text-decoration: none;
-}
-
-#dt_example a:hover {
-	text-decoration: underline;
-}
-
-#dt_example ul {
-	color: #4E6CA3;
-}
-
-.css_right {
-	float: right;
-}
-
-.css_left {
-	float: left;
-}




[11/25] hadoop git commit: YARN-8331. Race condition in NM container launched after done. Contributed by Pradeep Ambati

Posted by su...@apache.org.
YARN-8331. Race condition in NM container launched after done. Contributed by Pradeep Ambati


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd04e954
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd04e954
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd04e954

Branch: refs/heads/HDFS-12943
Commit: cd04e954d2db27f0a15b7d1c492b7cdb656a51db
Parents: 778369e
Author: Jason Lowe <jl...@apache.org>
Authored: Thu Aug 9 10:17:34 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Thu Aug 9 10:17:34 2018 -0500

----------------------------------------------------------------------
 .../container/ContainerImpl.java                | 13 +++++-
 .../launcher/ContainerLaunch.java               | 12 ++---
 .../launcher/ContainersLauncher.java            | 14 +++++-
 .../container/TestContainer.java                | 46 ++++++++++++++++++--
 4 files changed, 71 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
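
For readers following the state-machine changes below, here is a minimal, self-contained Java sketch of the pattern this patch applies. The class and enum names are hypothetical stand-ins, not the actual YARN types: once the cleanup path also fires CONTAINER_KILLED_ON_REQUEST for containers that were never launched, every terminal state must absorb that late event as a no-op self-transition instead of failing on an invalid transition.

import java.util.EnumSet;

public class LateKillEventSketch {
  // Simplified stand-ins for ContainerState / ContainerEventType.
  enum State { SCHEDULED, KILLING, EXITED_WITH_SUCCESS, DONE }
  enum Event { KILL_CONTAINER, CONTAINER_KILLED_ON_REQUEST }

  // Terminal states that may legitimately see a late kill acknowledgement.
  private static final EnumSet<State> TOLERATES_LATE_KILL =
      EnumSet.of(State.EXITED_WITH_SUCCESS, State.DONE);

  private State state = State.SCHEDULED;

  void handle(Event event) {
    switch (state) {
      case SCHEDULED:
        if (event == Event.KILL_CONTAINER) {
          state = State.KILLING;  // the patch uses the full KillTransition here
        }
        break;
      case KILLING:
        if (event == Event.CONTAINER_KILLED_ON_REQUEST) {
          state = State.DONE;     // kill acknowledged, container finishes
        }
        break;
      default:
        // Self-transition: a duplicate or late kill event in a terminal
        // state is silently absorbed rather than treated as an error.
        if (event == Event.CONTAINER_KILLED_ON_REQUEST
            && TOLERATES_LATE_KILL.contains(state)) {
          return;
        }
        break;
    }
  }

  public static void main(String[] args) {
    LateKillEventSketch c = new LateKillEventSketch();
    c.handle(Event.KILL_CONTAINER);               // SCHEDULED -> KILLING
    c.handle(Event.CONTAINER_KILLED_ON_REQUEST);  // KILLING -> DONE
    c.handle(Event.CONTAINER_KILLED_ON_REQUEST);  // late event: no-op
    System.out.println("final state: " + c.state);
  }
}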


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd04e954/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index f76e682..e4cbfdc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -384,7 +384,7 @@ public class ContainerImpl implements Container {
        UPDATE_DIAGNOSTICS_TRANSITION)
     .addTransition(ContainerState.SCHEDULED, ContainerState.KILLING,
         ContainerEventType.KILL_CONTAINER,
-        new KillBeforeRunningTransition())
+        new KillTransition())
     .addTransition(ContainerState.SCHEDULED, ContainerState.SCHEDULED,
         ContainerEventType.UPDATE_CONTAINER_TOKEN,
         new NotifyContainerSchedulerOfUpdateTransition())
@@ -618,6 +618,9 @@ public class ContainerImpl implements Container {
     .addTransition(ContainerState.EXITED_WITH_SUCCESS,
         ContainerState.EXITED_WITH_SUCCESS,
         ContainerEventType.UPDATE_CONTAINER_TOKEN)
+    .addTransition(ContainerState.EXITED_WITH_SUCCESS,
+        ContainerState.EXITED_WITH_SUCCESS,
+        ContainerEventType.CONTAINER_KILLED_ON_REQUEST)
 
     // From EXITED_WITH_FAILURE State
     .addTransition(ContainerState.EXITED_WITH_FAILURE, ContainerState.DONE,
@@ -635,6 +638,9 @@ public class ContainerImpl implements Container {
     .addTransition(ContainerState.EXITED_WITH_FAILURE,
         ContainerState.EXITED_WITH_FAILURE,
         ContainerEventType.UPDATE_CONTAINER_TOKEN)
+    .addTransition(ContainerState.EXITED_WITH_FAILURE,
+        ContainerState.EXITED_WITH_FAILURE,
+        ContainerEventType.CONTAINER_KILLED_ON_REQUEST)
 
     // From KILLING State.
     .addTransition(ContainerState.KILLING,
@@ -694,6 +700,9 @@ public class ContainerImpl implements Container {
     .addTransition(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
         ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
         ContainerEventType.UPDATE_CONTAINER_TOKEN)
+    .addTransition(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
+        ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
+        ContainerEventType.CONTAINER_KILLED_ON_REQUEST)
 
     // From DONE
     .addTransition(ContainerState.DONE, ContainerState.DONE,
@@ -714,6 +723,8 @@ public class ContainerImpl implements Container {
     // No transition - assuming container is on its way to completion
     .addTransition(ContainerState.DONE, ContainerState.DONE,
         ContainerEventType.UPDATE_CONTAINER_TOKEN)
+    .addTransition(ContainerState.DONE, ContainerState.DONE,
+        ContainerEventType.CONTAINER_KILLED_ON_REQUEST)
 
     // create the topology tables
     .installTopology();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd04e954/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 04295e1..23ad408 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -556,14 +556,10 @@ public class ContainerLaunch implements Callable<Integer> {
         || exitCode == ExitCode.TERMINATED.getExitCode()) {
       // If the process was killed, Send container_cleanedup_after_kill and
       // just break out of this method.
-
-      // If Container was killed before starting... NO need to do this.
-      if (!killedBeforeStart) {
-        dispatcher.getEventHandler().handle(
-            new ContainerExitEvent(containerId,
-                ContainerEventType.CONTAINER_KILLED_ON_REQUEST, exitCode,
-                diagnosticInfo.toString()));
-      }
+      dispatcher.getEventHandler().handle(
+          new ContainerExitEvent(containerId,
+              ContainerEventType.CONTAINER_KILLED_ON_REQUEST, exitCode,
+              diagnosticInfo.toString()));
     } else if (exitCode != 0) {
       handleContainerExitWithFailure(containerId, exitCode, containerLogDir,
           diagnosticInfo);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd04e954/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java
index cfd5d6a..7870f86 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java
@@ -23,6 +23,11 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
+
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -151,7 +156,14 @@ public class ContainersLauncher extends AbstractService
       case CLEANUP_CONTAINER_FOR_REINIT:
         ContainerLaunch launcher = running.remove(containerId);
         if (launcher == null) {
-          // Container not launched. So nothing needs to be done.
+          // Container not launched.
+          // triggering KILLING to CONTAINER_CLEANEDUP_AFTER_KILL transition.
+          dispatcher.getEventHandler().handle(
+              new ContainerExitEvent(containerId,
+                  ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
+                  Shell.WINDOWS ? ContainerExecutor.ExitCode.FORCE_KILLED.getExitCode() :
+                  ContainerExecutor.ExitCode.TERMINATED.getExitCode(),
+                  "Container terminated before launch."));
           return;
         }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd04e954/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
index edf26d4..71cabdd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.argThat;
+import static org.mockito.Matchers.refEq;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.times;
@@ -664,6 +665,17 @@ public class TestContainer {
       ContainerLaunch launcher = wc.launcher.running.get(wc.c.getContainerId());
       wc.killContainer();
       assertEquals(ContainerState.KILLING, wc.c.getContainerState());
+
+      // check that container cleanup hasn't started at this point.
+      LocalizationCleanupMatcher cleanupResources =
+          new LocalizationCleanupMatcher(wc.c);
+      verify(wc.localizerBus, times(0)).handle(argThat(cleanupResources));
+
+      // check if containerlauncher cleans up the container launch.
+      verify(wc.launcherBus)
+          .handle(refEq(new ContainersLauncherEvent(wc.c,
+              ContainersLauncherEventType.CLEANUP_CONTAINER), "timestamp"));
+
       launcher.call();
       wc.drainDispatcherEvents();
       assertEquals(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
@@ -676,6 +688,7 @@ public class TestContainer {
       assertEquals(ContainerState.DONE, wc.c.getContainerState());
       assertEquals(killed + 1, metrics.getKilledContainers());
       assertEquals(0, metrics.getRunningContainers());
+      assertEquals(0, wc.launcher.running.size());
     } finally {
       if (wc != null) {
         wc.finished();
@@ -1145,7 +1158,7 @@ public class TestContainer {
     ResourcesReleasedMatcher matchesReq =
         new ResourcesReleasedMatcher(wc.localResources, EnumSet.of(
             LocalResourceVisibility.PUBLIC, LocalResourceVisibility.PRIVATE,
-            LocalResourceVisibility.APPLICATION));
+            LocalResourceVisibility.APPLICATION), wc.c);
     verify(wc.localizerBus, atLeastOnce()).handle(argThat(matchesReq));
   }
 
@@ -1161,13 +1174,35 @@ public class TestContainer {
             wc.c.getContainerId().toString())));
   }
 
-  private static class ResourcesReleasedMatcher extends
+  // Argument matcher for matching container localization cleanup event.
+  private static class LocalizationCleanupMatcher extends
       ArgumentMatcher<LocalizationEvent> {
+    Container c;
+
+    LocalizationCleanupMatcher(Container c){
+      this.c = c;
+    }
+
+    @Override
+    public boolean matches(Object o) {
+      if (!(o instanceof ContainerLocalizationCleanupEvent)) {
+        return false;
+      }
+      ContainerLocalizationCleanupEvent evt =
+          (ContainerLocalizationCleanupEvent) o;
+
+      return (evt.getContainer() == c);
+    }
+  }
+
+  private static class ResourcesReleasedMatcher extends
+      LocalizationCleanupMatcher {
     final HashSet<LocalResourceRequest> resources =
         new HashSet<LocalResourceRequest>();
 
     ResourcesReleasedMatcher(Map<String, LocalResource> allResources,
-        EnumSet<LocalResourceVisibility> vis) throws URISyntaxException {
+        EnumSet<LocalResourceVisibility> vis, Container c) throws URISyntaxException {
+      super(c);
       for (Entry<String, LocalResource> e : allResources.entrySet()) {
         if (vis.contains(e.getValue().getVisibility())) {
           resources.add(new LocalResourceRequest(e.getValue()));
@@ -1177,9 +1212,12 @@ public class TestContainer {
 
     @Override
     public boolean matches(Object o) {
-      if (!(o instanceof ContainerLocalizationCleanupEvent)) {
+      // match event type and container.
+      if(!super.matches(o)){
         return false;
       }
+
+      // match resources.
       ContainerLocalizationCleanupEvent evt =
           (ContainerLocalizationCleanupEvent) o;
       final HashSet<LocalResourceRequest> expected =




[09/25] hadoop git commit: HDDS-219. Generate version-info.properties for hadoop and ozone. Contributed by Sandeep Nemuri.

Posted by su...@apache.org.
HDDS-219. Generate version-info.properties for hadoop and ozone. Contributed by Sandeep Nemuri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d96bc6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d96bc6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d96bc6e

Branch: refs/heads/HDFS-12943
Commit: 3d96bc6e5ff098900cf07e4b30c642e961a39427
Parents: 00013d6
Author: Márton Elek <el...@apache.org>
Authored: Thu Aug 9 11:06:03 2018 +0200
Committer: Márton Elek <el...@apache.org>
Committed: Thu Aug 9 11:06:03 2018 +0200

----------------------------------------------------------------------
 hadoop-hdds/common/pom.xml                      |  34 +++
 .../apache/hadoop/utils/HddsVersionInfo.java    | 182 ++++++++++++++++
 .../main/resources/hdds-version-info.properties |  26 +++
 hadoop-ozone/common/pom.xml                     |  35 +++
 hadoop-ozone/common/src/main/bin/ozone          |   2 +-
 .../hadoop/ozone/util/OzoneVersionInfo.java     | 213 +++++++++++++++++++
 .../resources/ozone-version-info.properties     |  27 +++
 7 files changed, 518 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
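
As a usage illustration, here is a minimal Java sketch (with a hypothetical class name) of how a build consumes the generated properties file, mirroring the HddsVersionInfo pattern introduced below; it assumes Maven resource filtering has already replaced the ${...} tokens at build time.

import java.io.InputStream;
import java.util.Properties;

public class VersionInfoSketch {
  public static void main(String[] args) throws Exception {
    Properties info = new Properties();
    // Load the filtered resource from the classpath, as HddsVersionInfo does.
    try (InputStream is = VersionInfoSketch.class.getClassLoader()
        .getResourceAsStream("hdds-version-info.properties")) {
      if (is != null) {
        info.load(is);
      }
    }
    // Every key falls back to "Unknown" when the file or key is absent.
    System.out.println("version:  " + info.getProperty("version", "Unknown"));
    System.out.println("revision: " + info.getProperty("revision", "Unknown"));
    System.out.println("protoc:   " + info.getProperty("protocVersion", "Unknown"));
  }
}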


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d96bc6e/hadoop-hdds/common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 4068522..ed29d31 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -29,10 +29,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <packaging>jar</packaging>
 
   <properties>
+    <hdds.version>0.2.1-SNAPSHOT</hdds.version>
     <hadoop.component>hdds</hadoop.component>
     <is.hadoop.component>true</is.hadoop.component>
     <log4j2.version>2.11.0</log4j2.version>
     <disruptor.version>3.4.2</disruptor.version>
+    <declared.hdds.version>${hdds.version}</declared.hdds.version>
   </properties>
 
   <dependencies>
@@ -102,6 +104,22 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   </dependencies>
 
   <build>
+    <resources>
+      <resource>
+        <directory>${basedir}/src/main/resources</directory>
+        <excludes>
+          <exclude>hdds-version-info.properties</exclude>
+        </excludes>
+        <filtering>false</filtering>
+      </resource>
+      <resource>
+        <directory>${basedir}/src/main/resources</directory>
+        <includes>
+          <include>hdds-version-info.properties</include>
+        </includes>
+        <filtering>true</filtering>
+      </resource>
+    </resources>
     <extensions>
       <extension>
         <groupId>kr.motd.maven</groupId>
@@ -170,6 +188,22 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
         <artifactId>hadoop-maven-plugins</artifactId>
         <executions>
           <execution>
+            <id>version-info</id>
+            <phase>generate-resources</phase>
+            <goals>
+              <goal>version-info</goal>
+            </goals>
+            <configuration>
+              <source>
+                <directory>${basedir}/../</directory>
+                <includes>
+                  <include>*/src/main/java/**/*.java</include>
+                  <include>*/src/main/proto/*.proto</include>
+                </includes>
+              </source>
+            </configuration>
+          </execution>
+          <execution>
             <id>compile-protoc</id>
             <goals>
               <goal>protoc</goal>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d96bc6e/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java
new file mode 100644
index 0000000..59b9de6
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.utils;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.ClassUtil;
+import org.apache.hadoop.util.ThreadUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Properties;
+
+/**
+ * This class returns build information about Hadoop components.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class HddsVersionInfo {
+  private static final Logger LOG = LoggerFactory.getLogger(HddsVersionInfo.class);
+
+  private Properties info;
+
+  protected HddsVersionInfo(String component) {
+    info = new Properties();
+    String versionInfoFile = component + "-version-info.properties";
+    InputStream is = null;
+    try {
+      is = ThreadUtil.getResourceAsStream(HddsVersionInfo.class.getClassLoader(),
+          versionInfoFile);
+      info.load(is);
+    } catch (IOException ex) {
+      LoggerFactory.getLogger(getClass()).warn("Could not read '" +
+          versionInfoFile + "', " + ex.toString(), ex);
+    } finally {
+      IOUtils.closeStream(is);
+    }
+  }
+
+  protected String _getVersion() {
+    return info.getProperty("version", "Unknown");
+  }
+
+  protected String _getRevision() {
+    return info.getProperty("revision", "Unknown");
+  }
+
+  protected String _getBranch() {
+    return info.getProperty("branch", "Unknown");
+  }
+
+  protected String _getDate() {
+    return info.getProperty("date", "Unknown");
+  }
+
+  protected String _getUser() {
+    return info.getProperty("user", "Unknown");
+  }
+
+  protected String _getUrl() {
+    return info.getProperty("url", "Unknown");
+  }
+
+  protected String _getSrcChecksum() {
+    return info.getProperty("srcChecksum", "Unknown");
+  }
+
+  protected String _getBuildVersion(){
+    return _getVersion() +
+      " from " + _getRevision() +
+      " by " + _getUser() +
+      " source checksum " + _getSrcChecksum();
+  }
+
+  protected String _getProtocVersion() {
+    return info.getProperty("protocVersion", "Unknown");
+  }
+
+  private static HddsVersionInfo HDDS_VERSION_INFO = new HddsVersionInfo("hdds");
+  /**
+   * Get the HDDS version.
+   * @return the Hdds version string, eg. "0.6.3-dev"
+   */
+  public static String getVersion() {
+    return HDDS_VERSION_INFO._getVersion();
+  }
+
+  /**
+   * Get the Git commit hash of the repository when compiled.
+   * @return the commit hash, eg. "18f64065d5db6208daf50b02c1b5ed4ee3ce547a"
+   */
+  public static String getRevision() {
+    return HDDS_VERSION_INFO._getRevision();
+  }
+
+  /**
+   * Get the branch on which this originated.
+   * @return The branch name, e.g. "trunk" or "branches/branch-0.20"
+   */
+  public static String getBranch() {
+    return HDDS_VERSION_INFO._getBranch();
+  }
+
+  /**
+   * The date that HDDS was compiled.
+   * @return the compilation date in unix date format
+   */
+  public static String getDate() {
+    return HDDS_VERSION_INFO._getDate();
+  }
+
+  /**
+   * The user that compiled HDDS.
+   * @return the username of the user
+   */
+  public static String getUser() {
+    return HDDS_VERSION_INFO._getUser();
+  }
+
+  /**
+   * Get the URL for the HDDS repository.
+   * @return the URL of the Hdds repository
+   */
+  public static String getUrl() {
+    return HDDS_VERSION_INFO._getUrl();
+  }
+
+  /**
+   * Get the checksum of the source files from which HDDS was built.
+   * @return the checksum of the source files
+   */
+  public static String getSrcChecksum() {
+    return HDDS_VERSION_INFO._getSrcChecksum();
+  }
+
+  /**
+   * Returns the buildVersion which includes version,
+   * revision, user and date.
+   * @return the buildVersion
+   */
+  public static String getBuildVersion(){
+    return HDDS_VERSION_INFO._getBuildVersion();
+  }
+
+  /**
+   * Returns the protoc version used for the build.
+   * @return the protoc version
+   */
+  public static String getProtocVersion(){
+    return HDDS_VERSION_INFO._getProtocVersion();
+  }
+
+  public static void main(String[] args) {
+    System.out.println("Using HDDS " + getVersion());
+    System.out.println("Source code repository " + getUrl() + " -r " +
+        getRevision());
+    System.out.println("Compiled by " + getUser() + " on " + getDate());
+    System.out.println("Compiled with protoc " + getProtocVersion());
+    System.out.println("From source with checksum " + getSrcChecksum());
+    LOG.debug("This command was run using " +
+        ClassUtil.findContainingJar(HddsVersionInfo.class));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d96bc6e/hadoop-hdds/common/src/main/resources/hdds-version-info.properties
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties b/hadoop-hdds/common/src/main/resources/hdds-version-info.properties
new file mode 100644
index 0000000..2cbd817
--- /dev/null
+++ b/hadoop-hdds/common/src/main/resources/hdds-version-info.properties
@@ -0,0 +1,26 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+version=${declared.hdds.version}
+revision=${version-info.scm.commit}
+branch=${version-info.scm.branch}
+user=${user.name}
+date=${version-info.build.time}
+url=${version-info.scm.uri}
+srcChecksum=${version-info.source.md5}
+protocVersion=${protobuf.version}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d96bc6e/hadoop-ozone/common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml
index 83d023e..ea5eb46 100644
--- a/hadoop-ozone/common/pom.xml
+++ b/hadoop-ozone/common/pom.xml
@@ -29,8 +29,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <packaging>jar</packaging>
 
   <properties>
+    <ozone.version>0.2.1-SNAPSHOT</ozone.version>
+    <ozone.release>Acadia</ozone.release>
     <hadoop.component>ozone</hadoop.component>
     <is.hadoop.component>true</is.hadoop.component>
+    <declared.ozone.version>${ozone.version}</declared.ozone.version>
   </properties>
 
   <dependencies>
@@ -38,12 +41,44 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   </dependencies>
 
   <build>
+    <resources>
+      <resource>
+        <directory>${basedir}/src/main/resources</directory>
+        <excludes>
+          <exclude>ozone-version-info.properties</exclude>
+        </excludes>
+        <filtering>false</filtering>
+      </resource>
+      <resource>
+        <directory>${basedir}/src/main/resources</directory>
+        <includes>
+          <include>ozone-version-info.properties</include>
+        </includes>
+        <filtering>true</filtering>
+      </resource>
+    </resources>
     <plugins>
       <plugin>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-maven-plugins</artifactId>
         <executions>
           <execution>
+            <id>version-info</id>
+            <phase>generate-resources</phase>
+            <goals>
+              <goal>version-info</goal>
+            </goals>
+            <configuration>
+              <source>
+                <directory>${basedir}/../</directory>
+                <includes>
+                  <include>*/src/main/java/**/*.java</include>
+                  <include>*/src/main/proto/*.proto</include>
+                </includes>
+              </source>
+            </configuration>
+          </execution>
+          <execution>
             <id>compile-protoc</id>
             <goals>
               <goal>protoc</goal>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d96bc6e/hadoop-ozone/common/src/main/bin/ozone
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone
index 5d1b6bc..75ceeb7 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -117,7 +117,7 @@ function ozonecmd_case
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.scm.cli.SCMCLI
     ;;
     version)
-      HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.util.OzoneVersionInfo
     ;;
     genconf)
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.genconf.GenerateOzoneRequiredConfigurations

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d96bc6e/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java
new file mode 100644
index 0000000..d476748
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java
@@ -0,0 +1,213 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.ClassUtil;
+import org.apache.hadoop.util.ThreadUtil;
+import org.apache.hadoop.utils.HddsVersionInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Properties;
+
+/**
+ * This class returns build information about Ozone components.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class OzoneVersionInfo {
+  private static final Logger LOG = LoggerFactory.getLogger(OzoneVersionInfo.class);
+
+  private Properties info;
+
+  protected OzoneVersionInfo(String component) {
+    info = new Properties();
+    String versionInfoFile = component + "-version-info.properties";
+    InputStream is = null;
+    try {
+      is = ThreadUtil.getResourceAsStream(OzoneVersionInfo.class.getClassLoader(),
+          versionInfoFile);
+      info.load(is);
+    } catch (IOException ex) {
+      LoggerFactory.getLogger(getClass()).warn("Could not read '" +
+          versionInfoFile + "', " + ex.toString(), ex);
+    } finally {
+      IOUtils.closeStream(is);
+    }
+  }
+
+  protected String _getVersion() {
+    return info.getProperty("version", "Unknown");
+  }
+
+  protected String _getRelease() {
+    return info.getProperty("release", "Unknown");
+  }
+
+  protected String _getRevision() {
+    return info.getProperty("revision", "Unknown");
+  }
+
+  protected String _getBranch() {
+    return info.getProperty("branch", "Unknown");
+  }
+
+  protected String _getDate() {
+    return info.getProperty("date", "Unknown");
+  }
+
+  protected String _getUser() {
+    return info.getProperty("user", "Unknown");
+  }
+
+  protected String _getUrl() {
+    return info.getProperty("url", "Unknown");
+  }
+
+  protected String _getSrcChecksum() {
+    return info.getProperty("srcChecksum", "Unknown");
+  }
+
+  protected String _getBuildVersion(){
+    return _getVersion() +
+      " from " + _getRevision() +
+      " by " + _getUser() +
+      " source checksum " + _getSrcChecksum();
+  }
+
+  protected String _getProtocVersion() {
+    return info.getProperty("protocVersion", "Unknown");
+  }
+
+  private static final OzoneVersionInfo OZONE_VERSION_INFO = new OzoneVersionInfo("ozone");
+  /**
+   * Get the Ozone version.
+   * @return the Ozone version string, e.g. "0.6.3-dev"
+   */
+  public static String getVersion() {
+    return OZONE_VERSION_INFO._getVersion();
+  }
+
+  /**
+   * Get the Ozone release name.
+   * @return the Ozone release string, e.g. "Acadia"
+   */
+  public static String getRelease() {
+    return OZONE_VERSION_INFO._getRelease();
+  }
+
+  /**
+   * Get the Git commit hash of the repository when compiled.
+   * @return the commit hash, e.g. "18f64065d5db6208daf50b02c1b5ed4ee3ce547a"
+   */
+  public static String getRevision() {
+    return OZONE_VERSION_INFO._getRevision();
+  }
+
+  /**
+   * Get the branch on which this originated.
+   * @return The branch name, e.g. "trunk" or "branches/branch-0.20"
+   */
+  public static String getBranch() {
+    return OZONE_VERSION_INFO._getBranch();
+  }
+
+  /**
+   * The date that Ozone was compiled.
+   * @return the compilation date in unix date format
+   */
+  public static String getDate() {
+    return OZONE_VERSION_INFO._getDate();
+  }
+
+  /**
+   * The user that compiled Ozone.
+   * @return the username of the user
+   */
+  public static String getUser() {
+    return OZONE_VERSION_INFO._getUser();
+  }
+
+  /**
+   * Get the URL for the Ozone repository.
+   * @return the URL of the Ozone repository
+   */
+  public static String getUrl() {
+    return OZONE_VERSION_INFO._getUrl();
+  }
+
+  /**
+   * Get the checksum of the source files from which Ozone was built.
+   * @return the checksum of the source files
+   */
+  public static String getSrcChecksum() {
+    return OZONE_VERSION_INFO._getSrcChecksum();
+  }
+
+  /**
+   * Returns the buildVersion which includes version,
+   * revision, user and date.
+   * @return the buildVersion
+   */
+  public static String getBuildVersion(){
+    return OZONE_VERSION_INFO._getBuildVersion();
+  }
+
+  /**
+   * Returns the protoc version used for the build.
+   * @return the protoc version
+   */
+  public static String getProtocVersion(){
+    return OZONE_VERSION_INFO._getProtocVersion();
+  }
+
+  public static void main(String[] args) {
+    System.out.println(
+        "                  //////////////                 \n" +
+        "               ////////////////////              \n" +
+        "            ////////     ////////////////        \n" +
+        "           //////      ////////////////          \n" +
+        "          /////      ////////////////  /         \n" +
+        "         /////            ////////   ///         \n" +
+        "         ////           ////////    /////        \n" +
+        "        /////         ////////////////           \n" +
+        "        /////       ////////////////   //        \n" +
+        "         ////     ///////////////   /////        \n" +
+        "         /////  ///////////////     ////         \n" +
+        "          /////       //////      /////          \n" +
+        "           //////   //////       /////           \n" +
+        "             ///////////     ////////            \n" +
+        "               //////  ////////////              \n" +
+        "               ///   //////////                  \n" +
+        "              /    "+ getVersion() + "("+ getRelease() +")\n");
+    System.out.println("Source code repository " + getUrl() + " -r " +
+        getRevision());
+    System.out.println("Compiled by " + getUser() + " on " + getDate());
+    System.out.println("Compiled with protoc " + getProtocVersion());
+    System.out.println("From source with checksum " + getSrcChecksum() + "\n");
+    LOG.debug("This command was run using " +
+        ClassUtil.findContainingJar(OzoneVersionInfo.class));
+    HddsVersionInfo.main(args);
+  }
+}
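
As a usage sketch (a hypothetical caller, not part of this patch; it assumes the filtered ozone-version-info.properties is on the runtime classpath), the static accessors above can stamp build info into a startup banner:

    import org.apache.hadoop.ozone.util.OzoneVersionInfo;

    public class OzoneBanner {
      public static void main(String[] args) {
        System.out.println("Ozone " + OzoneVersionInfo.getVersion()
            + " (" + OzoneVersionInfo.getRelease() + ")");
        System.out.println("Build: " + OzoneVersionInfo.getBuildVersion());
      }
    }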

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d96bc6e/hadoop-ozone/common/src/main/resources/ozone-version-info.properties
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/resources/ozone-version-info.properties b/hadoop-ozone/common/src/main/resources/ozone-version-info.properties
new file mode 100644
index 0000000..599f14d
--- /dev/null
+++ b/hadoop-ozone/common/src/main/resources/ozone-version-info.properties
@@ -0,0 +1,27 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+version=${declared.ozone.version}
+release=${ozone.release}
+revision=${version-info.scm.commit}
+branch=${version-info.scm.branch}
+user=${user.name}
+date=${version-info.build.time}
+url=${version-info.scm.uri}
+srcChecksum=${version-info.source.md5}
+protocVersion=${protobuf.version}


[17/25] hadoop git commit: HDDS-245. Handle ContainerReports in the SCM. Contributed by Elek Marton.

Posted by su...@apache.org.
HDDS-245. Handle ContainerReports in the SCM. Contributed by Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5dbbfe2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5dbbfe2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5dbbfe2

Branch: refs/heads/HDFS-12943
Commit: f5dbbfe2e97a8c11e3df0f95ae4a493f11fdbc28
Parents: b2517dd
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Thu Aug 9 16:55:13 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Aug 9 16:55:39 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdds/server/events/EventQueue.java   |   7 +-
 .../scm/container/ContainerReportHandler.java   | 107 +++++-
 .../replication/ReplicationActivityStatus.java  |  86 +++++
 .../ReplicationActivityStatusMXBean.java        |  28 ++
 .../replication/ReplicationRequest.java         |  28 +-
 .../hadoop/hdds/scm/events/SCMEvents.java       |   9 +
 .../hdds/scm/node/states/Node2ContainerMap.java |  10 +-
 .../hdds/scm/node/states/ReportResult.java      |  18 +-
 .../scm/server/StorageContainerManager.java     |  27 +-
 .../container/TestContainerReportHandler.java   | 228 +++++++++++++
 .../scm/node/states/Node2ContainerMapTest.java  | 308 -----------------
 .../scm/node/states/TestNode2ContainerMap.java  | 328 +++++++++++++++++++
 12 files changed, 859 insertions(+), 325 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dbbfe2/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
index f93c54b..b2b0df2 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -147,7 +147,12 @@ public class EventQueue implements EventPublisher, AutoCloseable {
 
         for (EventHandler handler : executorAndHandlers.getValue()) {
           queuedCount.incrementAndGet();
-
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Delivering event {} to executor/handler {}: {}",
+                event.getName(),
+                executorAndHandlers.getKey().getName(),
+                payload);
+          }
           executorAndHandlers.getKey()
               .onMessage(handler, payload, this);
 
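For illustration, a minimal sketch of the publish path that the new debug line traces (the String event is hypothetical; EventQueue, TypedEvent and the single-method EventHandler are used the same way elsewhere in this patch):

    EventQueue queue = new EventQueue();
    TypedEvent<String> demo = new TypedEvent<>(String.class);
    // EventHandler declares a single onMessage(payload, publisher)
    // method, so a lambda can serve as the handler.
    queue.addHandler(demo, (payload, publisher) ->
        System.out.println("got " + payload));
    queue.fireEvent(demo, "hello");  // now logged at DEBUG before dispatch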

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dbbfe2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 486162e..b26eed2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -18,30 +18,133 @@
 
 package org.apache.hadoop.hdds.scm.container;
 
+import java.io.IOException;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.replication
+    .ReplicationActivityStatus;
+import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap;
+import org.apache.hadoop.hdds.scm.node.states.ReportResult;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
     .ContainerReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * Handles container reports from datanode.
  */
 public class ContainerReportHandler implements
     EventHandler<ContainerReportFromDatanode> {
 
-  private final Mapping containerMapping;
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ContainerReportHandler.class);
+
   private final Node2ContainerMap node2ContainerMap;
 
+  private final Mapping containerMapping;
+
+  private ContainerStateManager containerStateManager;
+
+  private ReplicationActivityStatus replicationStatus;
+
+
   public ContainerReportHandler(Mapping containerMapping,
-                                Node2ContainerMap node2ContainerMap) {
+      Node2ContainerMap node2ContainerMap,
+      ReplicationActivityStatus replicationActivityStatus) {
+    Preconditions.checkNotNull(containerMapping);
+    Preconditions.checkNotNull(node2ContainerMap);
+    Preconditions.checkNotNull(replicationActivityStatus);
     this.containerMapping = containerMapping;
     this.node2ContainerMap = node2ContainerMap;
+    this.containerStateManager = containerMapping.getStateManager();
+    this.replicationStatus = replicationActivityStatus;
   }
 
   @Override
   public void onMessage(ContainerReportFromDatanode containerReportFromDatanode,
                         EventPublisher publisher) {
-    // TODO: process container report.
+
+    DatanodeDetails datanodeOrigin =
+        containerReportFromDatanode.getDatanodeDetails();
+
+    ContainerReportsProto containerReport =
+        containerReportFromDatanode.getReport();
+    try {
+
+      //update state in container db and trigger close container events
+      containerMapping.processContainerReports(datanodeOrigin, containerReport);
+
+      Set<ContainerID> containerIds = containerReport.getReportsList().stream()
+          .map(containerProto -> containerProto.getContainerID())
+          .map(ContainerID::new)
+          .collect(Collectors.toSet());
+
+      ReportResult reportResult = node2ContainerMap
+          .processReport(datanodeOrigin.getUuid(), containerIds);
+
+      //we have the report, so we can update the states for the next iteration.
+      node2ContainerMap
+          .setContainersForDatanode(datanodeOrigin.getUuid(), containerIds);
+
+      for (ContainerID containerID : reportResult.getMissingContainers()) {
+        containerStateManager
+            .removeContainerReplica(containerID, datanodeOrigin);
+        emitReplicationRequestEvent(containerID, publisher);
+      }
+
+      for (ContainerID containerID : reportResult.getNewContainers()) {
+        containerStateManager.addContainerReplica(containerID, datanodeOrigin);
+
+        emitReplicationRequestEvent(containerID, publisher);
+      }
+
+    } catch (IOException e) {
+      //TODO: stop all the replication?
+      LOG.error("Error on processing container report from datanode {}",
+          datanodeOrigin, e);
+    }
+
+  }
+
+  private void emitReplicationRequestEvent(ContainerID containerID,
+      EventPublisher publisher) throws SCMException {
+    ContainerInfo container = containerStateManager.getContainer(containerID);
+
+    if (container == null) {
+      // Unknown container: log and return, otherwise the dereferences
+      // below would throw a NullPointerException.
+      LOG.warn(
+          "Container is missing from containerStateManager. Can't request "
+              + "replication. {}",
+          containerID);
+      return;
+    }
+    if (replicationStatus.isReplicationEnabled()) {
+
+      int existingReplicas =
+          containerStateManager.getContainerReplicas(containerID).size();
+
+      int expectedReplicas = container.getReplicationFactor().getNumber();
+
+      if (existingReplicas != expectedReplicas) {
+
+        publisher.fireEvent(SCMEvents.REPLICATE_CONTAINER,
+            new ReplicationRequest(containerID.getId(), existingReplicas,
+                container.getReplicationFactor().getNumber()));
+      }
+    }
+
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dbbfe2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java
new file mode 100644
index 0000000..4a9888c
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+
+import javax.management.ObjectName;
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.metrics2.util.MBeans;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Event listener to track the current state of replication.
+ */
+public class ReplicationActivityStatus
+    implements EventHandler<Boolean>, ReplicationActivityStatusMXBean,
+    Closeable {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ReplicationActivityStatus.class);
+
+  private AtomicBoolean replicationEnabled = new AtomicBoolean();
+
+  private ObjectName jmxObjectName;
+
+  public boolean isReplicationEnabled() {
+    return replicationEnabled.get();
+  }
+
+  @VisibleForTesting
+  public void setReplicationEnabled(boolean enabled) {
+    replicationEnabled.set(enabled);
+  }
+
+  @VisibleForTesting
+  public void enableReplication() {
+    replicationEnabled.set(true);
+  }
+
+  /**
+   * The replication status could be set by async events.
+   */
+  @Override
+  public void onMessage(Boolean enabled, EventPublisher publisher) {
+    replicationEnabled.set(enabled);
+  }
+
+  public void start() {
+    try {
+      this.jmxObjectName =
+          MBeans.register(
+              "StorageContainerManager", "ReplicationActivityStatus", this);
+    } catch (Exception ex) {
+      LOG.error("JMX bean for ReplicationActivityStatus can't be registered",
+          ex);
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (this.jmxObjectName != null) {
+      MBeans.unregister(jmxObjectName);
+    }
+  }
+}
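
A short wiring sketch, mirroring the StorageContainerManager changes later in this patch (the eventQueue variable is assumed to be in scope; SCMEvents.START_REPLICATION is added below):

    ReplicationActivityStatus replicationStatus =
        new ReplicationActivityStatus();
    replicationStatus.start();  // registers the JMX bean
    eventQueue.addHandler(SCMEvents.START_REPLICATION, replicationStatus);
    // Flip the flag through the event path instead of the
    // @VisibleForTesting setters:
    eventQueue.fireEvent(SCMEvents.START_REPLICATION, Boolean.TRUE);
    // On shutdown:
    replicationStatus.close();  // unregisters the JMX bean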

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dbbfe2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatusMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatusMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatusMXBean.java
new file mode 100644
index 0000000..164bd24
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatusMXBean.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+
+/**
+ * JMX interface to monitor replication status.
+ */
+public interface ReplicationActivityStatusMXBean {
+
+  boolean isReplicationEnabled();
+
+  void setReplicationEnabled(boolean enabled);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dbbfe2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationRequest.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationRequest.java
index ef7c546..d40cd9c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationRequest.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationRequest.java
@@ -29,18 +29,24 @@ import org.apache.commons.lang3.builder.HashCodeBuilder;
 public class ReplicationRequest implements Comparable<ReplicationRequest>,
     Serializable {
   private final long containerId;
-  private final short replicationCount;
-  private final short expecReplicationCount;
+  private final int replicationCount;
+  private final int expecReplicationCount;
   private final long timestamp;
 
-  public ReplicationRequest(long containerId, short replicationCount,
-      long timestamp, short expecReplicationCount) {
+  public ReplicationRequest(long containerId, int replicationCount,
+      long timestamp, int expecReplicationCount) {
     this.containerId = containerId;
     this.replicationCount = replicationCount;
     this.timestamp = timestamp;
     this.expecReplicationCount = expecReplicationCount;
   }
 
+  public ReplicationRequest(long containerId, int replicationCount,
+      int expecReplicationCount) {
+    this(containerId, replicationCount, System.currentTimeMillis(),
+        expecReplicationCount);
+  }
+
   /**
    * Compares this object with the specified object for order.  Returns a
    * negative integer, zero, or a positive integer as this object is less
@@ -93,7 +99,7 @@ public class ReplicationRequest implements Comparable<ReplicationRequest>,
     return containerId;
   }
 
-  public short getReplicationCount() {
+  public int getReplicationCount() {
     return replicationCount;
   }
 
@@ -101,7 +107,17 @@ public class ReplicationRequest implements Comparable<ReplicationRequest>,
     return timestamp;
   }
 
-  public short getExpecReplicationCount() {
+  public int getExpecReplicationCount() {
     return expecReplicationCount;
   }
+
+  @Override
+  public String toString() {
+    return "ReplicationRequest{" +
+        "containerId=" + containerId +
+        ", replicationCount=" + replicationCount +
+        ", expecReplicationCount=" + expecReplicationCount +
+        ", timestamp=" + timestamp +
+        '}';
+  }
 }
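
Usage-wise, the new three-argument constructor only defaults the timestamp (containerId is assumed to be a long in scope):

    // The two requests below are equivalent.
    new ReplicationRequest(containerId, 2, 3);
    new ReplicationRequest(containerId, 2, System.currentTimeMillis(), 3);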

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dbbfe2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
index d49dd4f..70b1e96 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
@@ -174,6 +174,15 @@ public final class SCMEvents {
       new TypedEvent<>(ReplicationCompleted.class);
 
   /**
+   * Signal for all the components (but especially for the replication
+   * manager and container report handler) that the replication could be
+   * started. Should be sent only if (almost) all the container states are
+   * available from the datanodes.
+   */
+  public static final TypedEvent<Boolean> START_REPLICATION =
+      new TypedEvent<>(Boolean.class);
+
+  /**
    * Private Ctor. Never Constructed.
    */
   private SCMEvents() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dbbfe2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
index 1960604..8ed6d59 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
@@ -68,7 +69,8 @@ public class Node2ContainerMap {
       throws SCMException {
     Preconditions.checkNotNull(containerIDs);
     Preconditions.checkNotNull(datanodeID);
-    if(dn2ContainerMap.putIfAbsent(datanodeID, containerIDs) != null) {
+    if (dn2ContainerMap.putIfAbsent(datanodeID, new HashSet<>(containerIDs))
+        != null) {
       throw new SCMException("Node already exists in the map",
                   DUPLICATE_DATANODE);
     }
@@ -82,11 +84,13 @@ public class Node2ContainerMap {
    * @throws SCMException - if we don't know about this datanode, for new DN
    *                      use insertNewDatanode.
    */
-  public void updateDatanodeMap(UUID datanodeID, Set<ContainerID> containers)
+  public void setContainersForDatanode(UUID datanodeID, Set<ContainerID> containers)
       throws SCMException {
     Preconditions.checkNotNull(datanodeID);
     Preconditions.checkNotNull(containers);
-    if(dn2ContainerMap.computeIfPresent(datanodeID, (k, v) -> v) == null){
+    if (dn2ContainerMap
+        .computeIfPresent(datanodeID, (k, v) -> new HashSet<>(containers))
+        == null) {
       throw new SCMException("No such datanode", NO_SUCH_DATANODE);
     }
   }
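
The switch to defensive copies (new HashSet<>(...)) matters because a caller may keep mutating the set it handed in. A small illustration (map and datanodeId are assumed to be a Node2ContainerMap and a UUID already in scope):

    Set<ContainerID> mine = new HashSet<>();
    mine.add(new ContainerID(1L));
    map.insertNewDatanode(datanodeId, mine);
    mine.add(new ContainerID(2L));  // caller-side mutation...
    // ...which no longer leaks into the map, since it stores a copy:
    assert map.getContainers(datanodeId).size() == 1;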

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dbbfe2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
index cb06cb3..2697629 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
@@ -21,10 +21,13 @@ package org.apache.hadoop.hdds.scm.node.states;
 
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 
+import java.util.Collections;
 import java.util.Set;
 
+import com.google.common.base.Preconditions;
+
 /**
- * A Container Report gets processsed by the Node2Container and returns the
+ * A Container Report gets processed by the Node2ContainerMap and returns a
  * Report Result class.
  */
 public class ReportResult {
@@ -36,6 +39,8 @@ public class ReportResult {
       Set<ContainerID> missingContainers,
       Set<ContainerID> newContainers) {
     this.status = status;
+    Preconditions.checkNotNull(missingContainers);
+    Preconditions.checkNotNull(newContainers);
     this.missingContainers = missingContainers;
     this.newContainers = newContainers;
   }
@@ -80,7 +85,16 @@ public class ReportResult {
     }
 
     ReportResult build() {
-      return new ReportResult(status, missingContainers, newContainers);
+
+      Set<ContainerID> nullSafeMissingContainers = this.missingContainers;
+      Set<ContainerID> nullSafeNewContainers = this.newContainers;
+      if (nullSafeNewContainers == null) {
+        nullSafeNewContainers = Collections.emptySet();
+      }
+      if (nullSafeMissingContainers == null) {
+        nullSafeMissingContainers = Collections.emptySet();
+      }
+      return new ReportResult(status, nullSafeMissingContainers, nullSafeNewContainers);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dbbfe2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 9cb1318..47a9100 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -40,6 +40,8 @@ import org.apache.hadoop.hdds.scm.container.ContainerActionsHandler;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
 import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
 import org.apache.hadoop.hdds.scm.container.Mapping;
+import org.apache.hadoop.hdds.scm.container.replication
+    .ReplicationActivityStatus;
 import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
@@ -164,9 +166,13 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
    * Key = DatanodeUuid, value = ContainerStat.
    */
   private Cache<String, ContainerStat> containerReportCache;
+
   private final ReplicationManager replicationManager;
+
   private final LeaseManager<Long> commandWatcherLeaseManager;
 
+  private final ReplicationActivityStatus replicationStatus;
+
   /**
    * Creates a new StorageContainerManager. Configuration will be updated
    * with information on the
@@ -199,19 +205,26 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
 
     Node2ContainerMap node2ContainerMap = new Node2ContainerMap();
 
+    replicationStatus = new ReplicationActivityStatus();
+
     CloseContainerEventHandler closeContainerHandler =
         new CloseContainerEventHandler(scmContainerManager);
     NodeReportHandler nodeReportHandler =
         new NodeReportHandler(scmNodeManager);
-    ContainerReportHandler containerReportHandler =
-        new ContainerReportHandler(scmContainerManager, node2ContainerMap);
+
     CommandStatusReportHandler cmdStatusReportHandler =
         new CommandStatusReportHandler();
+
     NewNodeHandler newNodeHandler = new NewNodeHandler(node2ContainerMap);
     StaleNodeHandler staleNodeHandler = new StaleNodeHandler(node2ContainerMap);
     DeadNodeHandler deadNodeHandler = new DeadNodeHandler(node2ContainerMap);
     ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
 
+    ContainerReportHandler containerReportHandler =
+        new ContainerReportHandler(scmContainerManager, node2ContainerMap,
+            replicationStatus);
+
+
     eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, scmNodeManager);
     eventQueue.addHandler(SCMEvents.NODE_REPORT, nodeReportHandler);
     eventQueue.addHandler(SCMEvents.CONTAINER_REPORT, containerReportHandler);
@@ -221,6 +234,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     eventQueue.addHandler(SCMEvents.STALE_NODE, staleNodeHandler);
     eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
     eventQueue.addHandler(SCMEvents.CMD_STATUS_REPORT, cmdStatusReportHandler);
+    eventQueue.addHandler(SCMEvents.START_REPLICATION, replicationStatus);
 
     long watcherTimeout =
         conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT,
@@ -580,6 +594,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
         "server", getDatanodeProtocolServer().getDatanodeRpcAddress()));
     getDatanodeProtocolServer().start();
 
+    replicationStatus.start();
     httpServer.start();
     scmBlockManager.start();
     replicationManager.start();
@@ -592,6 +607,14 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
   public void stop() {
 
     try {
+      LOG.info("Stopping Replication Activity Status tracker.");
+      replicationStatus.close();
+    } catch (Exception ex) {
+      LOG.error("Replication Activity Status tracker stop failed.", ex);
+    }
+
+
+    try {
       LOG.info("Stopping Replication Manager Service.");
       replicationManager.stop();
     } catch (Exception ex) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dbbfe2/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
new file mode 100644
index 0000000..363db99
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
@@ -0,0 +1,228 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo
+    .Builder;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.container.replication
+    .ReplicationActivityStatus;
+import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest;
+import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap;
+import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
+    .ContainerReportFromDatanode;
+import org.apache.hadoop.hdds.server.events.Event;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import static org.mockito.Matchers.anyLong;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.when;
+import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test the behaviour of the ContainerReportHandler.
+ */
+public class TestContainerReportHandler implements EventPublisher {
+
+  private List<Object> publishedEvents = new ArrayList<>();
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestContainerReportHandler.class);
+
+  @Before
+  public void resetEventCollector() {
+    publishedEvents.clear();
+  }
+
+  @Test
+  public void test() throws IOException {
+
+    //given
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    Node2ContainerMap node2ContainerMap = new Node2ContainerMap();
+    Mapping mapping = Mockito.mock(Mapping.class);
+
+    when(mapping.getContainer(anyLong()))
+        .thenAnswer(
+            (Answer<ContainerInfo>) invocation ->
+                new Builder()
+                    .setReplicationFactor(ReplicationFactor.THREE)
+                    .setContainerID((Long) invocation.getArguments()[0])
+                    .build()
+        );
+
+    ContainerStateManager containerStateManager =
+        new ContainerStateManager(conf, mapping);
+
+    when(mapping.getStateManager()).thenReturn(containerStateManager);
+
+    ReplicationActivityStatus replicationActivityStatus =
+        new ReplicationActivityStatus();
+
+    ContainerReportHandler reportHandler =
+        new ContainerReportHandler(mapping, node2ContainerMap,
+            replicationActivityStatus);
+
+    DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();
+    DatanodeDetails dn2 = TestUtils.randomDatanodeDetails();
+    DatanodeDetails dn3 = TestUtils.randomDatanodeDetails();
+    DatanodeDetails dn4 = TestUtils.randomDatanodeDetails();
+    node2ContainerMap.insertNewDatanode(dn1.getUuid(), new HashSet<>());
+    node2ContainerMap.insertNewDatanode(dn2.getUuid(), new HashSet<>());
+    node2ContainerMap.insertNewDatanode(dn3.getUuid(), new HashSet<>());
+    node2ContainerMap.insertNewDatanode(dn4.getUuid(), new HashSet<>());
+    PipelineSelector pipelineSelector = Mockito.mock(PipelineSelector.class);
+
+    Pipeline pipeline = new Pipeline("leader", LifeCycleState.CLOSED,
+        ReplicationType.STAND_ALONE, ReplicationFactor.THREE, "pipeline1");
+
+    when(pipelineSelector.getReplicationPipeline(ReplicationType.STAND_ALONE,
+        ReplicationFactor.THREE)).thenReturn(pipeline);
+
+    long c1 = containerStateManager
+        .allocateContainer(pipelineSelector, ReplicationType.STAND_ALONE,
+            ReplicationFactor.THREE, "root").getContainerInfo()
+        .getContainerID();
+
+    long c2 = containerStateManager
+        .allocateContainer(pipelineSelector, ReplicationType.STAND_ALONE,
+            ReplicationFactor.THREE, "root").getContainerInfo()
+        .getContainerID();
+
+    //when
+
+    //initial reports before replication is enabled. 2 containers with 3 replicas.
+    reportHandler.onMessage(
+        new ContainerReportFromDatanode(dn1,
+            createContainerReport(new long[] {c1, c2})), this);
+
+    reportHandler.onMessage(
+        new ContainerReportFromDatanode(dn2,
+            createContainerReport(new long[] {c1, c2})), this);
+
+    reportHandler.onMessage(
+        new ContainerReportFromDatanode(dn3,
+            createContainerReport(new long[] {c1, c2})), this);
+
+    reportHandler.onMessage(
+        new ContainerReportFromDatanode(dn4,
+            createContainerReport(new long[] {})), this);
+
+    Assert.assertEquals(0, publishedEvents.size());
+
+    replicationActivityStatus.enableReplication();
+
+    //no problem here
+    reportHandler.onMessage(
+        new ContainerReportFromDatanode(dn1,
+            createContainerReport(new long[] {c1, c2})), this);
+
+    Assert.assertEquals(0, publishedEvents.size());
+
+    //container is missing from d2
+    reportHandler.onMessage(
+        new ContainerReportFromDatanode(dn2,
+            createContainerReport(new long[] {c1})), this);
+
+    Assert.assertEquals(1, publishedEvents.size());
+    ReplicationRequest replicationRequest =
+        (ReplicationRequest) publishedEvents.get(0);
+
+    Assert.assertEquals(c2, replicationRequest.getContainerId());
+    Assert.assertEquals(3, replicationRequest.getExpecReplicationCount());
+    Assert.assertEquals(2, replicationRequest.getReplicationCount());
+
+    //container was replicated to dn4
+    reportHandler.onMessage(
+        new ContainerReportFromDatanode(dn4,
+            createContainerReport(new long[] {c2})), this);
+
+    //no more event, everything is perfect
+    Assert.assertEquals(1, publishedEvents.size());
+
+    //c2 was found at dn2 (it was missing before, magic)
+    reportHandler.onMessage(
+        new ContainerReportFromDatanode(dn2,
+            createContainerReport(new long[] {c1, c2})), this);
+
+    //c2 is over replicated (dn1,dn2,dn3,dn4)
+    Assert.assertEquals(2, publishedEvents.size());
+
+    replicationRequest =
+        (ReplicationRequest) publishedEvents.get(1);
+
+    Assert.assertEquals(c2, replicationRequest.getContainerId());
+    Assert.assertEquals(3, replicationRequest.getExpecReplicationCount());
+    Assert.assertEquals(4, replicationRequest.getReplicationCount());
+
+  }
+
+  private ContainerReportsProto createContainerReport(long[] containerIds) {
+
+    ContainerReportsProto.Builder crBuilder =
+        ContainerReportsProto.newBuilder();
+
+    for (long containerId : containerIds) {
+      org.apache.hadoop.hdds.protocol.proto
+          .StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder
+          ciBuilder = org.apache.hadoop.hdds.protocol.proto
+          .StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
+      ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea46d2")
+          .setSize(5368709120L)
+          .setUsed(2000000000L)
+          .setKeyCount(100000000L)
+          .setReadCount(100000000L)
+          .setWriteCount(100000000L)
+          .setReadBytes(2000000000L)
+          .setWriteBytes(2000000000L)
+          .setContainerID(containerId)
+          .setDeleteTransactionId(0);
+
+      crBuilder.addReports(ciBuilder.build());
+    }
+
+    return crBuilder.build();
+  }
+
+  @Override
+  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
+      EVENT_TYPE event, PAYLOAD payload) {
+    LOG.info("Event is published: {}", payload);
+    publishedEvents.add(payload);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dbbfe2/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMapTest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMapTest.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMapTest.java
deleted file mode 100644
index 79f1b40..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMapTest.java
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.node.states;
-
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-
-/**
- * Test classes for Node2ContainerMap.
- */
-public class Node2ContainerMapTest {
-  private final static int DATANODE_COUNT = 300;
-  private final static int CONTAINER_COUNT = 1000;
-  private final Map<UUID, TreeSet<ContainerID>> testData = new
-      ConcurrentHashMap<>();
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  private void generateData() {
-    for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
-      TreeSet<ContainerID> currentSet = new TreeSet<>();
-      for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) {
-        long currentCnIndex = (dnIndex * CONTAINER_COUNT) + cnIndex;
-        currentSet.add(new ContainerID(currentCnIndex));
-      }
-      testData.put(UUID.randomUUID(), currentSet);
-    }
-  }
-
-  private UUID getFirstKey() {
-    return testData.keySet().iterator().next();
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    generateData();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-  }
-
-  @Test
-  public void testIsKnownDatanode() throws SCMException {
-    Node2ContainerMap map = new Node2ContainerMap();
-    UUID knownNode = getFirstKey();
-    UUID unknownNode = UUID.randomUUID();
-    Set<ContainerID> containerIDs = testData.get(knownNode);
-    map.insertNewDatanode(knownNode, containerIDs);
-    Assert.assertTrue("Not able to detect a known node",
-        map.isKnownDatanode(knownNode));
-    Assert.assertFalse("Unknown node detected",
-        map.isKnownDatanode(unknownNode));
-  }
-
-  @Test
-  public void testInsertNewDatanode() throws SCMException {
-    Node2ContainerMap map = new Node2ContainerMap();
-    UUID knownNode = getFirstKey();
-    Set<ContainerID> containerIDs = testData.get(knownNode);
-    map.insertNewDatanode(knownNode, containerIDs);
-    Set<ContainerID> readSet = map.getContainers(knownNode);
-
-    // Assert that all elements are present in the set that we read back from
-    // node map.
-    Set newSet = new TreeSet((readSet));
-    Assert.assertTrue(newSet.removeAll(containerIDs));
-    Assert.assertTrue(newSet.size() == 0);
-
-    thrown.expect(SCMException.class);
-    thrown.expectMessage("already exists");
-    map.insertNewDatanode(knownNode, containerIDs);
-
-    map.removeDatanode(knownNode);
-    map.insertNewDatanode(knownNode, containerIDs);
-
-  }
-
-  @Test
-  public void testProcessReportCheckOneNode() throws SCMException {
-    UUID key = getFirstKey();
-    Set<ContainerID> values = testData.get(key);
-    Node2ContainerMap map = new Node2ContainerMap();
-    map.insertNewDatanode(key, values);
-    Assert.assertTrue(map.isKnownDatanode(key));
-    ReportResult result = map.processReport(key, values);
-    Assert.assertEquals(result.getStatus(),
-        Node2ContainerMap.ReportStatus.ALL_IS_WELL);
-  }
-
-  @Test
-  public void testProcessReportInsertAll() throws SCMException {
-    Node2ContainerMap map = new Node2ContainerMap();
-
-    for (Map.Entry<UUID, TreeSet<ContainerID>> keyEntry : testData.entrySet()) {
-      map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue());
-    }
-    // Assert all Keys are known datanodes.
-    for (UUID key : testData.keySet()) {
-      Assert.assertTrue(map.isKnownDatanode(key));
-    }
-  }
-
-  /*
-  For ProcessReport we have to test the following scenarios.
-
-  1. New Datanode - A new datanode appears and we have to add that to the
-  SCM's Node2Container Map.
-
-  2.  New Container - A Datanode exists, but a new container is added to that
-   DN. We need to detect that and return a list of added containers.
-
-  3. Missing Container - A Datanode exists, but one of the expected container
-   on that datanode is missing. We need to detect that.
-
-   4. We get a container report that has both the missing and new containers.
-    We need to return separate lists for these.
-   */
-
-  /**
-   * Assert that we are able to detect the addition of a new datanode.
-   *
-   * @throws SCMException
-   */
-  @Test
-  public void testProcessReportDetectNewDataNode() throws SCMException {
-    Node2ContainerMap map = new Node2ContainerMap();
-    // If we attempt to process a node that is not present in the map,
-    // we get a result back that says, NEW_NODE_FOUND.
-    UUID key = getFirstKey();
-    TreeSet<ContainerID> values = testData.get(key);
-    ReportResult result = map.processReport(key, values);
-    Assert.assertEquals(Node2ContainerMap.ReportStatus.NEW_DATANODE_FOUND,
-        result.getStatus());
-    Assert.assertEquals(result.getNewContainers().size(), values.size());
-  }
-
-  /**
-   * This test asserts that processReport is able to detect new containers
-   * when it is added to a datanode. For that we populate the DN with a list
-   * of containerIDs and then add few more containers and make sure that we
-   * are able to detect them.
-   *
-   * @throws SCMException
-   */
-  @Test
-  public void testProcessReportDetectNewContainers() throws SCMException {
-    Node2ContainerMap map = new Node2ContainerMap();
-    UUID key = getFirstKey();
-    TreeSet<ContainerID> values = testData.get(key);
-    map.insertNewDatanode(key, values);
-
-    final int newCount = 100;
-    // This is not a mistake, the treeset seems to be reverse sorted.
-    ContainerID last = values.pollFirst();
-    TreeSet<ContainerID> addedContainers = new TreeSet<>();
-    for (int x = 1; x <= newCount; x++) {
-      long cTemp = last.getId() + x;
-      addedContainers.add(new ContainerID(cTemp));
-    }
-
-    // This set is the super set of existing containers and new containers.
-    TreeSet<ContainerID> newContainersSet = new TreeSet<>(values);
-    newContainersSet.addAll(addedContainers);
-
-    ReportResult result = map.processReport(key, newContainersSet);
-
-    //Assert that expected size of missing container is same as addedContainers
-    Assert.assertEquals(Node2ContainerMap.ReportStatus.NEW_CONTAINERS_FOUND,
-        result.getStatus());
-
-    Assert.assertEquals(addedContainers.size(),
-        result.getNewContainers().size());
-
-    // Assert that the Container IDs are the same as we added new.
-    Assert.assertTrue("All objects are not removed.",
-        result.getNewContainers().removeAll(addedContainers));
-  }
-
-  /**
-   * This test asserts that processReport is able to detect missing containers
-   * if they are misssing from a list.
-   *
-   * @throws SCMException
-   */
-  @Test
-  public void testProcessReportDetectMissingContainers() throws SCMException {
-    Node2ContainerMap map = new Node2ContainerMap();
-    UUID key = getFirstKey();
-    TreeSet<ContainerID> values = testData.get(key);
-    map.insertNewDatanode(key, values);
-
-    final int removeCount = 100;
-    Random r = new Random();
-
-    ContainerID first = values.pollLast();
-    TreeSet<ContainerID> removedContainers = new TreeSet<>();
-
-    // Pick a random container to remove it is ok to collide no issues.
-    for (int x = 0; x < removeCount; x++) {
-      int startBase = (int) first.getId();
-      long cTemp = r.nextInt(values.size());
-      removedContainers.add(new ContainerID(cTemp + startBase));
-    }
-
-    // This set is a new set with some containers removed.
-    TreeSet<ContainerID> newContainersSet = new TreeSet<>(values);
-    newContainersSet.removeAll(removedContainers);
-
-    ReportResult result = map.processReport(key, newContainersSet);
-
-
-    //Assert that expected size of missing container is same as addedContainers
-    Assert.assertEquals(Node2ContainerMap.ReportStatus.MISSING_CONTAINERS,
-        result.getStatus());
-    Assert.assertEquals(removedContainers.size(),
-        result.getMissingContainers().size());
-
-    // Assert that the Container IDs are the same as we added new.
-    Assert.assertTrue("All missing containers not found.",
-        result.getMissingContainers().removeAll(removedContainers));
-  }
-
-  @Test
-  public void testProcessReportDetectNewAndMissingContainers() throws
-      SCMException {
-    Node2ContainerMap map = new Node2ContainerMap();
-    UUID key = getFirstKey();
-    TreeSet<ContainerID> values = testData.get(key);
-    map.insertNewDatanode(key, values);
-
-    Set<ContainerID> insertedSet = new TreeSet<>();
-    // Insert nodes from 1..30
-    for (int x = 1; x <= 30; x++) {
-      insertedSet.add(new ContainerID(x));
-    }
-
-
-    final int removeCount = 100;
-    Random r = new Random();
-
-    ContainerID first = values.pollLast();
-    TreeSet<ContainerID> removedContainers = new TreeSet<>();
-
-    // Pick a random container to remove; it is ok to collide, no issues.
-    for (int x = 0; x < removeCount; x++) {
-      int startBase = (int) first.getId();
-      long cTemp = r.nextInt(values.size());
-      removedContainers.add(new ContainerID(cTemp + startBase));
-    }
-
-    Set<ContainerID> newSet = new TreeSet<>(values);
-    newSet.addAll(insertedSet);
-    newSet.removeAll(removedContainers);
-
-    ReportResult result = map.processReport(key, newSet);
-
-
-    Assert.assertEquals(
-        Node2ContainerMap.ReportStatus.MISSING_AND_NEW_CONTAINERS_FOUND,
-        result.getStatus());
-    Assert.assertEquals(removedContainers.size(),
-        result.getMissingContainers().size());
-
-
-    // Assert that the reported missing container IDs match the ones we removed.
-    Assert.assertTrue("Expected missing containers were not reported.",
-        result.getMissingContainers().removeAll(removedContainers));
-
-    Assert.assertEquals(insertedSet.size(),
-        result.getNewContainers().size());
-
-    // Assert that the reported new container IDs match the ones we inserted.
-    Assert.assertTrue("Expected inserted containers were not reported.",
-        result.getNewContainers().removeAll(insertedSet));
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dbbfe2/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
new file mode 100644
index 0000000..633653b
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
@@ -0,0 +1,328 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.node.states;
+
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Test classes for Node2ContainerMap.
+ */
+public class TestNode2ContainerMap {
+  private final static int DATANODE_COUNT = 300;
+  private final static int CONTAINER_COUNT = 1000;
+  private final Map<UUID, TreeSet<ContainerID>> testData = new
+      ConcurrentHashMap<>();
+
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  private void generateData() {
+    for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
+      TreeSet<ContainerID> currentSet = new TreeSet<>();
+      for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) {
+        long currentCnIndex = (dnIndex * CONTAINER_COUNT) + cnIndex;
+        currentSet.add(new ContainerID(currentCnIndex));
+      }
+      testData.put(UUID.randomUUID(), currentSet);
+    }
+  }
+
+  private UUID getFirstKey() {
+    return testData.keySet().iterator().next();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    generateData();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+  }
+
+  @Test
+  public void testIsKnownDatanode() throws SCMException {
+    Node2ContainerMap map = new Node2ContainerMap();
+    UUID knownNode = getFirstKey();
+    UUID unknownNode = UUID.randomUUID();
+    Set<ContainerID> containerIDs = testData.get(knownNode);
+    map.insertNewDatanode(knownNode, containerIDs);
+    Assert.assertTrue("Not able to detect a known node",
+        map.isKnownDatanode(knownNode));
+    Assert.assertFalse("Unknown node detected",
+        map.isKnownDatanode(unknownNode));
+  }
+
+  @Test
+  public void testInsertNewDatanode() throws SCMException {
+    Node2ContainerMap map = new Node2ContainerMap();
+    UUID knownNode = getFirstKey();
+    Set<ContainerID> containerIDs = testData.get(knownNode);
+    map.insertNewDatanode(knownNode, containerIDs);
+    Set<ContainerID> readSet = map.getContainers(knownNode);
+
+    // Assert that all elements are present in the set that we read back from
+    // node map.
+    Set<ContainerID> newSet = new TreeSet<>(readSet);
+    Assert.assertTrue(newSet.removeAll(containerIDs));
+    Assert.assertTrue(newSet.isEmpty());
+
+    thrown.expect(SCMException.class);
+    thrown.expectMessage("already exists");
+    map.insertNewDatanode(knownNode, containerIDs);
+
+    map.removeDatanode(knownNode);
+    map.insertNewDatanode(knownNode, containerIDs);
+
+  }
+
+  @Test
+  public void testProcessReportCheckOneNode() throws SCMException {
+    UUID key = getFirstKey();
+    Set<ContainerID> values = testData.get(key);
+    Node2ContainerMap map = new Node2ContainerMap();
+    map.insertNewDatanode(key, values);
+    Assert.assertTrue(map.isKnownDatanode(key));
+    ReportResult result = map.processReport(key, values);
+    Assert.assertEquals(Node2ContainerMap.ReportStatus.ALL_IS_WELL,
+        result.getStatus());
+  }
+
+  @Test
+  public void testUpdateDatanodeMap() throws SCMException {
+    UUID datanodeId = getFirstKey();
+    Set<ContainerID> values = testData.get(datanodeId);
+    Node2ContainerMap map = new Node2ContainerMap();
+    map.insertNewDatanode(datanodeId, values);
+    Assert.assertTrue(map.isKnownDatanode(datanodeId));
+    Assert.assertEquals(CONTAINER_COUNT, map.getContainers(datanodeId).size());
+
+    //remove one container
+    values.remove(values.iterator().next());
+    Assert.assertEquals(CONTAINER_COUNT - 1, values.size());
+    Assert.assertEquals(CONTAINER_COUNT, map.getContainers(datanodeId).size());
+
+    map.setContainersForDatanode(datanodeId, values);
+
+    Assert.assertEquals(values.size(), map.getContainers(datanodeId).size());
+    Assert.assertEquals(values, map.getContainers(datanodeId));
+  }
+
+  @Test
+  public void testProcessReportInsertAll() throws SCMException {
+    Node2ContainerMap map = new Node2ContainerMap();
+
+    for (Map.Entry<UUID, TreeSet<ContainerID>> keyEntry : testData.entrySet()) {
+      map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue());
+    }
+    // Assert all Keys are known datanodes.
+    for (UUID key : testData.keySet()) {
+      Assert.assertTrue(map.isKnownDatanode(key));
+    }
+  }
+
+  /*
+  For processReport we have to test the following scenarios:
+
+  1. New datanode - A new datanode appears and we have to add it to the
+  SCM's Node2ContainerMap.
+
+  2. New container - A datanode exists, but a new container is added to
+  that DN. We need to detect that and return a list of added containers.
+
+  3. Missing container - A datanode exists, but one of the expected
+  containers on that datanode is missing. We need to detect that.
+
+  4. We get a container report that has both missing and new containers.
+  We need to return separate lists for these.
+  */
+
+  /**
+   * Assert that we are able to detect the addition of a new datanode.
+   *
+   * @throws SCMException
+   */
+  @Test
+  public void testProcessReportDetectNewDataNode() throws SCMException {
+    Node2ContainerMap map = new Node2ContainerMap();
+    // If we attempt to process a node that is not present in the map,
+    // we get a result back that says NEW_DATANODE_FOUND.
+    UUID key = getFirstKey();
+    TreeSet<ContainerID> values = testData.get(key);
+    ReportResult result = map.processReport(key, values);
+    Assert.assertEquals(Node2ContainerMap.ReportStatus.NEW_DATANODE_FOUND,
+        result.getStatus());
+    Assert.assertEquals(values.size(), result.getNewContainers().size());
+  }
+
+  /**
+   * This test asserts that processReport is able to detect new containers
+   * when they are added to a datanode. For that we populate the DN with a
+   * list of containerIDs, then add a few more containers and make sure that
+   * we are able to detect them.
+   *
+   * @throws SCMException
+   */
+  @Test
+  public void testProcessReportDetectNewContainers() throws SCMException {
+    Node2ContainerMap map = new Node2ContainerMap();
+    UUID key = getFirstKey();
+    TreeSet<ContainerID> values = testData.get(key);
+    map.insertNewDatanode(key, values);
+
+    final int newCount = 100;
+    // Not a mistake: the TreeSet appears to be reverse sorted, so first() returns the highest container ID.
+    ContainerID last = values.first();
+    TreeSet<ContainerID> addedContainers = new TreeSet<>();
+    for (int x = 1; x <= newCount; x++) {
+      long cTemp = last.getId() + x;
+      addedContainers.add(new ContainerID(cTemp));
+    }
+
+    // This set is the super set of existing containers and new containers.
+    TreeSet<ContainerID> newContainersSet = new TreeSet<>(values);
+    newContainersSet.addAll(addedContainers);
+
+    ReportResult result = map.processReport(key, newContainersSet);
+
+    // Assert that the number of new containers matches addedContainers.
+    Assert.assertEquals(Node2ContainerMap.ReportStatus.NEW_CONTAINERS_FOUND,
+        result.getStatus());
+
+    Assert.assertEquals(addedContainers.size(),
+        result.getNewContainers().size());
+
+    // Assert that the reported new container IDs match the ones we added.
+    Assert.assertTrue("Expected new containers were not reported.",
+        result.getNewContainers().removeAll(addedContainers));
+  }
+
+  /**
+   * This test asserts that processReport is able to detect missing containers
+   * if they are missing from a list.
+   *
+   * @throws SCMException
+   */
+  @Test
+  public void testProcessReportDetectMissingContainers() throws SCMException {
+    Node2ContainerMap map = new Node2ContainerMap();
+    UUID key = getFirstKey();
+    TreeSet<ContainerID> values = testData.get(key);
+    map.insertNewDatanode(key, values);
+
+    final int removeCount = 100;
+    Random r = new Random();
+
+    ContainerID first = values.last();
+    TreeSet<ContainerID> removedContainers = new TreeSet<>();
+
+    // Pick a random container to remove; it is ok to collide, no issues.
+    for (int x = 0; x < removeCount; x++) {
+      int startBase = (int) first.getId();
+      long cTemp = r.nextInt(values.size());
+      removedContainers.add(new ContainerID(cTemp + startBase));
+    }
+
+    // This set is a new set with some containers removed.
+    TreeSet<ContainerID> newContainersSet = new TreeSet<>(values);
+    newContainersSet.removeAll(removedContainers);
+
+    ReportResult result = map.processReport(key, newContainersSet);
+
+
+    // Assert that the number of missing containers matches removedContainers.
+    Assert.assertEquals(Node2ContainerMap.ReportStatus.MISSING_CONTAINERS,
+        result.getStatus());
+    Assert.assertEquals(removedContainers.size(),
+        result.getMissingContainers().size());
+
+    // Assert that the reported missing container IDs match the ones we removed.
+    Assert.assertTrue("Expected missing containers were not reported.",
+        result.getMissingContainers().removeAll(removedContainers));
+  }
+
+  @Test
+  public void testProcessReportDetectNewAndMissingContainers() throws
+      SCMException {
+    Node2ContainerMap map = new Node2ContainerMap();
+    UUID key = getFirstKey();
+    TreeSet<ContainerID> values = testData.get(key);
+    map.insertNewDatanode(key, values);
+
+    Set<ContainerID> insertedSet = new TreeSet<>();
+    // Insert containers with IDs 1..30.
+    for (int x = 1; x <= 30; x++) {
+      insertedSet.add(new ContainerID(x));
+    }
+
+
+    final int removeCount = 100;
+    Random r = new Random();
+
+    ContainerID first = values.last();
+    TreeSet<ContainerID> removedContainers = new TreeSet<>();
+
+    // Pick a random container to remove; it is ok to collide, no issues.
+    for (int x = 0; x < removeCount; x++) {
+      int startBase = (int) first.getId();
+      long cTemp = r.nextInt(values.size());
+      removedContainers.add(new ContainerID(cTemp + startBase));
+    }
+
+    Set<ContainerID> newSet = new TreeSet<>(values);
+    newSet.addAll(insertedSet);
+    newSet.removeAll(removedContainers);
+
+    ReportResult result = map.processReport(key, newSet);
+
+
+    Assert.assertEquals(
+        Node2ContainerMap.ReportStatus.MISSING_AND_NEW_CONTAINERS_FOUND,
+        result.getStatus());
+    Assert.assertEquals(removedContainers.size(),
+        result.getMissingContainers().size());
+
+
+    // Assert that the reported missing container IDs match the ones we removed.
+    Assert.assertTrue("Expected missing containers were not reported.",
+        result.getMissingContainers().removeAll(removedContainers));
+
+    Assert.assertEquals(insertedSet.size(),
+        result.getNewContainers().size());
+
+    // Assert that the reported new container IDs match the ones we inserted.
+    Assert.assertTrue("Expected inserted containers were not reported.",
+        result.getNewContainers().removeAll(insertedSet));
+  }
+}
\ No newline at end of file
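
For context, here is an illustrative sketch of how a caller might dispatch on
the four ReportStatus outcomes that these tests exercise. This is not part of
the patch: Node2ContainerMap, ReportResult and the ReportStatus constants are
the real names used above, while the sketch class and the handling inside each
branch are assumptions.

package org.apache.hadoop.hdds.scm.node.states;

import java.util.Set;
import java.util.UUID;

import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;

/**
 * Illustrative sketch only: one plausible consumer of processReport.
 */
public class ReportDispatchSketch {
  public void onContainerReport(Node2ContainerMap map, UUID datanodeId,
      Set<ContainerID> reportedContainers) throws SCMException {
    ReportResult result = map.processReport(datanodeId, reportedContainers);
    switch (result.getStatus()) {
    case ALL_IS_WELL:
      // Report matches SCM's current view; nothing to reconcile.
      break;
    case NEW_DATANODE_FOUND:
      // Scenario 1: first report from this node; start tracking it.
      map.insertNewDatanode(datanodeId, reportedContainers);
      break;
    case NEW_CONTAINERS_FOUND:
      // Scenario 2: result.getNewContainers() lists the additions.
      map.setContainersForDatanode(datanodeId, reportedContainers);
      break;
    case MISSING_CONTAINERS:
      // Scenario 3: result.getMissingContainers() lists the losses; a real
      // handler would likely trigger re-replication before updating the map.
      map.setContainersForDatanode(datanodeId, reportedContainers);
      break;
    case MISSING_AND_NEW_CONTAINERS_FOUND:
      // Scenario 4: both lists are populated and returned separately.
      map.setContainersForDatanode(datanodeId, reportedContainers);
      break;
    default:
      break;
    }
  }
}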


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[06/25] hadoop git commit: YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 upgrade. Contributed by Akhil PB.

Posted by su...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
deleted file mode 100644
index 3bc0433..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
+++ /dev/null
@@ -1,538 +0,0 @@
-/*
- *  File:         demo_table.css
- *  CVS:          $Id$
- *  Description:  CSS descriptions for DataTables demo pages
- *  Author:       Allan Jardine
- *  Created:      Tue May 12 06:47:22 BST 2009
- *  Modified:     $Date$ by $Author$
- *  Language:     CSS
- *  Project:      DataTables
- *
- *  Copyright 2009 Allan Jardine. All Rights Reserved.
- *
- * ***************************************************************************
- * DESCRIPTION
- *
- * The styles given here are suitable for the demos that are used with the standard DataTables
- * distribution (see www.datatables.net). You will most likely wish to modify these styles to
- * meet the layout requirements of your site.
- *
- * Common issues:
- *   'full_numbers' pagination - I use an extra selector on the body tag to ensure that there is
- *     no conflict between the two pagination types. If you want to use full_numbers pagination
- *     ensure that you either have "example_alt_pagination" as a body class name, or better yet,
- *     modify that selector.
- *   Note that the path used for Images is relative. All images are by default located in
- *     ../images/ - relative to this CSS file.
- */
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables features
- */
-
-.dataTables_wrapper {
-	position: relative;
-	min-height: 302px;
-	clear: both;
-	_height: 302px;
-	zoom: 1; /* Feeling sorry for IE */
-}
-
-.dataTables_processing {
-	position: absolute;
-	top: 50%;
-	left: 50%;
-	width: 250px;
-	height: 30px;
-	margin-left: -125px;
-	margin-top: -15px;
-	padding: 14px 0 2px 0;
-	border: 1px solid #ddd;
-	text-align: center;
-	color: #999;
-	font-size: 14px;
-	background-color: white;
-}
-
-.dataTables_length {
-	width: 40%;
-	float: left;
-}
-
-.dataTables_filter {
-	width: 50%;
-	float: right;
-	text-align: right;
-}
-
-.dataTables_info {
-	width: 60%;
-	float: left;
-}
-
-.dataTables_paginate {
-	width: 44px;
-	* width: 50px;
-	float: right;
-	text-align: right;
-}
-
-/* Pagination nested */
-.paginate_disabled_previous, .paginate_enabled_previous, .paginate_disabled_next, .paginate_enabled_next {
-	height: 19px;
-	width: 19px;
-	margin-left: 3px;
-	float: left;
-}
-
-.paginate_disabled_previous {
-	background-image: url('../images/back_disabled.jpg');
-}
-
-.paginate_enabled_previous {
-	background-image: url('../images/back_enabled.jpg');
-}
-
-.paginate_disabled_next {
-	background-image: url('../images/forward_disabled.jpg');
-}
-
-.paginate_enabled_next {
-	background-image: url('../images/forward_enabled.jpg');
-}
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables display
- */
-table.display {
-	margin: 0 auto;
-	clear: both;
-	width: 100%;
-	
-	/* Note Firefox 3.5 and before have a bug with border-collapse
-	 * ( https://bugzilla.mozilla.org/show%5Fbug.cgi?id=155955 ) 
-	 * border-spacing: 0; is one possible option. Conditional-css.com is
-	 * useful for this kind of thing
-	 *
-	 * Further note IE 6/7 has problems when calculating widths with border width.
-	 * It subtracts one px relative to the other browsers from the first column, and
-	 * adds one to the end...
-	 *
-	 * If you want that effect I'd suggest setting a border-top/left on th/td's and 
-	 * then filling in the gaps with other borders.
-	 */
-}
-
-table.display thead th {
-	padding: 3px 18px 3px 10px;
-	border-bottom: 1px solid black;
-	font-weight: bold;
-	cursor: pointer;
-	* cursor: hand;
-}
-
-table.display tfoot th {
-	padding: 3px 18px 3px 10px;
-	border-top: 1px solid black;
-	font-weight: bold;
-}
-
-table.display tr.heading2 td {
-	border-bottom: 1px solid #aaa;
-}
-
-table.display td {
-	padding: 3px 10px;
-}
-
-table.display td.center {
-	text-align: center;
-}
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables sorting
- */
-
-.sorting_asc {
-	background: url('../images/sort_asc.png') no-repeat center right;
-}
-
-.sorting_desc {
-	background: url('../images/sort_desc.png') no-repeat center right;
-}
-
-.sorting {
-	background: url('../images/sort_both.png') no-repeat center right;
-}
-
-.sorting_asc_disabled {
-	background: url('../images/sort_asc_disabled.png') no-repeat center right;
-}
-
-.sorting_desc_disabled {
-	background: url('../images/sort_desc_disabled.png') no-repeat center right;
-}
-
-
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables row classes
- */
-table.display tr.odd.gradeA {
-	background-color: #ddffdd;
-}
-
-table.display tr.even.gradeA {
-	background-color: #eeffee;
-}
-
-table.display tr.odd.gradeC {
-	background-color: #ddddff;
-}
-
-table.display tr.even.gradeC {
-	background-color: #eeeeff;
-}
-
-table.display tr.odd.gradeX {
-	background-color: #ffdddd;
-}
-
-table.display tr.even.gradeX {
-	background-color: #ffeeee;
-}
-
-table.display tr.odd.gradeU {
-	background-color: #ddd;
-}
-
-table.display tr.even.gradeU {
-	background-color: #eee;
-}
-
-
-tr.odd {
-	background-color: #E2E4FF;
-}
-
-tr.even {
-	background-color: white;
-}
-
-
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Misc
- */
-.dataTables_scroll {
-	clear: both;
-}
-
-.dataTables_scrollBody {
-	*margin-top: -1px;
-}
-
-.top, .bottom {
-	padding: 15px;
-	background-color: #F5F5F5;
-	border: 1px solid #CCCCCC;
-}
-
-.top .dataTables_info {
-	float: none;
-}
-
-.clear {
-	clear: both;
-}
-
-.dataTables_empty {
-	text-align: center;
-}
-
-tfoot input {
-	margin: 0.5em 0;
-	width: 100%;
-	color: #444;
-}
-
-tfoot input.search_init {
-	color: #999;
-}
-
-td.group {
-	background-color: #d1cfd0;
-	border-bottom: 2px solid #A19B9E;
-	border-top: 2px solid #A19B9E;
-}
-
-td.details {
-	background-color: #d1cfd0;
-	border: 2px solid #A19B9E;
-}
-
-
-.example_alt_pagination div.dataTables_info {
-	width: 40%;
-}
-
-.paging_full_numbers {
-	width: 400px;
-	height: 22px;
-	line-height: 22px;
-}
-
-.paging_full_numbers span.paginate_button,
- 	.paging_full_numbers span.paginate_active {
-	border: 1px solid #aaa;
-	-webkit-border-radius: 5px;
-	-moz-border-radius: 5px;
-	padding: 2px 5px;
-	margin: 0 3px;
-	cursor: pointer;
-	*cursor: hand;
-}
-
-.paging_full_numbers span.paginate_button {
-	background-color: #ddd;
-}
-
-.paging_full_numbers span.paginate_button:hover {
-	background-color: #ccc;
-}
-
-.paging_full_numbers span.paginate_active {
-	background-color: #99B3FF;
-}
-
-table.display tr.even.row_selected td {
-	background-color: #B0BED9;
-}
-
-table.display tr.odd.row_selected td {
-	background-color: #9FAFD1;
-}
-
-
-/*
- * Sorting classes for columns
- */
-/* For the standard odd/even */
-tr.odd td.sorting_1 {
-	background-color: #D3D6FF;
-}
-
-tr.odd td.sorting_2 {
-	background-color: #DADCFF;
-}
-
-tr.odd td.sorting_3 {
-	background-color: #E0E2FF;
-}
-
-tr.even td.sorting_1 {
-	background-color: #EAEBFF;
-}
-
-tr.even td.sorting_2 {
-	background-color: #F2F3FF;
-}
-
-tr.even td.sorting_3 {
-	background-color: #F9F9FF;
-}
-
-
-/* For the Conditional-CSS grading rows */
-/*
- 	Colour calculations (based off the main row colours)
-  Level 1:
-		dd > c4
-		ee > d5
-	Level 2:
-	  dd > d1
-	  ee > e2
- */
-tr.odd.gradeA td.sorting_1 {
-	background-color: #c4ffc4;
-}
-
-tr.odd.gradeA td.sorting_2 {
-	background-color: #d1ffd1;
-}
-
-tr.odd.gradeA td.sorting_3 {
-	background-color: #d1ffd1;
-}
-
-tr.even.gradeA td.sorting_1 {
-	background-color: #d5ffd5;
-}
-
-tr.even.gradeA td.sorting_2 {
-	background-color: #e2ffe2;
-}
-
-tr.even.gradeA td.sorting_3 {
-	background-color: #e2ffe2;
-}
-
-tr.odd.gradeC td.sorting_1 {
-	background-color: #c4c4ff;
-}
-
-tr.odd.gradeC td.sorting_2 {
-	background-color: #d1d1ff;
-}
-
-tr.odd.gradeC td.sorting_3 {
-	background-color: #d1d1ff;
-}
-
-tr.even.gradeC td.sorting_1 {
-	background-color: #d5d5ff;
-}
-
-tr.even.gradeC td.sorting_2 {
-	background-color: #e2e2ff;
-}
-
-tr.even.gradeC td.sorting_3 {
-	background-color: #e2e2ff;
-}
-
-tr.odd.gradeX td.sorting_1 {
-	background-color: #ffc4c4;
-}
-
-tr.odd.gradeX td.sorting_2 {
-	background-color: #ffd1d1;
-}
-
-tr.odd.gradeX td.sorting_3 {
-	background-color: #ffd1d1;
-}
-
-tr.even.gradeX td.sorting_1 {
-	background-color: #ffd5d5;
-}
-
-tr.even.gradeX td.sorting_2 {
-	background-color: #ffe2e2;
-}
-
-tr.even.gradeX td.sorting_3 {
-	background-color: #ffe2e2;
-}
-
-tr.odd.gradeU td.sorting_1 {
-	background-color: #c4c4c4;
-}
-
-tr.odd.gradeU td.sorting_2 {
-	background-color: #d1d1d1;
-}
-
-tr.odd.gradeU td.sorting_3 {
-	background-color: #d1d1d1;
-}
-
-tr.even.gradeU td.sorting_1 {
-	background-color: #d5d5d5;
-}
-
-tr.even.gradeU td.sorting_2 {
-	background-color: #e2e2e2;
-}
-
-tr.even.gradeU td.sorting_3 {
-	background-color: #e2e2e2;
-}
-
-
-/*
- * Row highlighting example
- */
-.ex_highlight #example tbody tr.even:hover, #example tbody tr.even td.highlighted {
-	background-color: #ECFFB3;
-}
-
-.ex_highlight #example tbody tr.odd:hover, #example tbody tr.odd td.highlighted {
-	background-color: #E6FF99;
-}
-
-.ex_highlight_row #example tr.even:hover {
-	background-color: #ECFFB3;
-}
-
-.ex_highlight_row #example tr.even:hover td.sorting_1 {
-	background-color: #DDFF75;
-}
-
-.ex_highlight_row #example tr.even:hover td.sorting_2 {
-	background-color: #E7FF9E;
-}
-
-.ex_highlight_row #example tr.even:hover td.sorting_3 {
-	background-color: #E2FF89;
-}
-
-.ex_highlight_row #example tr.odd:hover {
-	background-color: #E6FF99;
-}
-
-.ex_highlight_row #example tr.odd:hover td.sorting_1 {
-	background-color: #D6FF5C;
-}
-
-.ex_highlight_row #example tr.odd:hover td.sorting_2 {
-	background-color: #E0FF84;
-}
-
-.ex_highlight_row #example tr.odd:hover td.sorting_3 {
-	background-color: #DBFF70;
-}
-
-
-/*
- * KeyTable
- */
-table.KeyTable td {
-	border: 3px solid transparent;
-}
-
-table.KeyTable td.focus {
-	border: 3px solid #3366FF;
-}
-
-table.display tr.gradeA {
-	background-color: #eeffee;
-}
-
-table.display tr.gradeC {
-	background-color: #ddddff;
-}
-
-table.display tr.gradeX {
-	background-color: #ffdddd;
-}
-
-table.display tr.gradeU {
-	background-color: #ddd;
-}
-
-div.box {
-	height: 100px;
-	padding: 10px;
-	overflow: auto;
-	border: 1px solid #8080FF;
-	background-color: #E5E5FF;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/jui-dt.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/jui-dt.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/jui-dt.css
deleted file mode 100644
index 6f6f414..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/jui-dt.css
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- *  File:         demo_table_jui.css
- *  CVS:          $Id$
- *  Description:  CSS descriptions for DataTables demo pages
- *  Author:       Allan Jardine
- *  Created:      Tue May 12 06:47:22 BST 2009
- *  Modified:     $Date$ by $Author$
- *  Language:     CSS
- *  Project:      DataTables
- *
- *  Copyright 2009 Allan Jardine. All Rights Reserved.
- *
- * ***************************************************************************
- * DESCRIPTION
- *
- * The styles given here are suitable for the demos that are used with the standard DataTables
- * distribution (see www.datatables.net). You will most likely wish to modify these styles to
- * meet the layout requirements of your site.
- *
- * Common issues:
- *   'full_numbers' pagination - I use an extra selector on the body tag to ensure that there is
- *     no conflict between the two pagination types. If you want to use full_numbers pagination
- *     ensure that you either have "example_alt_pagination" as a body class name, or better yet,
- *     modify that selector.
- *   Note that the path used for Images is relative. All images are by default located in
- *     ../images/ - relative to this CSS file.
- */
-
-
-/*
- * jQuery UI specific styling
- */
-
-.paging_two_button .ui-button {
-	float: left;
-	cursor: pointer;
-	* cursor: hand;
-}
-
-.paging_full_numbers .ui-button {
-	padding: 2px 6px;
-	margin: 0;
-	cursor: pointer;
-	* cursor: hand;
-}
-
-.ui-buttonset .ui-button {
-	margin-right: -0.1em !important;
-}
-
-.paging_full_numbers {
-	width: 350px !important;
-}
-
-.ui-toolbar {
-	padding: 5px;
-}
-
-.dataTables_paginate {
-	width: auto;
-}
-
-.dataTables_info {
-	padding-top: 3px;
-}
-
-table.display thead th {
-	padding: 3px 0px 3px 10px;
-	cursor: pointer;
-	* cursor: hand;
-}
-
-div.dataTables_wrapper .ui-widget-header {
-	font-weight: normal;
-}
-
-
-/*
- * Sort arrow icon positioning
- */
-table.display thead th div.DataTables_sort_wrapper {
-	position: relative;
-	padding-right: 20px;
-	padding-right: 20px;
-}
-
-table.display thead th div.DataTables_sort_wrapper span {
-	position: absolute;
-	top: 50%;
-	margin-top: -8px;
-	right: 0;
-}
-
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- *
- * Everything below this line is the same as demo_table.css. This file is
- * required for 'cleanliness' of the markup
- *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables features
- */
-
-.dataTables_wrapper {
-	position: relative;
-	min-height: 35px;
-	_height: 35px;
-	clear: both;
-}
-
-.dataTables_processing {
-	position: absolute;
-	top: 0px;
-	left: 50%;
-	width: 250px;
-	margin-left: -125px;
-	border: 1px solid #ddd;
-	text-align: center;
-	color: #999;
-	font-size: 11px;
-	padding: 2px 0;
-}
-
-.dataTables_length {
-	width: 40%;
-	float: left;
-}
-
-.dataTables_filter {
-	width: 50%;
-	float: right;
-	text-align: right;
-}
-
-.dataTables_info {
-	width: 50%;
-	float: left;
-}
-
-.dataTables_paginate {
-	float: right;
-	text-align: right;
-}
-
-/* Pagination nested */
-.paginate_disabled_previous, .paginate_enabled_previous, .paginate_disabled_next, .paginate_enabled_next {
-	height: 19px;
-	width: 19px;
-	margin-left: 3px;
-	float: left;
-}
-
-.paginate_disabled_previous {
-	background-image: url('../images/back_disabled.jpg');
-}
-
-.paginate_enabled_previous {
-	background-image: url('../images/back_enabled.jpg');
-}
-
-.paginate_disabled_next {
-	background-image: url('../images/forward_disabled.jpg');
-}
-
-.paginate_enabled_next {
-	background-image: url('../images/forward_enabled.jpg');
-}
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables display
- */
-table.display {
-	margin: 0 auto;
-	width: 100%;
-	clear: both;
-	border-collapse: collapse;
-}
-
-table.display tfoot th {
-	padding: 3px 0px 3px 10px;
-	font-weight: bold;
-	font-weight: normal;
-}
-
-table.display tr.heading2 td {
-	border-bottom: 1px solid #aaa;
-}
-
-table.display td {
-	padding: 3px 10px;
-}
-
-table.display td.center {
-	text-align: center;
-}
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables sorting
- */
-
-.sorting_asc {
-	background: url('../images/sort_asc.jpg') no-repeat center right;
-}
-
-.sorting_desc {
-	background: url('../images/sort_desc.jpg') no-repeat center right;
-}
-
-.sorting {
-	background: url('../images/sort_both.jpg') no-repeat center right;
-}
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Misc
- */
-.dataTables_scroll {
-	clear: both;
-}
-
-.top, .bottom {
-	padding: 15px;
-	background-color: #F5F5F5;
-	border: 1px solid #CCCCCC;
-}
-
-.top .dataTables_info {
-	float: none;
-}
-
-.clear {
-	clear: both;
-}
-
-.dataTables_empty {
-	text-align: center;
-}
-
-tfoot input {
-	margin: 0.5em 0;
-	width: 100%;
-	color: #444;
-}
-
-tfoot input.search_init {
-	color: #999;
-}
-
-td.group {
-	background-color: #d1cfd0;
-	border-bottom: 2px solid #A19B9E;
-	border-top: 2px solid #A19B9E;
-}
-
-td.details {
-	background-color: #d1cfd0;
-	border: 2px solid #A19B9E;
-}
-
-
-.example_alt_pagination div.dataTables_info {
-	width: 40%;
-}
-
-.paging_full_numbers span.paginate_button,
- 	.paging_full_numbers span.paginate_active {
-	border: 1px solid #aaa;
-	-webkit-border-radius: 5px;
-	-moz-border-radius: 5px;
-	padding: 2px 5px;
-	margin: 0 3px;
-	cursor: pointer;
-	*cursor: hand;
-}
-
-.paging_full_numbers span.paginate_button {
-	background-color: #ddd;
-}
-
-.paging_full_numbers span.paginate_button:hover {
-	background-color: #ccc;
-}
-
-.paging_full_numbers span.paginate_active {
-	background-color: #99B3FF;
-}
-
-table.display tr.even.row_selected td {
-	background-color: #B0BED9;
-}
-
-table.display tr.odd.row_selected td {
-	background-color: #9FAFD1;
-}
-
-/* Striping */
-tr.odd { background: rgba(255, 255, 255, 0.1); }
-tr.even { background: rgba(0, 0, 255, 0.05); }
-
-
-/*
- * Sorting classes for columns
- */
-tr.odd td.sorting_1 { background: rgba(0, 0, 0, 0.03); }
-tr.odd td.sorting_2 { background: rgba(0, 0, 0, 0.02); } 
-tr.odd td.sorting_3 { background: rgba(0, 0, 0, 0.02); }
-tr.even td.sorting_1 { background: rgba(0, 0, 0, 0.08); }
-tr.even td.sorting_2 { background: rgba(0, 0, 0, 0.06); }
-tr.even td.sorting_3 { background: rgba(0, 0, 0, 0.06); }
-
-.css_left { position: relative; float: left; }
-.css_right { position: relative; float: right; }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/Sorting icons.psd
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/Sorting icons.psd b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/Sorting icons.psd
deleted file mode 100644
index 53b2e06..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/Sorting icons.psd and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/back_disabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/back_disabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/back_disabled.jpg
deleted file mode 100644
index 1e73a54..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/back_disabled.jpg and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/back_enabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/back_enabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/back_enabled.jpg
deleted file mode 100644
index a6d764c..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/back_enabled.jpg and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/favicon.ico
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/favicon.ico b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/favicon.ico
deleted file mode 100644
index 6eeaa2a..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/favicon.ico and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/forward_disabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/forward_disabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/forward_disabled.jpg
deleted file mode 100644
index 28a9dc5..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/forward_disabled.jpg and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/forward_enabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/forward_enabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/forward_enabled.jpg
deleted file mode 100644
index 598c075..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/forward_enabled.jpg and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_asc.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_asc.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_asc.png
deleted file mode 100644
index a56d0e2..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_asc.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_asc_disabled.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_asc_disabled.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_asc_disabled.png
deleted file mode 100644
index b7e621e..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_asc_disabled.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_both.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_both.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_both.png
deleted file mode 100644
index 839ac4b..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_both.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_desc.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_desc.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_desc.png
deleted file mode 100644
index 90b2951..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_desc.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_desc_disabled.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_desc_disabled.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_desc_disabled.png
deleted file mode 100644
index 2409653..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/images/sort_desc_disabled.png and /dev/null differ


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[19/25] hadoop git commit: YARN-8575. Avoid committing allocation proposal to unavailable nodes in async scheduling. Contributed by Tao Yang.

Posted by su...@apache.org.
YARN-8575. Avoid committing allocation proposal to unavailable nodes in async scheduling. Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a71bf14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a71bf14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a71bf14

Branch: refs/heads/HDFS-12943
Commit: 0a71bf145293adbd3728525ab4c36c08d51377d3
Parents: 08d5060
Author: Weiwei Yang <ww...@apache.org>
Authored: Fri Aug 10 14:37:45 2018 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Fri Aug 10 14:37:45 2018 +0800

----------------------------------------------------------------------
 .../scheduler/common/fica/FiCaSchedulerApp.java | 12 ++++
 .../yarn/server/resourcemanager/MockNodes.java  |  6 +-
 .../resourcemanager/TestResourceManager.java    | 16 ++++-
 .../TestCapacitySchedulerAsyncScheduling.java   | 69 ++++++++++++++++++++
 .../scheduler/capacity/TestUtils.java           |  2 +
 5 files changed, 100 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
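
In short, the patch adds a commit-time guard: an allocation proposal is
rejected when its target node is no longer in the RUNNING state. A minimal,
self-contained sketch of that guard follows; the types here are hypothetical
stand-ins, and the real check lives in FiCaSchedulerApp#accept in the diff
below.

final class ProposalGuardSketch {
  enum NodeState { NEW, RUNNING, DECOMMISSIONED, LOST }

  static final class SchedulerNode {
    final NodeState state;
    SchedulerNode(NodeState state) { this.state = state; }
  }

  // Mirrors the new check: bail out before any bookkeeping if the node
  // became unusable between the async scheduling pass and the commit.
  static boolean accept(SchedulerNode node) {
    if (node.state != NodeState.RUNNING) {
      return false; // proposal rejected; the scheduler can retry elsewhere
    }
    return true; // continue with the normal accept path
  }

  public static void main(String[] args) {
    System.out.println(accept(new SchedulerNode(NodeState.RUNNING)));        // true
    System.out.println(accept(new SchedulerNode(NodeState.DECOMMISSIONED))); // false
  }
}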


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a71bf14/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 9810e98..6a5af81 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -429,6 +430,17 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
         SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode>
             schedulerContainer = allocation.getAllocatedOrReservedContainer();
 
+        // Make sure node is in RUNNING state
+        if (schedulerContainer.getSchedulerNode().getRMNode().getState()
+            != NodeState.RUNNING) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Failed to accept this proposal because node "
+                + schedulerContainer.getSchedulerNode().getNodeID() + " is in "
+                + schedulerContainer.getSchedulerNode().getRMNode().getState()
+                + " state (not RUNNING)");
+          }
+          return false;
+        }
         if (schedulerContainer.isAllocated()) {
           // When allocate a new container
           containerRequest =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a71bf14/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
index 9041132..c444b6e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
@@ -347,17 +347,17 @@ public class MockNodes {
   }
 
   public static RMNode newNodeInfo(int rack, final Resource perNode, int hostnum) {
-    return buildRMNode(rack, perNode, null, "localhost:0", hostnum, null, 123);
+    return buildRMNode(rack, perNode, NodeState.RUNNING, "localhost:0", hostnum, null, 123);
   }
   
   public static RMNode newNodeInfo(int rack, final Resource perNode,
       int hostnum, String hostName) {
-    return buildRMNode(rack, perNode, null, "localhost:0", hostnum, hostName, 123);
+    return buildRMNode(rack, perNode, NodeState.RUNNING, "localhost:0", hostnum, hostName, 123);
   }
 
   public static RMNode newNodeInfo(int rack, final Resource perNode,
       int hostnum, String hostName, int port) {
-    return buildRMNode(rack, perNode, null, "localhost:0", hostnum, hostName, port);
+    return buildRMNode(rack, perNode, NodeState.RUNNING, "localhost:0", hostnum, hostName, port);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a71bf14/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
index 941e477..a66c583 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -39,6 +40,8 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStartedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
@@ -86,8 +89,9 @@ public class TestResourceManager {
   }
 
   @Test
-  public void testResourceAllocation() throws IOException,
-      YarnException, InterruptedException {
+  public void testResourceAllocation()
+      throws IOException, YarnException, InterruptedException,
+      TimeoutException {
     LOG.info("--- START: testResourceAllocation ---");
         
     final int memory = 4 * 1024;
@@ -105,6 +109,14 @@ public class TestResourceManager {
       registerNode(host2, 1234, 2345, NetworkTopology.DEFAULT_RACK, 
           Resources.createResource(memory/2, vcores/2));
 
+    // nodes should be in RUNNING state
+    RMNodeImpl node1 = (RMNodeImpl) resourceManager.getRMContext().getRMNodes().get(
+        nm1.getNodeId());
+    RMNodeImpl node2 = (RMNodeImpl) resourceManager.getRMContext().getRMNodes().get(
+        nm2.getNodeId());
+    node1.handle(new RMNodeStartedEvent(nm1.getNodeId(), null, null));
+    node2.handle(new RMNodeStartedEvent(nm2.getNodeId(), null, null));
+
     // Submit an application
     Application application = new Application("user1", resourceManager);
     application.submit();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a71bf14/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
index c2c1519..840d30d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
@@ -18,12 +18,14 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
+import com.google.common.collect.ImmutableList;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -43,6 +45,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEven
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -745,6 +749,71 @@ public class TestCapacitySchedulerAsyncScheduling {
     rm1.close();
   }
 
+  @Test(timeout = 30000)
+  public void testCommitProposalsForUnusableNode() throws Exception {
+    // disable async-scheduling to simulate a complex scenario
+    Configuration disableAsyncConf = new Configuration(conf);
+    disableAsyncConf.setBoolean(
+        CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, false);
+
+    // init RM & NMs
+    final MockRM rm = new MockRM(disableAsyncConf);
+    rm.start();
+    final MockNM nm1 = rm.registerNode("192.168.0.1:1234", 8 * GB);
+    final MockNM nm2 = rm.registerNode("192.168.0.2:2234", 8 * GB);
+    final MockNM nm3 = rm.registerNode("192.168.0.3:2234", 8 * GB);
+    rm.drainEvents();
+    CapacityScheduler cs =
+        (CapacityScheduler) rm.getRMContext().getScheduler();
+    SchedulerNode sn1 = cs.getSchedulerNode(nm1.getNodeId());
+
+    // launch app1-am on nm1
+    RMApp app1 = rm.submitApp(1 * GB, "app1", "user", null, false, "default",
+        YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS, null, null, true, true);
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
+
+    // launch app2-am on nm2
+    RMApp app2 = rm.submitApp(1 * GB, "app2", "user", null, false, "default",
+        YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS, null, null, true, true);
+    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm2);
+
+    // app2 asks 1 * 8G container
+    am2.allocate(ImmutableList.of(ResourceRequest
+        .newInstance(Priority.newInstance(0), "*",
+            Resources.createResource(8 * GB), 1)), null);
+
+    List<Object> reservedProposalParts = new ArrayList<>();
+    final CapacityScheduler spyCs = Mockito.spy(cs);
+    // intercept CapacityScheduler#tryCommit and capture its arguments
+    Mockito.doAnswer(new Answer<Object>() {
+      public Boolean answer(InvocationOnMock invocation) throws Exception {
+        for (Object argument : invocation.getArguments()) {
+          reservedProposalParts.add(argument);
+        }
+        return false;
+      }
+    }).when(spyCs).tryCommit(Mockito.any(Resource.class),
+        Mockito.any(ResourceCommitRequest.class), Mockito.anyBoolean());
+
+    spyCs.handle(new NodeUpdateSchedulerEvent(sn1.getRMNode()));
+
+    // decommission nm1
+    RMNode rmNode1 = cs.getNode(nm1.getNodeId()).getRMNode();
+    cs.getRMContext().getDispatcher().getEventHandler().handle(
+        new RMNodeEvent(nm1.getNodeId(), RMNodeEventType.DECOMMISSION));
+    rm.drainEvents();
+    Assert.assertEquals(NodeState.DECOMMISSIONED, rmNode1.getState());
+    Assert.assertNull(cs.getNode(nm1.getNodeId()));
+
+    // try commit after nm1 decommissioned
+    boolean isSuccess =
+        cs.tryCommit((Resource) reservedProposalParts.get(0),
+            (ResourceCommitRequest) reservedProposalParts.get(1),
+            (Boolean) reservedProposalParts.get(2));
+    Assert.assertFalse(isSuccess);
+    rm.stop();
+  }
+
   private ResourceCommitRequest createAllocateFromReservedProposal(
       int containerId, Resource allocateResource, FiCaSchedulerApp schedulerApp,
       SchedulerNode allocateNode, SchedulerNode reservedNode,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a71bf14/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
index fae63be..b13790d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
@@ -220,6 +221,7 @@ public class TestUtils {
     when(rmNode.getNodeAddress()).thenReturn(host+":"+port);
     when(rmNode.getHostName()).thenReturn(host);
     when(rmNode.getRackName()).thenReturn(rack);
+    when(rmNode.getState()).thenReturn(NodeState.RUNNING);
     
     FiCaSchedulerNode node = spy(new FiCaSchedulerNode(rmNode, false));
     LOG.info("node = " + host + " avail=" + node.getUnallocatedResource());

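One easy-to-miss detail in the TestUtils change: a Mockito mock returns null for any unstubbed method with a reference return type, enums included, so any code that switches on or dereferences the node state would hit a NullPointerException. A tiny standalone sketch of the failure mode the new stub avoids (the NodeLike interface and State enum here are illustrative, not Hadoop types):

import org.mockito.Mockito;

public class NullStateSketch {
  enum State { RUNNING, DECOMMISSIONED }
  interface NodeLike { State getState(); }

  public static void main(String[] args) {
    NodeLike node = Mockito.mock(NodeLike.class);
    // Unstubbed, getState() yields null, so switching on the result
    // would throw NullPointerException:
    // switch (node.getState()) { ... }  // NPE here
    Mockito.when(node.getState()).thenReturn(State.RUNNING);
    switch (node.getState()) {           // safe after stubbing a default
    case RUNNING:
      System.out.println("node is schedulable");
      break;
    default:
      System.out.println("node is not schedulable");
    }
  }
}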



[20/25] hadoop git commit: HDFS-13795. Fix potential NPE in InMemoryLevelDBAliasMapServer.

Posted by su...@apache.org.
HDFS-13795. Fix potential NPE in InMemoryLevelDBAliasMapServer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15241c63
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15241c63
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15241c63

Branch: refs/heads/HDFS-12943
Commit: 15241c6349a5245761ed43bd0d38b25f783cc96b
Parents: 0a71bf1
Author: Virajith Jalaparti <vi...@apache.org>
Authored: Fri Aug 10 09:38:40 2018 -0700
Committer: Virajith Jalaparti <vi...@apache.org>
Committed: Fri Aug 10 09:38:40 2018 -0700

----------------------------------------------------------------------
 ...yAliasMapProtocolClientSideTranslatorPB.java |  6 +++
 .../aliasmap/InMemoryLevelDBAliasMapServer.java |  8 +++-
 .../impl/TestInMemoryLevelDBAliasMapClient.java | 39 ++++++++++++++++++++
 3 files changed, 51 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/15241c63/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
index 2025c16..d9e984b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
@@ -167,6 +167,9 @@ public class InMemoryAliasMapProtocolClientSideTranslatorPB
   public Optional<ProvidedStorageLocation> read(@Nonnull Block block)
       throws IOException {
 
+    if (block == null) {
+      throw new IOException("Block cannot be null");
+    }
     ReadRequestProto request =
         ReadRequestProto
             .newBuilder()
@@ -191,6 +194,9 @@ public class InMemoryAliasMapProtocolClientSideTranslatorPB
   public void write(@Nonnull Block block,
       @Nonnull ProvidedStorageLocation providedStorageLocation)
       throws IOException {
+    if (block == null || providedStorageLocation == null) {
+      throw new IOException("Provided block and location cannot be null");
+    }
     WriteRequestProto request =
         WriteRequestProto
             .newBuilder()

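Design note: both hunks follow the same fail-fast convention, rejecting null arguments with the IOException the methods already declare rather than letting the protobuf request builders fail later with a less informative NullPointerException. A generic helper expressing that convention might look like the following sketch; it is hypothetical and not part of the patch:

import java.io.IOException;

final class IoPreconditions {
  private IoPreconditions() {
  }

  // Return the value if non-null, otherwise fail with the checked
  // exception type that RPC translator methods already declare.
  static <T> T checkNotNull(T value, String what) throws IOException {
    if (value == null) {
      throw new IOException(what + " cannot be null");
    }
    return value;
  }
}

With such a helper, the read() guard above would reduce to a single checkNotNull(block, "Block") call before building the request.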
http://git-wip-us.apache.org/repos/asf/hadoop/blob/15241c63/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java
index f201bfd..5c56736 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java
@@ -150,11 +150,15 @@ public class InMemoryLevelDBAliasMapServer implements InMemoryAliasMapProtocol,
   public void close() {
     LOG.info("Stopping InMemoryLevelDBAliasMapServer");
     try {
-      aliasMap.close();
+      if (aliasMap != null) {
+        aliasMap.close();
+      }
     } catch (IOException e) {
       LOG.error(e.getMessage());
     }
-    aliasMapServer.stop();
+    if (aliasMapServer != null) {
+      aliasMapServer.stop();
+    }
   }
 
 }

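The close() change is the standard null-safe, best-effort shutdown pattern: each resource is guarded and handled independently, so a resource that was never initialized (for example when setConf() threw before start() ran) no longer aborts shutdown, and a failure closing one resource never prevents releasing the other. A condensed, self-contained sketch of the pattern, with placeholder Closeable fields standing in for the alias map and its RPC server:

import java.io.Closeable;
import java.io.IOException;

public class SafeCloseSketch implements Closeable {
  private Closeable aliasMap;       // may still be null if setConf() failed
  private Closeable aliasMapServer; // may still be null if start() never ran

  @Override
  public void close() {
    try {
      if (aliasMap != null) {
        aliasMap.close();
      }
    } catch (IOException e) {
      System.err.println(e.getMessage()); // log and keep shutting down
    }
    if (aliasMapServer != null) {
      try {
        aliasMapServer.close();
      } catch (IOException e) {
        System.err.println(e.getMessage());
      }
    }
  }
}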
http://git-wip-us.apache.org/repos/asf/hadoop/blob/15241c63/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java
index f062633..fccb6f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java
@@ -28,14 +28,19 @@ import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap;
 import org.apache.hadoop.hdfs.server.aliasmap.InMemoryLevelDBAliasMapServer;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.FileRegion;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
@@ -59,6 +64,9 @@ public class TestInMemoryLevelDBAliasMapClient {
   private Configuration conf;
   private final static String BPID = "BPID-0";
 
+  @Rule
+  public final ExpectedException exception = ExpectedException.none();
+
   @Before
   public void setUp() throws IOException {
     conf = new Configuration();
@@ -348,4 +356,35 @@ public class TestInMemoryLevelDBAliasMapClient {
     conf.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY, "0.0.0.0");
     writeRead();
   }
+
+  @Test
+  public void testNonExistentFile() throws Exception {
+    // delete alias map location
+    FileUtils.deleteDirectory(tempDir);
+    // expect a RuntimeException when the aliasmap is initialized via setConf.
+    exception.expect(RuntimeException.class);
+    levelDBAliasMapServer.setConf(conf);
+  }
+
+  @Test
+  public void testNonExistentBlock() throws Exception {
+    inMemoryLevelDBAliasMapClient.setConf(conf);
+    levelDBAliasMapServer.setConf(conf);
+    levelDBAliasMapServer.start();
+    Block block1 = new Block(100, 43, 44);
+    ProvidedStorageLocation providedStorageLocation1 = null;
+    BlockAliasMap.Writer<FileRegion> writer1 =
+        inMemoryLevelDBAliasMapClient.getWriter(null, BPID);
+    try {
+      writer1.store(new FileRegion(block1, providedStorageLocation1));
+      fail("Should fail on writing a region with null ProvidedLocation");
+    } catch (IOException | IllegalArgumentException e) {
+      assertTrue(e.getMessage().contains("not be null"));
+    }
+
+    BlockAliasMap.Reader<FileRegion> reader =
+        inMemoryLevelDBAliasMapClient.getReader(null, BPID);
+    LambdaTestUtils.assertOptionalUnset("Expected empty BlockAlias",
+        reader.resolve(block1));
+  }
 }
\ No newline at end of file

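The new negative tests lean on two idioms: JUnit's ExpectedException rule for the initialization-time failure, and an empty java.util.Optional, checked via LambdaTestUtils.assertOptionalUnset, for the missing-block lookup. A dependency-free restatement of the Optional check, with a stand-in lookup method replacing reader.resolve(block):

import java.util.Optional;

public class OptionalUnsetSketch {
  // Stand-in for BlockAliasMap.Reader#resolve: unknown keys come back
  // as Optional.empty() rather than null or an exception.
  static Optional<String> resolve(long blockId) {
    return Optional.empty();
  }

  public static void main(String[] args) {
    Optional<String> alias = resolve(100L);
    if (alias.isPresent()) {
      throw new AssertionError("Expected empty BlockAlias, got " + alias.get());
    }
    System.out.println("lookup of an unknown block is safely empty");
  }
}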



[05/25] hadoop git commit: YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 upgrade. Contributed by Akhil PB.

Posted by su...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/js/jquery.dataTables.min.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/js/jquery.dataTables.min.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/js/jquery.dataTables.min.js
deleted file mode 100644
index 61acb9b..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/js/jquery.dataTables.min.js
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * File:        jquery.dataTables.min.js
- * Version:     1.9.4
- * Author:      Allan Jardine (www.sprymedia.co.uk)
- * Info:        www.datatables.net
- *
- * Copyright 2008-2012 Allan Jardine, all rights reserved.
- *
- * This source file is free software, under either the GPL v2 license or a
- * BSD style license, available at:
- *   http://datatables.net/license_gpl2
- *   http://datatables.net/license_bsd
- *
- * This source file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the license files for details.
- */
-(function(la,s,p){(function(i){if(typeof define==="function"&&define.amd)define(["jquery"],i);else jQuery&&!jQuery.fn.dataTable&&i(jQuery)})(function(i){var l=function(h){function n(a,b){var c=l.defaults.columns,d=a.aoColumns.length;b=i.extend({},l.models.oColumn,c,{sSortingClass:a.oClasses.sSortable,sSortingClassJUI:a.oClasses.sSortJUI,nTh:b?b:s.createElement("th"),sTitle:c.sTitle?c.sTitle:b?b.innerHTML:"",aDataSort:c.aDataSort?c.aDataSort:[d],mData:c.mData?c.oDefaults:d});a.aoColumns.push(b);if(a.aoPreSearchCols[d]===
-p||a.aoPreSearchCols[d]===null)a.aoPreSearchCols[d]=i.extend({},l.models.oSearch);else{b=a.aoPreSearchCols[d];if(b.bRegex===p)b.bRegex=true;if(b.bSmart===p)b.bSmart=true;if(b.bCaseInsensitive===p)b.bCaseInsensitive=true}q(a,d,null)}function q(a,b,c){var d=a.aoColumns[b];if(c!==p&&c!==null){if(c.mDataProp&&!c.mData)c.mData=c.mDataProp;if(c.sType!==p){d.sType=c.sType;d._bAutoType=false}i.extend(d,c);r(d,c,"sWidth","sWidthOrig");if(c.iDataSort!==p)d.aDataSort=[c.iDataSort];r(d,c,"aDataSort")}var e=d.mRender?
-ca(d.mRender):null,f=ca(d.mData);d.fnGetData=function(g,j){var k=f(g,j);if(d.mRender&&j&&j!=="")return e(k,j,g);return k};d.fnSetData=Ja(d.mData);if(!a.oFeatures.bSort)d.bSortable=false;if(!d.bSortable||i.inArray("asc",d.asSorting)==-1&&i.inArray("desc",d.asSorting)==-1){d.sSortingClass=a.oClasses.sSortableNone;d.sSortingClassJUI=""}else if(i.inArray("asc",d.asSorting)==-1&&i.inArray("desc",d.asSorting)==-1){d.sSortingClass=a.oClasses.sSortable;d.sSortingClassJUI=a.oClasses.sSortJUI}else if(i.inArray("asc",
-d.asSorting)!=-1&&i.inArray("desc",d.asSorting)==-1){d.sSortingClass=a.oClasses.sSortableAsc;d.sSortingClassJUI=a.oClasses.sSortJUIAscAllowed}else if(i.inArray("asc",d.asSorting)==-1&&i.inArray("desc",d.asSorting)!=-1){d.sSortingClass=a.oClasses.sSortableDesc;d.sSortingClassJUI=a.oClasses.sSortJUIDescAllowed}}function o(a){if(a.oFeatures.bAutoWidth===false)return false;ta(a);for(var b=0,c=a.aoColumns.length;b<c;b++)a.aoColumns[b].nTh.style.width=a.aoColumns[b].sWidth}function v(a,b){a=A(a,"bVisible");
-return typeof a[b]==="number"?a[b]:null}function w(a,b){a=A(a,"bVisible");b=i.inArray(b,a);return b!==-1?b:null}function D(a){return A(a,"bVisible").length}function A(a,b){var c=[];i.map(a.aoColumns,function(d,e){d[b]&&c.push(e)});return c}function G(a){for(var b=l.ext.aTypes,c=b.length,d=0;d<c;d++){var e=b[d](a);if(e!==null)return e}return"string"}function E(a,b){b=b.split(",");for(var c=[],d=0,e=a.aoColumns.length;d<e;d++)for(var f=0;f<e;f++)if(a.aoColumns[d].sName==b[f]){c.push(f);break}return c}
-function Y(a){for(var b="",c=0,d=a.aoColumns.length;c<d;c++)b+=a.aoColumns[c].sName+",";if(b.length==d)return"";return b.slice(0,-1)}function ma(a,b,c,d){var e,f,g,j,k;if(b)for(e=b.length-1;e>=0;e--){var m=b[e].aTargets;i.isArray(m)||O(a,1,"aTargets must be an array of targets, not a "+typeof m);f=0;for(g=m.length;f<g;f++)if(typeof m[f]==="number"&&m[f]>=0){for(;a.aoColumns.length<=m[f];)n(a);d(m[f],b[e])}else if(typeof m[f]==="number"&&m[f]<0)d(a.aoColumns.length+m[f],b[e]);else if(typeof m[f]===
-"string"){j=0;for(k=a.aoColumns.length;j<k;j++)if(m[f]=="_all"||i(a.aoColumns[j].nTh).hasClass(m[f]))d(j,b[e])}}if(c){e=0;for(a=c.length;e<a;e++)d(e,c[e])}}function R(a,b){var c;c=i.isArray(b)?b.slice():i.extend(true,{},b);b=a.aoData.length;var d=i.extend(true,{},l.models.oRow);d._aData=c;a.aoData.push(d);var e;d=0;for(var f=a.aoColumns.length;d<f;d++){c=a.aoColumns[d];typeof c.fnRender==="function"&&c.bUseRendered&&c.mData!==null?S(a,b,d,da(a,b,d)):S(a,b,d,F(a,b,d));if(c._bAutoType&&c.sType!="string"){e=
-F(a,b,d,"type");if(e!==null&&e!==""){e=G(e);if(c.sType===null)c.sType=e;else if(c.sType!=e&&c.sType!="html")c.sType="string"}}}a.aiDisplayMaster.push(b);a.oFeatures.bDeferRender||ua(a,b);return b}function ea(a){var b,c,d,e,f,g,j;if(a.bDeferLoading||a.sAjaxSource===null)for(b=a.nTBody.firstChild;b;){if(b.nodeName.toUpperCase()=="TR"){c=a.aoData.length;b._DT_RowIndex=c;a.aoData.push(i.extend(true,{},l.models.oRow,{nTr:b}));a.aiDisplayMaster.push(c);f=b.firstChild;for(d=0;f;){g=f.nodeName.toUpperCase();
-if(g=="TD"||g=="TH"){S(a,c,d,i.trim(f.innerHTML));d++}f=f.nextSibling}}b=b.nextSibling}e=fa(a);d=[];b=0;for(c=e.length;b<c;b++)for(f=e[b].firstChild;f;){g=f.nodeName.toUpperCase();if(g=="TD"||g=="TH")d.push(f);f=f.nextSibling}c=0;for(e=a.aoColumns.length;c<e;c++){j=a.aoColumns[c];if(j.sTitle===null)j.sTitle=j.nTh.innerHTML;var k=j._bAutoType,m=typeof j.fnRender==="function",u=j.sClass!==null,x=j.bVisible,y,B;if(k||m||u||!x){g=0;for(b=a.aoData.length;g<b;g++){f=a.aoData[g];y=d[g*e+c];if(k&&j.sType!=
-"string"){B=F(a,g,c,"type");if(B!==""){B=G(B);if(j.sType===null)j.sType=B;else if(j.sType!=B&&j.sType!="html")j.sType="string"}}if(j.mRender)y.innerHTML=F(a,g,c,"display");else if(j.mData!==c)y.innerHTML=F(a,g,c,"display");if(m){B=da(a,g,c);y.innerHTML=B;j.bUseRendered&&S(a,g,c,B)}if(u)y.className+=" "+j.sClass;if(x)f._anHidden[c]=null;else{f._anHidden[c]=y;y.parentNode.removeChild(y)}j.fnCreatedCell&&j.fnCreatedCell.call(a.oInstance,y,F(a,g,c,"display"),f._aData,g,c)}}}if(a.aoRowCreatedCallback.length!==
-0){b=0;for(c=a.aoData.length;b<c;b++){f=a.aoData[b];K(a,"aoRowCreatedCallback",null,[f.nTr,f._aData,b])}}}function V(a,b){return b._DT_RowIndex!==p?b._DT_RowIndex:null}function va(a,b,c){b=W(a,b);var d=0;for(a=a.aoColumns.length;d<a;d++)if(b[d]===c)return d;return-1}function na(a,b,c,d){for(var e=[],f=0,g=d.length;f<g;f++)e.push(F(a,b,d[f],c));return e}function F(a,b,c,d){var e=a.aoColumns[c];if((c=e.fnGetData(a.aoData[b]._aData,d))===p){if(a.iDrawError!=a.iDraw&&e.sDefaultContent===null){O(a,0,"Requested unknown parameter "+
-(typeof e.mData=="function"?"{mData function}":"'"+e.mData+"'")+" from the data source for row "+b);a.iDrawError=a.iDraw}return e.sDefaultContent}if(c===null&&e.sDefaultContent!==null)c=e.sDefaultContent;else if(typeof c==="function")return c();if(d=="display"&&c===null)return"";return c}function S(a,b,c,d){a.aoColumns[c].fnSetData(a.aoData[b]._aData,d)}function ca(a){if(a===null)return function(){return null};else if(typeof a==="function")return function(c,d,e){return a(c,d,e)};else if(typeof a===
-"string"&&(a.indexOf(".")!==-1||a.indexOf("[")!==-1)){var b=function(c,d,e){var f=e.split("."),g;if(e!==""){var j=0;for(g=f.length;j<g;j++){if(e=f[j].match(ga)){f[j]=f[j].replace(ga,"");if(f[j]!=="")c=c[f[j]];g=[];f.splice(0,j+1);f=f.join(".");j=0;for(var k=c.length;j<k;j++)g.push(b(c[j],d,f));c=e[0].substring(1,e[0].length-1);c=c===""?g:g.join(c);break}if(c===null||c[f[j]]===p)return p;c=c[f[j]]}}return c};return function(c,d){return b(c,d,a)}}else return function(c){return c[a]}}function Ja(a){if(a===
-null)return function(){};else if(typeof a==="function")return function(c,d){a(c,"set",d)};else if(typeof a==="string"&&(a.indexOf(".")!==-1||a.indexOf("[")!==-1)){var b=function(c,d,e){e=e.split(".");var f,g,j=0;for(g=e.length-1;j<g;j++){if(f=e[j].match(ga)){e[j]=e[j].replace(ga,"");c[e[j]]=[];f=e.slice();f.splice(0,j+1);g=f.join(".");for(var k=0,m=d.length;k<m;k++){f={};b(f,d[k],g);c[e[j]].push(f)}return}if(c[e[j]]===null||c[e[j]]===p)c[e[j]]={};c=c[e[j]]}c[e[e.length-1].replace(ga,"")]=d};return function(c,
-d){return b(c,d,a)}}else return function(c,d){c[a]=d}}function oa(a){for(var b=[],c=a.aoData.length,d=0;d<c;d++)b.push(a.aoData[d]._aData);return b}function wa(a){a.aoData.splice(0,a.aoData.length);a.aiDisplayMaster.splice(0,a.aiDisplayMaster.length);a.aiDisplay.splice(0,a.aiDisplay.length);I(a)}function xa(a,b){for(var c=-1,d=0,e=a.length;d<e;d++)if(a[d]==b)c=d;else a[d]>b&&a[d]--;c!=-1&&a.splice(c,1)}function da(a,b,c){var d=a.aoColumns[c];return d.fnRender({iDataRow:b,iDataColumn:c,oSettings:a,
-aData:a.aoData[b]._aData,mDataProp:d.mData},F(a,b,c,"display"))}function ua(a,b){var c=a.aoData[b],d;if(c.nTr===null){c.nTr=s.createElement("tr");c.nTr._DT_RowIndex=b;if(c._aData.DT_RowId)c.nTr.id=c._aData.DT_RowId;if(c._aData.DT_RowClass)c.nTr.className=c._aData.DT_RowClass;for(var e=0,f=a.aoColumns.length;e<f;e++){var g=a.aoColumns[e];d=s.createElement(g.sCellType);d.innerHTML=typeof g.fnRender==="function"&&(!g.bUseRendered||g.mData===null)?da(a,b,e):F(a,b,e,"display");if(g.sClass!==null)d.className=
-g.sClass;if(g.bVisible){c.nTr.appendChild(d);c._anHidden[e]=null}else c._anHidden[e]=d;g.fnCreatedCell&&g.fnCreatedCell.call(a.oInstance,d,F(a,b,e,"display"),c._aData,b,e)}K(a,"aoRowCreatedCallback",null,[c.nTr,c._aData,b])}}function Ka(a){var b,c,d;if(i("th, td",a.nTHead).length!==0){b=0;for(d=a.aoColumns.length;b<d;b++){c=a.aoColumns[b].nTh;c.setAttribute("role","columnheader");if(a.aoColumns[b].bSortable){c.setAttribute("tabindex",a.iTabIndex);c.setAttribute("aria-controls",a.sTableId)}a.aoColumns[b].sClass!==
-null&&i(c).addClass(a.aoColumns[b].sClass);if(a.aoColumns[b].sTitle!=c.innerHTML)c.innerHTML=a.aoColumns[b].sTitle}}else{var e=s.createElement("tr");b=0;for(d=a.aoColumns.length;b<d;b++){c=a.aoColumns[b].nTh;c.innerHTML=a.aoColumns[b].sTitle;c.setAttribute("tabindex","0");a.aoColumns[b].sClass!==null&&i(c).addClass(a.aoColumns[b].sClass);e.appendChild(c)}i(a.nTHead).html("")[0].appendChild(e);ha(a.aoHeader,a.nTHead)}i(a.nTHead).children("tr").attr("role","row");if(a.bJUI){b=0;for(d=a.aoColumns.length;b<
-d;b++){c=a.aoColumns[b].nTh;e=s.createElement("div");e.className=a.oClasses.sSortJUIWrapper;i(c).contents().appendTo(e);var f=s.createElement("span");f.className=a.oClasses.sSortIcon;e.appendChild(f);c.appendChild(e)}}if(a.oFeatures.bSort)for(b=0;b<a.aoColumns.length;b++)a.aoColumns[b].bSortable!==false?ya(a,a.aoColumns[b].nTh,b):i(a.aoColumns[b].nTh).addClass(a.oClasses.sSortableNone);a.oClasses.sFooterTH!==""&&i(a.nTFoot).children("tr").children("th").addClass(a.oClasses.sFooterTH);if(a.nTFoot!==
-null){c=Z(a,null,a.aoFooter);b=0;for(d=a.aoColumns.length;b<d;b++)if(c[b]){a.aoColumns[b].nTf=c[b];a.aoColumns[b].sClass&&i(c[b]).addClass(a.aoColumns[b].sClass)}}}function ia(a,b,c){var d,e,f,g=[],j=[],k=a.aoColumns.length,m;if(c===p)c=false;d=0;for(e=b.length;d<e;d++){g[d]=b[d].slice();g[d].nTr=b[d].nTr;for(f=k-1;f>=0;f--)!a.aoColumns[f].bVisible&&!c&&g[d].splice(f,1);j.push([])}d=0;for(e=g.length;d<e;d++){if(a=g[d].nTr)for(;f=a.firstChild;)a.removeChild(f);f=0;for(b=g[d].length;f<b;f++){m=k=1;
-if(j[d][f]===p){a.appendChild(g[d][f].cell);for(j[d][f]=1;g[d+k]!==p&&g[d][f].cell==g[d+k][f].cell;){j[d+k][f]=1;k++}for(;g[d][f+m]!==p&&g[d][f].cell==g[d][f+m].cell;){for(c=0;c<k;c++)j[d+c][f+m]=1;m++}g[d][f].cell.rowSpan=k;g[d][f].cell.colSpan=m}}}}function H(a){var b=K(a,"aoPreDrawCallback","preDraw",[a]);if(i.inArray(false,b)!==-1)P(a,false);else{var c,d;b=[];var e=0,f=a.asStripeClasses.length;c=a.aoOpenRows.length;a.bDrawing=true;if(a.iInitDisplayStart!==p&&a.iInitDisplayStart!=-1){a._iDisplayStart=
-a.oFeatures.bServerSide?a.iInitDisplayStart:a.iInitDisplayStart>=a.fnRecordsDisplay()?0:a.iInitDisplayStart;a.iInitDisplayStart=-1;I(a)}if(a.bDeferLoading){a.bDeferLoading=false;a.iDraw++}else if(a.oFeatures.bServerSide){if(!a.bDestroying&&!La(a))return}else a.iDraw++;if(a.aiDisplay.length!==0){var g=a._iDisplayStart;d=a._iDisplayEnd;if(a.oFeatures.bServerSide){g=0;d=a.aoData.length}for(g=g;g<d;g++){var j=a.aoData[a.aiDisplay[g]];j.nTr===null&&ua(a,a.aiDisplay[g]);var k=j.nTr;if(f!==0){var m=a.asStripeClasses[e%
-f];if(j._sRowStripe!=m){i(k).removeClass(j._sRowStripe).addClass(m);j._sRowStripe=m}}K(a,"aoRowCallback",null,[k,a.aoData[a.aiDisplay[g]]._aData,e,g]);b.push(k);e++;if(c!==0)for(j=0;j<c;j++)if(k==a.aoOpenRows[j].nParent){b.push(a.aoOpenRows[j].nTr);break}}}else{b[0]=s.createElement("tr");if(a.asStripeClasses[0])b[0].className=a.asStripeClasses[0];c=a.oLanguage;f=c.sZeroRecords;if(a.iDraw==1&&a.sAjaxSource!==null&&!a.oFeatures.bServerSide)f=c.sLoadingRecords;else if(c.sEmptyTable&&a.fnRecordsTotal()===
-0)f=c.sEmptyTable;c=s.createElement("td");c.setAttribute("valign","top");c.colSpan=D(a);c.className=a.oClasses.sRowEmpty;c.innerHTML=za(a,f);b[e].appendChild(c)}K(a,"aoHeaderCallback","header",[i(a.nTHead).children("tr")[0],oa(a),a._iDisplayStart,a.fnDisplayEnd(),a.aiDisplay]);K(a,"aoFooterCallback","footer",[i(a.nTFoot).children("tr")[0],oa(a),a._iDisplayStart,a.fnDisplayEnd(),a.aiDisplay]);e=s.createDocumentFragment();c=s.createDocumentFragment();if(a.nTBody){f=a.nTBody.parentNode;c.appendChild(a.nTBody);
-if(!a.oScroll.bInfinite||!a._bInitComplete||a.bSorted||a.bFiltered)for(;c=a.nTBody.firstChild;)a.nTBody.removeChild(c);c=0;for(d=b.length;c<d;c++)e.appendChild(b[c]);a.nTBody.appendChild(e);f!==null&&f.appendChild(a.nTBody)}K(a,"aoDrawCallback","draw",[a]);a.bSorted=false;a.bFiltered=false;a.bDrawing=false;if(a.oFeatures.bServerSide){P(a,false);a._bInitComplete||pa(a)}}}function qa(a){if(a.oFeatures.bSort)$(a,a.oPreviousSearch);else if(a.oFeatures.bFilter)X(a,a.oPreviousSearch);else{I(a);H(a)}}function Ma(a){var b=
-i("<div></div>")[0];a.nTable.parentNode.insertBefore(b,a.nTable);a.nTableWrapper=i('<div id="'+a.sTableId+'_wrapper" class="'+a.oClasses.sWrapper+'" role="grid"></div>')[0];a.nTableReinsertBefore=a.nTable.nextSibling;for(var c=a.nTableWrapper,d=a.sDom.split(""),e,f,g,j,k,m,u,x=0;x<d.length;x++){f=0;g=d[x];if(g=="<"){j=i("<div></div>")[0];k=d[x+1];if(k=="'"||k=='"'){m="";for(u=2;d[x+u]!=k;){m+=d[x+u];u++}if(m=="H")m=a.oClasses.sJUIHeader;else if(m=="F")m=a.oClasses.sJUIFooter;if(m.indexOf(".")!=-1){k=
-m.split(".");j.id=k[0].substr(1,k[0].length-1);j.className=k[1]}else if(m.charAt(0)=="#")j.id=m.substr(1,m.length-1);else j.className=m;x+=u}c.appendChild(j);c=j}else if(g==">")c=c.parentNode;else if(g=="l"&&a.oFeatures.bPaginate&&a.oFeatures.bLengthChange){e=Na(a);f=1}else if(g=="f"&&a.oFeatures.bFilter){e=Oa(a);f=1}else if(g=="r"&&a.oFeatures.bProcessing){e=Pa(a);f=1}else if(g=="t"){e=Qa(a);f=1}else if(g=="i"&&a.oFeatures.bInfo){e=Ra(a);f=1}else if(g=="p"&&a.oFeatures.bPaginate){e=Sa(a);f=1}else if(l.ext.aoFeatures.length!==
-0){j=l.ext.aoFeatures;u=0;for(k=j.length;u<k;u++)if(g==j[u].cFeature){if(e=j[u].fnInit(a))f=1;break}}if(f==1&&e!==null){if(typeof a.aanFeatures[g]!=="object")a.aanFeatures[g]=[];a.aanFeatures[g].push(e);c.appendChild(e)}}b.parentNode.replaceChild(a.nTableWrapper,b)}function ha(a,b){b=i(b).children("tr");var c,d,e,f,g,j,k,m,u,x,y=function(B,T,M){for(B=B[T];B[M];)M++;return M};a.splice(0,a.length);e=0;for(j=b.length;e<j;e++)a.push([]);e=0;for(j=b.length;e<j;e++){c=b[e];for(d=c.firstChild;d;){if(d.nodeName.toUpperCase()==
-"TD"||d.nodeName.toUpperCase()=="TH"){m=d.getAttribute("colspan")*1;u=d.getAttribute("rowspan")*1;m=!m||m===0||m===1?1:m;u=!u||u===0||u===1?1:u;k=y(a,e,0);x=m===1?true:false;for(g=0;g<m;g++)for(f=0;f<u;f++){a[e+f][k+g]={cell:d,unique:x};a[e+f].nTr=c}}d=d.nextSibling}}}function Z(a,b,c){var d=[];if(!c){c=a.aoHeader;if(b){c=[];ha(c,b)}}b=0;for(var e=c.length;b<e;b++)for(var f=0,g=c[b].length;f<g;f++)if(c[b][f].unique&&(!d[f]||!a.bSortCellsTop))d[f]=c[b][f].cell;return d}function La(a){if(a.bAjaxDataGet){a.iDraw++;
-P(a,true);var b=Ta(a);Aa(a,b);a.fnServerData.call(a.oInstance,a.sAjaxSource,b,function(c){Ua(a,c)},a);return false}else return true}function Ta(a){var b=a.aoColumns.length,c=[],d,e,f,g;c.push({name:"sEcho",value:a.iDraw});c.push({name:"iColumns",value:b});c.push({name:"sColumns",value:Y(a)});c.push({name:"iDisplayStart",value:a._iDisplayStart});c.push({name:"iDisplayLength",value:a.oFeatures.bPaginate!==false?a._iDisplayLength:-1});for(f=0;f<b;f++){d=a.aoColumns[f].mData;c.push({name:"mDataProp_"+
-f,value:typeof d==="function"?"function":d})}if(a.oFeatures.bFilter!==false){c.push({name:"sSearch",value:a.oPreviousSearch.sSearch});c.push({name:"bRegex",value:a.oPreviousSearch.bRegex});for(f=0;f<b;f++){c.push({name:"sSearch_"+f,value:a.aoPreSearchCols[f].sSearch});c.push({name:"bRegex_"+f,value:a.aoPreSearchCols[f].bRegex});c.push({name:"bSearchable_"+f,value:a.aoColumns[f].bSearchable})}}if(a.oFeatures.bSort!==false){var j=0;d=a.aaSortingFixed!==null?a.aaSortingFixed.concat(a.aaSorting):a.aaSorting.slice();
-for(f=0;f<d.length;f++){e=a.aoColumns[d[f][0]].aDataSort;for(g=0;g<e.length;g++){c.push({name:"iSortCol_"+j,value:e[g]});c.push({name:"sSortDir_"+j,value:d[f][1]});j++}}c.push({name:"iSortingCols",value:j});for(f=0;f<b;f++)c.push({name:"bSortable_"+f,value:a.aoColumns[f].bSortable})}return c}function Aa(a,b){K(a,"aoServerParams","serverParams",[b])}function Ua(a,b){if(b.sEcho!==p)if(b.sEcho*1<a.iDraw)return;else a.iDraw=b.sEcho*1;if(!a.oScroll.bInfinite||a.oScroll.bInfinite&&(a.bSorted||a.bFiltered))wa(a);
-a._iRecordsTotal=parseInt(b.iTotalRecords,10);a._iRecordsDisplay=parseInt(b.iTotalDisplayRecords,10);var c=Y(a);c=b.sColumns!==p&&c!==""&&b.sColumns!=c;var d;if(c)d=E(a,b.sColumns);b=ca(a.sAjaxDataProp)(b);for(var e=0,f=b.length;e<f;e++)if(c){for(var g=[],j=0,k=a.aoColumns.length;j<k;j++)g.push(b[e][d[j]]);R(a,g)}else R(a,b[e]);a.aiDisplay=a.aiDisplayMaster.slice();a.bAjaxDataGet=false;H(a);a.bAjaxDataGet=true;P(a,false)}function Oa(a){var b=a.oPreviousSearch,c=a.oLanguage.sSearch;c=c.indexOf("_INPUT_")!==
--1?c.replace("_INPUT_",'<input type="text" />'):c===""?'<input type="text" />':c+' <input type="text" />';var d=s.createElement("div");d.className=a.oClasses.sFilter;d.innerHTML="<label>"+c+"</label>";if(!a.aanFeatures.f)d.id=a.sTableId+"_filter";c=i('input[type="text"]',d);d._DT_Input=c[0];c.val(b.sSearch.replace('"',"&quot;"));c.bind("keyup.DT",function(){for(var e=a.aanFeatures.f,f=this.value===""?"":this.value,g=0,j=e.length;g<j;g++)e[g]!=i(this).parents("div.dataTables_filter")[0]&&i(e[g]._DT_Input).val(f);
-f!=b.sSearch&&X(a,{sSearch:f,bRegex:b.bRegex,bSmart:b.bSmart,bCaseInsensitive:b.bCaseInsensitive})});c.attr("aria-controls",a.sTableId).bind("keypress.DT",function(e){if(e.keyCode==13)return false});return d}function X(a,b,c){var d=a.oPreviousSearch,e=a.aoPreSearchCols,f=function(g){d.sSearch=g.sSearch;d.bRegex=g.bRegex;d.bSmart=g.bSmart;d.bCaseInsensitive=g.bCaseInsensitive};if(a.oFeatures.bServerSide)f(b);else{Va(a,b.sSearch,c,b.bRegex,b.bSmart,b.bCaseInsensitive);f(b);for(b=0;b<a.aoPreSearchCols.length;b++)Wa(a,
-e[b].sSearch,b,e[b].bRegex,e[b].bSmart,e[b].bCaseInsensitive);Xa(a)}a.bFiltered=true;i(a.oInstance).trigger("filter",a);a._iDisplayStart=0;I(a);H(a);Ba(a,0)}function Xa(a){for(var b=l.ext.afnFiltering,c=A(a,"bSearchable"),d=0,e=b.length;d<e;d++)for(var f=0,g=0,j=a.aiDisplay.length;g<j;g++){var k=a.aiDisplay[g-f];if(!b[d](a,na(a,k,"filter",c),k)){a.aiDisplay.splice(g-f,1);f++}}}function Wa(a,b,c,d,e,f){if(b!==""){var g=0;b=Ca(b,d,e,f);for(d=a.aiDisplay.length-1;d>=0;d--){e=Ya(F(a,a.aiDisplay[d],c,
-"filter"),a.aoColumns[c].sType);if(!b.test(e)){a.aiDisplay.splice(d,1);g++}}}}function Va(a,b,c,d,e,f){d=Ca(b,d,e,f);e=a.oPreviousSearch;c||(c=0);if(l.ext.afnFiltering.length!==0)c=1;if(b.length<=0){a.aiDisplay.splice(0,a.aiDisplay.length);a.aiDisplay=a.aiDisplayMaster.slice()}else if(a.aiDisplay.length==a.aiDisplayMaster.length||e.sSearch.length>b.length||c==1||b.indexOf(e.sSearch)!==0){a.aiDisplay.splice(0,a.aiDisplay.length);Ba(a,1);for(b=0;b<a.aiDisplayMaster.length;b++)d.test(a.asDataSearch[b])&&
-a.aiDisplay.push(a.aiDisplayMaster[b])}else for(b=c=0;b<a.asDataSearch.length;b++)if(!d.test(a.asDataSearch[b])){a.aiDisplay.splice(b-c,1);c++}}function Ba(a,b){if(!a.oFeatures.bServerSide){a.asDataSearch=[];var c=A(a,"bSearchable");b=b===1?a.aiDisplayMaster:a.aiDisplay;for(var d=0,e=b.length;d<e;d++)a.asDataSearch[d]=Da(a,na(a,b[d],"filter",c))}}function Da(a,b){a=b.join("  ");if(a.indexOf("&")!==-1)a=i("<div>").html(a).text();return a.replace(/[\n\r]/g," ")}function Ca(a,b,c,d){if(c){a=b?a.split(" "):
-Ea(a).split(" ");a="^(?=.*?"+a.join(")(?=.*?")+").*$";return new RegExp(a,d?"i":"")}else{a=b?a:Ea(a);return new RegExp(a,d?"i":"")}}function Ya(a,b){if(typeof l.ext.ofnSearch[b]==="function")return l.ext.ofnSearch[b](a);else if(a===null)return"";else if(b=="html")return a.replace(/[\r\n]/g," ").replace(/<.*?>/g,"");else if(typeof a==="string")return a.replace(/[\r\n]/g," ");return a}function Ea(a){return a.replace(new RegExp("(\\/|\\.|\\*|\\+|\\?|\\||\\(|\\)|\\[|\\]|\\{|\\}|\\\\|\\$|\\^|\\-)","g"),
-"\\$1")}function Ra(a){var b=s.createElement("div");b.className=a.oClasses.sInfo;if(!a.aanFeatures.i){a.aoDrawCallback.push({fn:Za,sName:"information"});b.id=a.sTableId+"_info"}a.nTable.setAttribute("aria-describedby",a.sTableId+"_info");return b}function Za(a){if(!(!a.oFeatures.bInfo||a.aanFeatures.i.length===0)){var b=a.oLanguage,c=a._iDisplayStart+1,d=a.fnDisplayEnd(),e=a.fnRecordsTotal(),f=a.fnRecordsDisplay(),g;g=f===0?b.sInfoEmpty:b.sInfo;if(f!=e)g+=" "+b.sInfoFiltered;g+=b.sInfoPostFix;g=za(a,
-g);if(b.fnInfoCallback!==null)g=b.fnInfoCallback.call(a.oInstance,a,c,d,e,f,g);a=a.aanFeatures.i;b=0;for(c=a.length;b<c;b++)i(a[b]).html(g)}}function za(a,b){var c=a.fnFormatNumber(a._iDisplayStart+1),d=a.fnDisplayEnd();d=a.fnFormatNumber(d);var e=a.fnRecordsDisplay();e=a.fnFormatNumber(e);var f=a.fnRecordsTotal();f=a.fnFormatNumber(f);if(a.oScroll.bInfinite)c=a.fnFormatNumber(1);return b.replace(/_START_/g,c).replace(/_END_/g,d).replace(/_TOTAL_/g,e).replace(/_MAX_/g,f)}function ra(a){var b,c,d=
-a.iInitDisplayStart;if(a.bInitialised===false)setTimeout(function(){ra(a)},200);else{Ma(a);Ka(a);ia(a,a.aoHeader);a.nTFoot&&ia(a,a.aoFooter);P(a,true);a.oFeatures.bAutoWidth&&ta(a);b=0;for(c=a.aoColumns.length;b<c;b++)if(a.aoColumns[b].sWidth!==null)a.aoColumns[b].nTh.style.width=t(a.aoColumns[b].sWidth);if(a.oFeatures.bSort)$(a);else if(a.oFeatures.bFilter)X(a,a.oPreviousSearch);else{a.aiDisplay=a.aiDisplayMaster.slice();I(a);H(a)}if(a.sAjaxSource!==null&&!a.oFeatures.bServerSide){c=[];Aa(a,c);a.fnServerData.call(a.oInstance,
-a.sAjaxSource,c,function(e){var f=a.sAjaxDataProp!==""?ca(a.sAjaxDataProp)(e):e;for(b=0;b<f.length;b++)R(a,f[b]);a.iInitDisplayStart=d;if(a.oFeatures.bSort)$(a);else{a.aiDisplay=a.aiDisplayMaster.slice();I(a);H(a)}P(a,false);pa(a,e)},a)}else if(!a.oFeatures.bServerSide){P(a,false);pa(a)}}}function pa(a,b){a._bInitComplete=true;K(a,"aoInitComplete","init",[a,b])}function Fa(a){var b=l.defaults.oLanguage;!a.sEmptyTable&&a.sZeroRecords&&b.sEmptyTable==="No data available in table"&&r(a,a,"sZeroRecords",
-"sEmptyTable");!a.sLoadingRecords&&a.sZeroRecords&&b.sLoadingRecords==="Loading..."&&r(a,a,"sZeroRecords","sLoadingRecords")}function Na(a){if(a.oScroll.bInfinite)return null;var b='<select size="1" '+('name="'+a.sTableId+'_length"')+">",c,d,e=a.aLengthMenu;if(e.length==2&&typeof e[0]==="object"&&typeof e[1]==="object"){c=0;for(d=e[0].length;c<d;c++)b+='<option value="'+e[0][c]+'">'+e[1][c]+"</option>"}else{c=0;for(d=e.length;c<d;c++)b+='<option value="'+e[c]+'">'+e[c]+"</option>"}b+="</select>";
-e=s.createElement("div");if(!a.aanFeatures.l)e.id=a.sTableId+"_length";e.className=a.oClasses.sLength;e.innerHTML="<label>"+a.oLanguage.sLengthMenu.replace("_MENU_",b)+"</label>";i('select option[value="'+a._iDisplayLength+'"]',e).attr("selected",true);i("select",e).bind("change.DT",function(){var f=i(this).val(),g=a.aanFeatures.l;c=0;for(d=g.length;c<d;c++)g[c]!=this.parentNode&&i("select",g[c]).val(f);a._iDisplayLength=parseInt(f,10);I(a);if(a.fnDisplayEnd()==a.fnRecordsDisplay()){a._iDisplayStart=
-a.fnDisplayEnd()-a._iDisplayLength;if(a._iDisplayStart<0)a._iDisplayStart=0}if(a._iDisplayLength==-1)a._iDisplayStart=0;H(a)});i("select",e).attr("aria-controls",a.sTableId);return e}function I(a){a._iDisplayEnd=a.oFeatures.bPaginate===false?a.aiDisplay.length:a._iDisplayStart+a._iDisplayLength>a.aiDisplay.length||a._iDisplayLength==-1?a.aiDisplay.length:a._iDisplayStart+a._iDisplayLength}function Sa(a){if(a.oScroll.bInfinite)return null;var b=s.createElement("div");b.className=a.oClasses.sPaging+
-a.sPaginationType;l.ext.oPagination[a.sPaginationType].fnInit(a,b,function(c){I(c);H(c)});a.aanFeatures.p||a.aoDrawCallback.push({fn:function(c){l.ext.oPagination[c.sPaginationType].fnUpdate(c,function(d){I(d);H(d)})},sName:"pagination"});return b}function Ga(a,b){var c=a._iDisplayStart;if(typeof b==="number"){a._iDisplayStart=b*a._iDisplayLength;if(a._iDisplayStart>a.fnRecordsDisplay())a._iDisplayStart=0}else if(b=="first")a._iDisplayStart=0;else if(b=="previous"){a._iDisplayStart=a._iDisplayLength>=
-0?a._iDisplayStart-a._iDisplayLength:0;if(a._iDisplayStart<0)a._iDisplayStart=0}else if(b=="next")if(a._iDisplayLength>=0){if(a._iDisplayStart+a._iDisplayLength<a.fnRecordsDisplay())a._iDisplayStart+=a._iDisplayLength}else a._iDisplayStart=0;else if(b=="last")if(a._iDisplayLength>=0){b=parseInt((a.fnRecordsDisplay()-1)/a._iDisplayLength,10)+1;a._iDisplayStart=(b-1)*a._iDisplayLength}else a._iDisplayStart=0;else O(a,0,"Unknown paging action: "+b);i(a.oInstance).trigger("page",a);return c!=a._iDisplayStart}
-function Pa(a){var b=s.createElement("div");if(!a.aanFeatures.r)b.id=a.sTableId+"_processing";b.innerHTML=a.oLanguage.sProcessing;b.className=a.oClasses.sProcessing;a.nTable.parentNode.insertBefore(b,a.nTable);return b}function P(a,b){if(a.oFeatures.bProcessing)for(var c=a.aanFeatures.r,d=0,e=c.length;d<e;d++)c[d].style.visibility=b?"visible":"hidden";i(a.oInstance).trigger("processing",[a,b])}function Qa(a){if(a.oScroll.sX===""&&a.oScroll.sY==="")return a.nTable;var b=s.createElement("div"),c=s.createElement("div"),
-d=s.createElement("div"),e=s.createElement("div"),f=s.createElement("div"),g=s.createElement("div"),j=a.nTable.cloneNode(false),k=a.nTable.cloneNode(false),m=a.nTable.getElementsByTagName("thead")[0],u=a.nTable.getElementsByTagName("tfoot").length===0?null:a.nTable.getElementsByTagName("tfoot")[0],x=a.oClasses;c.appendChild(d);f.appendChild(g);e.appendChild(a.nTable);b.appendChild(c);b.appendChild(e);d.appendChild(j);j.appendChild(m);if(u!==null){b.appendChild(f);g.appendChild(k);k.appendChild(u)}b.className=
-x.sScrollWrapper;c.className=x.sScrollHead;d.className=x.sScrollHeadInner;e.className=x.sScrollBody;f.className=x.sScrollFoot;g.className=x.sScrollFootInner;if(a.oScroll.bAutoCss){c.style.overflow="hidden";c.style.position="relative";f.style.overflow="hidden";e.style.overflow="auto"}c.style.border="0";c.style.width="100%";f.style.border="0";d.style.width=a.oScroll.sXInner!==""?a.oScroll.sXInner:"100%";j.removeAttribute("id");j.style.marginLeft="0";a.nTable.style.marginLeft="0";if(u!==null){k.removeAttribute("id");
-k.style.marginLeft="0"}d=i(a.nTable).children("caption");if(d.length>0){d=d[0];if(d._captionSide==="top")j.appendChild(d);else d._captionSide==="bottom"&&u&&k.appendChild(d)}if(a.oScroll.sX!==""){c.style.width=t(a.oScroll.sX);e.style.width=t(a.oScroll.sX);if(u!==null)f.style.width=t(a.oScroll.sX);i(e).scroll(function(){c.scrollLeft=this.scrollLeft;if(u!==null)f.scrollLeft=this.scrollLeft})}if(a.oScroll.sY!=="")e.style.height=t(a.oScroll.sY);a.aoDrawCallback.push({fn:$a,sName:"scrolling"});a.oScroll.bInfinite&&
-i(e).scroll(function(){if(!a.bDrawing&&i(this).scrollTop()!==0)if(i(this).scrollTop()+i(this).height()>i(a.nTable).height()-a.oScroll.iLoadGap)if(a.fnDisplayEnd()<a.fnRecordsDisplay()){Ga(a,"next");I(a);H(a)}});a.nScrollHead=c;a.nScrollFoot=f;return b}function $a(a){var b=a.nScrollHead.getElementsByTagName("div")[0],c=b.getElementsByTagName("table")[0],d=a.nTable.parentNode,e,f,g,j,k,m,u,x,y=[],B=[],T=a.nTFoot!==null?a.nScrollFoot.getElementsByTagName("div")[0]:null,M=a.nTFoot!==null?T.getElementsByTagName("table")[0]:
-null,L=a.oBrowser.bScrollOversize,ja=function(z){u=z.style;u.paddingTop="0";u.paddingBottom="0";u.borderTopWidth="0";u.borderBottomWidth="0";u.height=0};i(a.nTable).children("thead, tfoot").remove();e=i(a.nTHead).clone()[0];a.nTable.insertBefore(e,a.nTable.childNodes[0]);g=a.nTHead.getElementsByTagName("tr");j=e.getElementsByTagName("tr");if(a.nTFoot!==null){k=i(a.nTFoot).clone()[0];a.nTable.insertBefore(k,a.nTable.childNodes[1]);m=a.nTFoot.getElementsByTagName("tr");k=k.getElementsByTagName("tr")}if(a.oScroll.sX===
-""){d.style.width="100%";b.parentNode.style.width="100%"}var U=Z(a,e);e=0;for(f=U.length;e<f;e++){x=v(a,e);U[e].style.width=a.aoColumns[x].sWidth}a.nTFoot!==null&&N(function(z){z.style.width=""},k);if(a.oScroll.bCollapse&&a.oScroll.sY!=="")d.style.height=d.offsetHeight+a.nTHead.offsetHeight+"px";e=i(a.nTable).outerWidth();if(a.oScroll.sX===""){a.nTable.style.width="100%";if(L&&(i("tbody",d).height()>d.offsetHeight||i(d).css("overflow-y")=="scroll"))a.nTable.style.width=t(i(a.nTable).outerWidth()-
-a.oScroll.iBarWidth)}else if(a.oScroll.sXInner!=="")a.nTable.style.width=t(a.oScroll.sXInner);else if(e==i(d).width()&&i(d).height()<i(a.nTable).height()){a.nTable.style.width=t(e-a.oScroll.iBarWidth);if(i(a.nTable).outerWidth()>e-a.oScroll.iBarWidth)a.nTable.style.width=t(e)}else a.nTable.style.width=t(e);e=i(a.nTable).outerWidth();N(ja,j);N(function(z){y.push(t(i(z).width()))},j);N(function(z,Q){z.style.width=y[Q]},g);i(j).height(0);if(a.nTFoot!==null){N(ja,k);N(function(z){B.push(t(i(z).width()))},
-k);N(function(z,Q){z.style.width=B[Q]},m);i(k).height(0)}N(function(z,Q){z.innerHTML="";z.style.width=y[Q]},j);a.nTFoot!==null&&N(function(z,Q){z.innerHTML="";z.style.width=B[Q]},k);if(i(a.nTable).outerWidth()<e){g=d.scrollHeight>d.offsetHeight||i(d).css("overflow-y")=="scroll"?e+a.oScroll.iBarWidth:e;if(L&&(d.scrollHeight>d.offsetHeight||i(d).css("overflow-y")=="scroll"))a.nTable.style.width=t(g-a.oScroll.iBarWidth);d.style.width=t(g);a.nScrollHead.style.width=t(g);if(a.nTFoot!==null)a.nScrollFoot.style.width=
-t(g);if(a.oScroll.sX==="")O(a,1,"The table cannot fit into the current element which will cause column misalignment. The table has been drawn at its minimum possible width.");else a.oScroll.sXInner!==""&&O(a,1,"The table cannot fit into the current element which will cause column misalignment. Increase the sScrollXInner value or remove it to allow automatic calculation")}else{d.style.width=t("100%");a.nScrollHead.style.width=t("100%");if(a.nTFoot!==null)a.nScrollFoot.style.width=t("100%")}if(a.oScroll.sY===
-"")if(L)d.style.height=t(a.nTable.offsetHeight+a.oScroll.iBarWidth);if(a.oScroll.sY!==""&&a.oScroll.bCollapse){d.style.height=t(a.oScroll.sY);L=a.oScroll.sX!==""&&a.nTable.offsetWidth>d.offsetWidth?a.oScroll.iBarWidth:0;if(a.nTable.offsetHeight<d.offsetHeight)d.style.height=t(a.nTable.offsetHeight+L)}L=i(a.nTable).outerWidth();c.style.width=t(L);b.style.width=t(L);c=i(a.nTable).height()>d.clientHeight||i(d).css("overflow-y")=="scroll";b.style.paddingRight=c?a.oScroll.iBarWidth+"px":"0px";if(a.nTFoot!==
-null){M.style.width=t(L);T.style.width=t(L);T.style.paddingRight=c?a.oScroll.iBarWidth+"px":"0px"}i(d).scroll();if(a.bSorted||a.bFiltered)d.scrollTop=0}function N(a,b,c){for(var d=0,e=0,f=b.length,g,j;e<f;){g=b[e].firstChild;for(j=c?c[e].firstChild:null;g;){if(g.nodeType===1){c?a(g,j,d):a(g,d);d++}g=g.nextSibling;j=c?j.nextSibling:null}e++}}function ab(a,b){if(!a||a===null||a==="")return 0;if(!b)b=s.body;var c=s.createElement("div");c.style.width=t(a);b.appendChild(c);a=c.offsetWidth;b.removeChild(c);
-return a}function ta(a){var b=0,c,d=0,e=a.aoColumns.length,f,g,j=i("th",a.nTHead),k=a.nTable.getAttribute("width");g=a.nTable.parentNode;for(f=0;f<e;f++)if(a.aoColumns[f].bVisible){d++;if(a.aoColumns[f].sWidth!==null){c=ab(a.aoColumns[f].sWidthOrig,g);if(c!==null)a.aoColumns[f].sWidth=t(c);b++}}if(e==j.length&&b===0&&d==e&&a.oScroll.sX===""&&a.oScroll.sY==="")for(f=0;f<a.aoColumns.length;f++){c=i(j[f]).width();if(c!==null)a.aoColumns[f].sWidth=t(c)}else{b=a.nTable.cloneNode(false);f=a.nTHead.cloneNode(true);
-d=s.createElement("tbody");c=s.createElement("tr");b.removeAttribute("id");b.appendChild(f);if(a.nTFoot!==null){b.appendChild(a.nTFoot.cloneNode(true));N(function(u){u.style.width=""},b.getElementsByTagName("tr"))}b.appendChild(d);d.appendChild(c);d=i("thead th",b);if(d.length===0)d=i("tbody tr:eq(0)>td",b);j=Z(a,f);for(f=d=0;f<e;f++){var m=a.aoColumns[f];if(m.bVisible&&m.sWidthOrig!==null&&m.sWidthOrig!=="")j[f-d].style.width=t(m.sWidthOrig);else if(m.bVisible)j[f-d].style.width="";else d++}for(f=
-0;f<e;f++)if(a.aoColumns[f].bVisible){d=bb(a,f);if(d!==null){d=d.cloneNode(true);if(a.aoColumns[f].sContentPadding!=="")d.innerHTML+=a.aoColumns[f].sContentPadding;c.appendChild(d)}}g.appendChild(b);if(a.oScroll.sX!==""&&a.oScroll.sXInner!=="")b.style.width=t(a.oScroll.sXInner);else if(a.oScroll.sX!==""){b.style.width="";if(i(b).width()<g.offsetWidth)b.style.width=t(g.offsetWidth)}else if(a.oScroll.sY!=="")b.style.width=t(g.offsetWidth);else if(k)b.style.width=t(k);b.style.visibility="hidden";cb(a,
-b);e=i("tbody tr:eq(0)",b).children();if(e.length===0)e=Z(a,i("thead",b)[0]);if(a.oScroll.sX!==""){for(f=d=g=0;f<a.aoColumns.length;f++)if(a.aoColumns[f].bVisible){g+=a.aoColumns[f].sWidthOrig===null?i(e[d]).outerWidth():parseInt(a.aoColumns[f].sWidth.replace("px",""),10)+(i(e[d]).outerWidth()-i(e[d]).width());d++}b.style.width=t(g);a.nTable.style.width=t(g)}for(f=d=0;f<a.aoColumns.length;f++)if(a.aoColumns[f].bVisible){g=i(e[d]).width();if(g!==null&&g>0)a.aoColumns[f].sWidth=t(g);d++}e=i(b).css("width");
-a.nTable.style.width=e.indexOf("%")!==-1?e:t(i(b).outerWidth());b.parentNode.removeChild(b)}if(k)a.nTable.style.width=t(k)}function cb(a,b){if(a.oScroll.sX===""&&a.oScroll.sY!==""){i(b).width();b.style.width=t(i(b).outerWidth()-a.oScroll.iBarWidth)}else if(a.oScroll.sX!=="")b.style.width=t(i(b).outerWidth())}function bb(a,b){var c=db(a,b);if(c<0)return null;if(a.aoData[c].nTr===null){var d=s.createElement("td");d.innerHTML=F(a,c,b,"");return d}return W(a,c)[b]}function db(a,b){for(var c=-1,d=-1,e=
-0;e<a.aoData.length;e++){var f=F(a,e,b,"display")+"";f=f.replace(/<.*?>/g,"");if(f.length>c){c=f.length;d=e}}return d}function t(a){if(a===null)return"0px";if(typeof a=="number"){if(a<0)return"0px";return a+"px"}var b=a.charCodeAt(a.length-1);if(b<48||b>57)return a;return a+"px"}function eb(){var a=s.createElement("p"),b=a.style;b.width="100%";b.height="200px";b.padding="0px";var c=s.createElement("div");b=c.style;b.position="absolute";b.top="0px";b.left="0px";b.visibility="hidden";b.width="200px";
-b.height="150px";b.padding="0px";b.overflow="hidden";c.appendChild(a);s.body.appendChild(c);b=a.offsetWidth;c.style.overflow="scroll";a=a.offsetWidth;if(b==a)a=c.clientWidth;s.body.removeChild(c);return b-a}function $(a,b){var c,d,e,f,g,j,k=[],m=[],u=l.ext.oSort,x=a.aoData,y=a.aoColumns,B=a.oLanguage.oAria;if(!a.oFeatures.bServerSide&&(a.aaSorting.length!==0||a.aaSortingFixed!==null)){k=a.aaSortingFixed!==null?a.aaSortingFixed.concat(a.aaSorting):a.aaSorting.slice();for(c=0;c<k.length;c++){d=k[c][0];
-e=w(a,d);f=a.aoColumns[d].sSortDataType;if(l.ext.afnSortData[f]){g=l.ext.afnSortData[f].call(a.oInstance,a,d,e);if(g.length===x.length){e=0;for(f=x.length;e<f;e++)S(a,e,d,g[e])}else O(a,0,"Returned data sort array (col "+d+") is the wrong length")}}c=0;for(d=a.aiDisplayMaster.length;c<d;c++)m[a.aiDisplayMaster[c]]=c;var T=k.length,M;c=0;for(d=x.length;c<d;c++)for(e=0;e<T;e++){M=y[k[e][0]].aDataSort;g=0;for(j=M.length;g<j;g++){f=y[M[g]].sType;f=u[(f?f:"string")+"-pre"];x[c]._aSortData[M[g]]=f?f(F(a,
-c,M[g],"sort")):F(a,c,M[g],"sort")}}a.aiDisplayMaster.sort(function(L,ja){var U,z,Q,aa,ka;for(U=0;U<T;U++){ka=y[k[U][0]].aDataSort;z=0;for(Q=ka.length;z<Q;z++){aa=y[ka[z]].sType;aa=u[(aa?aa:"string")+"-"+k[U][1]](x[L]._aSortData[ka[z]],x[ja]._aSortData[ka[z]]);if(aa!==0)return aa}}return u["numeric-asc"](m[L],m[ja])})}if((b===p||b)&&!a.oFeatures.bDeferRender)ba(a);c=0;for(d=a.aoColumns.length;c<d;c++){e=y[c].sTitle.replace(/<.*?>/g,"");b=y[c].nTh;b.removeAttribute("aria-sort");b.removeAttribute("aria-label");
-if(y[c].bSortable)if(k.length>0&&k[0][0]==c){b.setAttribute("aria-sort",k[0][1]=="asc"?"ascending":"descending");b.setAttribute("aria-label",e+((y[c].asSorting[k[0][2]+1]?y[c].asSorting[k[0][2]+1]:y[c].asSorting[0])=="asc"?B.sSortAscending:B.sSortDescending))}else b.setAttribute("aria-label",e+(y[c].asSorting[0]=="asc"?B.sSortAscending:B.sSortDescending));else b.setAttribute("aria-label",e)}a.bSorted=true;i(a.oInstance).trigger("sort",a);if(a.oFeatures.bFilter)X(a,a.oPreviousSearch,1);else{a.aiDisplay=
-a.aiDisplayMaster.slice();a._iDisplayStart=0;I(a);H(a)}}function ya(a,b,c,d){fb(b,{},function(e){if(a.aoColumns[c].bSortable!==false){var f=function(){var g,j;if(e.shiftKey){for(var k=false,m=0;m<a.aaSorting.length;m++)if(a.aaSorting[m][0]==c){k=true;g=a.aaSorting[m][0];j=a.aaSorting[m][2]+1;if(a.aoColumns[g].asSorting[j]){a.aaSorting[m][1]=a.aoColumns[g].asSorting[j];a.aaSorting[m][2]=j}else a.aaSorting.splice(m,1);break}k===false&&a.aaSorting.push([c,a.aoColumns[c].asSorting[0],0])}else if(a.aaSorting.length==
-1&&a.aaSorting[0][0]==c){g=a.aaSorting[0][0];j=a.aaSorting[0][2]+1;a.aoColumns[g].asSorting[j]||(j=0);a.aaSorting[0][1]=a.aoColumns[g].asSorting[j];a.aaSorting[0][2]=j}else{a.aaSorting.splice(0,a.aaSorting.length);a.aaSorting.push([c,a.aoColumns[c].asSorting[0],0])}$(a)};if(a.oFeatures.bProcessing){P(a,true);setTimeout(function(){f();a.oFeatures.bServerSide||P(a,false)},0)}else f();typeof d=="function"&&d(a)}})}function ba(a){var b,c,d,e,f,g=a.aoColumns.length,j=a.oClasses;for(b=0;b<g;b++)a.aoColumns[b].bSortable&&
-i(a.aoColumns[b].nTh).removeClass(j.sSortAsc+" "+j.sSortDesc+" "+a.aoColumns[b].sSortingClass);c=a.aaSortingFixed!==null?a.aaSortingFixed.concat(a.aaSorting):a.aaSorting.slice();for(b=0;b<a.aoColumns.length;b++)if(a.aoColumns[b].bSortable){f=a.aoColumns[b].sSortingClass;e=-1;for(d=0;d<c.length;d++)if(c[d][0]==b){f=c[d][1]=="asc"?j.sSortAsc:j.sSortDesc;e=d;break}i(a.aoColumns[b].nTh).addClass(f);if(a.bJUI){f=i("span."+j.sSortIcon,a.aoColumns[b].nTh);f.removeClass(j.sSortJUIAsc+" "+j.sSortJUIDesc+" "+
-j.sSortJUI+" "+j.sSortJUIAscAllowed+" "+j.sSortJUIDescAllowed);f.addClass(e==-1?a.aoColumns[b].sSortingClassJUI:c[e][1]=="asc"?j.sSortJUIAsc:j.sSortJUIDesc)}}else i(a.aoColumns[b].nTh).addClass(a.aoColumns[b].sSortingClass);f=j.sSortColumn;if(a.oFeatures.bSort&&a.oFeatures.bSortClasses){a=W(a);e=[];for(b=0;b<g;b++)e.push("");b=0;for(d=1;b<c.length;b++){j=parseInt(c[b][0],10);e[j]=f+d;d<3&&d++}f=new RegExp(f+"[123]");var k;b=0;for(c=a.length;b<c;b++){j=b%g;d=a[b].className;k=e[j];j=d.replace(f,k);
-if(j!=d)a[b].className=i.trim(j);else if(k.length>0&&d.indexOf(k)==-1)a[b].className=d+" "+k}}}function Ha(a){if(!(!a.oFeatures.bStateSave||a.bDestroying)){var b,c;b=a.oScroll.bInfinite;var d={iCreate:(new Date).getTime(),iStart:b?0:a._iDisplayStart,iEnd:b?a._iDisplayLength:a._iDisplayEnd,iLength:a._iDisplayLength,aaSorting:i.extend(true,[],a.aaSorting),oSearch:i.extend(true,{},a.oPreviousSearch),aoSearchCols:i.extend(true,[],a.aoPreSearchCols),abVisCols:[]};b=0;for(c=a.aoColumns.length;b<c;b++)d.abVisCols.push(a.aoColumns[b].bVisible);
-K(a,"aoStateSaveParams","stateSaveParams",[a,d]);a.fnStateSave.call(a.oInstance,a,d)}}function gb(a,b){if(a.oFeatures.bStateSave){var c=a.fnStateLoad.call(a.oInstance,a);if(c){var d=K(a,"aoStateLoadParams","stateLoadParams",[a,c]);if(i.inArray(false,d)===-1){a.oLoadedState=i.extend(true,{},c);a._iDisplayStart=c.iStart;a.iInitDisplayStart=c.iStart;a._iDisplayEnd=c.iEnd;a._iDisplayLength=c.iLength;a.aaSorting=c.aaSorting.slice();a.saved_aaSorting=c.aaSorting.slice();i.extend(a.oPreviousSearch,c.oSearch);
-i.extend(true,a.aoPreSearchCols,c.aoSearchCols);b.saved_aoColumns=[];for(d=0;d<c.abVisCols.length;d++){b.saved_aoColumns[d]={};b.saved_aoColumns[d].bVisible=c.abVisCols[d]}K(a,"aoStateLoaded","stateLoaded",[a,c])}}}}function lb(a,b,c,d,e){var f=new Date;f.setTime(f.getTime()+c*1E3);c=la.location.pathname.split("/");a=a+"_"+c.pop().replace(/[\/:]/g,"").toLowerCase();var g;if(e!==null){g=typeof i.parseJSON==="function"?i.parseJSON(b):eval("("+b+")");b=e(a,g,f.toGMTString(),c.join("/")+"/")}else b=a+
-"="+encodeURIComponent(b)+"; expires="+f.toGMTString()+"; path="+c.join("/")+"/";a=s.cookie.split(";");e=b.split(";")[0].length;f=[];if(e+s.cookie.length+10>4096){for(var j=0,k=a.length;j<k;j++)if(a[j].indexOf(d)!=-1){var m=a[j].split("=");try{(g=eval("("+decodeURIComponent(m[1])+")"))&&g.iCreate&&f.push({name:m[0],time:g.iCreate})}catch(u){}}for(f.sort(function(x,y){return y.time-x.time});e+s.cookie.length+10>4096;){if(f.length===0)return;d=f.pop();s.cookie=d.name+"=; expires=Thu, 01-Jan-1970 00:00:01 GMT; path="+
-c.join("/")+"/"}}s.cookie=b}function mb(a){var b=la.location.pathname.split("/");a=a+"_"+b[b.length-1].replace(/[\/:]/g,"").toLowerCase()+"=";b=s.cookie.split(";");for(var c=0;c<b.length;c++){for(var d=b[c];d.charAt(0)==" ";)d=d.substring(1,d.length);if(d.indexOf(a)===0)return decodeURIComponent(d.substring(a.length,d.length))}return null}function C(a){for(var b=0;b<l.settings.length;b++)if(l.settings[b].nTable===a)return l.settings[b];return null}function fa(a){var b=[];a=a.aoData;for(var c=0,d=
-a.length;c<d;c++)a[c].nTr!==null&&b.push(a[c].nTr);return b}function W(a,b){var c=[],d,e,f,g,j;e=0;var k=a.aoData.length;if(b!==p){e=b;k=b+1}for(e=e;e<k;e++){j=a.aoData[e];if(j.nTr!==null){b=[];for(d=j.nTr.firstChild;d;){f=d.nodeName.toLowerCase();if(f=="td"||f=="th")b.push(d);d=d.nextSibling}f=d=0;for(g=a.aoColumns.length;f<g;f++)if(a.aoColumns[f].bVisible)c.push(b[f-d]);else{c.push(j._anHidden[f]);d++}}}return c}function O(a,b,c){a=a===null?"DataTables warning: "+c:"DataTables warning (table id = '"+
-a.sTableId+"'): "+c;if(b===0)if(l.ext.sErrMode=="alert")alert(a);else throw new Error(a);else la.console&&console.log&&console.log(a)}function r(a,b,c,d){if(d===p)d=c;if(b[c]!==p)a[d]=b[c]}function hb(a,b){var c;for(var d in b)if(b.hasOwnProperty(d)){c=b[d];if(typeof h[d]==="object"&&c!==null&&i.isArray(c)===false)i.extend(true,a[d],c);else a[d]=c}return a}function fb(a,b,c){i(a).bind("click.DT",b,function(d){a.blur();c(d)}).bind("keypress.DT",b,function(d){d.which===13&&c(d)}).bind("selectstart.DT",
-function(){return false})}function J(a,b,c,d){c&&a[b].push({fn:c,sName:d})}function K(a,b,c,d){b=a[b];for(var e=[],f=b.length-1;f>=0;f--)e.push(b[f].fn.apply(a.oInstance,d));c!==null&&i(a.oInstance).trigger(c,d);return e}function ib(a){var b=i('<div style="position:absolute; top:0; left:0; height:1px; width:1px; overflow:hidden"><div style="position:absolute; top:1px; left:1px; width:100px; overflow:scroll;"><div id="DT_BrowserTest" style="width:100%; height:10px;"></div></div></div>')[0];s.body.appendChild(b);
-a.oBrowser.bScrollOversize=i("#DT_BrowserTest",b)[0].offsetWidth===100?true:false;s.body.removeChild(b)}function jb(a){return function(){var b=[C(this[l.ext.iApiIndex])].concat(Array.prototype.slice.call(arguments));return l.ext.oApi[a].apply(this,b)}}var ga=/\[.*?\]$/,kb=la.JSON?JSON.stringify:function(a){var b=typeof a;if(b!=="object"||a===null){if(b==="string")a='"'+a+'"';return a+""}var c,d,e=[],f=i.isArray(a);for(c in a){d=a[c];b=typeof d;if(b==="string")d='"'+d+'"';else if(b==="object"&&d!==
-null)d=kb(d);e.push((f?"":'"'+c+'":')+d)}return(f?"[":"{")+e+(f?"]":"}")};this.$=function(a,b){var c,d=[],e;c=C(this[l.ext.iApiIndex]);var f=c.aoData,g=c.aiDisplay,j=c.aiDisplayMaster;b||(b={});b=i.extend({},{filter:"none",order:"current",page:"all"},b);if(b.page=="current"){b=c._iDisplayStart;for(c=c.fnDisplayEnd();b<c;b++)(e=f[g[b]].nTr)&&d.push(e)}else if(b.order=="current"&&b.filter=="none"){b=0;for(c=j.length;b<c;b++)(e=f[j[b]].nTr)&&d.push(e)}else if(b.order=="current"&&b.filter=="applied"){b=
-0;for(c=g.length;b<c;b++)(e=f[g[b]].nTr)&&d.push(e)}else if(b.order=="original"&&b.filter=="none"){b=0;for(c=f.length;b<c;b++)(e=f[b].nTr)&&d.push(e)}else if(b.order=="original"&&b.filter=="applied"){b=0;for(c=f.length;b<c;b++){e=f[b].nTr;i.inArray(b,g)!==-1&&e&&d.push(e)}}else O(c,1,"Unknown selection options");f=i(d);d=f.filter(a);a=f.find(a);return i([].concat(i.makeArray(d),i.makeArray(a)))};this._=function(a,b){var c=[],d=this.$(a,b);a=0;for(b=d.length;a<b;a++)c.push(this.fnGetData(d[a]));return c};
-this.fnAddData=function(a,b){if(a.length===0)return[];var c=[],d,e=C(this[l.ext.iApiIndex]);if(typeof a[0]==="object"&&a[0]!==null)for(var f=0;f<a.length;f++){d=R(e,a[f]);if(d==-1)return c;c.push(d)}else{d=R(e,a);if(d==-1)return c;c.push(d)}e.aiDisplay=e.aiDisplayMaster.slice();if(b===p||b)qa(e);return c};this.fnAdjustColumnSizing=function(a){var b=C(this[l.ext.iApiIndex]);o(b);if(a===p||a)this.fnDraw(false);else if(b.oScroll.sX!==""||b.oScroll.sY!=="")this.oApi._fnScrollDraw(b)};this.fnClearTable=
-function(a){var b=C(this[l.ext.iApiIndex]);wa(b);if(a===p||a)H(b)};this.fnClose=function(a){for(var b=C(this[l.ext.iApiIndex]),c=0;c<b.aoOpenRows.length;c++)if(b.aoOpenRows[c].nParent==a){(a=b.aoOpenRows[c].nTr.parentNode)&&a.removeChild(b.aoOpenRows[c].nTr);b.aoOpenRows.splice(c,1);return 0}return 1};this.fnDeleteRow=function(a,b,c){var d=C(this[l.ext.iApiIndex]),e,f;a=typeof a==="object"?V(d,a):a;var g=d.aoData.splice(a,1);e=0;for(f=d.aoData.length;e<f;e++)if(d.aoData[e].nTr!==null)d.aoData[e].nTr._DT_RowIndex=
-e;e=i.inArray(a,d.aiDisplay);d.asDataSearch.splice(e,1);xa(d.aiDisplayMaster,a);xa(d.aiDisplay,a);typeof b==="function"&&b.call(this,d,g);if(d._iDisplayStart>=d.fnRecordsDisplay()){d._iDisplayStart-=d._iDisplayLength;if(d._iDisplayStart<0)d._iDisplayStart=0}if(c===p||c){I(d);H(d)}return g};this.fnDestroy=function(a){var b=C(this[l.ext.iApiIndex]),c=b.nTableWrapper.parentNode,d=b.nTBody,e,f;a=a===p?false:a;b.bDestroying=true;K(b,"aoDestroyCallback","destroy",[b]);if(!a){e=0;for(f=b.aoColumns.length;e<
-f;e++)b.aoColumns[e].bVisible===false&&this.fnSetColumnVis(e,true)}i(b.nTableWrapper).find("*").andSelf().unbind(".DT");i("tbody>tr>td."+b.oClasses.sRowEmpty,b.nTable).parent().remove();if(b.nTable!=b.nTHead.parentNode){i(b.nTable).children("thead").remove();b.nTable.appendChild(b.nTHead)}if(b.nTFoot&&b.nTable!=b.nTFoot.parentNode){i(b.nTable).children("tfoot").remove();b.nTable.appendChild(b.nTFoot)}b.nTable.parentNode.removeChild(b.nTable);i(b.nTableWrapper).remove();b.aaSorting=[];b.aaSortingFixed=
-[];ba(b);i(fa(b)).removeClass(b.asStripeClasses.join(" "));i("th, td",b.nTHead).removeClass([b.oClasses.sSortable,b.oClasses.sSortableAsc,b.oClasses.sSortableDesc,b.oClasses.sSortableNone].join(" "));if(b.bJUI){i("th span."+b.oClasses.sSortIcon+", td span."+b.oClasses.sSortIcon,b.nTHead).remove();i("th, td",b.nTHead).each(function(){var g=i("div."+b.oClasses.sSortJUIWrapper,this),j=g.contents();i(this).append(j);g.remove()})}if(!a&&b.nTableReinsertBefore)c.insertBefore(b.nTable,b.nTableReinsertBefore);
-else a||c.appendChild(b.nTable);e=0;for(f=b.aoData.length;e<f;e++)b.aoData[e].nTr!==null&&d.appendChild(b.aoData[e].nTr);if(b.oFeatures.bAutoWidth===true)b.nTable.style.width=t(b.sDestroyWidth);if(f=b.asDestroyStripes.length){a=i(d).children("tr");for(e=0;e<f;e++)a.filter(":nth-child("+f+"n + "+e+")").addClass(b.asDestroyStripes[e])}e=0;for(f=l.settings.length;e<f;e++)l.settings[e]==b&&l.settings.splice(e,1);h=b=null};this.fnDraw=function(a){var b=C(this[l.ext.iApiIndex]);if(a===false){I(b);H(b)}else qa(b)};
-this.fnFilter=function(a,b,c,d,e,f){var g=C(this[l.ext.iApiIndex]);if(g.oFeatures.bFilter){if(c===p||c===null)c=false;if(d===p||d===null)d=true;if(e===p||e===null)e=true;if(f===p||f===null)f=true;if(b===p||b===null){X(g,{sSearch:a+"",bRegex:c,bSmart:d,bCaseInsensitive:f},1);if(e&&g.aanFeatures.f){b=g.aanFeatures.f;c=0;for(d=b.length;c<d;c++)try{b[c]._DT_Input!=s.activeElement&&i(b[c]._DT_Input).val(a)}catch(j){i(b[c]._DT_Input).val(a)}}}else{i.extend(g.aoPreSearchCols[b],{sSearch:a+"",bRegex:c,bSmart:d,
-bCaseInsensitive:f});X(g,g.oPreviousSearch,1)}}};this.fnGetData=function(a,b){var c=C(this[l.ext.iApiIndex]);if(a!==p){var d=a;if(typeof a==="object"){var e=a.nodeName.toLowerCase();if(e==="tr")d=V(c,a);else if(e==="td"){d=V(c,a.parentNode);b=va(c,d,a)}}if(b!==p)return F(c,d,b,"");return c.aoData[d]!==p?c.aoData[d]._aData:null}return oa(c)};this.fnGetNodes=function(a){var b=C(this[l.ext.iApiIndex]);if(a!==p)return b.aoData[a]!==p?b.aoData[a].nTr:null;return fa(b)};this.fnGetPosition=function(a){var b=
-C(this[l.ext.iApiIndex]),c=a.nodeName.toUpperCase();if(c=="TR")return V(b,a);else if(c=="TD"||c=="TH"){c=V(b,a.parentNode);a=va(b,c,a);return[c,w(b,a),a]}return null};this.fnIsOpen=function(a){for(var b=C(this[l.ext.iApiIndex]),c=0;c<b.aoOpenRows.length;c++)if(b.aoOpenRows[c].nParent==a)return true;return false};this.fnOpen=function(a,b,c){var d=C(this[l.ext.iApiIndex]),e=fa(d);if(i.inArray(a,e)!==-1){this.fnClose(a);e=s.createElement("tr");var f=s.createElement("td");e.appendChild(f);f.className=
-c;f.colSpan=D(d);if(typeof b==="string")f.innerHTML=b;else i(f).html(b);b=i("tr",d.nTBody);i.inArray(a,b)!=-1&&i(e).insertAfter(a);d.aoOpenRows.push({nTr:e,nParent:a});return e}};this.fnPageChange=function(a,b){var c=C(this[l.ext.iApiIndex]);Ga(c,a);I(c);if(b===p||b)H(c)};this.fnSetColumnVis=function(a,b,c){var d=C(this[l.ext.iApiIndex]),e,f,g=d.aoColumns,j=d.aoData,k,m;if(g[a].bVisible!=b){if(b){for(e=f=0;e<a;e++)g[e].bVisible&&f++;m=f>=D(d);if(!m)for(e=a;e<g.length;e++)if(g[e].bVisible){k=e;break}e=
-0;for(f=j.length;e<f;e++)if(j[e].nTr!==null)m?j[e].nTr.appendChild(j[e]._anHidden[a]):j[e].nTr.insertBefore(j[e]._anHidden[a],W(d,e)[k])}else{e=0;for(f=j.length;e<f;e++)if(j[e].nTr!==null){k=W(d,e)[a];j[e]._anHidden[a]=k;k.parentNode.removeChild(k)}}g[a].bVisible=b;ia(d,d.aoHeader);d.nTFoot&&ia(d,d.aoFooter);e=0;for(f=d.aoOpenRows.length;e<f;e++)d.aoOpenRows[e].nTr.colSpan=D(d);if(c===p||c){o(d);H(d)}Ha(d)}};this.fnSettings=function(){return C(this[l.ext.iApiIndex])};this.fnSort=function(a){var b=
-C(this[l.ext.iApiIndex]);b.aaSorting=a;$(b)};this.fnSortListener=function(a,b,c){ya(C(this[l.ext.iApiIndex]),a,b,c)};this.fnUpdate=function(a,b,c,d,e){var f=C(this[l.ext.iApiIndex]);b=typeof b==="object"?V(f,b):b;if(i.isArray(a)&&c===p){f.aoData[b]._aData=a.slice();for(c=0;c<f.aoColumns.length;c++)this.fnUpdate(F(f,b,c),b,c,false,false)}else if(i.isPlainObject(a)&&c===p){f.aoData[b]._aData=i.extend(true,{},a);for(c=0;c<f.aoColumns.length;c++)this.fnUpdate(F(f,b,c),b,c,false,false)}else{S(f,b,c,a);
-a=F(f,b,c,"display");var g=f.aoColumns[c];if(g.fnRender!==null){a=da(f,b,c);g.bUseRendered&&S(f,b,c,a)}if(f.aoData[b].nTr!==null)W(f,b)[c].innerHTML=a}c=i.inArray(b,f.aiDisplay);f.asDataSearch[c]=Da(f,na(f,b,"filter",A(f,"bSearchable")));if(e===p||e)o(f);if(d===p||d)qa(f);return 0};this.fnVersionCheck=l.ext.fnVersionCheck;this.oApi={_fnExternApiFunc:jb,_fnInitialise:ra,_fnInitComplete:pa,_fnLanguageCompat:Fa,_fnAddColumn:n,_fnColumnOptions:q,_fnAddData:R,_fnCreateTr:ua,_fnGatherData:ea,_fnBuildHead:Ka,
-_fnDrawHead:ia,_fnDraw:H,_fnReDraw:qa,_fnAjaxUpdate:La,_fnAjaxParameters:Ta,_fnAjaxUpdateDraw:Ua,_fnServerParams:Aa,_fnAddOptionsHtml:Ma,_fnFeatureHtmlTable:Qa,_fnScrollDraw:$a,_fnAdjustColumnSizing:o,_fnFeatureHtmlFilter:Oa,_fnFilterComplete:X,_fnFilterCustom:Xa,_fnFilterColumn:Wa,_fnFilter:Va,_fnBuildSearchArray:Ba,_fnBuildSearchRow:Da,_fnFilterCreateSearch:Ca,_fnDataToSearch:Ya,_fnSort:$,_fnSortAttachListener:ya,_fnSortingClasses:ba,_fnFeatureHtmlPaginate:Sa,_fnPageChange:Ga,_fnFeatureHtmlInfo:Ra,
-_fnUpdateInfo:Za,_fnFeatureHtmlLength:Na,_fnFeatureHtmlProcessing:Pa,_fnProcessingDisplay:P,_fnVisibleToColumnIndex:v,_fnColumnIndexToVisible:w,_fnNodeToDataIndex:V,_fnVisbleColumns:D,_fnCalculateEnd:I,_fnConvertToWidth:ab,_fnCalculateColumnWidths:ta,_fnScrollingWidthAdjust:cb,_fnGetWidestNode:bb,_fnGetMaxLenString:db,_fnStringToCss:t,_fnDetectType:G,_fnSettingsFromNode:C,_fnGetDataMaster:oa,_fnGetTrNodes:fa,_fnGetTdNodes:W,_fnEscapeRegex:Ea,_fnDeleteIndex:xa,_fnReOrderIndex:E,_fnColumnOrdering:Y,
-_fnLog:O,_fnClearTable:wa,_fnSaveState:Ha,_fnLoadState:gb,_fnCreateCookie:lb,_fnReadCookie:mb,_fnDetectHeader:ha,_fnGetUniqueThs:Z,_fnScrollBarWidth:eb,_fnApplyToChildren:N,_fnMap:r,_fnGetRowData:na,_fnGetCellData:F,_fnSetCellData:S,_fnGetObjectDataFn:ca,_fnSetObjectDataFn:Ja,_fnApplyColumnDefs:ma,_fnBindAction:fb,_fnExtend:hb,_fnCallbackReg:J,_fnCallbackFire:K,_fnJsonString:kb,_fnRender:da,_fnNodeToColumnIndex:va,_fnInfoMacros:za,_fnBrowserDetect:ib,_fnGetColumns:A};i.extend(l.ext.oApi,this.oApi);
-for(var Ia in l.ext.oApi)if(Ia)this[Ia]=jb(Ia);var sa=this;this.each(function(){var a=0,b,c,d;c=this.getAttribute("id");var e=false,f=false;if(this.nodeName.toLowerCase()!="table")O(null,0,"Attempted to initialise DataTables on a node which is not a table: "+this.nodeName);else{a=0;for(b=l.settings.length;a<b;a++){if(l.settings[a].nTable==this)if(h===p||h.bRetrieve)return l.settings[a].oInstance;else if(h.bDestroy){l.settings[a].oInstance.fnDestroy();break}else{O(l.settings[a],0,"Cannot reinitialise DataTable.\n\nTo retrieve the DataTables object for this table, pass no arguments or see the docs for bRetrieve and bDestroy");
-return}if(l.settings[a].sTableId==this.id){l.settings.splice(a,1);break}}if(c===null||c==="")this.id=c="DataTables_Table_"+l.ext._oExternConfig.iNextUnique++;var g=i.extend(true,{},l.models.oSettings,{nTable:this,oApi:sa.oApi,oInit:h,sDestroyWidth:i(this).width(),sInstance:c,sTableId:c});l.settings.push(g);g.oInstance=sa.length===1?sa:i(this).dataTable();h||(h={});h.oLanguage&&Fa(h.oLanguage);h=hb(i.extend(true,{},l.defaults),h);r(g.oFeatures,h,"bPaginate");r(g.oFeatures,h,"bLengthChange");r(g.oFeatures,
-h,"bFilter");r(g.oFeatures,h,"bSort");r(g.oFeatures,h,"bInfo");r(g.oFeatures,h,"bProcessing");r(g.oFeatures,h,"bAutoWidth");r(g.oFeatures,h,"bSortClasses");r(g.oFeatures,h,"bServerSide");r(g.oFeatures,h,"bDeferRender");r(g.oScroll,h,"sScrollX","sX");r(g.oScroll,h,"sScrollXInner","sXInner");r(g.oScroll,h,"sScrollY","sY");r(g.oScroll,h,"bScrollCollapse","bCollapse");r(g.oScroll,h,"bScrollInfinite","bInfinite");r(g.oScroll,h,"iScrollLoadGap","iLoadGap");r(g.oScroll,h,"bScrollAutoCss","bAutoCss");r(g,
-h,"asStripeClasses");r(g,h,"asStripClasses","asStripeClasses");r(g,h,"fnServerData");r(g,h,"fnFormatNumber");r(g,h,"sServerMethod");r(g,h,"aaSorting");r(g,h,"aaSortingFixed");r(g,h,"aLengthMenu");r(g,h,"sPaginationType");r(g,h,"sAjaxSource");r(g,h,"sAjaxDataProp");r(g,h,"iCookieDuration");r(g,h,"sCookiePrefix");r(g,h,"sDom");r(g,h,"bSortCellsTop");r(g,h,"iTabIndex");r(g,h,"oSearch","oPreviousSearch");r(g,h,"aoSearchCols","aoPreSearchCols");r(g,h,"iDisplayLength","_iDisplayLength");r(g,h,"bJQueryUI",
-"bJUI");r(g,h,"fnCookieCallback");r(g,h,"fnStateLoad");r(g,h,"fnStateSave");r(g.oLanguage,h,"fnInfoCallback");J(g,"aoDrawCallback",h.fnDrawCallback,"user");J(g,"aoServerParams",h.fnServerParams,"user");J(g,"aoStateSaveParams",h.fnStateSaveParams,"user");J(g,"aoStateLoadParams",h.fnStateLoadParams,"user");J(g,"aoStateLoaded",h.fnStateLoaded,"user");J(g,"aoRowCallback",h.fnRowCallback,"user");J(g,"aoRowCreatedCallback",h.fnCreatedRow,"user");J(g,"aoHeaderCallback",h.fnHeaderCallback,"user");J(g,"aoFooterCallback",
-h.fnFooterCallback,"user");J(g,"aoInitComplete",h.fnInitComplete,"user");J(g,"aoPreDrawCallback",h.fnPreDrawCallback,"user");if(g.oFeatures.bServerSide&&g.oFeatures.bSort&&g.oFeatures.bSortClasses)J(g,"aoDrawCallback",ba,"server_side_sort_classes");else g.oFeatures.bDeferRender&&J(g,"aoDrawCallback",ba,"defer_sort_classes");if(h.bJQueryUI){i.extend(g.oClasses,l.ext.oJUIClasses);if(h.sDom===l.defaults.sDom&&l.defaults.sDom==="lfrtip")g.sDom='<"H"lfr>t<"F"ip>'}else i.extend(g.oClasses,l.ext.oStdClasses);
-i(this).addClass(g.oClasses.sTable);if(g.oScroll.sX!==""||g.oScroll.sY!=="")g.oScroll.iBarWidth=eb();if(g.iInitDisplayStart===p){g.iInitDisplayStart=h.iDisplayStart;g._iDisplayStart=h.iDisplayStart}if(h.bStateSave){g.oFeatures.bStateSave=true;gb(g,h);J(g,"aoDrawCallback",Ha,"state_save")}if(h.iDeferLoading!==null){g.bDeferLoading=true;a=i.isArray(h.iDeferLoading);g._iRecordsDisplay=a?h.iDeferLoading[0]:h.iDeferLoading;g._iRecordsTotal=a?h.iDeferLoading[1]:h.iDeferLoading}if(h.aaData!==null)f=true;
-if(h.oLanguage.sUrl!==""){g.oLanguage.sUrl=h.oLanguage.sUrl;i.getJSON(g.oLanguage.sUrl,null,function(k){Fa(k);i.extend(true,g.oLanguage,h.oLanguage,k);ra(g)});e=true}else i.extend(true,g.oLanguage,h.oLanguage);if(h.asStripeClasses===null)g.asStripeClasses=[g.oClasses.sStripeOdd,g.oClasses.sStripeEven];b=g.asStripeClasses.length;g.asDestroyStripes=[];if(b){c=false;d=i(this).children("tbody").children("tr:lt("+b+")");for(a=0;a<b;a++)if(d.hasClass(g.asStripeClasses[a])){c=true;g.asDestroyStripes.push(g.asStripeClasses[a])}c&&
-d.removeClass(g.asStripeClasses.join(" "))}c=[];a=this.getElementsByTagName("thead");if(a.length!==0){ha(g.aoHeader,a[0]);c=Z(g)}if(h.aoColumns===null){d=[];a=0;for(b=c.length;a<b;a++)d.push(null)}else d=h.aoColumns;a=0;for(b=d.length;a<b;a++){if(h.saved_aoColumns!==p&&h.saved_aoColumns.length==b){if(d[a]===null)d[a]={};d[a].bVisible=h.saved_aoColumns[a].bVisible}n(g,c?c[a]:null)}ma(g,h.aoColumnDefs,d,function(k,m){q(g,k,m)});a=0;for(b=g.aaSorting.length;a<b;a++){if(g.aaSorting[a][0]>=g.aoColumns.length)g.aaSorting[a][0]=
-0;var j=g.aoColumns[g.aaSorting[a][0]];if(g.aaSorting[a][2]===p)g.aaSorting[a][2]=0;if(h.aaSorting===p&&g.saved_aaSorting===p)g.aaSorting[a][1]=j.asSorting[0];c=0;for(d=j.asSorting.length;c<d;c++)if(g.aaSorting[a][1]==j.asSorting[c]){g.aaSorting[a][2]=c;break}}ba(g);ib(g);a=i(this).children("caption").each(function(){this._captionSide=i(this).css("caption-side")});b=i(this).children("thead");if(b.length===0){b=[s.createElement("thead")];this.appendChild(b[0])}g.nTHead=b[0];b=i(this).children("tbody");
-if(b.length===0){b=[s.createElement("tbody")];this.appendChild(b[0])}g.nTBody=b[0];g.nTBody.setAttribute("role","alert");g.nTBody.setAttribute("aria-live","polite");g.nTBody.setAttribute("aria-relevant","all");b=i(this).children("tfoot");if(b.length===0&&a.length>0&&(g.oScroll.sX!==""||g.oScroll.sY!=="")){b=[s.createElement("tfoot")];this.appendChild(b[0])}if(b.length>0){g.nTFoot=b[0];ha(g.aoFooter,g.nTFoot)}if(f)for(a=0;a<h.aaData.length;a++)R(g,h.aaData[a]);else ea(g);g.aiDisplay=g.aiDisplayMaster.slice();
-g.bInitialised=true;e===false&&ra(g)}});sa=null;return this};l.fnVersionCheck=function(h){var n=function(A,G){for(;A.length<G;)A+="0";return A},q=l.ext.sVersion.split(".");h=h.split(".");for(var o="",v="",w=0,D=h.length;w<D;w++){o+=n(q[w],3);v+=n(h[w],3)}return parseInt(o,10)>=parseInt(v,10)};l.fnIsDataTable=function(h){for(var n=l.settings,q=0;q<n.length;q++)if(n[q].nTable===h||n[q].nScrollHead===h||n[q].nScrollFoot===h)return true;return false};l.fnTables=function(h){var n=[];jQuery.each(l.settings,
-function(q,o){if(!h||h===true&&i(o.nTable).is(":visible"))n.push(o.nTable)});return n};l.version="1.9.4";l.settings=[];l.models={};l.models.ext={afnFiltering:[],afnSortData:[],aoFeatures:[],aTypes:[],fnVersionCheck:l.fnVersionCheck,iApiIndex:0,ofnSearch:{},oApi:{},oStdClasses:{},oJUIClasses:{},oPagination:{},oSort:{},sVersion:l.version,sErrMode:"alert",_oExternConfig:{iNextUnique:0}};l.models.oSearch={bCaseInsensitive:true,sSearch:"",bRegex:false,bSmart:true};l.models.oRow={nTr:null,_aData:[],_aSortData:[],
-_anHidden:[],_sRowStripe:""};l.models.oColumn={aDataSort:null,asSorting:null,bSearchable:null,bSortable:null,bUseRendered:null,bVisible:null,_bAutoType:true,fnCreatedCell:null,fnGetData:null,fnRender:null,fnSetData:null,mData:null,mRender:null,nTh:null,nTf:null,sClass:null,sContentPadding:null,sDefaultContent:null,sName:null,sSortDataType:"std",sSortingClass:null,sSortingClassJUI:null,sTitle:null,sType:null,sWidth:null,sWidthOrig:null};l.defaults={aaData:null,aaSorting:[[0,"asc"]],aaSortingFixed:null,
-aLengthMenu:[10,25,50,100],aoColumns:null,aoColumnDefs:null,aoSearchCols:[],asStripeClasses:null,bAutoWidth:true,bDeferRender:false,bDestroy:false,bFilter:true,bInfo:true,bJQueryUI:false,bLengthChange:true,bPaginate:true,bProcessing:false,bRetrieve:false,bScrollAutoCss:true,bScrollCollapse:false,bScrollInfinite:false,bServerSide:false,bSort:true,bSortCellsTop:false,bSortClasses:true,bStateSave:false,fnCookieCallback:null,fnCreatedRow:null,fnDrawCallback:null,fnFooterCallback:null,fnFormatNumber:function(h){if(h<
-1E3)return h;var n=h+"";h=n.split("");var q="";n=n.length;for(var o=0;o<n;o++){if(o%3===0&&o!==0)q=this.oLanguage.sInfoThousands+q;q=h[n-o-1]+q}return q},fnHeaderCallback:null,fnInfoCallback:null,fnInitComplete:null,fnPreDrawCallback:null,fnRowCallback:null,fnServerData:function(h,n,q,o){o.jqXHR=i.ajax({url:h,data:n,success:function(v){v.sError&&o.oApi._fnLog(o,0,v.sError);i(o.oInstance).trigger("xhr",[o,v]);q(v)},dataType:"json",cache:false,type:o.sServerMethod,error:function(v,w){w=="parsererror"&&
-o.oApi._fnLog(o,0,"DataTables warning: JSON data from server could not be parsed. This is caused by a JSON formatting error.")}})},fnServerParams:null,fnStateLoad:function(h){h=this.oApi._fnReadCookie(h.sCookiePrefix+h.sInstance);var n;try{n=typeof i.parseJSON==="function"?i.parseJSON(h):eval("("+h+")")}catch(q){n=null}return n},fnStateLoadParams:null,fnStateLoaded:null,fnStateSave:function(h,n){this.oApi._fnCreateCookie(h.sCookiePrefix+h.sInstance,this.oApi._fnJsonString(n),h.iCookieDuration,h.sCookiePrefix,
-h.fnCookieCallback)},fnStateSaveParams:null,iCookieDuration:7200,iDeferLoading:null,iDisplayLength:10,iDisplayStart:0,iScrollLoadGap:100,iTabIndex:0,oLanguage:{oAria:{sSortAscending:": activate to sort column ascending",sSortDescending:": activate to sort column descending"},oPaginate:{sFirst:"First",sLast:"Last",sNext:"Next",sPrevious:"Previous"},sEmptyTable:"No data available in table",sInfo:"Showing _START_ to _END_ of _TOTAL_ entries",sInfoEmpty:"Showing 0 to 0 of 0 entries",sInfoFiltered:"(filtered from _MAX_ total entries)",
-sInfoPostFix:"",sInfoThousands:",",sLengthMenu:"Show _MENU_ entries",sLoadingRecords:"Loading...",sProcessing:"Processing...",sSearch:"Search:",sUrl:"",sZeroRecords:"No matching records found"},oSearch:i.extend({},l.models.oSearch),sAjaxDataProp:"aaData",sAjaxSource:null,sCookiePrefix:"SpryMedia_DataTables_",sDom:"lfrtip",sPaginationType:"two_button",sScrollX:"",sScrollXInner:"",sScrollY:"",sServerMethod:"GET"};l.defaults.columns={aDataSort:null,asSorting:["asc","desc"],bSearchable:true,bSortable:true,
-bUseRendered:true,bVisible:true,fnCreatedCell:null,fnRender:null,iDataSort:-1,mData:null,mRender:null,sCellType:"td",sClass:"",sContentPadding:"",sDefaultContent:null,sName:"",sSortDataType:"std",sTitle:null,sType:null,sWidth:null};l.models.oSettings={oFeatures:{bAutoWidth:null,bDeferRender:null,bFilter:null,bInfo:null,bLengthChange:null,bPaginate:null,bProcessing:null,bServerSide:null,bSort:null,bSortClasses:null,bStateSave:null},oScroll:{bAutoCss:null,bCollapse:null,bInfinite:null,iBarWidth:0,iLoadGap:null,
-sX:null,sXInner:null,sY:null},oLanguage:{fnInfoCallback:null},oBrowser:{bScrollOversize:false},aanFeatures:[],aoData:[],aiDisplay:[],aiDisplayMaster:[],aoColumns:[],aoHeader:[],aoFooter:[],asDataSearch:[],oPreviousSearch:{},aoPreSearchCols:[],aaSorting:null,aaSortingFixed:null,asStripeClasses:null,asDestroyStripes:[],sDestroyWidth:0,aoRowCallback:[],aoHeaderCallback:[],aoFooterCallback:[],aoDrawCallback:[],aoRowCreatedCallback:[],aoPreDrawCallback:[],aoInitComplete:[],aoStateSaveParams:[],aoStateLoadParams:[],
-aoStateLoaded:[],sTableId:"",nTable:null,nTHead:null,nTFoot:null,nTBody:null,nTableWrapper:null,bDeferLoading:false,bInitialised:false,aoOpenRows:[],sDom:null,sPaginationType:"two_button",iCookieDuration:0,sCookiePrefix:"",fnCookieCallback:null,aoStateSave:[],aoStateLoad:[],oLoadedState:null,sAjaxSource:null,sAjaxDataProp:null,bAjaxDataGet:true,jqXHR:null,fnServerData:null,aoServerParams:[],sServerMethod:null,fnFormatNumber:null,aLengthMenu:null,iDraw:0,bDrawing:false,iDrawError:-1,_iDisplayLength:10,
-_iDisplayStart:0,_iDisplayEnd:10,_iRecordsTotal:0,_iRecordsDisplay:0,bJUI:null,oClasses:{},bFiltered:false,bSorted:false,bSortCellsTop:null,oInit:null,aoDestroyCallback:[],fnRecordsTotal:function(){return this.oFeatures.bServerSide?parseInt(this._iRecordsTotal,10):this.aiDisplayMaster.length},fnRecordsDisplay:function(){return this.oFeatures.bServerSide?parseInt(this._iRecordsDisplay,10):this.aiDisplay.length},fnDisplayEnd:function(){return this.oFeatures.bServerSide?this.oFeatures.bPaginate===false||
-this._iDisplayLength==-1?this._iDisplayStart+this.aiDisplay.length:Math.min(this._iDisplayStart+this._iDisplayLength,this._iRecordsDisplay):this._iDisplayEnd},oInstance:null,sInstance:null,iTabIndex:0,nScrollHead:null,nScrollFoot:null};l.ext=i.extend(true,{},l.models.ext);i.extend(l.ext.oStdClasses,{sTable:"dataTable",sPagePrevEnabled:"paginate_enabled_previous",sPagePrevDisabled:"paginate_disabled_previous",sPageNextEnabled:"paginate_enabled_next",sPageNextDisabled:"paginate_disabled_next",sPageJUINext:"",
-sPageJUIPrev:"",sPageButton:"paginate_button",sPageButtonActive:"paginate_active",sPageButtonStaticDisabled:"paginate_button paginate_button_disabled",sPageFirst:"first",sPagePrevious:"previous",sPageNext:"next",sPageLast:"last",sStripeOdd:"odd",sStripeEven:"even",sRowEmpty:"dataTables_empty",sWrapper:"dataTables_wrapper",sFilter:"dataTables_filter",sInfo:"dataTables_info",sPaging:"dataTables_paginate paging_",sLength:"dataTables_length",sProcessing:"dataTables_processing",sSortAsc:"sorting_asc",
-sSortDesc:"sorting_desc",sSortable:"sorting",sSortableAsc:"sorting_asc_disabled",sSortableDesc:"sorting_desc_disabled",sSortableNone:"sorting_disabled",sSortColumn:"sorting_",sSortJUIAsc:"",sSortJUIDesc:"",sSortJUI:"",sSortJUIAscAllowed:"",sSortJUIDescAllowed:"",sSortJUIWrapper:"",sSortIcon:"",sScrollWrapper:"dataTables_scroll",sScrollHead:"dataTables_scrollHead",sScrollHeadInner:"dataTables_scrollHeadInner",sScrollBody:"dataTables_scrollBody",sScrollFoot:"dataTables_scrollFoot",sScrollFootInner:"dataTables_scrollFootInner",
-sFooterTH:"",sJUIHeader:"",sJUIFooter:""});i.extend(l.ext.oJUIClasses,l.ext.oStdClasses,{sPagePrevEnabled:"fg-button ui-button ui-state-default ui-corner-left",sPagePrevDisabled:"fg-button ui-button ui-state-default ui-corner-left ui-state-disabled",sPageNextEnabled:"fg-button ui-button ui-state-default ui-corner-right",sPageNextDisabled:"fg-button ui-button ui-state-default ui-corner-right ui-state-disabled",sPageJUINext:"ui-icon ui-icon-circle-arrow-e",sPageJUIPrev:"ui-icon ui-icon-circle-arrow-w",
-sPageButton:"fg-button ui-button ui-state-default",sPageButtonActive:"fg-button ui-button ui-state-default ui-state-disabled",sPageButtonStaticDisabled:"fg-button ui-button ui-state-default ui-state-disabled",sPageFirst:"first ui-corner-tl ui-corner-bl",sPageLast:"last ui-corner-tr ui-corner-br",sPaging:"dataTables_paginate fg-buttonset ui-buttonset fg-buttonset-multi ui-buttonset-multi paging_",sSortAsc:"ui-state-default",sSortDesc:"ui-state-default",sSortable:"ui-state-default",sSortableAsc:"ui-state-default",
-sSortableDesc:"ui-state-default",sSortableNone:"ui-state-default",sSortJUIAsc:"css_right ui-icon ui-icon-triangle-1-n",sSortJUIDesc:"css_right ui-icon ui-icon-triangle-1-s",sSortJUI:"css_right ui-icon ui-icon-carat-2-n-s",sSortJUIAscAllowed:"css_right ui-icon ui-icon-carat-1-n",sSortJUIDescAllowed:"css_right ui-icon ui-icon-carat-1-s",sSortJUIWrapper:"DataTables_sort_wrapper",sSortIcon:"DataTables_sort_icon",sScrollHead:"dataTables_scrollHead ui-state-default",sScrollFoot:"dataTables_scrollFoot ui-state-default",
-sFooterTH:"ui-state-default",sJUIHeader:"fg-toolbar ui-toolbar ui-widget-header ui-corner-tl ui-corner-tr ui-helper-clearfix",sJUIFooter:"fg-toolbar ui-toolbar ui-widget-header ui-corner-bl ui-corner-br ui-helper-clearfix"});i.extend(l.ext.oPagination,{two_button:{fnInit:function(h,n,q){var o=h.oLanguage.oPaginate,v=function(D){h.oApi._fnPageChange(h,D.data.action)&&q(h)};o=!h.bJUI?'<a class="'+h.oClasses.sPagePrevDisabled+'" tabindex="'+h.iTabIndex+'" role="button">'+o.sPrevious+'</a><a class="'+
-h.oClasses.sPageNextDisabled+'" tabindex="'+h.iTabIndex+'" role="button">'+o.sNext+"</a>":'<a class="'+h.oClasses.sPagePrevDisabled+'" tabindex="'+h.iTabIndex+'" role="button"><span class="'+h.oClasses.sPageJUIPrev+'"></span></a><a class="'+h.oClasses.sPageNextDisabled+'" tabindex="'+h.iTabIndex+'" role="button"><span class="'+h.oClasses.sPageJUINext+'"></span></a>';i(n).append(o);var w=i("a",n);o=w[0];w=w[1];h.oApi._fnBindAction(o,{action:"previous"},v);h.oApi._fnBindAction(w,{action:"next"},v);
-if(!h.aanFeatures.p){n.id=h.sTableId+"_paginate";o.id=h.sTableId+"_previous";w.id=h.sTableId+"_next";o.setAttribute("aria-controls",h.sTableId);w.setAttribute("aria-controls",h.sTableId)}},fnUpdate:function(h){if(h.aanFeatures.p)for(var n=h.oClasses,q=h.aanFeatures.p,o,v=0,w=q.length;v<w;v++)if(o=q[v].firstChild){o.className=h._iDisplayStart===0?n.sPagePrevDisabled:n.sPagePrevEnabled;o=o.nextSibling;o.className=h.fnDisplayEnd()==h.fnRecordsDisplay()?n.sPageNextDisabled:n.sPageNextEnabled}}},iFullNumbersShowPages:5,
-full_numbers:{fnInit:function(h,n,q){var o=h.oLanguage.oPaginate,v=h.oClasses,w=function(G){h.oApi._fnPageChange(h,G.data.action)&&q(h)};i(n).append('<a  tabindex="'+h.iTabIndex+'" class="'+v.sPageButton+" "+v.sPageFirst+'">'+o.sFirst+'</a><a  tabindex="'+h.iTabIndex+'" class="'+v.sPageButton+" "+v.sPagePrevious+'">'+o.sPrevious+'</a><span></span><a tabindex="'+h.iTabIndex+'" class="'+v.sPageButton+" "+v.sPageNext+'">'+o.sNext+'</a><a tabindex="'+h.iTabIndex+'" class="'+v.sPageButton+" "+v.sPageLast+
-'">'+o.sLast+"</a>");var D=i("a",n);o=D[0];v=D[1];var A=D[2];D=D[3];h.oApi._fnBindAction(o,{action:"first"},w);h.oApi._fnBindAction(v,{action:"previous"},w);h.oApi._fnBindAction(A,{action:"next"},w);h.oApi._fnBindAction(D,{action:"last"},w);if(!h.aanFeatures.p){n.id=h.sTableId+"_paginate";o.id=h.sTableId+"_first";v.id=h.sTableId+"_previous";A.id=h.sTableId+"_next";D.id=h.sTableId+"_last"}},fnUpdate:function(h,n){if(h.aanFeatures.p){var q=l.ext.oPagination.iFullNumbersShowPages,o=Math.floor(q/2),v=
-Math.ceil(h.fnRecordsDisplay()/h._iDisplayLength),w=Math.ceil(h._iDisplayStart/h._iDisplayLength)+1,D="",A,G=h.oClasses,E,Y=h.aanFeatures.p,ma=function(R){h.oApi._fnBindAction(this,{page:R+A-1},function(ea){h.oApi._fnPageChange(h,ea.data.page);n(h);ea.preventDefault()})};if(h._iDisplayLength===-1)w=o=A=1;else if(v<q){A=1;o=v}else if(w<=o){A=1;o=q}else if(w>=v-o){A=v-q+1;o=v}else{A=w-Math.ceil(q/2)+1;o=A+q-1}for(q=A;q<=o;q++)D+=w!==q?'<a tabindex="'+h.iTabIndex+'" class="'+G.sPageButton+'">'+h.fnFormatNumber(q)+
-"</a>":'<a tabindex="'+h.iTabIndex+'" class="'+G.sPageButtonActive+'">'+h.fnFormatNumber(q)+"</a>";q=0;for(o=Y.length;q<o;q++){E=Y[q];if(E.hasChildNodes()){i("span:eq(0)",E).html(D).children("a").each(ma);E=E.getElementsByTagName("a");E=[E[0],E[1],E[E.length-2],E[E.length-1]];i(E).removeClass(G.sPageButton+" "+G.sPageButtonActive+" "+G.sPageButtonStaticDisabled);i([E[0],E[1]]).addClass(w==1?G.sPageButtonStaticDisabled:G.sPageButton);i([E[2],E[3]]).addClass(v===0||w===v||h._iDisplayLength===-1?G.sPageButtonStaticDisabled:
-G.sPageButton)}}}}}});i.extend(l.ext.oSort,{"string-pre":function(h){if(typeof h!="string")h=h!==null&&h.toString?h.toString():"";return h.toLowerCase()},"string-asc":function(h,n){return h<n?-1:h>n?1:0},"string-desc":function(h,n){return h<n?1:h>n?-1:0},"html-pre":function(h){return h.replace(/<.*?>/g,"").toLowerCase()},"html-asc":function(h,n){return h<n?-1:h>n?1:0},"html-desc":function(h,n){return h<n?1:h>n?-1:0},"date-pre":function(h){h=Date.parse(h);if(isNaN(h)||h==="")h=Date.parse("01/01/1970 00:00:00");
-return h},"date-asc":function(h,n){return h-n},"date-desc":function(h,n){return n-h},"numeric-pre":function(h){return h=="-"||h===""?0:h*1},"numeric-asc":function(h,n){return h-n},"numeric-desc":function(h,n){return n-h}});i.extend(l.ext.aTypes,[function(h){if(typeof h==="number")return"numeric";else if(typeof h!=="string")return null;var n,q=false;n=h.charAt(0);if("0123456789-".indexOf(n)==-1)return null;for(var o=1;o<h.length;o++){n=h.charAt(o);if("0123456789.".indexOf(n)==-1)return null;if(n==
-"."){if(q)return null;q=true}}return"numeric"},function(h){var n=Date.parse(h);if(n!==null&&!isNaN(n)||typeof h==="string"&&h.length===0)return"date";return null},function(h){if(typeof h==="string"&&h.indexOf("<")!=-1&&h.indexOf(">")!=-1)return"html";return null}]);i.fn.DataTable=l;i.fn.dataTable=l;i.fn.dataTableSettings=l.settings;i.fn.dataTableExt=l.ext})})(window,document);




[15/25] hadoop git commit: YARN-8588. Logging improvements for better debuggability. (Suma Shivaprasad via wangda)

Posted by su...@apache.org.
YARN-8588. Logging improvements for better debuggability. (Suma Shivaprasad via wangda)

Change-Id: I66aa4b0ec031ae5ce0fae558e2f8cbcbbfebc442


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/344c335a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/344c335a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/344c335a

Branch: refs/heads/HDFS-12943
Commit: 344c335a920e6f32a35ebace0a118a9dc4a22fb7
Parents: 5326a79
Author: Wangda Tan <wa...@apache.org>
Authored: Thu Aug 9 11:03:00 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Thu Aug 9 11:04:02 2018 -0700

----------------------------------------------------------------------
 .../capacity/AutoCreatedLeafQueueConfig.java    |  5 ++
 .../capacity/QueueManagementChange.java         |  2 +-
 .../QueueManagementDynamicEditPolicy.java       | 36 ++++++--------
 .../GuaranteedOrZeroCapacityOverTimePolicy.java | 50 ++++++++++++--------
 4 files changed, 52 insertions(+), 41 deletions(-)
----------------------------------------------------------------------
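
For readers tracking the migration: the hunks below replace commons-logging with SLF4J and switch several messages to parameterized logging. As a point of reference, here is that pattern in isolation. This is a minimal sketch assuming only the slf4j-api dependency; the class, method, and variable names are illustrative, not taken from the patch.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class PolicyMonitorSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(PolicyMonitorSketch.class);

      void report(String queueName, long elapsedMillis) {
        // The {} placeholder is substituted only if INFO is enabled, so
        // the message is never built for disabled levels.
        LOG.info("Queue Management Policy monitor: {}", queueName);

        // An explicit guard still pays off when computing an argument is
        // itself expensive, e.g. stringifying a large collection of queue
        // management changes (the patch caps such output at 25 entries).
        if (LOG.isDebugEnabled()) {
          LOG.debug("{} took {} ms to run", queueName, elapsedMillis);
        }
      }
    }

One easy slip in this kind of migration: writing "... {}" + value still compiles, but it logs the literal braces; the argument must be passed after a comma for the placeholder to be filled.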


http://git-wip-us.apache.org/repos/asf/hadoop/blob/344c335a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueueConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueueConfig.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueueConfig.java
index 5952250..87ef1c0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueueConfig.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueueConfig.java
@@ -63,4 +63,9 @@ public class AutoCreatedLeafQueueConfig {
   public CapacitySchedulerConfiguration getLeafQueueConfigs() {
     return leafQueueConfigs;
   }
+
+  @Override public String toString() {
+    return "AutoCreatedLeafQueueConfig{" + "queueCapacities=" + queueCapacities
+        + ", leafQueueConfigs=" + leafQueueConfigs + '}';
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/344c335a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementChange.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementChange.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementChange.java
index 74d9b23..64ba578 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementChange.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementChange.java
@@ -124,7 +124,7 @@ public abstract class QueueManagementChange {
 
   @Override
   public String toString() {
-    return "QueueManagementChange{" + "queue=" + queue
+    return "QueueManagementChange{" + "queue=" + queue.getQueueName()
         + ", updatedEntitlementsByPartition=" + queueTemplateUpdate
         + ", queueAction=" + queueAction + ", transitionToQueueState="
         + transitionToQueueState + '}';

http://git-wip-us.apache.org/repos/asf/hadoop/blob/344c335a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java
index 9b0cf7b..ea43ac8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java
@@ -19,8 +19,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -50,8 +51,8 @@ import java.util.Set;
  */
 public class QueueManagementDynamicEditPolicy implements SchedulingEditPolicy {
 
-  private static final Log LOG =
-      LogFactory.getLog(QueueManagementDynamicEditPolicy.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(QueueManagementDynamicEditPolicy.class);
 
   private Clock clock;
 
@@ -90,7 +91,7 @@ public class QueueManagementDynamicEditPolicy implements SchedulingEditPolicy {
   @Override
   public void init(final Configuration config, final RMContext context,
       final ResourceScheduler sched) {
-    LOG.info("Queue Management Policy monitor:" + this.
+    LOG.info("Queue Management Policy monitor: {}", this.
         getClass().getCanonicalName());
     assert null == scheduler : "Unexpected duplicate call to init";
     if (!(sched instanceof CapacityScheduler)) {
@@ -189,13 +190,7 @@ public class QueueManagementDynamicEditPolicy implements SchedulingEditPolicy {
           parentQueue.getAutoCreatedQueueManagementPolicy();
       long startTime = 0;
       try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(MessageFormat
-              .format("Trying to use {0} to compute preemption "
-                      + "candidates",
-                  policyClazz.getClass().getName()));
-          startTime = clock.getTime();
-        }
+        startTime = clock.getTime();
 
         queueManagementChanges = policyClazz.computeQueueManagementChanges();
 
@@ -209,15 +204,14 @@ public class QueueManagementDynamicEditPolicy implements SchedulingEditPolicy {
         }
 
         if (LOG.isDebugEnabled()) {
-          LOG.debug(MessageFormat.format("{0} uses {1} millisecond"
-                  + " to run",
-              policyClazz.getClass().getName(), clock.getTime()
-                  - startTime));
+          LOG.debug("{} uses {} millisecond" + " to run",
+              policyClazz.getClass().getName(), clock.getTime() - startTime);
           if (queueManagementChanges.size() > 0) {
-            LOG.debug(" Updated queue management updates for parent queue"
-                + " ["
-                + parentQueue.getQueueName() + ": [\n" + queueManagementChanges
-                .toString() + "\n]");
+            LOG.debug(" Updated queue management changes for parent queue" + " "
+                    + "{}: [{}]", parentQueue.getQueueName(),
+                queueManagementChanges.size() < 25 ?
+                    queueManagementChanges.toString() :
+                    queueManagementChanges.size());
           }
         }
       } catch (YarnException e) {
@@ -232,7 +226,7 @@ public class QueueManagementDynamicEditPolicy implements SchedulingEditPolicy {
             "Skipping queue management updates for parent queue "
                 + parentQueue
                 .getQueuePath() + " "
-                + "since configuration for  auto creating queue's beyond "
+                + "since configuration for auto creating queues beyond "
                 + "parent's "
                 + "guaranteed capacity is disabled");
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/344c335a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java
index b2301fd..faa6e6f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java
@@ -19,8 +19,10 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity
     .queuemanagement;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity
+    .QueueManagementDynamicEditPolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler
     .SchedulerDynamicEditException;
@@ -81,8 +83,8 @@ public class GuaranteedOrZeroCapacityOverTimePolicy
   private CapacitySchedulerContext scheduler;
   private ManagedParentQueue managedParentQueue;
 
-  private static final Log LOG = LogFactory.getLog(
-      GuaranteedOrZeroCapacityOverTimePolicy.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(GuaranteedOrZeroCapacityOverTimePolicy.class);
 
   private ReentrantReadWriteLock.WriteLock writeLock;
 
@@ -380,6 +382,17 @@ public class GuaranteedOrZeroCapacityOverTimePolicy
             deactivateLeafQueuesIfInActive(managedParentQueue, nodeLabel,
                 leafQueueEntitlements);
 
+        if (LOG.isDebugEnabled()) {
+          if (deactivatedLeafQueues.size() > 0) {
+              LOG.debug("Parent queue = {}, nodeLabel = {},"
+                   + " deactivated leaf queues = [{}]",
+                  managedParentQueue.getQueueName(), nodeLabel,
+                  deactivatedLeafQueues.size() > 25 ? deactivatedLeafQueues
+                      .size() : deactivatedLeafQueues);
+
+          }
+        }
+
         float deactivatedCapacity = getTotalDeactivatedCapacity(
             deactivatedLeafQueues, nodeLabel);
 
@@ -392,7 +405,7 @@ public class GuaranteedOrZeroCapacityOverTimePolicy
                 + deactivatedCapacity + EPSILON;
 
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Parent queue : " + managedParentQueue.getQueueName()
+          LOG.debug("Parent queue = " + managedParentQueue.getQueueName()
               + ", nodeLabel = " + nodeLabel + ", absCapacity = "
               + parentAbsoluteCapacity + ", leafQueueAbsoluteCapacity = "
               + leafQueueTemplateAbsoluteCapacity + ", deactivatedCapacity = "
@@ -409,7 +422,8 @@ public class GuaranteedOrZeroCapacityOverTimePolicy
                 pendingApps.size());
 
             if (LOG.isDebugEnabled()) {
-              LOG.debug("Found " + maxLeafQueuesTobeActivated + " leaf queues"
+              LOG.debug("Parent queue = " + managedParentQueue.getQueueName()
+                  +  " : Found " + maxLeafQueuesTobeActivated + " leaf queues"
                   + " to be activated with " + pendingApps.size() + " apps ");
             }
 
@@ -424,8 +438,9 @@ public class GuaranteedOrZeroCapacityOverTimePolicy
 
             if (LOG.isDebugEnabled()) {
               if (leafQueuesToBeActivated.size() > 0) {
-                LOG.debug("Activated leaf queues : [" + leafQueuesToBeActivated
-                    + "]");
+                LOG.debug("Activated leaf queues : [{}]",
+                    leafQueuesToBeActivated.size() < 25 ?
+                    leafQueuesToBeActivated : leafQueuesToBeActivated.size());
               }
             }
           }
@@ -492,8 +507,9 @@ public class GuaranteedOrZeroCapacityOverTimePolicy
         String partition = e.getKey();
         if (!newPartitions.contains(partition)) {
           itr.remove();
-          LOG.info(
-              "Removed partition " + partition + " from leaf queue " + "state");
+          LOG.info(managedParentQueue.getQueueName() +
+              " : Removed partition " + partition + " from leaf queue " +
+              "state");
         } else{
           Map<String, LeafQueueStatePerPartition> queues = e.getValue();
           for (
@@ -502,7 +518,9 @@ public class GuaranteedOrZeroCapacityOverTimePolicy
             String queue = queueItr.next().getKey();
             if (!newQueues.contains(queue)) {
               queueItr.remove();
-              LOG.info("Removed queue " + queue + " from leaf queue "
+              LOG.info(managedParentQueue.getQueueName() + " : Removed queue "
+                  + queue + " from "
+                  + "leaf queue "
                   + "state from partition " + partition);
             }
           }
@@ -582,12 +600,6 @@ public class GuaranteedOrZeroCapacityOverTimePolicy
           updateToZeroCapacity(capacities, nodeLabel);
           deactivatedQueues.put(leafQueue.getQueueName(),
               leafQueueTemplateCapacities);
-        } else{
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(" Leaf queue has pending applications or is " + "inactive"
-                + " : " + leafQueue.getNumApplications()
-                + ".Skipping deactivation for " + leafQueue);
-          }
         }
       } else{
         LOG.warn("Could not find queue in scheduler while trying" + " to "
@@ -659,7 +671,7 @@ public class GuaranteedOrZeroCapacityOverTimePolicy
             if (isActive(leafQueue, nodeLabel)) {
               if (LOG.isDebugEnabled()) {
                 LOG.debug("Queue is already active." + " Skipping activation : "
-                    + queue.getQueuePath());
+                    + leafQueue.getQueueName());
               }
             } else{
               activate(leafQueue, nodeLabel);
@@ -668,7 +680,7 @@ public class GuaranteedOrZeroCapacityOverTimePolicy
             if (!isActive(leafQueue, nodeLabel)) {
               if (LOG.isDebugEnabled()) {
                 LOG.debug("Queue is already de-activated. Skipping "
-                    + "de-activation : " + leafQueue.getQueuePath());
+                    + "de-activation : " + leafQueue.getQueueName());
               }
             } else{
               deactivate(leafQueue, nodeLabel);




[16/25] hadoop git commit: YARN-4946. RM should not consider an application as COMPLETED when log aggregation is not in a terminal state (snemeth via rkanter)

Posted by su...@apache.org.
YARN-4946. RM should not consider an application as COMPLETED when log aggregation is not in a terminal state (snemeth via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2517dd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2517dd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2517dd6

Branch: refs/heads/HDFS-12943
Commit: b2517dd66b3c88fdd478411cf208921bd3023755
Parents: 8244abb
Author: Robert Kanter <rk...@apache.org>
Authored: Thu Aug 9 14:58:04 2018 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Thu Aug 9 14:58:04 2018 -0700

----------------------------------------------------------------------
 .../server/resourcemanager/RMAppManager.java    |  81 +++++--
 .../server/resourcemanager/rmapp/RMApp.java     |   6 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |   8 +-
 .../server/resourcemanager/TestAppManager.java  | 241 +++++++++++++++----
 .../applicationsmanager/MockAsm.java            |  11 +
 .../server/resourcemanager/rmapp/MockRMApp.java |  20 ++
 6 files changed, 294 insertions(+), 73 deletions(-)
----------------------------------------------------------------------
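
The behavioral core of this patch: a completed application is now retained, both in memory and in the RM state store, until its log aggregation either was never enabled or has reached a terminal state. Because entries can be skipped, cleanup can no longer simply pop the head of completedApps; it walks the oldest candidates and compensates the index for each removal. Below is a self-contained sketch of that walk under hypothetical names, with shouldDelete standing in for the patch's new predicate.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.function.Predicate;

    public class RetentionSketch {
      // Examine the first numDelete entries (the oldest) and remove the
      // deletable ones, leaving the rest in place.
      static <T> void removeOldest(List<T> completed, int numDelete,
          Predicate<T> shouldDelete) {
        int offset = 0;                  // entries removed so far
        for (int i = 0; i < numDelete; i++) {
          int idx = i - offset;          // shift left past earlier removals
          T candidate = completed.get(idx);
          if (shouldDelete.test(candidate)) {
            offset++;
            completed.remove(idx);       // list shrinks; idx compensates next turn
          }
          // else: keep the entry (e.g. log aggregation still running)
        }
      }

      public static void main(String[] args) {
        List<Integer> apps = new ArrayList<>(Arrays.asList(1, 2, 3, 4, 5));
        removeOldest(apps, 4, n -> n % 2 == 0);  // drop even ids among oldest 4
        System.out.println(apps);                // prints [1, 3, 5]
      }
    }

The same predicate gates the state-store pass, so an app whose logs are still aggregating survives both limits until aggregation finishes; apps with log aggregation disabled are evicted as before.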


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2517dd6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 7011aaa..ee78c08 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -86,7 +86,7 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
   private int maxCompletedAppsInMemory;
   private int maxCompletedAppsInStateStore;
   protected int completedAppsInStateStore = 0;
-  private LinkedList<ApplicationId> completedApps = new LinkedList<ApplicationId>();
+  protected LinkedList<ApplicationId> completedApps = new LinkedList<>();
 
   private final RMContext rmContext;
   private final ApplicationMasterService masterService;
@@ -284,31 +284,72 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
    * check to see if hit the limit for max # completed apps kept
    */
   protected synchronized void checkAppNumCompletedLimit() {
-    // check apps kept in state store.
-    while (completedAppsInStateStore > this.maxCompletedAppsInStateStore) {
-      ApplicationId removeId =
-          completedApps.get(completedApps.size() - completedAppsInStateStore);
+    if (completedAppsInStateStore > maxCompletedAppsInStateStore) {
+      removeCompletedAppsFromStateStore();
+    }
+
+    if (completedApps.size() > maxCompletedAppsInMemory) {
+      removeCompletedAppsFromMemory();
+    }
+  }
+
+  private void removeCompletedAppsFromStateStore() {
+    int numDelete = completedAppsInStateStore - maxCompletedAppsInStateStore;
+    for (int i = 0; i < numDelete; i++) {
+      ApplicationId removeId = completedApps.get(i);
       RMApp removeApp = rmContext.getRMApps().get(removeId);
-      LOG.info("Max number of completed apps kept in state store met:"
-          + " maxCompletedAppsInStateStore = " + maxCompletedAppsInStateStore
-          + ", removing app " + removeApp.getApplicationId()
-          + " from state store.");
-      rmContext.getStateStore().removeApplication(removeApp);
-      completedAppsInStateStore--;
+      boolean deleteApp = shouldDeleteApp(removeApp);
+
+      if (deleteApp) {
+        LOG.info("Max number of completed apps kept in state store met:"
+            + " maxCompletedAppsInStateStore = "
+            + maxCompletedAppsInStateStore + ", removing app " + removeId
+            + " from state store.");
+        rmContext.getStateStore().removeApplication(removeApp);
+        completedAppsInStateStore--;
+      } else {
+        LOG.info("Max number of completed apps kept in state store met:"
+            + " maxCompletedAppsInStateStore = "
+            + maxCompletedAppsInStateStore + ", but not removing app "
+            + removeId
+            + " from state store as log aggregation have not finished yet.");
+      }
     }
+  }
 
-    // check apps kept in memorty.
-    while (completedApps.size() > this.maxCompletedAppsInMemory) {
-      ApplicationId removeId = completedApps.remove();
-      LOG.info("Application should be expired, max number of completed apps"
-          + " kept in memory met: maxCompletedAppsInMemory = "
-          + this.maxCompletedAppsInMemory + ", removing app " + removeId
-          + " from memory: ");
-      rmContext.getRMApps().remove(removeId);
-      this.applicationACLsManager.removeApplication(removeId);
+  private void removeCompletedAppsFromMemory() {
+    int numDelete = completedApps.size() - maxCompletedAppsInMemory;
+    int offset = 0;
+    for (int i = 0; i < numDelete; i++) {
+      int deletionIdx = i - offset;
+      ApplicationId removeId = completedApps.get(deletionIdx);
+      RMApp removeApp = rmContext.getRMApps().get(removeId);
+      boolean deleteApp = shouldDeleteApp(removeApp);
+
+      if (deleteApp) {
+        ++offset;
+        LOG.info("Application should be expired, max number of completed apps"
+                + " kept in memory met: maxCompletedAppsInMemory = "
+                + this.maxCompletedAppsInMemory + ", removing app " + removeId
+                + " from memory: ");
+        completedApps.remove(deletionIdx);
+        rmContext.getRMApps().remove(removeId);
+        this.applicationACLsManager.removeApplication(removeId);
+      } else {
+        LOG.info("Application should be expired, max number of completed apps"
+                + " kept in memory met: maxCompletedAppsInMemory = "
+                + this.maxCompletedAppsInMemory + ", but not removing app "
+                + removeId
+                + " from memory as log aggregation have not finished yet.");
+      }
     }
   }
 
+  private boolean shouldDeleteApp(RMApp app) {
+    return !app.isLogAggregationEnabled()
+            || app.isLogAggregationFinished();
+  }
+
   @SuppressWarnings("unchecked")
   protected void submitApplication(
       ApplicationSubmissionContext submissionContext, long submitTime,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2517dd6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
index 99cce87..535888c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
@@ -242,7 +242,11 @@ public interface RMApp extends EventHandler<RMAppEvent> {
    * @return the number of max attempts of the application.
    */
   int getMaxAppAttempts();
-  
+
+  boolean isLogAggregationEnabled();
+
+  boolean isLogAggregationFinished();
+
   /**
    * Returns the application type
    * @return the application type.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2517dd6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 9f1ea44..42e2bcf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -1912,7 +1912,13 @@ public class RMAppImpl implements RMApp, Recoverable {
     }
   }
 
-  private boolean isLogAggregationFinished() {
+  @Override
+  public boolean isLogAggregationEnabled() {
+    return logAggregationEnabled;
+  }
+
+  @Override
+  public boolean isLogAggregationFinished() {
     return this.logAggregationStatusForAppReport
       .equals(LogAggregationStatus.SUCCEEDED)
         || this.logAggregationStatusForAppReport

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2517dd6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index 6a6f9cf..27e87bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
@@ -19,28 +19,9 @@
 package org.apache.hadoop.yarn.server.resourcemanager;
 
 
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyLong;
-import static org.mockito.Matchers.isA;
-import static org.mockito.Matchers.matches;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -88,28 +69,48 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptI
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity
-    .CapacityScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity
-    .CapacitySchedulerConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity
-    .ManagedParentQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ManagedParentQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollectorManager;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
-import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.ArgumentCaptor;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+import static java.util.stream.Collectors.toSet;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.isA;
+import static org.mockito.Matchers.matches;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 /**
  * Testing applications being retired from RM.
@@ -131,7 +132,7 @@ public class TestAppManager{
   } 
 
 
-  public static List<RMApp> newRMApps(int n, long time, RMAppState state) {
+  private static List<RMApp> newRMApps(int n, long time, RMAppState state) {
     List<RMApp> list = Lists.newArrayList();
     for (int i = 0; i < n; ++i) {
       list.add(new MockRMApp(i, time, state));
@@ -139,23 +140,63 @@ public class TestAppManager{
     return list;
   }
 
+  private static List<RMApp> newRMAppsMixedLogAggregationStatus(int n,
+      long time, RMAppState state) {
+    List<RMApp> list = Lists.newArrayList();
+    for (int i = 0; i < n; ++i) {
+      MockRMApp rmApp = new MockRMApp(i, time, state);
+      rmApp.setLogAggregationEnabled(true);
+      rmApp.setLogAggregationFinished(i % 2 == 0);
+      list.add(rmApp);
+    }
+    return list;
+  }
+
   public RMContext mockRMContext(int n, long time) {
+    final ConcurrentMap<ApplicationId, RMApp> map = createRMAppsMap(n, time);
+    return createMockRMContextInternal(map);
+  }
+
+  public RMContext mockRMContextWithMixedLogAggregationStatus(int n,
+      long time) {
+    final ConcurrentMap<ApplicationId, RMApp> map =
+        createRMAppsMapMixedLogAggStatus(n, time);
+    return createMockRMContextInternal(map);
+  }
+
+  private ConcurrentMap<ApplicationId, RMApp> createRMAppsMap(int n,
+      long time) {
     final List<RMApp> apps = newRMApps(n, time, RMAppState.FINISHED);
     final ConcurrentMap<ApplicationId, RMApp> map = Maps.newConcurrentMap();
     for (RMApp app : apps) {
       map.put(app.getApplicationId(), app);
     }
+    return map;
+  }
+
+  private ConcurrentMap<ApplicationId, RMApp> createRMAppsMapMixedLogAggStatus(
+      int n, long time) {
+    final List<RMApp> apps =
+        newRMAppsMixedLogAggregationStatus(n, time, RMAppState.FINISHED);
+    final ConcurrentMap<ApplicationId, RMApp> map = Maps.newConcurrentMap();
+    for (RMApp app : apps) {
+      map.put(app.getApplicationId(), app);
+    }
+    return map;
+  }
+
+  private RMContext createMockRMContextInternal(ConcurrentMap<ApplicationId, RMApp> map) {
     Dispatcher rmDispatcher = new AsyncDispatcher();
     ContainerAllocationExpirer containerAllocationExpirer = new ContainerAllocationExpirer(
-        rmDispatcher);
+            rmDispatcher);
     AMLivelinessMonitor amLivelinessMonitor = new AMLivelinessMonitor(
-        rmDispatcher);
+            rmDispatcher);
     AMLivelinessMonitor amFinishingMonitor = new AMLivelinessMonitor(
-        rmDispatcher);
+            rmDispatcher);
     RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
     RMContext context = new RMContextImpl(rmDispatcher,
-        containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor,
-        null, null, null, null, null) {
+            containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor,
+            null, null, null, null, null) {
       @Override
       public ConcurrentMap<ApplicationId, RMApp> getRMApps() {
         return map;
@@ -198,9 +239,11 @@ public class TestAppManager{
 
   // Extend and make the functions we want to test public
   public class TestRMAppManager extends RMAppManager {
+    private final RMStateStore stateStore;
 
     public TestRMAppManager(RMContext context, Configuration conf) {
       super(context, null, null, new ApplicationACLsManager(conf), conf);
+      this.stateStore = context.getStateStore();
     }
 
     public TestRMAppManager(RMContext context,
@@ -208,6 +251,7 @@ public class TestAppManager{
         YarnScheduler scheduler, ApplicationMasterService masterService,
         ApplicationACLsManager applicationACLsManager, Configuration conf) {
       super(context, scheduler, masterService, applicationACLsManager, conf);
+      this.stateStore = context.getStateStore();
     }
 
     public void checkAppNumCompletedLimit() {
@@ -222,10 +266,32 @@ public class TestAppManager{
       return super.getCompletedAppsListSize();
     }
 
-    public int getCompletedAppsInStateStore() {
+    public int getNumberOfCompletedAppsInStateStore() {
       return this.completedAppsInStateStore;
     }
 
+    List<ApplicationId> getCompletedApps() {
+      return completedApps;
+    }
+
+    Set<ApplicationId> getFirstNCompletedApps(int n) {
+      return getCompletedApps().stream().limit(n).collect(toSet());
+    }
+
+    Set<ApplicationId> getCompletedAppsWithEvenIdsInRange(int n) {
+      return getCompletedApps().stream().limit(n)
+          .filter(app -> app.getId() % 2 == 0).collect(toSet());
+    }
+
+    Set<ApplicationId> getRemovedAppsFromStateStore(int numRemoves) {
+      ArgumentCaptor<RMApp> argumentCaptor =
+          ArgumentCaptor.forClass(RMApp.class);
+      verify(stateStore, times(numRemoves))
+          .removeApplication(argumentCaptor.capture());
+      return argumentCaptor.getAllValues().stream().map(RMApp::getApplicationId)
+          .collect(toSet());
+    }
+
     public void submitApplication(
         ApplicationSubmissionContext submissionContext, String user)
             throws YarnException, IOException {
@@ -234,10 +300,14 @@ public class TestAppManager{
     }
   }
 
-  protected void addToCompletedApps(TestRMAppManager appMonitor, RMContext rmContext) {
-    for (RMApp app : rmContext.getRMApps().values()) {
+  private void addToCompletedApps(TestRMAppManager appMonitor,
+          RMContext rmContext) {
+    // ensure applications are finished in order by their IDs
+    List<RMApp> sortedApps = new ArrayList<>(rmContext.getRMApps().values());
+    sortedApps.sort(Comparator.comparingInt(o -> o.getApplicationId().getId()));
+    for (RMApp app : sortedApps) {
       if (app.getState() == RMAppState.FINISHED
-          || app.getState() == RMAppState.KILLED 
+          || app.getState() == RMAppState.KILLED
           || app.getState() == RMAppState.FAILED) {
         appMonitor.finishApplication(app.getApplicationId());
       }
@@ -631,7 +701,8 @@ public class TestAppManager{
   @Test
   public void testStateStoreAppLimitLessThanMemoryAppLimit() {
     long now = System.currentTimeMillis();
-    RMContext rmContext = mockRMContext(10, now - 20000);
+    final int allApps = 10;
+    RMContext rmContext = mockRMContext(allApps, now - 20000);
     Configuration conf = new YarnConfiguration();
     int maxAppsInMemory = 8;
     int maxAppsInStateStore = 4;
@@ -641,39 +712,57 @@ public class TestAppManager{
     TestRMAppManager appMonitor = new TestRMAppManager(rmContext, conf);
 
     addToCompletedApps(appMonitor, rmContext);
-    Assert.assertEquals("Number of completed apps incorrect", 10,
+    Assert.assertEquals("Number of completed apps incorrect", allApps,
         appMonitor.getCompletedAppsListSize());
+
+    int numRemoveAppsFromStateStore = allApps - maxAppsInStateStore;
+    Set<ApplicationId> appsShouldBeRemovedFromStateStore = appMonitor
+            .getFirstNCompletedApps(numRemoveAppsFromStateStore);
     appMonitor.checkAppNumCompletedLimit();
 
+    Set<ApplicationId> removedAppsFromStateStore = appMonitor
+            .getRemovedAppsFromStateStore(numRemoveAppsFromStateStore);
+
     Assert.assertEquals("Number of apps incorrect after # completed check",
       maxAppsInMemory, rmContext.getRMApps().size());
     Assert.assertEquals("Number of completed apps incorrect after check",
       maxAppsInMemory, appMonitor.getCompletedAppsListSize());
 
-    int numRemoveAppsFromStateStore = 10 - maxAppsInStateStore;
     verify(rmContext.getStateStore(), times(numRemoveAppsFromStateStore))
       .removeApplication(isA(RMApp.class));
     Assert.assertEquals(maxAppsInStateStore,
-      appMonitor.getCompletedAppsInStateStore());
+      appMonitor.getNumberOfCompletedAppsInStateStore());
+
+    List<ApplicationId> completedApps = appMonitor.getCompletedApps();
+    Assert.assertEquals(maxAppsInMemory, completedApps.size());
+    Assert.assertEquals(numRemoveAppsFromStateStore,
+        removedAppsFromStateStore.size());
+    Assert.assertEquals(numRemoveAppsFromStateStore,
+        Sets.intersection(appsShouldBeRemovedFromStateStore,
+            removedAppsFromStateStore).size());
   }
 
   @Test
-  public void testStateStoreAppLimitLargerThanMemoryAppLimit() {
+  public void testStateStoreAppLimitGreaterThanMemoryAppLimit() {
     long now = System.currentTimeMillis();
-    RMContext rmContext = mockRMContext(10, now - 20000);
+    final int allApps = 10;
+    RMContext rmContext = mockRMContext(allApps, now - 20000);
     Configuration conf = new YarnConfiguration();
     int maxAppsInMemory = 8;
     conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, maxAppsInMemory);
-    // larger than maxCompletedAppsInMemory, reset to RM_MAX_COMPLETED_APPLICATIONS.
+    // greater than maxCompletedAppsInMemory, reset to RM_MAX_COMPLETED_APPLICATIONS.
     conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS, 1000);
     TestRMAppManager appMonitor = new TestRMAppManager(rmContext, conf);
 
     addToCompletedApps(appMonitor, rmContext);
-    Assert.assertEquals("Number of completed apps incorrect", 10,
+    Assert.assertEquals("Number of completed apps incorrect", allApps,
         appMonitor.getCompletedAppsListSize());
+
+    int numRemoveApps = allApps - maxAppsInMemory;
+    Set<ApplicationId> appsShouldBeRemoved = appMonitor
+            .getFirstNCompletedApps(numRemoveApps);
     appMonitor.checkAppNumCompletedLimit();
 
-    int numRemoveApps = 10 - maxAppsInMemory;
     Assert.assertEquals("Number of apps incorrect after # completed check",
       maxAppsInMemory, rmContext.getRMApps().size());
     Assert.assertEquals("Number of completed apps incorrect after check",
@@ -681,7 +770,57 @@ public class TestAppManager{
     verify(rmContext.getStateStore(), times(numRemoveApps)).removeApplication(
       isA(RMApp.class));
     Assert.assertEquals(maxAppsInMemory,
-      appMonitor.getCompletedAppsInStateStore());
+      appMonitor.getNumberOfCompletedAppsInStateStore());
+
+    List<ApplicationId> completedApps = appMonitor.getCompletedApps();
+    Assert.assertEquals(maxAppsInMemory, completedApps.size());
+    Assert.assertEquals(numRemoveApps, appsShouldBeRemoved.size());
+    assertTrue(Collections.disjoint(completedApps, appsShouldBeRemoved));
+  }
+
+  @Test
+  public void testStateStoreAppLimitSomeAppsHaveNotFinishedLogAggregation() {
+    long now = System.currentTimeMillis();
+    final int allApps = 10;
+    RMContext rmContext =
+        mockRMContextWithMixedLogAggregationStatus(allApps, now - 20000);
+    Configuration conf = new YarnConfiguration();
+    int maxAppsInMemory = 2;
+    conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,
+        maxAppsInMemory);
+    // greater than maxCompletedAppsInMemory, reset to
+    // RM_MAX_COMPLETED_APPLICATIONS.
+    conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS,
+        1000);
+    TestRMAppManager appMonitor = new TestRMAppManager(rmContext, conf);
+
+    addToCompletedApps(appMonitor, rmContext);
+    Assert.assertEquals("Number of completed apps incorrect", allApps,
+            appMonitor.getCompletedAppsListSize());
+
+    int numRemoveApps = allApps - maxAppsInMemory;
+    int effectiveNumRemoveApps = numRemoveApps / 2;
+    // only apps with even IDs would be deleted due to log aggregation status
+    int expectedNumberOfAppsInMemory = maxAppsInMemory + effectiveNumRemoveApps;
+
+    Set<ApplicationId> appsShouldBeRemoved = appMonitor
+            .getCompletedAppsWithEvenIdsInRange(numRemoveApps);
+    appMonitor.checkAppNumCompletedLimit();
+
+    Assert.assertEquals("Number of apps incorrect after # completed check",
+        expectedNumberOfAppsInMemory, rmContext.getRMApps().size());
+    Assert.assertEquals("Number of completed apps incorrect after check",
+        expectedNumberOfAppsInMemory, appMonitor.getCompletedAppsListSize());
+    verify(rmContext.getStateStore(), times(effectiveNumRemoveApps))
+        .removeApplication(isA(RMApp.class));
+    Assert.assertEquals(expectedNumberOfAppsInMemory,
+        appMonitor.getNumberOfCompletedAppsInStateStore());
+
+    List<ApplicationId> completedApps = appMonitor.getCompletedApps();
+
+    Assert.assertEquals(expectedNumberOfAppsInMemory, completedApps.size());
+    Assert.assertEquals(effectiveNumRemoveApps, appsShouldBeRemoved.size());
+    assertTrue(Collections.disjoint(completedApps, appsShouldBeRemoved));
   }
 
   protected void setupDispatcher(RMContext rmContext, Configuration conf) {
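
The getRemovedAppsFromStateStore() helper above pairs Mockito's
ArgumentCaptor with a stream reduction. A minimal standalone sketch of
that capture pattern, assuming a mocked RMStateStore named stateStore
and an illustrative expectedRemovals count:

    import static java.util.stream.Collectors.toSet;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;

    import java.util.Set;
    import org.mockito.ArgumentCaptor;

    // Capture every RMApp handed to removeApplication() on the mock,
    // then reduce the captured values to their application IDs.
    ArgumentCaptor<RMApp> captor = ArgumentCaptor.forClass(RMApp.class);
    verify(stateStore, times(expectedRemovals))
        .removeApplication(captor.capture());
    Set<ApplicationId> removedIds = captor.getAllValues().stream()
        .map(RMApp::getApplicationId)
        .collect(toSet());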

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2517dd6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
index 6c6c4b4..342dab8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
@@ -146,6 +146,17 @@ public abstract class MockAsm extends MockApps {
     public int getMaxAppAttempts() {
       throw new UnsupportedOperationException("Not supported yet.");
     }
+
+    @Override
+    public boolean isLogAggregationEnabled() {
+      throw new UnsupportedOperationException("Not supported yet.");
+    }
+
+    @Override
+    public boolean isLogAggregationFinished() {
+      throw new UnsupportedOperationException("Not supported yet.");
+    }
+
     @Override
     public ApplicationReport createAndGetApplicationReport(
         String clientUserName,boolean allowAccess) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2517dd6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
index ad29d27..32ece34 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
@@ -70,6 +70,8 @@ public class MockRMApp implements RMApp {
   int maxAppAttempts = 1;
   List<ResourceRequest> amReqs;
   private Set<String> applicationTags = null;
+  private boolean logAggregationEnabled;
+  private boolean logAggregationFinished;
 
   public MockRMApp(int newid, long time, RMAppState newState) {
     finish = time;
@@ -236,6 +238,24 @@ public class MockRMApp implements RMApp {
     return maxAppAttempts;
   }
 
+  @Override
+  public boolean isLogAggregationEnabled() {
+    return logAggregationEnabled;
+  }
+
+  @Override
+  public boolean isLogAggregationFinished() {
+    return logAggregationFinished;
+  }
+
+  public void setLogAggregationEnabled(boolean enabled) {
+    this.logAggregationEnabled = enabled;
+  }
+
+  public void setLogAggregationFinished(boolean finished) {
+    this.logAggregationFinished = finished;
+  }
+
   public void setNumMaxRetries(int maxAppAttempts) {
     this.maxAppAttempts = maxAppAttempts;
   }
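
The two new MockRMApp flags drive the mixed log-aggregation scenario in
TestAppManager above. A short sketch of constructing one such app (the
ID and timestamp are illustrative):

    // An app whose log aggregation is still running: the retention
    // check keeps it in the state store even over the completed limit.
    MockRMApp app = new MockRMApp(1, System.currentTimeMillis(),
        RMAppState.FINISHED);
    app.setLogAggregationEnabled(true);
    app.setLogAggregationFinished(false);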




[13/25] hadoop git commit: HDFS-13735. Make QJM HTTP URL connection timeout configurable. Contributed by Chao Sun.

Posted by su...@apache.org.
HDFS-13735. Make QJM HTTP URL connection timeout configurable. Contributed by Chao Sun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5326a790
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5326a790
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5326a790

Branch: refs/heads/HDFS-12943
Commit: 5326a7906de7c86a236d948012cabf3a9ba82310
Parents: d352f16
Author: Chen Liang <cl...@apache.org>
Authored: Thu Aug 9 10:11:47 2018 -0700
Committer: Chen Liang <cl...@apache.org>
Committed: Thu Aug 9 10:11:47 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java     |  5 +++++
 .../qjournal/client/QuorumJournalManager.java     | 11 +++++++++--
 .../src/main/resources/hdfs-default.xml           | 18 ++++++++++++++++++
 3 files changed, 32 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5326a790/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 4f21ee1..55085eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -1033,6 +1034,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_QJOURNAL_GET_JOURNAL_STATE_TIMEOUT_KEY = "dfs.qjournal.get-journal-state.timeout.ms";
   public static final String  DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_KEY = "dfs.qjournal.new-epoch.timeout.ms";
   public static final String  DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_KEY = "dfs.qjournal.write-txns.timeout.ms";
+  public static final String  DFS_QJOURNAL_HTTP_OPEN_TIMEOUT_KEY = "dfs.qjournal.http.open.timeout.ms";
+  public static final String  DFS_QJOURNAL_HTTP_READ_TIMEOUT_KEY = "dfs.qjournal.http.read.timeout.ms";
   public static final int     DFS_QJOURNAL_START_SEGMENT_TIMEOUT_DEFAULT = 20000;
   public static final int     DFS_QJOURNAL_PREPARE_RECOVERY_TIMEOUT_DEFAULT = 120000;
   public static final int     DFS_QJOURNAL_ACCEPT_RECOVERY_TIMEOUT_DEFAULT = 120000;
@@ -1041,6 +1044,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_QJOURNAL_GET_JOURNAL_STATE_TIMEOUT_DEFAULT = 120000;
   public static final int     DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_DEFAULT = 120000;
   public static final int     DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_DEFAULT = 20000;
+  public static final int     DFS_QJOURNAL_HTTP_OPEN_TIMEOUT_DEFAULT = URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT;
+  public static final int     DFS_QJOURNAL_HTTP_READ_TIMEOUT_DEFAULT = URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT;
   
   public static final String DFS_MAX_NUM_BLOCKS_TO_LOG_KEY = "dfs.namenode.max-num-blocks-to-log";
   public static final long   DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT = 1000l;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5326a790/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index 7a70a3d..4faaa98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -124,8 +124,6 @@ public class QuorumJournalManager implements JournalManager {
     this.nsInfo = nsInfo;
     this.nameServiceId = nameServiceId;
     this.loggers = new AsyncLoggerSet(createLoggers(loggerFactory));
-    this.connectionFactory = URLConnectionFactory
-        .newDefaultURLConnectionFactory(conf);
 
     // Configure timeouts.
     this.startSegmentTimeoutMs = conf.getInt(
@@ -156,6 +154,15 @@ public class QuorumJournalManager implements JournalManager {
             .DFS_QJM_OPERATIONS_TIMEOUT,
         DFSConfigKeys.DFS_QJM_OPERATIONS_TIMEOUT_DEFAULT, TimeUnit
             .MILLISECONDS);
+
+    int connectTimeoutMs = conf.getInt(
+        DFSConfigKeys.DFS_QJOURNAL_HTTP_OPEN_TIMEOUT_KEY,
+        DFSConfigKeys.DFS_QJOURNAL_HTTP_OPEN_TIMEOUT_DEFAULT);
+    int readTimeoutMs = conf.getInt(
+        DFSConfigKeys.DFS_QJOURNAL_HTTP_READ_TIMEOUT_KEY,
+        DFSConfigKeys.DFS_QJOURNAL_HTTP_READ_TIMEOUT_DEFAULT);
+    this.connectionFactory = URLConnectionFactory
+        .newDefaultURLConnectionFactory(connectTimeoutMs, readTimeoutMs, conf);
   }
   
   protected List<AsyncLogger> createLoggers(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5326a790/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index dea79f5..8eaf2a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4583,6 +4583,24 @@
 </property>
 
 <property>
+  <name>dfs.qjournal.http.open.timeout.ms</name>
+  <value>60000</value>
+  <description>
+    Timeout in milliseconds when opening a new HTTP connection to remote
+    journals.
+  </description>
+</property>
+
+<property>
+  <name>dfs.qjournal.http.read.timeout.ms</name>
+  <value>60000</value>
+  <description>
+    Timeout in milliseconds when reading from an HTTP connection to remote
+    journals.
+  </description>
+</property>
+
+<property>
   <name>dfs.quota.by.storage.type.enabled</name>
   <value>true</value>
   <description>
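
For clients that build the configuration programmatically instead of
editing hdfs-site.xml, a minimal sketch of overriding the new keys from
this commit (the 10-second value is illustrative, not a recommendation):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    Configuration conf = new HdfsConfiguration();
    // Fail faster than the 60-second URLConnectionFactory default when
    // a remote JournalNode is slow to accept or answer connections.
    conf.setInt(DFSConfigKeys.DFS_QJOURNAL_HTTP_OPEN_TIMEOUT_KEY, 10000);
    conf.setInt(DFSConfigKeys.DFS_QJOURNAL_HTTP_READ_TIMEOUT_KEY, 10000);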




[08/25] hadoop git commit: YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 upgrade. Contributed by Akhil PB.

Posted by su...@apache.org.
YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 upgrade. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00013d6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00013d6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00013d6e

Branch: refs/heads/HDFS-12943
Commit: 00013d6ef7fdf65fa8a0f6eb56c0aef2f6e19444
Parents: da9a39e
Author: Sunil G <su...@apache.org>
Authored: Thu Aug 9 12:18:32 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Thu Aug 9 12:18:32 2018 +0530

----------------------------------------------------------------------
 LICENSE.txt                                     |   2 +-
 .../hadoop-yarn/hadoop-yarn-common/pom.xml      |   8 +-
 .../hadoop/yarn/webapp/view/JQueryUI.java       |   4 +-
 .../webapps/static/dt-1.10.7/css/demo_page.css  | 110 ++++
 .../webapps/static/dt-1.10.7/css/demo_table.css | 538 +++++++++++++++++++
 .../webapps/static/dt-1.10.7/css/jui-dt.css     | 322 +++++++++++
 .../static/dt-1.10.7/images/Sorting icons.psd   | Bin 0 -> 27490 bytes
 .../static/dt-1.10.7/images/back_disabled.jpg   | Bin 0 -> 612 bytes
 .../static/dt-1.10.7/images/back_enabled.jpg    | Bin 0 -> 807 bytes
 .../webapps/static/dt-1.10.7/images/favicon.ico | Bin 0 -> 894 bytes
 .../dt-1.10.7/images/forward_disabled.jpg       | Bin 0 -> 635 bytes
 .../static/dt-1.10.7/images/forward_enabled.jpg | Bin 0 -> 852 bytes
 .../static/dt-1.10.7/images/sort_asc.png        | Bin 0 -> 263 bytes
 .../dt-1.10.7/images/sort_asc_disabled.png      | Bin 0 -> 252 bytes
 .../static/dt-1.10.7/images/sort_both.png       | Bin 0 -> 282 bytes
 .../static/dt-1.10.7/images/sort_desc.png       | Bin 0 -> 260 bytes
 .../dt-1.10.7/images/sort_desc_disabled.png     | Bin 0 -> 251 bytes
 .../dt-1.10.7/js/jquery.dataTables.min.js       | 160 ++++++
 .../webapps/static/dt-1.9.4/css/demo_page.css   | 110 ----
 .../webapps/static/dt-1.9.4/css/demo_table.css  | 538 -------------------
 .../webapps/static/dt-1.9.4/css/jui-dt.css      | 322 -----------
 .../static/dt-1.9.4/images/Sorting icons.psd    | Bin 27490 -> 0 bytes
 .../static/dt-1.9.4/images/back_disabled.jpg    | Bin 612 -> 0 bytes
 .../static/dt-1.9.4/images/back_enabled.jpg     | Bin 807 -> 0 bytes
 .../webapps/static/dt-1.9.4/images/favicon.ico  | Bin 894 -> 0 bytes
 .../static/dt-1.9.4/images/forward_disabled.jpg | Bin 635 -> 0 bytes
 .../static/dt-1.9.4/images/forward_enabled.jpg  | Bin 852 -> 0 bytes
 .../webapps/static/dt-1.9.4/images/sort_asc.png | Bin 263 -> 0 bytes
 .../dt-1.9.4/images/sort_asc_disabled.png       | Bin 252 -> 0 bytes
 .../static/dt-1.9.4/images/sort_both.png        | Bin 282 -> 0 bytes
 .../static/dt-1.9.4/images/sort_desc.png        | Bin 260 -> 0 bytes
 .../dt-1.9.4/images/sort_desc_disabled.png      | Bin 251 -> 0 bytes
 .../static/dt-1.9.4/js/jquery.dataTables.min.js | 157 ------
 33 files changed, 1137 insertions(+), 1134 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/LICENSE.txt
----------------------------------------------------------------------
diff --git a/LICENSE.txt b/LICENSE.txt
index f8de86a..393ed0e 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -553,7 +553,7 @@ For:
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.js
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.css
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery.dataTables.min.js
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/
 --------------------------------------------------------------------------------
 Copyright (C) 2008-2016, SpryMedia Ltd.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index eddcbaa..685eac9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -237,10 +237,10 @@
             <exclude>src/main/resources/webapps/test/.keep</exclude>
             <exclude>src/main/resources/webapps/proxy/.keep</exclude>
             <exclude>src/main/resources/webapps/node/.keep</exclude>
-            <exclude>src/main/resources/webapps/static/dt-1.9.4/css/jui-dt.css</exclude>
-            <exclude>src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css</exclude>
-            <exclude>src/main/resources/webapps/static/dt-1.9.4/images/Sorting icons.psd</exclude>
-            <exclude>src/main/resources/webapps/static/dt-1.9.4/js/jquery.dataTables.min.js</exclude>
+            <exclude>src/main/resources/webapps/static/dt-1.10.7/css/jui-dt.css</exclude>
+            <exclude>src/main/resources/webapps/static/dt-1.10.7/css/demo_table.css</exclude>
+            <exclude>src/main/resources/webapps/static/dt-1.10.7/images/Sorting icons.psd</exclude>
+            <exclude>src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js</exclude>
             <exclude>src/main/resources/webapps/static/jt/jquery.jstree.js</exclude>
             <exclude>src/main/resources/webapps/static/jquery/jquery-ui-1.12.1.custom.min.js</exclude>
             <exclude>src/main/resources/webapps/static/jquery/jquery-3.3.1.min.js</exclude>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
index b8e954d..eef33eb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
@@ -66,10 +66,10 @@ public class JQueryUI extends HtmlBlock {
   @Override
   protected void render(Block html) {
     html.link(root_url("static/jquery/themes-1.9.1/base/jquery-ui.css"))
-        .link(root_url("static/dt-1.9.4/css/jui-dt.css"))
+        .link(root_url("static/dt-1.10.7/css/jui-dt.css"))
         .script(root_url("static/jquery/jquery-3.3.1.min.js"))
         .script(root_url("static/jquery/jquery-ui-1.12.1.custom.min.js"))
-        .script(root_url("static/dt-1.9.4/js/jquery.dataTables.min.js"))
+        .script(root_url("static/dt-1.10.7/js/jquery.dataTables.min.js"))
         .script(root_url("static/yarn.dt.plugins.js"))
         .script(root_url("static/dt-sorting/natural.js"))
         .style("#jsnotice { padding: 0.2em; text-align: center; }",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_page.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_page.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_page.css
new file mode 100644
index 0000000..b60ee7d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_page.css
@@ -0,0 +1,110 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * General page setup
+ */
+#dt_example {
+	font: 80%/1.45em "Lucida Grande", Verdana, Arial, Helvetica, sans-serif;
+	margin: 0;
+	padding: 0;
+	color: #333;
+	background-color: #fff;
+}
+
+
+#dt_example #container {
+	width: 800px;
+	margin: 30px auto;
+	padding: 0;
+}
+
+
+#dt_example #footer {
+	margin: 50px auto 0 auto;
+	padding: 0;
+}
+
+#dt_example #demo {
+	margin: 30px auto 0 auto;
+}
+
+#dt_example .demo_jui {
+	margin: 30px auto 0 auto;
+}
+
+#dt_example .big {
+	font-size: 1.3em;
+	font-weight: bold;
+	line-height: 1.6em;
+	color: #4E6CA3;
+}
+
+#dt_example .spacer {
+	height: 20px;
+	clear: both;
+}
+
+#dt_example .clear {
+	clear: both;
+}
+
+#dt_example pre {
+	padding: 15px;
+	background-color: #F5F5F5;
+	border: 1px solid #CCCCCC;
+}
+
+#dt_example h1 {
+	margin-top: 2em;
+	font-size: 1.3em;
+	font-weight: normal;
+	line-height: 1.6em;
+	color: #4E6CA3;
+	border-bottom: 1px solid #B0BED9;
+	clear: both;
+}
+
+#dt_example h2 {
+	font-size: 1.2em;
+	font-weight: normal;
+	line-height: 1.6em;
+	color: #4E6CA3;
+	clear: both;
+}
+
+#dt_example a {
+	color: #0063DC;
+	text-decoration: none;
+}
+
+#dt_example a:hover {
+	text-decoration: underline;
+}
+
+#dt_example ul {
+	color: #4E6CA3;
+}
+
+.css_right {
+	float: right;
+}
+
+.css_left {
+	float: left;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_table.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_table.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_table.css
new file mode 100644
index 0000000..37b9203
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_table.css
@@ -0,0 +1,538 @@
+/*
+ *  File:         demo_table.css
+ *  CVS:          $Id$
+ *  Description:  CSS descriptions for DataTables demo pages
+ *  Author:       Allan Jardine
+ *  Created:      Tue May 12 06:47:22 BST 2009
+ *  Modified:     $Date$ by $Author$
+ *  Language:     CSS
+ *  Project:      DataTables
+ *
+ *  Copyright 2009 Allan Jardine. All Rights Reserved.
+ *
+ * ***************************************************************************
+ * DESCRIPTION
+ *
+ * The styles given here are suitable for the demos that are used with the standard DataTables
+ * distribution (see www.datatables.net). You will most likely wish to modify these styles to
+ * meet the layout requirements of your site.
+ *
+ * Common issues:
+ *   'full_numbers' pagination - I use an extra selector on the body tag to ensure that there is
+ *     no conflict between the two pagination types. If you want to use full_numbers pagination
+ *     ensure that you either have "example_alt_pagination" as a body class name, or better yet,
+ *     modify that selector.
+ *   Note that the path used for Images is relative. All images are by default located in
+ *     ../images/ - relative to this CSS file.
+ */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables features
+ */
+
+.dataTables_wrapper {
+	position: relative;
+	min-height: 302px;
+	clear: both;
+	_height: 302px;
+	zoom: 1; /* Feeling sorry for IE */
+}
+
+.dataTables_processing {
+	position: absolute;
+	top: 50%;
+	left: 50%;
+	width: 250px;
+	height: 30px;
+	margin-left: -125px;
+	margin-top: -15px;
+	padding: 14px 0 2px 0;
+	border: 1px solid #ddd;
+	text-align: center;
+	color: #999;
+	font-size: 14px;
+	background-color: white;
+}
+
+.dataTables_length {
+	width: 40%;
+	float: left;
+}
+
+.dataTables_filter {
+	width: 50%;
+	float: right;
+	text-align: right;
+}
+
+.dataTables_info {
+	width: 60%;
+	float: left;
+}
+
+.dataTables_paginate {
+	width: 44px;
+	* width: 50px;
+	float: right;
+	text-align: right;
+}
+
+/* Pagination nested */
+.paginate_disabled_previous, .paginate_enabled_previous, .paginate_disabled_next, .paginate_enabled_next {
+	height: 19px;
+	width: 19px;
+	margin-left: 3px;
+	float: left;
+}
+
+.paginate_disabled_previous {
+	background-image: url('../images/back_disabled.jpg');
+}
+
+.paginate_enabled_previous {
+	background-image: url('../images/back_enabled.jpg');
+}
+
+.paginate_disabled_next {
+	background-image: url('../images/forward_disabled.jpg');
+}
+
+.paginate_enabled_next {
+	background-image: url('../images/forward_enabled.jpg');
+}
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables display
+ */
+table.display {
+	margin: 0 auto;
+	clear: both;
+	width: 100%;
+
+	/* Note Firefox 3.5 and before have a bug with border-collapse
+	 * ( https://bugzilla.mozilla.org/show%5Fbug.cgi?id=155955 )
+	 * border-spacing: 0; is one possible option. Conditional-css.com is
+	 * useful for this kind of thing
+	 *
+	 * Further note IE 6/7 has problems when calculating widths with border width.
+	 * It subtracts one px relative to the other browsers from the first column, and
+	 * adds one to the end...
+	 *
+	 * If you want that effect I'd suggest setting a border-top/left on th/td's and
+	 * then filling in the gaps with other borders.
+	 */
+}
+
+table.display thead th {
+	padding: 3px 18px 3px 10px;
+	border-bottom: 1px solid black;
+	font-weight: bold;
+	cursor: pointer;
+	* cursor: hand;
+}
+
+table.display tfoot th {
+	padding: 3px 18px 3px 10px;
+	border-top: 1px solid black;
+	font-weight: bold;
+}
+
+table.display tr.heading2 td {
+	border-bottom: 1px solid #aaa;
+}
+
+table.display td {
+	padding: 3px 10px;
+}
+
+table.display td.center {
+	text-align: center;
+}
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables sorting
+ */
+
+.sorting_asc {
+	background: url('../images/sort_asc.png') no-repeat center right;
+}
+
+.sorting_desc {
+	background: url('../images/sort_desc.png') no-repeat center right;
+}
+
+.sorting {
+	background: url('../images/sort_both.png') no-repeat center right;
+}
+
+.sorting_asc_disabled {
+	background: url('../images/sort_asc_disabled.png') no-repeat center right;
+}
+
+.sorting_desc_disabled {
+	background: url('../images/sort_desc_disabled.png') no-repeat center right;
+}
+
+
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables row classes
+ */
+table.display tr.odd.gradeA {
+	background-color: #ddffdd;
+}
+
+table.display tr.even.gradeA {
+	background-color: #eeffee;
+}
+
+table.display tr.odd.gradeC {
+	background-color: #ddddff;
+}
+
+table.display tr.even.gradeC {
+	background-color: #eeeeff;
+}
+
+table.display tr.odd.gradeX {
+	background-color: #ffdddd;
+}
+
+table.display tr.even.gradeX {
+	background-color: #ffeeee;
+}
+
+table.display tr.odd.gradeU {
+	background-color: #ddd;
+}
+
+table.display tr.even.gradeU {
+	background-color: #eee;
+}
+
+
+tr.odd {
+	background-color: #E2E4FF;
+}
+
+tr.even {
+	background-color: white;
+}
+
+
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Misc
+ */
+.dataTables_scroll {
+	clear: both;
+}
+
+.dataTables_scrollBody {
+	*margin-top: -1px;
+}
+
+.top, .bottom {
+	padding: 15px;
+	background-color: #F5F5F5;
+	border: 1px solid #CCCCCC;
+}
+
+.top .dataTables_info {
+	float: none;
+}
+
+.clear {
+	clear: both;
+}
+
+.dataTables_empty {
+	text-align: center;
+}
+
+tfoot input {
+	margin: 0.5em 0;
+	width: 100%;
+	color: #444;
+}
+
+tfoot input.search_init {
+	color: #999;
+}
+
+td.group {
+	background-color: #d1cfd0;
+	border-bottom: 2px solid #A19B9E;
+	border-top: 2px solid #A19B9E;
+}
+
+td.details {
+	background-color: #d1cfd0;
+	border: 2px solid #A19B9E;
+}
+
+
+.example_alt_pagination div.dataTables_info {
+	width: 40%;
+}
+
+.paging_full_numbers {
+	width: 400px;
+	height: 22px;
+	line-height: 22px;
+}
+
+.paging_full_numbers span.paginate_button,
+ 	.paging_full_numbers span.paginate_active {
+	border: 1px solid #aaa;
+	-webkit-border-radius: 5px;
+	-moz-border-radius: 5px;
+	padding: 2px 5px;
+	margin: 0 3px;
+	cursor: pointer;
+	*cursor: hand;
+}
+
+.paging_full_numbers span.paginate_button {
+	background-color: #ddd;
+}
+
+.paging_full_numbers span.paginate_button:hover {
+	background-color: #ccc;
+}
+
+.paging_full_numbers span.paginate_active {
+	background-color: #99B3FF;
+}
+
+table.display tr.even.row_selected td {
+	background-color: #B0BED9;
+}
+
+table.display tr.odd.row_selected td {
+	background-color: #9FAFD1;
+}
+
+
+/*
+ * Sorting classes for columns
+ */
+/* For the standard odd/even */
+tr.odd td.sorting_1 {
+	background-color: #D3D6FF;
+}
+
+tr.odd td.sorting_2 {
+	background-color: #DADCFF;
+}
+
+tr.odd td.sorting_3 {
+	background-color: #E0E2FF;
+}
+
+tr.even td.sorting_1 {
+	background-color: #EAEBFF;
+}
+
+tr.even td.sorting_2 {
+	background-color: #F2F3FF;
+}
+
+tr.even td.sorting_3 {
+	background-color: #F9F9FF;
+}
+
+
+/* For the Conditional-CSS grading rows */
+/*
+ 	Colour calculations (based off the main row colours)
+  Level 1:
+		dd > c4
+		ee > d5
+	Level 2:
+	  dd > d1
+	  ee > e2
+ */
+tr.odd.gradeA td.sorting_1 {
+	background-color: #c4ffc4;
+}
+
+tr.odd.gradeA td.sorting_2 {
+	background-color: #d1ffd1;
+}
+
+tr.odd.gradeA td.sorting_3 {
+	background-color: #d1ffd1;
+}
+
+tr.even.gradeA td.sorting_1 {
+	background-color: #d5ffd5;
+}
+
+tr.even.gradeA td.sorting_2 {
+	background-color: #e2ffe2;
+}
+
+tr.even.gradeA td.sorting_3 {
+	background-color: #e2ffe2;
+}
+
+tr.odd.gradeC td.sorting_1 {
+	background-color: #c4c4ff;
+}
+
+tr.odd.gradeC td.sorting_2 {
+	background-color: #d1d1ff;
+}
+
+tr.odd.gradeC td.sorting_3 {
+	background-color: #d1d1ff;
+}
+
+tr.even.gradeC td.sorting_1 {
+	background-color: #d5d5ff;
+}
+
+tr.even.gradeC td.sorting_2 {
+	background-color: #e2e2ff;
+}
+
+tr.even.gradeC td.sorting_3 {
+	background-color: #e2e2ff;
+}
+
+tr.odd.gradeX td.sorting_1 {
+	background-color: #ffc4c4;
+}
+
+tr.odd.gradeX td.sorting_2 {
+	background-color: #ffd1d1;
+}
+
+tr.odd.gradeX td.sorting_3 {
+	background-color: #ffd1d1;
+}
+
+tr.even.gradeX td.sorting_1 {
+	background-color: #ffd5d5;
+}
+
+tr.even.gradeX td.sorting_2 {
+	background-color: #ffe2e2;
+}
+
+tr.even.gradeX td.sorting_3 {
+	background-color: #ffe2e2;
+}
+
+tr.odd.gradeU td.sorting_1 {
+	background-color: #c4c4c4;
+}
+
+tr.odd.gradeU td.sorting_2 {
+	background-color: #d1d1d1;
+}
+
+tr.odd.gradeU td.sorting_3 {
+	background-color: #d1d1d1;
+}
+
+tr.even.gradeU td.sorting_1 {
+	background-color: #d5d5d5;
+}
+
+tr.even.gradeU td.sorting_2 {
+	background-color: #e2e2e2;
+}
+
+tr.even.gradeU td.sorting_3 {
+	background-color: #e2e2e2;
+}
+
+
+/*
+ * Row highlighting example
+ */
+.ex_highlight #example tbody tr.even:hover, #example tbody tr.even td.highlighted {
+	background-color: #ECFFB3;
+}
+
+.ex_highlight #example tbody tr.odd:hover, #example tbody tr.odd td.highlighted {
+	background-color: #E6FF99;
+}
+
+.ex_highlight_row #example tr.even:hover {
+	background-color: #ECFFB3;
+}
+
+.ex_highlight_row #example tr.even:hover td.sorting_1 {
+	background-color: #DDFF75;
+}
+
+.ex_highlight_row #example tr.even:hover td.sorting_2 {
+	background-color: #E7FF9E;
+}
+
+.ex_highlight_row #example tr.even:hover td.sorting_3 {
+	background-color: #E2FF89;
+}
+
+.ex_highlight_row #example tr.odd:hover {
+	background-color: #E6FF99;
+}
+
+.ex_highlight_row #example tr.odd:hover td.sorting_1 {
+	background-color: #D6FF5C;
+}
+
+.ex_highlight_row #example tr.odd:hover td.sorting_2 {
+	background-color: #E0FF84;
+}
+
+.ex_highlight_row #example tr.odd:hover td.sorting_3 {
+	background-color: #DBFF70;
+}
+
+
+/*
+ * KeyTable
+ */
+table.KeyTable td {
+	border: 3px solid transparent;
+}
+
+table.KeyTable td.focus {
+	border: 3px solid #3366FF;
+}
+
+table.display tr.gradeA {
+	background-color: #eeffee;
+}
+
+table.display tr.gradeC {
+	background-color: #ddddff;
+}
+
+table.display tr.gradeX {
+	background-color: #ffdddd;
+}
+
+table.display tr.gradeU {
+	background-color: #ddd;
+}
+
+div.box {
+	height: 100px;
+	padding: 10px;
+	overflow: auto;
+	border: 1px solid #8080FF;
+	background-color: #E5E5FF;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/jui-dt.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/jui-dt.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/jui-dt.css
new file mode 100644
index 0000000..de8faea
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/jui-dt.css
@@ -0,0 +1,322 @@
+/*
+ *  File:         demo_table_jui.css
+ *  CVS:          $Id$
+ *  Description:  CSS descriptions for DataTables demo pages
+ *  Author:       Allan Jardine
+ *  Created:      Tue May 12 06:47:22 BST 2009
+ *  Modified:     $Date$ by $Author$
+ *  Language:     CSS
+ *  Project:      DataTables
+ *
+ *  Copyright 2009 Allan Jardine. All Rights Reserved.
+ *
+ * ***************************************************************************
+ * DESCRIPTION
+ *
+ * The styles given here are suitable for the demos that are used with the standard DataTables
+ * distribution (see www.datatables.net). You will most likely wish to modify these styles to
+ * meet the layout requirements of your site.
+ *
+ * Common issues:
+ *   'full_numbers' pagination - I use an extra selector on the body tag to ensure that there is
+ *     no conflict between the two pagination types. If you want to use full_numbers pagination
+ *     ensure that you either have "example_alt_pagination" as a body class name, or better yet,
+ *     modify that selector.
+ *   Note that the path used for Images is relative. All images are by default located in
+ *     ../images/ - relative to this CSS file.
+ */
+
+
+/*
+ * jQuery UI specific styling
+ */
+
+.paging_two_button .ui-button {
+	float: left;
+	cursor: pointer;
+	* cursor: hand;
+}
+
+.paging_full_numbers .ui-button {
+	padding: 2px 6px;
+	margin: 0;
+	cursor: pointer;
+	* cursor: hand;
+}
+
+.ui-buttonset .ui-button {
+	margin-right: -0.1em !important;
+}
+
+.paging_full_numbers {
+	width: 350px !important;
+}
+
+.ui-toolbar {
+	padding: 5px;
+}
+
+.dataTables_paginate {
+	width: auto;
+}
+
+.dataTables_info {
+	padding-top: 3px;
+}
+
+table.display thead th {
+	padding: 3px 0px 3px 10px;
+	cursor: pointer;
+	* cursor: hand;
+}
+
+div.dataTables_wrapper .ui-widget-header {
+	font-weight: normal;
+}
+
+
+/*
+ * Sort arrow icon positioning
+ */
+table.display thead th div.DataTables_sort_wrapper {
+	position: relative;
+	padding-right: 20px;
+	padding-right: 20px;
+}
+
+table.display thead th div.DataTables_sort_wrapper span {
+	position: absolute;
+	top: 50%;
+	margin-top: -8px;
+	right: 0;
+}
+
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ *
+ * Everything below this line is the same as demo_table.css. This file is
+ * required for 'cleanliness' of the markup
+ *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables features
+ */
+
+.dataTables_wrapper {
+	position: relative;
+	min-height: 35px;
+	_height: 35px;
+	clear: both;
+}
+
+.dataTables_processing {
+	position: absolute;
+	top: 0px;
+	left: 50%;
+	width: 250px;
+	margin-left: -125px;
+	border: 1px solid #ddd;
+	text-align: center;
+	color: #999;
+	font-size: 11px;
+	padding: 2px 0;
+}
+
+.dataTables_length {
+	width: 40%;
+	float: left;
+}
+
+.dataTables_filter {
+	width: 50%;
+	float: right;
+	text-align: right;
+}
+
+.dataTables_info {
+	width: 50%;
+	float: left;
+}
+
+.dataTables_paginate {
+	float: right;
+	text-align: right;
+}
+
+/* Pagination nested */
+.paginate_disabled_previous, .paginate_enabled_previous, .paginate_disabled_next, .paginate_enabled_next {
+	height: 19px;
+	width: 19px;
+	margin-left: 3px;
+	float: left;
+}
+
+.paginate_disabled_previous {
+	background-image: url('../images/back_disabled.jpg');
+}
+
+.paginate_enabled_previous {
+	background-image: url('../images/back_enabled.jpg');
+}
+
+.paginate_disabled_next {
+	background-image: url('../images/forward_disabled.jpg');
+}
+
+.paginate_enabled_next {
+	background-image: url('../images/forward_enabled.jpg');
+}
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables display
+ */
+table.display {
+	margin: 0 auto;
+	width: 100%;
+	clear: both;
+	border-collapse: collapse;
+}
+
+table.display tfoot th {
+	padding: 3px 0px 3px 10px;
+	font-weight: bold;
+	font-weight: normal;
+}
+
+table.display tr.heading2 td {
+	border-bottom: 1px solid #aaa;
+}
+
+table.display td {
+	padding: 3px 10px;
+}
+
+table.display td.center {
+	text-align: center;
+}
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables sorting
+ */
+
+.sorting_asc {
+	background: url('../images/sort_asc.jpg') no-repeat center right;
+}
+
+.sorting_desc {
+	background: url('../images/sort_desc.jpg') no-repeat center right;
+}
+
+.sorting {
+	background: url('../images/sort_both.jpg') no-repeat center right;
+}
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Misc
+ */
+.dataTables_scroll {
+	clear: both;
+}
+
+.top, .bottom {
+	padding: 15px;
+	background-color: #F5F5F5;
+	border: 1px solid #CCCCCC;
+}
+
+.top .dataTables_info {
+	float: none;
+}
+
+.clear {
+	clear: both;
+}
+
+.dataTables_empty {
+	text-align: center;
+}
+
+tfoot input {
+	margin: 0.5em 0;
+	width: 100%;
+	color: #444;
+}
+
+tfoot input.search_init {
+	color: #999;
+}
+
+td.group {
+	background-color: #d1cfd0;
+	border-bottom: 2px solid #A19B9E;
+	border-top: 2px solid #A19B9E;
+}
+
+td.details {
+	background-color: #d1cfd0;
+	border: 2px solid #A19B9E;
+}
+
+
+.example_alt_pagination div.dataTables_info {
+	width: 40%;
+}
+
+.paging_full_numbers span.paginate_button,
+ 	.paging_full_numbers span.paginate_active {
+	border: 1px solid #aaa;
+	-webkit-border-radius: 5px;
+	-moz-border-radius: 5px;
+	padding: 2px 5px;
+	margin: 0 3px;
+	cursor: pointer;
+	*cursor: hand;
+}
+
+.paging_full_numbers span.paginate_button {
+	background-color: #ddd;
+}
+
+.paging_full_numbers span.paginate_button:hover {
+	background-color: #ccc;
+}
+
+.paging_full_numbers span.paginate_active {
+	background-color: #99B3FF;
+}
+
+table.display tr.even.row_selected td {
+	background-color: #B0BED9;
+}
+
+table.display tr.odd.row_selected td {
+	background-color: #9FAFD1;
+}
+
+/* Striping */
+tr.odd { background: rgba(255, 255, 255, 0.1); }
+tr.even { background: rgba(0, 0, 255, 0.05); }
+
+
+/*
+ * Sorting classes for columns
+ */
+tr.odd td.sorting_1 { background: rgba(0, 0, 0, 0.03); }
+tr.odd td.sorting_2 { background: rgba(0, 0, 0, 0.02); }
+tr.odd td.sorting_3 { background: rgba(0, 0, 0, 0.02); }
+tr.even td.sorting_1 { background: rgba(0, 0, 0, 0.08); }
+tr.even td.sorting_2 { background: rgba(0, 0, 0, 0.06); }
+tr.even td.sorting_3 { background: rgba(0, 0, 0, 0.06); }
+
+.css_left { position: relative; float: left; }
+.css_right { position: relative; float: right; }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/Sorting icons.psd
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/Sorting icons.psd b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/Sorting icons.psd
new file mode 100644
index 0000000..53b2e06
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/Sorting icons.psd differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_disabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_disabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_disabled.jpg
new file mode 100644
index 0000000..1e73a54
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_disabled.jpg differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_enabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_enabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_enabled.jpg
new file mode 100644
index 0000000..a6d764c
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_enabled.jpg differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/favicon.ico
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/favicon.ico b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/favicon.ico
new file mode 100644
index 0000000..6eeaa2a
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/favicon.ico differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_disabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_disabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_disabled.jpg
new file mode 100644
index 0000000..28a9dc5
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_disabled.jpg differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_enabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_enabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_enabled.jpg
new file mode 100644
index 0000000..598c075
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_enabled.jpg differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc.png
new file mode 100644
index 0000000..a56d0e2
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc.png differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc_disabled.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc_disabled.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc_disabled.png
new file mode 100644
index 0000000..b7e621e
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc_disabled.png differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_both.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_both.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_both.png
new file mode 100644
index 0000000..839ac4b
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_both.png differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc.png
new file mode 100644
index 0000000..90b2951
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc.png differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc_disabled.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc_disabled.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc_disabled.png
new file mode 100644
index 0000000..2409653
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc_disabled.png differ


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[12/25] hadoop git commit: YARN-8559. Expose mutable-conf scheduler's configuration in RM /scheduler-conf endpoint. Contributed by Weiwei Yang.

Posted by su...@apache.org.
YARN-8559. Expose mutable-conf scheduler's configuration in RM /scheduler-conf endpoint. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d352f167
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d352f167
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d352f167

Branch: refs/heads/HDFS-12943
Commit: d352f167ebb865a6486afbbdac8e2a5e97a7bbad
Parents: cd04e95
Author: Weiwei Yang <ww...@apache.org>
Authored: Thu Aug 9 23:46:53 2018 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Thu Aug 9 23:46:53 2018 +0800

----------------------------------------------------------------------
 .../scheduler/MutableConfigurationProvider.java |  7 ++
 .../conf/MutableCSConfigurationProvider.java    |  5 ++
 .../resourcemanager/webapp/RMWebServices.java   | 34 +++++++++
 .../resourcemanager/webapp/dao/ConfInfo.java    | 72 ++++++++++++++++++++
 .../TestRMWebServicesConfigurationMutation.java | 40 +++++++++++
 .../src/site/markdown/ResourceManagerRest.md    | 40 +++++++++++
 6 files changed, 198 insertions(+)
----------------------------------------------------------------------
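
For context, a minimal client sketch for the new read-only endpoint. The host, port and user name are placeholders, and this assumes an unsecured cluster where the user.name query parameter suffices (as in the test below) and the RM runs a mutable-conf scheduler; on any other scheduler the endpoint returns 400:

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

/** Minimal sketch: read the scheduler configuration over the new endpoint. */
public class SchedulerConfReader {
  public static void main(String[] args) throws Exception {
    // rm-host:8088 and user.name=admin are placeholders for your cluster.
    URL url = new URL(
        "http://rm-host:8088/ws/v1/cluster/scheduler-conf?user.name=admin");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    // 200 on success; 400 if the scheduler is not a mutable-conf scheduler.
    System.out.println("HTTP " + conn.getResponseCode());
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line); // {"property":[{"name":"...","value":"..."}]}
      }
    }
  }
}
```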


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d352f167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index 2b9b25a..6e56f3d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 
@@ -59,6 +60,12 @@ public interface MutableConfigurationProvider {
   void confirmPendingMutation(boolean isValid) throws Exception;
 
   /**
+   * Returns scheduler configuration cached in this provider.
+   * @return scheduler configuration.
+   */
+  Configuration getConfiguration();
+
+  /**
    * Closes the configuration provider, releasing any required resources.
    * @throws IOException on failure to close
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d352f167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
index 9c3bf9d..51de437 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -126,6 +126,11 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
   }
 
   @Override
+  public Configuration getConfiguration() {
+    return new Configuration(schedConf);
+  }
+
+  @Override
   public ConfigurationMutationACLPolicy getAclMutationPolicy() {
     return aclMutationPolicy;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d352f167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 7752fa2..0117376 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -188,6 +188,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.StatisticsItemInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ConfInfo;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.server.webapp.WebServices;
@@ -2373,6 +2374,39 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   }
 
   @GET
+  @Path(RMWSConsts.SCHEDULER_CONF)
+  @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
+      MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  public Response getSchedulerConfiguration(@Context HttpServletRequest hsr)
+      throws AuthorizationException {
+    // Only admin user is allowed to read scheduler conf,
+    // in order to avoid leaking sensitive info, such as ACLs
+    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+    initForWritableEndpoints(callerUGI, true);
+
+    ResourceScheduler scheduler = rm.getResourceScheduler();
+    if (scheduler instanceof MutableConfScheduler
+        && ((MutableConfScheduler) scheduler).isConfigurationMutable()) {
+      MutableConfigurationProvider mutableConfigurationProvider =
+          ((MutableConfScheduler) scheduler).getMutableConfProvider();
+      // We load the cached configuration from the configuration store;
+      // these should be the conf properties used by the scheduler.
+      Configuration schedulerConf = mutableConfigurationProvider
+          .getConfiguration();
+      return Response.status(Status.OK)
+          .entity(new ConfInfo(schedulerConf))
+          .build();
+    } else {
+      return Response.status(Status.BAD_REQUEST).entity(
+          "This API only supports to retrieve scheduler configuration"
+              + " from a mutable-conf scheduler, underneath scheduler "
+              + scheduler.getClass().getSimpleName()
+              + " is not an instance of MutableConfScheduler")
+          .build();
+    }
+  }
+
+  @GET
   @Path(RMWSConsts.CHECK_USER_ACCESS_TO_QUEUE)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
                 MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d352f167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ConfInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ConfInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ConfInfo.java
new file mode 100644
index 0000000..bfc450b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ConfInfo.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import org.apache.hadoop.conf.Configuration;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.ArrayList;
+
+@XmlRootElement(name = "configuration")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class ConfInfo {
+
+  protected ArrayList<ConfItem> property = new ArrayList<>();
+
+  public ConfInfo() {
+  } // JAXB needs this
+
+  public ConfInfo(Configuration conf) {
+    conf.forEach(entry ->
+        add(new ConfItem(entry.getKey(), entry.getValue())));
+  }
+
+  public void add(ConfItem confItem) {
+    property.add(confItem);
+  }
+
+  public ArrayList<ConfItem> getItems() {
+    return property;
+  }
+
+  @XmlAccessorType(XmlAccessType.FIELD)
+  public static class ConfItem {
+
+    private String name;
+    private String value;
+
+    public ConfItem() {
+      // JAXB needs this
+    }
+
+    public ConfItem(String name, String value){
+      this.name = name;
+      this.value = value;
+    }
+
+    public String getKey() {
+      return name;
+    }
+
+    public String getValue() {
+      return value;
+    }
+  }
+}
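
For a sense of what this DAO renders, a small sketch that marshals a ConfInfo directly (it assumes hadoop-common and this resourcemanager module are on the classpath; the single property is illustrative). The endpoint's XML response is built from these same JAXB annotations:

```java
import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ConfInfo;

/** Marshals a ConfInfo the way the XML variant of the endpoint would. */
public class ConfInfoDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(false); // no default resources
    conf.set("yarn.scheduler.capacity.root.queues", "default");
    Marshaller m = JAXBContext.newInstance(ConfInfo.class).createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
    // Prints <configuration><property><name>...</name><value>...</value>...
    m.marshal(new ConfInfo(conf), System.out);
  }
}
```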

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d352f167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
index 99b5648..3e2542c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
@@ -39,6 +39,9 @@ import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
 import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -56,6 +59,7 @@ import java.util.Map;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertNotNull;
 
 /**
  * Test scheduler configuration mutation via REST API.
@@ -157,8 +161,40 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
         .contextPath("jersey-guice-filter").servletPath("/").build());
   }
 
+  private CapacitySchedulerConfiguration getSchedulerConf()
+      throws JSONException {
+    WebResource r = resource();
+    ClientResponse response =
+        r.path("ws").path("v1").path("cluster")
+            .queryParam("user.name", userName).path("scheduler-conf")
+            .accept(MediaType.APPLICATION_JSON)
+            .get(ClientResponse.class);
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    JSONObject json = response.getEntity(JSONObject.class);
+    JSONArray items = (JSONArray) json.get("property");
+    CapacitySchedulerConfiguration parsedConf =
+        new CapacitySchedulerConfiguration();
+    for (int i=0; i<items.length(); i++) {
+      JSONObject obj = (JSONObject) items.get(i);
+      parsedConf.set(obj.get("name").toString(),
+          obj.get("value").toString());
+    }
+    return parsedConf;
+  }
+
+  @Test
+  public void testGetSchedulerConf() throws Exception {
+    CapacitySchedulerConfiguration orgConf = getSchedulerConf();
+    assertNotNull(orgConf);
+    assertEquals(3, orgConf.getQueues("root").length);
+  }
+
   @Test
   public void testAddNestedQueue() throws Exception {
+    CapacitySchedulerConfiguration orgConf = getSchedulerConf();
+    assertNotNull(orgConf);
+    assertEquals(3, orgConf.getQueues("root").length);
+
     WebResource r = resource();
 
     ClientResponse response;
@@ -198,6 +234,10 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
         0.01f);
     assertEquals(75.0f, newCSConf.getNonLabeledQueueCapacity("root.d.d2"),
         0.01f);
+
+    CapacitySchedulerConfiguration newConf = getSchedulerConf();
+    assertNotNull(newConf);
+    assertEquals(4, newConf.getQueues("root").length);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d352f167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
index 24c2319..041af4c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
@@ -4964,6 +4964,7 @@ Please note that this feature is currently in the alpha stage and is subject to
 
 ### HTTP Operations Supported
 
+      * GET
       * PUT
 
 ### Elements of the *sched-conf* object
@@ -4975,6 +4976,45 @@ Please note that this feature is currently in the alpha stage and is subject to
 | remove-queue | string | Full path name of a queue to remove |
 | global-updates | map | Map of key value pairs to update scheduler's global configuration |
 
+### GET Request Examples
+
+Get requests are used to retrieve the scheduler's configuration that is currently loaded into the scheduler's context.
+
+**XML response**
+
+HTTP Request:
+
+      Accept: application/xml
+      Content-Type: application/xml
+      GET http://rm-http-address:port/ws/v1/cluster/scheduler-conf
+
+Response Header:
+
+      HTTP/1.1 200 OK
+      Content-Type: application/xml; charset=utf-8
+      Transfer-Encoding: chunked
+
+Response Body:
+
+
+```xml
+      <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+      <configuration>
+        <property>
+          <name>yarn.scheduler.capacity.root.queues</name>
+          <value>default</value>
+        </property>
+        <property>
+          <name>yarn.scheduler.capacity.maximum-applications</name>
+          <value>10000</value>
+        </property>
+        <property>
+          <name>yarn.scheduler.capacity.root.default.capacity</name>
+          <value>100</value>
+        </property>
+      </configuration>
+```
+
 ### PUT Request Examples
 
 Put requests are used to modify the scheduler configuration. A successful mutation results in a 200 response. A malformed request or one which resulted in an invalid scheduler configuration results in a 400 response.


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[14/25] hadoop git commit: YARN-8136. Add version attribute to site doc examples and quickstart. (Eric Yang via wangda)

Posted by su...@apache.org.
YARN-8136. Add version attribute to site doc examples and quickstart. (Eric Yang via wangda)

Change-Id: I4541b239f490ca0a6edf9698e0d3deaf83669151


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8244abb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8244abb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8244abb7

Branch: refs/heads/HDFS-12943
Commit: 8244abb7aeb768b73682b8c9a26516a9cf06bca5
Parents: 344c335
Author: Wangda Tan <wa...@apache.org>
Authored: Thu Aug 9 11:03:46 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Thu Aug 9 11:04:02 2018 -0700

----------------------------------------------------------------------
 .../hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md | 5 ++++-
 .../src/site/markdown/yarn-service/QuickStart.md                | 1 +
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8244abb7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
index 03fec79..73e00b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
@@ -30,6 +30,7 @@ Note this example requires registry DNS.
 ```
 {
   "name": "httpd-service",
+  "version": "1.0",
   "lifetime": "3600",
   "components": [
     {
@@ -169,9 +170,10 @@ Then visit port 8080 for each IP to view the pages.
 Docker images may have been built with ENTRYPOINT to enable start-up of the docker image without any parameters.
 When passing parameters to an ENTRYPOINT-enabled image, `launch_command` is delimited by comma (,).
 
+```
 {
   "name": "sleeper-service",
-  "version": "1",
+  "version": "1.0",
   "components" :
   [
     {
@@ -198,3 +200,4 @@ When passing parameters to ENTRYPOINT enabled image, `launch_command` is delimit
     }
   ]
 }
+```

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8244abb7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md
index e144320..bba9bb8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md
@@ -40,6 +40,7 @@ Below is a simple service definition that launches sleep containers on YARN by w
 ```
 {
   "name": "sleeper-service",
+  "version": "1.0",
   "components" : 
     [
       {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[02/25] hadoop git commit: HDDS-267. Handle consistency issues during container update/close.

Posted by su...@apache.org.
HDDS-267. Handle consistency issues during container update/close.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d81cd361
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d81cd361
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d81cd361

Branch: refs/heads/HDFS-12943
Commit: d81cd3611a449bcd7970ff2f1392a5e868e28f7e
Parents: 8478732
Author: Hanisha Koneru <ha...@apache.org>
Authored: Wed Aug 8 16:47:25 2018 -0700
Committer: Hanisha Koneru <ha...@apache.org>
Committed: Wed Aug 8 16:47:25 2018 -0700

----------------------------------------------------------------------
 .../container/common/impl/ContainerData.java    |  1 -
 .../container/keyvalue/KeyValueContainer.java   | 54 ++++++-------------
 .../container/keyvalue/KeyValueHandler.java     | 21 ++++++--
 .../keyvalue/TestKeyValueContainer.java         | 16 ------
 .../container/keyvalue/TestKeyValueHandler.java | 55 ++++++++++++++++----
 .../common/impl/TestContainerPersistence.java   |  8 ---
 6 files changed, 80 insertions(+), 75 deletions(-)
----------------------------------------------------------------------
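
The center of the change is the create-vs-update split when committing the temporary .container file (see the KeyValueContainer diff below). A standalone sketch of that pattern, using plain java.io/java.nio stand-ins for NativeIO.renameTo:

```java
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;

/** Sketch: commit a temp file as either a fresh create or an update. */
public class ContainerFileCommit {
  static void commit(File temp, File target, boolean isCreate)
      throws IOException {
    if (isCreate) {
      // Create: the target must not exist yet. A rename that refuses to
      // overwrite (NativeIO.renameTo in the patch; File.renameTo here as a
      // rough stand-in) turns a duplicate create into an error instead of
      // silently clobbering an existing .container file.
      if (!temp.renameTo(target)) {
        throw new IOException("Create failed; does " + target + " exist?");
      }
    } else {
      // Update: the target is expected to exist, so replace it in place.
      Files.move(temp.toPath(), target.toPath(),
          StandardCopyOption.REPLACE_EXISTING);
    }
  }
}
```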


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81cd361/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index 5803628..26954a7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -257,7 +257,6 @@ public abstract class ContainerData {
    * Marks this container as closed.
    */
   public synchronized void closeContainer() {
-    // TODO: closed or closing here
     setState(ContainerLifeCycleState.CLOSED);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81cd361/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 353fe4f..c96f997 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -138,7 +138,7 @@ public class KeyValueContainer implements Container {
 
       // Create .container file
       File containerFile = getContainerFile();
-      writeToContainerFile(containerFile, true);
+      createContainerFile(containerFile);
 
     } catch (StorageContainerException ex) {
       if (containerMetaDataPath != null && containerMetaDataPath.getParentFile()
@@ -165,11 +165,11 @@ public class KeyValueContainer implements Container {
   }
 
   /**
-   * Creates .container file and checksum file.
+   * Writes to .container file.
    *
-   * @param containerFile
-   * @param isCreate true if we are creating a new container file and false if
-   *                we are updating an existing container file.
+   * @param containerFile container file name
+   * @param isCreate True if creating a new file. False if updating an
+   *                 existing container file.
    * @throws StorageContainerException
    */
   private void writeToContainerFile(File containerFile, boolean isCreate)
@@ -181,19 +181,18 @@ public class KeyValueContainer implements Container {
       ContainerDataYaml.createContainerFile(
           ContainerType.KeyValueContainer, containerData, tempContainerFile);
 
+      // NativeIO.renameTo is an atomic function. But it might fail if the
+      // container file already exists. Hence, we handle the two cases
+      // separately.
       if (isCreate) {
-        // When creating a new container, .container file should not exist
-        // already.
         NativeIO.renameTo(tempContainerFile, containerFile);
       } else {
-        // When updating a container, the .container file should exist. If
-        // not, the container is in an inconsistent state.
         Files.move(tempContainerFile.toPath(), containerFile.toPath(),
             StandardCopyOption.REPLACE_EXISTING);
       }
 
     } catch (IOException ex) {
-      throw new StorageContainerException("Error during creation of " +
+      throw new StorageContainerException("Error while creating/ updating " +
           ".container file. ContainerID: " + containerId, ex,
           CONTAINER_FILES_CREATE_ERROR);
     } finally {
@@ -206,27 +205,14 @@ public class KeyValueContainer implements Container {
     }
   }
 
+  private void createContainerFile(File containerFile)
+      throws StorageContainerException {
+    writeToContainerFile(containerFile, true);
+  }
 
   private void updateContainerFile(File containerFile)
       throws StorageContainerException {
-
-    long containerId = containerData.getContainerID();
-
-    if (!containerFile.exists()) {
-      throw new StorageContainerException("Container is an Inconsistent " +
-          "state, missing .container file. ContainerID: " + containerId,
-          INVALID_CONTAINER_STATE);
-    }
-
-    try {
-      writeToContainerFile(containerFile, false);
-    } catch (IOException e) {
-      //TODO : Container update failure is not handled currently. Might
-      // lead to loss of .container file. When Update container feature
-      // support is added, this failure should also be handled.
-      throw new StorageContainerException("Container update failed. " +
-          "ContainerID: " + containerId, CONTAINER_FILES_CREATE_ERROR);
-    }
+    writeToContainerFile(containerFile, false);
   }
 
 
@@ -256,19 +242,15 @@ public class KeyValueContainer implements Container {
     // complete this action
     try {
       writeLock();
-      long containerId = containerData.getContainerID();
-      if(!containerData.isValid()) {
-        LOG.debug("Invalid container data. Container Id: {}", containerId);
-        throw new StorageContainerException("Invalid container data. " +
-            "ContainerID: " + containerId, INVALID_CONTAINER_STATE);
-      }
+
       containerData.closeContainer();
       File containerFile = getContainerFile();
-
       // update the new container data to .container File
       updateContainerFile(containerFile);
 
     } catch (StorageContainerException ex) {
+      // Failed to update .container file. Reset the state to CLOSING
+      containerData.setState(ContainerLifeCycleState.CLOSING);
       throw ex;
     } finally {
       writeUnlock();
@@ -332,8 +314,6 @@ public class KeyValueContainer implements Container {
       // update the new container data to .container File
       updateContainerFile(containerFile);
     } catch (StorageContainerException  ex) {
-      // TODO:
-      // On error, reset the metadata.
       containerData.setMetadata(oldMetadata);
       throw ex;
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81cd361/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index a281a53..f4699dd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerLifeCycleState;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerType;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .GetSmallFileRequestProto;
@@ -77,6 +79,8 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantLock;
 
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.CLOSED_CONTAINER_RETRY;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .Result.CONTAINER_INTERNAL_ERROR;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .Result.CLOSED_CONTAINER_IO;
@@ -378,8 +382,18 @@ public class KeyValueHandler extends Handler {
       return ContainerUtils.malformedRequest(request);
     }
 
+    long containerID = kvContainer.getContainerData().getContainerID();
+    ContainerLifeCycleState containerState = kvContainer.getContainerState();
+
     try {
-      checkContainerOpen(kvContainer);
+      if (containerState == ContainerLifeCycleState.CLOSED) {
+        throw new StorageContainerException("Container already closed. " +
+            "ContainerID: " + containerID, CLOSED_CONTAINER_RETRY);
+      } else if (containerState == ContainerLifeCycleState.INVALID) {
+        LOG.debug("Invalid container data. ContainerID: {}", containerID);
+        throw new StorageContainerException("Invalid container data. " +
+            "ContainerID: " + containerID, INVALID_CONTAINER_STATE);
+      }
 
       KeyValueContainerData kvData = kvContainer.getContainerData();
 
@@ -773,10 +787,9 @@ public class KeyValueHandler extends Handler {
   private void checkContainerOpen(KeyValueContainer kvContainer)
       throws StorageContainerException {
 
-    ContainerProtos.ContainerLifeCycleState containerState =
-        kvContainer.getContainerState();
+    ContainerLifeCycleState containerState = kvContainer.getContainerState();
 
-    if (containerState == ContainerProtos.ContainerLifeCycleState.OPEN) {
+    if (containerState == ContainerLifeCycleState.OPEN) {
       return;
     } else {
       String msg = "Requested operation not allowed as ContainerState is " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81cd361/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 37c7f8a..6ff2eca 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume
     .RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker;
@@ -243,21 +242,6 @@ public class TestKeyValueContainer {
   }
 
   @Test
-  public void testCloseInvalidContainer() throws Exception {
-    try {
-      keyValueContainerData.setState(ContainerProtos.ContainerLifeCycleState
-          .INVALID);
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-      keyValueContainer.close();
-      fail("testCloseInvalidContainer failed");
-    } catch (StorageContainerException ex) {
-      assertEquals(ContainerProtos.Result.INVALID_CONTAINER_STATE,
-          ex.getResult());
-      GenericTestUtils.assertExceptionContains("Invalid container data", ex);
-    }
-  }
-
-  @Test
   public void testUpdateContainer() throws IOException {
     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
     Map<String, String> metadata = new HashMap<>();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81cd361/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index 747687b..ce12e1f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -25,12 +25,16 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.scm.container.common.helpers
+    .StorageContainerException;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestRule;
@@ -59,8 +63,8 @@ public class TestKeyValueHandler {
   @Rule
   public TestRule timeout = new Timeout(300000);
 
-  private HddsDispatcher dispatcher;
-  private KeyValueHandler handler;
+  private static HddsDispatcher dispatcher;
+  private static KeyValueHandler handler;
 
   private final static String DATANODE_UUID = UUID.randomUUID().toString();
 
@@ -69,14 +73,11 @@ public class TestKeyValueHandler {
 
   private static final long DUMMY_CONTAINER_ID = 9999;
 
-  @Test
-  /**
-   * Test that Handler handles different command types correctly.
-   */
-  public void testHandlerCommandHandling() throws Exception{
+  @BeforeClass
+  public static void setup() throws StorageContainerException {
     // Create mock HddsDispatcher and KeyValueHandler.
-    this.handler = Mockito.mock(KeyValueHandler.class);
-    this.dispatcher = Mockito.mock(HddsDispatcher.class);
+    handler = Mockito.mock(KeyValueHandler.class);
+    dispatcher = Mockito.mock(HddsDispatcher.class);
     Mockito.when(dispatcher.getHandler(any())).thenReturn(handler);
     Mockito.when(dispatcher.dispatch(any())).thenCallRealMethod();
     Mockito.when(dispatcher.getContainer(anyLong())).thenReturn(
@@ -84,6 +85,13 @@ public class TestKeyValueHandler {
     Mockito.when(handler.handle(any(), any())).thenCallRealMethod();
     doCallRealMethod().when(dispatcher).setMetricsForTesting(any());
     dispatcher.setMetricsForTesting(Mockito.mock(ContainerMetrics.class));
+  }
+
+  @Test
+  /**
+   * Test that Handler handles different command types correctly.
+   */
+  public void testHandlerCommandHandling() throws Exception {
 
     // Test Create Container Request handling
     ContainerCommandRequestProto createContainerRequest =
@@ -250,4 +258,33 @@ public class TestKeyValueHandler {
   }
 
 
+  @Test
+  public void testCloseInvalidContainer() {
+    long containerID = 1234L;
+    Configuration conf = new Configuration();
+    KeyValueContainerData kvData = new KeyValueContainerData(containerID, 1);
+    KeyValueContainer container = new KeyValueContainer(kvData, conf);
+    kvData.setState(ContainerProtos.ContainerLifeCycleState.INVALID);
+
+    // Create Close container request
+    ContainerCommandRequestProto closeContainerRequest =
+        ContainerProtos.ContainerCommandRequestProto.newBuilder()
+            .setCmdType(ContainerProtos.Type.CloseContainer)
+            .setContainerID(DUMMY_CONTAINER_ID)
+            .setDatanodeUuid(DATANODE_UUID)
+            .setCloseContainer(ContainerProtos.CloseContainerRequestProto
+                .getDefaultInstance())
+            .build();
+    dispatcher.dispatch(closeContainerRequest);
+
+    Mockito.when(handler.handleCloseContainer(any(), any()))
+        .thenCallRealMethod();
+    // Closing invalid container should return error response.
+    ContainerProtos.ContainerCommandResponseProto response =
+        handler.handleCloseContainer(closeContainerRequest, container);
+
+    Assert.assertTrue("Close container should return Invalid container error",
+        response.getResult().equals(
+            ContainerProtos.Result.INVALID_CONTAINER_STATE));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81cd361/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 5322c8e..016b94c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -775,14 +775,6 @@ public class TestContainerPersistence {
     Assert.assertEquals("bilbo_new_1",
         actualNewData.getMetadata().get("owner"));
 
-    // Update a non-existing container
-    exception.expect(StorageContainerException.class);
-    exception.expectMessage("Container is an Inconsistent " +
-        "state, missing .container file.");
-    Container nonExistentContainer = new KeyValueContainer(
-        new KeyValueContainerData(RandomUtils.nextLong(),
-            ContainerTestHelper.CONTAINER_MAX_SIZE_GB), conf);
-    nonExistentContainer.update(newMetadata, false);
   }
 
   private KeyData writeKeyHelper(BlockID blockID)


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[18/25] hadoop git commit: YARN-8521. NPE in AllocationTagsManager when a container is removed more than once. Contributed by Weiwei Yang.

Posted by su...@apache.org.
YARN-8521. NPE in AllocationTagsManager when a container is removed more than once. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08d50606
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08d50606
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08d50606

Branch: refs/heads/HDFS-12943
Commit: 08d5060605af81a3d6048044176dc656c0dad56c
Parents: f5dbbfe
Author: Weiwei Yang <ww...@apache.org>
Authored: Fri Aug 10 08:32:02 2018 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Fri Aug 10 08:32:02 2018 +0800

----------------------------------------------------------------------
 .../constraint/AllocationTagsManager.java       |  5 ++
 .../constraint/TestAllocationTagsManager.java   | 37 ++++++++++++++
 .../TestPlacementConstraintsUtil.java           | 51 ++++++++++----------
 3 files changed, 68 insertions(+), 25 deletions(-)
----------------------------------------------------------------------
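
A standalone sketch of the null-safe decrement-or-remove pattern applied by the fix (class and method names are illustrative; the real code operates on the per-node/per-rack tag maps shown below):

```java
import java.util.HashMap;
import java.util.Map;

/** Sketch: reference-counted tags with a null-safe remove. */
public class TagCounter {
  private final Map<String, Long> counts = new HashMap<>();

  void add(String tag) {
    counts.merge(tag, 1L, Long::sum);
  }

  void remove(String tag) {
    Long count = counts.get(tag);
    if (count == null) {
      // Tag already gone (e.g. the container was removed twice): warn and
      // return instead of unboxing null, which is what caused the NPE.
      System.err.println("Tag " + tag + " no longer exists; ignoring remove.");
      return;
    }
    if (count > 1) {
      counts.put(tag, count - 1);
    } else {
      counts.remove(tag);
    }
  }
}
```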


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08d50606/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index a690767..6f160b6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -115,6 +115,11 @@ public class AllocationTagsManager {
 
     private void removeTagFromInnerMap(Map<String, Long> innerMap, String tag) {
       Long count = innerMap.get(tag);
+      if (count == null) {
+        LOG.warn("Trying to remove tags, however the tag " + tag
+            + " no longer exists on this node/rack.");
+        return;
+      }
       if (count > 1) {
         innerMap.put(tag, count - 1);
       } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08d50606/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
index 3f2aaed..9095ac1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
 
 import com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
@@ -38,6 +39,7 @@ import org.junit.Test;
 import org.mockito.Mockito;
 
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
@@ -61,6 +63,41 @@ public class TestAllocationTagsManager {
   }
 
   @Test
+  public void testMultipleAddRemoveContainer() {
+    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
+
+    NodeId nodeId = NodeId.fromString("host1:123");
+    ContainerId cid1 = TestUtils.getMockContainerId(1, 1);
+    ContainerId cid2 = TestUtils.getMockContainerId(1, 2);
+    ContainerId cid3 = TestUtils.getMockContainerId(1, 3);
+    Set<String> tags1 = ImmutableSet.of("mapper", "reducer");
+    Set<String> tags2 = ImmutableSet.of("mapper");
+    Set<String> tags3 = ImmutableSet.of("zk");
+
+    // node - mapper : 2
+    //      - reduce : 1
+    atm.addContainer(nodeId, cid1, tags1);
+    atm.addContainer(nodeId, cid2, tags2);
+    atm.addContainer(nodeId, cid3, tags3);
+    Assert.assertEquals(2L,
+        (long) atm.getAllocationTagsWithCount(nodeId).get("mapper"));
+    Assert.assertEquals(1L,
+        (long) atm.getAllocationTagsWithCount(nodeId).get("reducer"));
+
+    // remove container1
+    atm.removeContainer(nodeId, cid1, tags1);
+    Assert.assertEquals(1L,
+        (long) atm.getAllocationTagsWithCount(nodeId).get("mapper"));
+    Assert.assertNull(atm.getAllocationTagsWithCount(nodeId).get("reducer"));
+
+    // remove the same container again; the reducer no longer exists,
+    // so make sure there is no NPE here
+    atm.removeContainer(nodeId, cid1, tags1);
+    Assert.assertNull(atm.getAllocationTagsWithCount(nodeId).get("mapper"));
+    Assert.assertNull(atm.getAllocationTagsWithCount(nodeId).get("reducer"));
+  }
+
+  @Test
   public void testAllocationTagsManagerSimpleCases()
       throws InvalidAllocationTagsQueryException {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08d50606/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
index dc61981..5dbdc8a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
@@ -163,6 +163,11 @@ public class TestPlacementConstraintsUtil {
         ApplicationAttemptId.newInstance(appId, 0), 0);
   }
 
+  private ContainerId newContainerId(ApplicationId appId, int containerId) {
+    return ContainerId.newContainerId(
+        ApplicationAttemptId.newInstance(appId, 0), containerId);
+  }
+
   private SchedulerNode newSchedulerNode(String hostname, String rackName,
       NodeId nodeId) {
     SchedulerNode node = mock(SchedulerNode.class);
@@ -271,12 +276,10 @@ public class TestPlacementConstraintsUtil {
     SchedulerNode schedulerNode3 =newSchedulerNode(n3_r2.getHostName(),
         n3_r2.getRackName(), n3_r2.getNodeID());
 
-    ContainerId ca = ContainerId
-        .newContainerId(ApplicationAttemptId.newInstance(appId1, 0), 0);
+    ContainerId ca = newContainerId(appId1, 0);
     tm.addContainer(n0_r1.getNodeID(), ca, ImmutableSet.of("A"));
 
-    ContainerId cb = ContainerId
-        .newContainerId(ApplicationAttemptId.newInstance(appId1, 0), 0);
+    ContainerId cb = newContainerId(appId1, 1);
     tm.addContainer(n1_r1.getNodeID(), cb, ImmutableSet.of("B"));
 
     // n0 and n1 has A/B so they cannot satisfy the PC
@@ -297,11 +300,9 @@ public class TestPlacementConstraintsUtil {
      * n2: A(1), B(1)
      * n3:
      */
-    ContainerId ca1 = ContainerId
-        .newContainerId(ApplicationAttemptId.newInstance(appId1, 0), 0);
+    ContainerId ca1 = newContainerId(appId1, 2);
     tm.addContainer(n2_r2.getNodeID(), ca1, ImmutableSet.of("A"));
-    ContainerId cb1 = ContainerId
-        .newContainerId(ApplicationAttemptId.newInstance(appId1, 0), 0);
+    ContainerId cb1 = newContainerId(appId1, 3);
     tm.addContainer(n2_r2.getNodeID(), cb1, ImmutableSet.of("B"));
 
     // Only n2 has both A and B so only it can satisfy the PC
@@ -468,9 +469,9 @@ public class TestPlacementConstraintsUtil {
      *  n3: ""
      */
     tm.addContainer(n0r1.getNodeID(),
-        newContainerId(appId1), ImmutableSet.of("hbase-m"));
+        newContainerId(appId1, 1), ImmutableSet.of("hbase-m"));
     tm.addContainer(n2r2.getNodeID(),
-        newContainerId(appId1), ImmutableSet.of("hbase-rs"));
+        newContainerId(appId1, 2), ImmutableSet.of("hbase-rs"));
     Assert.assertEquals(1L, tm.getAllocationTagsWithCount(n0r1.getNodeID())
         .get("hbase-m").longValue());
     Assert.assertEquals(1L, tm.getAllocationTagsWithCount(n2r2.getNodeID())
@@ -504,7 +505,7 @@ public class TestPlacementConstraintsUtil {
      *  n3: hbase-rs(1)
      */
     tm.addContainer(n3r2.getNodeID(),
-        newContainerId(appId1), ImmutableSet.of("hbase-rs"));
+        newContainerId(appId1, 2), ImmutableSet.of("hbase-rs"));
     // n3 is qualified now because it is allocated with hbase-rs tag
     Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
         createSchedulingRequest(sourceTag1), schedulerNode3, pcm, tm));
@@ -518,7 +519,7 @@ public class TestPlacementConstraintsUtil {
      */
     // Place
     tm.addContainer(n2r2.getNodeID(),
-        newContainerId(appId1), ImmutableSet.of("spark"));
+        newContainerId(appId1, 3), ImmutableSet.of("spark"));
     // According to constraint, "zk" is allowed to be placed on a node
     // has "hbase-m" tag OR a node has both "hbase-rs" and "spark" tags.
     Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
@@ -552,9 +553,9 @@ public class TestPlacementConstraintsUtil {
      *  n3: ""
      */
     tm.addContainer(n0r1.getNodeID(),
-        newContainerId(appId1), ImmutableSet.of("hbase-m"));
+        newContainerId(appId1, 0), ImmutableSet.of("hbase-m"));
     tm.addContainer(n2r2.getNodeID(),
-        newContainerId(appId1), ImmutableSet.of("hbase-m"));
+        newContainerId(appId1, 1), ImmutableSet.of("hbase-m"));
     Assert.assertEquals(1L, tm.getAllocationTagsWithCount(n0r1.getNodeID())
         .get("hbase-m").longValue());
     Assert.assertEquals(1L, tm.getAllocationTagsWithCount(n2r2.getNodeID())
@@ -589,7 +590,7 @@ public class TestPlacementConstraintsUtil {
      */
     for (int i=0; i<4; i++) {
       tm.addContainer(n1r1.getNodeID(),
-          newContainerId(appId1), ImmutableSet.of("spark"));
+          newContainerId(appId1, i+2), ImmutableSet.of("spark"));
     }
     Assert.assertEquals(4L, tm.getAllocationTagsWithCount(n1r1.getNodeID())
         .get("spark").longValue());
@@ -633,19 +634,19 @@ public class TestPlacementConstraintsUtil {
      *  n3: ""
      */
     tm.addContainer(n0r1.getNodeID(),
-        newContainerId(application1), ImmutableSet.of("A"));
+        newContainerId(application1, 0), ImmutableSet.of("A"));
     tm.addContainer(n0r1.getNodeID(),
-        newContainerId(application2), ImmutableSet.of("A"));
+        newContainerId(application2, 1), ImmutableSet.of("A"));
     tm.addContainer(n1r1.getNodeID(),
-        newContainerId(application3), ImmutableSet.of("A"));
+        newContainerId(application3, 2), ImmutableSet.of("A"));
     tm.addContainer(n1r1.getNodeID(),
-        newContainerId(application3), ImmutableSet.of("A"));
+        newContainerId(application3, 3), ImmutableSet.of("A"));
     tm.addContainer(n1r1.getNodeID(),
-        newContainerId(application3), ImmutableSet.of("A"));
+        newContainerId(application3, 4), ImmutableSet.of("A"));
     tm.addContainer(n2r2.getNodeID(),
-        newContainerId(application1), ImmutableSet.of("A"));
+        newContainerId(application1, 5), ImmutableSet.of("A"));
     tm.addContainer(n2r2.getNodeID(),
-        newContainerId(application1), ImmutableSet.of("A"));
+        newContainerId(application1, 6), ImmutableSet.of("A"));
 
     SchedulerNode schedulerNode0 = newSchedulerNode(n0r1.getHostName(),
         n0r1.getRackName(), n0r1.getNodeID());
@@ -888,9 +889,9 @@ public class TestPlacementConstraintsUtil {
      *  n3: ""
      */
     tm.addContainer(n0r1.getNodeID(),
-        newContainerId(application1), ImmutableSet.of("hbase-m"));
+        newContainerId(application1, 0), ImmutableSet.of("hbase-m"));
     tm.addContainer(n2r2.getNodeID(),
-        newContainerId(application1), ImmutableSet.of("hbase-m"));
+        newContainerId(application1, 1), ImmutableSet.of("hbase-m"));
     Assert.assertEquals(1L, tm.getAllocationTagsWithCount(n0r1.getNodeID())
         .get("hbase-m").longValue());
     Assert.assertEquals(1L, tm.getAllocationTagsWithCount(n2r2.getNodeID())
@@ -958,7 +959,7 @@ public class TestPlacementConstraintsUtil {
      *  n3: ""
      */
     tm.addContainer(n0r1.getNodeID(),
-        newContainerId(application3), ImmutableSet.of("hbase-m"));
+        newContainerId(application3, 0), ImmutableSet.of("hbase-m"));
 
     // Anti-affinity to self/hbase-m
     Assert.assertFalse(PlacementConstraintsUtil


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[22/25] hadoop git commit: HDDS-327. CloseContainer command handler should not throw exception if the container is already closed. Contributed by Shashikant Banerjee.

Posted by su...@apache.org.
HDDS-327. CloseContainer command handler should not throw exception if the container is already closed. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a28624d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a28624d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a28624d2

Branch: refs/heads/HDFS-12943
Commit: a28624d2a42bf6ad35f859b6ba92fd541d72a67d
Parents: 398d895
Author: Hanisha Koneru <ha...@apache.org>
Authored: Fri Aug 10 11:43:22 2018 -0700
Committer: Hanisha Koneru <ha...@apache.org>
Committed: Fri Aug 10 11:43:22 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a28624d2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 8364a77..9ddb474 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -387,8 +387,8 @@ public class KeyValueHandler extends Handler {
 
     try {
       if (containerState == ContainerLifeCycleState.CLOSED) {
-        throw new StorageContainerException("Container already closed. " +
-            "ContainerID: " + containerID, CLOSED_CONTAINER_RETRY);
+        LOG.debug("Container {} is already closed.", containerID);
+        return ContainerUtils.getSuccessResponse(request);
       } else if (containerState == ContainerLifeCycleState.INVALID) {
         LOG.debug("Invalid container data. ContainerID: {}", containerID);
         throw new StorageContainerException("Invalid container data. " +


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[23/25] hadoop git commit: YARN-8520. Document best practice for user management. Contributed by Eric Yang

Posted by su...@apache.org.
YARN-8520. Document best practice for user management. Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7951c69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7951c69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7951c69

Branch: refs/heads/HDFS-12943
Commit: e7951c69cbc85604f72cdd3559122d4e2c1ea127
Parents: a28624d
Author: Shane Kumpf <sk...@apache.org>
Authored: Fri Aug 10 14:32:03 2018 -0600
Committer: Shane Kumpf <sk...@apache.org>
Committed: Fri Aug 10 14:32:03 2018 -0600

----------------------------------------------------------------------
 .../src/site/markdown/DockerContainers.md       | 194 ++++++++++++++++++-
 1 file changed, 193 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7951c69/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index d435495..447155c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -296,7 +296,8 @@ owner as the container user.  If the application owner is not a valid user
 in the Docker image, the application will fail. The container user is specified
 by the user's UID. If the user's UID is different between the NodeManager host
 and the Docker image, the container may be launched as the wrong user or may
-fail to launch because the UID does not exist.
+fail to launch because the UID does not exist.  See the
+[User Management in Docker Container](#user-management) section for more details.
 
 Second, the Docker image must have whatever is expected by the application
 in order to execute.  In the case of Hadoop (MapReduce or Spark), the Docker
@@ -412,6 +413,197 @@ the environment variable would be set to "/sys/fs/cgroup:/sys/fs/cgroup:ro".
 The destination path is not restricted, "/sys/fs/cgroup:/cgroup:ro" would also
 be valid given the example admin whitelist.
 
+<a href="#user-management"></a>User Management in Docker Container
+-----------------------------------
+
+YARN's Docker container support launches container processes using the uid:gid
+identity of the user, as defined on the NodeManager host. User and group name
+mismatches between the NodeManager host and container can lead to permission
+issues, failed container launches, or even security holes. Centralizing user and
+group management for both hosts and containers greatly reduces these risks. When
+running containerized applications on YARN, it is necessary to understand which
+uid:gid pair will be used to launch the container's process.
+
+As an example of what is meant by uid:gid pair, consider the following. By
+default, in non-secure mode, YARN will launch processes as the user `nobody`
+(see the table at the bottom of
+[Using CGroups with YARN](./NodeManagerCgroups.html) for how the run as user is
+determined in non-secure mode). On CentOS based systems, the `nobody` user's uid
+is `99` and the `nobody` group is `99`. As a result, YARN will call `docker run`
+with `--user 99:99`. If the `nobody` user does not have the uid `99` in the
+container, the launch may fail or have unexpected results.
+
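+As an illustration, with the CentOS defaults above, launching a container and
+running `id` is roughly equivalent to the following (image name illustrative):
+```
+$ docker run --user 99:99 centos:7 id
+uid=99(nobody) gid=99(nobody) groups=99(nobody)
+```
+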
+One exception to this rule is the use of Privileged Docker containers.
+Privileged containers will not set the uid:gid pair when launching the container
+and will honor the USER or GROUP entries in the Dockerfile. This allows running
+privileged containers as any user, which has security implications. Please
+understand these implications before enabling Privileged Docker containers.
+
+There are many ways to address user and group management. Docker, by default,
+will authenticate users against `/etc/passwd` (and `/etc/shadow`) within the
+container. Using the default `/etc/passwd` supplied in the Docker image is
+unlikely to contain the appropriate user entries and will result in launch
+failures. It is highly recommended to centralize user and group management.
+Several approaches to user and group management are outlined below.
+
+### Static user management
+
+The most basic approach to managing user and groups is to modify the user and
+group within the Docker image. This approach is only viable in non-secure mode
+where all container processes will be launched as a single known user, for
+instance `nobody`. In this case, the only requirement is that the uid:gid pair
+of the nobody user and group must match between the host and container. On a
+CentOS based system, this means that the nobody user in the container needs the
+UID `99` and the nobody group in the container needs GID `99`.
+
+One approach to change the UID and GID is by leveraging `usermod` and
+`groupmod`. The following sets the correct UID and GID for the nobody
+user/group.
+```
+usermod -u 99 nobody
+groupmod -g 99 nobody
+```
+
+This approach is not recommended beyond testing, given the inflexibility of
+adding users.
+
+### Bind mounting
+
+When organizations already have automation in place to create local users on
+each system, it may be appropriate to bind mount /etc/passwd and /etc/group
+into the container as an alternative to modifying the container image directly.
+To enable the ability to bind mount /etc/passwd and /etc/group, update
+`docker.allowed.ro-mounts` in `container-executor.cfg` to include those paths.
+When submitting the application, `YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS` will
+need to include `/etc/passwd:/etc/passwd:ro` and `/etc/group:/etc/group:ro`.
+
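+For example, the mounts variable would then carry both entries in its
+comma-separated list (a sketch; the rest of the launch environment is omitted):
+```
+YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS=/etc/passwd:/etc/passwd:ro,/etc/group:/etc/group:ro
+```
+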
+There are several challenges with this bind mount approach that need to be
+considered.
+
+1. Any users and groups defined in the image will be overwritten by the host's users and groups
+2. No users and groups can be added once the container is started, as /etc/passwd and /etc/group are immutable in the container. Do not mount these read-write, as doing so can render the host inoperable.
+
+This approach is not recommended beyond testing, given the inflexibility of
+modifying running containers.
+
+### SSSD
+
+An alternative approach that allows for centrally managing users and groups is
+SSSD. System Security Services Daemon (SSSD) provides access to different
+identity and authentication providers, such as LDAP or Active Directory.
+
+The traditional schema for Linux authentication is as follows:
+```
+application -> libpam -> pam_authenticate -> pam_unix.so -> /etc/passwd
+```
+
+If we use SSSD for user lookup, it becomes:
+```
+application -> libpam -> pam_authenticate -> pam_sss.so -> SSSD -> pam_unix.so -> /etc/passwd
+```
+
+We can bind-mount the UNIX sockets SSSD communicates over into the container.
+This will allow the SSSD client side libraries to authenticate against the SSSD
+running on the host. As a result, user information does not need to exist in
+/etc/passwd of the docker image and will instead be serviced by SSSD.
+
+Step by step configuration for host and container:
+
+1. Host config
+
+   - Install packages
+     ```
+     # yum -y install sssd-common sssd-proxy
+     ```
+   - create a PAM service for the container.
+     ```
+     # cat /etc/pam.d/sss_proxy
+     auth required pam_unix.so
+     account required pam_unix.so
+     password required pam_unix.so
+     session required pam_unix.so
+     ```
+   - create SSSD config file, /etc/sssd/sssd.conf
+     Please note that the permissions must be 0600 and the file must be owned by root:root.
+     ```
+     # cat /etc/sssd/sssd.conf
+     [sssd]
+     services = nss,pam
+     config_file_version = 2
+     domains = proxy
+     [nss]
+     [pam]
+     [domain/proxy]
+     id_provider = proxy
+     proxy_lib_name = files
+     proxy_pam_target = sss_proxy
+     ```
+   - start sssd
+     ```
+     # systemctl start sssd
+     ```
+   - verify a user can be retrieved with sssd
+     ```
+     # getent passwd -s sss localuser
+     ```
+
+2. Container setup
+
+   It's important to bind-mount the /var/lib/sss/pipes directory from the host to the container since SSSD UNIX sockets are located there.
+   ```
+   -v /var/lib/sss/pipes:/var/lib/sss/pipes:rw
+   ```
+
+3. Container config
+
+   All the steps below should be executed on the container itself.
+
+   - Install only the sss client libraries
+     ```
+     # yum -y install sssd-client
+     ```
+
+   - make sure sss is configured for passwd and group databases in
+     ```
+     /etc/nsswitch.conf
+     ```
+
+   - configure the PAM service that the application uses to call into SSSD
+     ```
+     # cat /etc/pam.d/system-auth
+     #%PAM-1.0
+     # This file is auto-generated.
+     # User changes will be destroyed the next time authconfig is run.
+     auth        required      pam_env.so
+     auth        sufficient    pam_unix.so try_first_pass nullok
+     auth        sufficient    pam_sss.so forward_pass
+     auth        required      pam_deny.so
+
+     account     required      pam_unix.so
+     account     [default=bad success=ok user_unknown=ignore] pam_sss.so
+     account     required      pam_permit.so
+
+     password    requisite     pam_pwquality.so try_first_pass local_users_only retry=3 authtok_type=
+     password    sufficient    pam_unix.so try_first_pass use_authtok nullok sha512 shadow
+     password    sufficient    pam_sss.so use_authtok
+     password    required      pam_deny.so
+
+     session     optional      pam_keyinit.so revoke
+     session     required      pam_limits.so
+     -session     optional      pam_systemd.so
+     session     [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid
+     session     required      pam_unix.so
+     session     optional      pam_sss.so
+     ```
+
+   - Save the Docker image and use it as the base image for your applications.
+
+   - test the Docker image by launching it in the YARN environment.
+     ```
+     $ id
+     uid=5000(localuser) gid=5000(localuser) groups=5000(localuser),1337(hadoop)
+     ```
+
 Privileged Container Security Consideration
 -------------------------------------------
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[03/25] hadoop git commit: HADOOP-15583. Stabilize S3A Assumed Role support. Contributed by Steve Loughran.

Posted by su...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md
index 3afd63f..8af0457 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md
@@ -29,7 +29,7 @@ assumed roles for different buckets.
 *IAM Assumed Roles are unlikely to be supported by third-party systems
 supporting the S3 APIs.*
 
-## Using IAM Assumed Roles
+## <a name="using_assumed_roles"></a> Using IAM Assumed Roles
 
 ### Before You Begin
 
@@ -40,6 +40,8 @@ are, how to configure their policies, etc.
 * You need a pair of long-lived IAM User credentials, not the root account set.
 * Have the AWS CLI installed, and test that it works there.
 * Give the role access to S3, and, if using S3Guard, to DynamoDB.
+* For working with data encrypted with SSE-KMS, the role must
+have access to the appropriate KMS keys.
 
 Trying to learn how IAM Assumed Roles work by debugging stack traces from
 the S3A client is "suboptimal".
@@ -51,7 +53,7 @@ To use assumed roles, the client must be configured to use the
 in the configuration option `fs.s3a.aws.credentials.provider`.
 
 This AWS Credential provider will read in the `fs.s3a.assumed.role` options needed to connect to the
-Session Token Service [Assumed Role API](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html),
+Security Token Service [Assumed Role API](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html),
 first authenticating with the full credentials, then assuming the specific role
 specified. It will then refresh this login at the configured rate of
 `fs.s3a.assumed.role.session.duration`
@@ -69,7 +71,7 @@ which uses `fs.s3a.access.key` and `fs.s3a.secret.key`.
 Note: although you can list other AWS credential providers in addition to the
 Assumed Role Credential Provider, it can only cause confusion.
 
-### <a name="using"></a> Using Assumed Roles
+### <a name="using"></a> Configuring Assumed Roles
 
 To use assumed roles, the S3A client credentials provider must be set to
 the `AssumedRoleCredentialProvider`, and `fs.s3a.assumed.role.arn` to
@@ -78,7 +80,6 @@ the previously created ARN.
 ```xml
 <property>
   <name>fs.s3a.aws.credentials.provider</name>
-  <value>org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider</value>
   <value>org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider</value>
 </property>
 
@@ -159,7 +160,18 @@ Here are the full set of configuration options.
   <name>fs.s3a.assumed.role.sts.endpoint</name>
   <value/>
   <description>
-    AWS Simple Token Service Endpoint. If unset, uses the default endpoint.
+    AWS Security Token Service Endpoint. If unset, uses the default endpoint.
+    Only used if AssumedRoleCredentialProvider is the AWS credential provider.
+  </description>
+</property>
+
+<property>
+  <name>fs.s3a.assumed.role.sts.endpoint.region</name>
+  <value>us-west-1</value>
+  <description>
+    AWS Security Token Service Endpoint's region;
+    Needed if fs.s3a.assumed.role.sts.endpoint points to an endpoint
+    other than the default one and the v4 signature is used.
     Only used if AssumedRoleCredentialProvider is the AWS credential provider.
   </description>
 </property>
 These lists represent the minimum set of actions which the client's principal
 must have in order to work with a bucket.
 
 
-### Read Access Permissions
+### <a name="read-permissions"></a> Read Access Permissions
 
 Permissions which must be granted when reading from a bucket:
 
 
-| Action | S3A operations |
-|--------|----------|
-| `s3:ListBucket` | `listStatus()`, `getFileStatus()` and elsewhere |
-| `s3:GetObject` | `getFileStatus()`, `open()` and elsewhere |
-| `s3:ListBucketMultipartUploads` |  Aborting/cleaning up S3A commit operations|
+```
+s3:Get*
+s3:ListBucket
+```
+
+When using S3Guard, the client needs the appropriate
+<a href="s3guard-permissions">DynamoDB access permissions</a>
+
+To use SSE-KMS encryption, the client needs the
+<a href="sse-kms-permissions">SSE-KMS Permissions</a> to access the
+KMS key(s).
+
+### <a name="write-permissions"></a> Write Access Permissions
+
+These permissions must all be granted for write access:
+
+```
+s3:Get*
+s3:Delete*
+s3:Put*
+s3:ListBucket
+s3:ListBucketMultipartUploads
+s3:AbortMultipartUpload
+```
+
+### <a name="sse-kms-permissions"></a> SSE-KMS Permissions
+
+When reading data encrypted using SSE-KMS, the client must have
+`kms:Decrypt` permission for the specific key a file was encrypted with.
+
+```
+kms:Decrypt
+```
+
+To write data using SSE-KMS, the client must have all the following permissions.
+
+```
+kms:Decrypt
+kms:GenerateDataKey
+```
 
+This includes renaming: renamed files are encrypted with the encryption key
+of the current S3A client; it must decrypt the source file first.
 
-The `s3:ListBucketMultipartUploads` is only needed when committing work
-via the [S3A committers](committers.html).
-However, it must be granted to the root path in order to safely clean up jobs.
-It is simplest to permit this in all buckets, even if it is only actually
-needed when writing data.
+If the caller doesn't have these permissions, the operation will fail with an
+`AccessDeniedException`: the S3 Store does not provide the specifics of
+the cause of the failure.
 
+### <a name="s3guard-permissions"></a> S3Guard Permissions
 
-### Write Access Permissions
+To use S3Guard, all clients must have a subset of the
+[AWS DynamoDB Permissions](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/api-permissions-reference.html).
 
-These permissions must *also* be granted for write access:
+To work with buckets protected with S3Guard, the client must have
+all the following rights on the DynamoDB Table used to protect that bucket.
 
+```
+dynamodb:BatchGetItem
+dynamodb:BatchWriteItem
+dynamodb:DeleteItem
+dynamodb:DescribeTable
+dynamodb:GetItem
+dynamodb:PutItem
+dynamodb:Query
+dynamodb:UpdateItem
+```
 
-| Action | S3A operations |
-|--------|----------|
-| `s3:PutObject` | `mkdir()`, `create()`, `rename()`, `delete()` |
-| `s3:DeleteObject` | `mkdir()`, `create()`, `rename()`, `delete()` |
-| `s3:AbortMultipartUpload` | S3A committer `abortJob()` and `cleanup()` operations |
-| `s3:ListMultipartUploadParts` | S3A committer `abortJob()` and `cleanup()` operations |
+This is true, *even if the client only has read access to the data*.
 
+For the `hadoop s3guard` table management commands, _extra_ permissions are required:
 
-### Mixed Permissions in a single S3 Bucket
+```
+dynamodb:CreateTable
+dynamodb:DescribeLimits
+dynamodb:DeleteTable
+dynamodb:Scan
+dynamodb:TagResource
+dynamodb:UntagResource
+dynamodb:UpdateTable
+```
+
+Without these permissions, tables cannot be created, destroyed or have their IO capacity
+changed through the `s3guard set-capacity` call.
+The `dynamodb:Scan` permission is needed for `s3guard prune`.
+
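+For example (bucket name and capacity values are illustrative):
+```
+hadoop s3guard set-capacity -read 20 -write 20 s3a://example-bucket
+hadoop s3guard prune -days 7 s3a://example-bucket
+```
+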
+The `dynamodb:CreateTable` permission is needed by a client if it tries to
+create the DynamoDB table on startup, that is, when
+`fs.s3a.s3guard.ddb.table.create` is `true` and the table does not already exist.
+
+### <a name="mixed-permissions"></a> Mixed Permissions in a single S3 Bucket
 
 Mixing permissions down the "directory tree" is limited
 only to the extent of supporting writeable directories under
@@ -274,7 +348,7 @@ This example has the base bucket read only, and a directory underneath,
     "Action" : [
       "s3:ListBucket",
       "s3:ListBucketMultipartUploads",
-      "s3:GetObject"
+      "s3:Get*"
       ],
     "Resource" : "arn:aws:s3:::example-bucket/*"
   }, {
@@ -320,7 +394,7 @@ the command line before trying to use the S3A client.
 `hadoop fs -mkdir -p s3a://bucket/path/p1/`
 
 
-### <a name="no_role"></a>IOException: "Unset property fs.s3a.assumed.role.arn"
+### <a name="no_role"></a> IOException: "Unset property fs.s3a.assumed.role.arn"
 
 The Assumed Role Credential Provider is enabled, but `fs.s3a.assumed.role.arn` is unset.
 
@@ -339,7 +413,7 @@ java.io.IOException: Unset property fs.s3a.assumed.role.arn
   at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:474)
 ```
 
-### <a name="not_authorized_for_assumed_role"></a>"Not authorized to perform sts:AssumeRole"
+### <a name="not_authorized_for_assumed_role"></a> "Not authorized to perform sts:AssumeRole"
 
 This can arise if the role ARN set in `fs.s3a.assumed.role.arn` is invalid
 or one to which the caller has no access.
@@ -399,7 +473,8 @@ Caused by: com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceExc
 The value of `fs.s3a.assumed.role.session.duration` is out of range.
 
 ```
-java.lang.IllegalArgumentException: Assume Role session duration should be in the range of 15min - 1Hr
+java.lang.IllegalArgumentException: Assume Role session duration should be in the range of 15min
+- 1Hr
   at com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider$Builder.withRoleSessionDurationSeconds(STSAssumeRoleSessionCredentialsProvider.java:437)
   at org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:86)
 ```
@@ -603,7 +678,7 @@ Caused by: com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceExc
 
 ### <a name="invalid_token"></a> `AccessDeniedException/InvalidClientTokenId`: "The security token included in the request is invalid"
 
-The credentials used to authenticate with the AWS Simple Token Service are invalid.
+The credentials used to authenticate with the AWS Security Token Service are invalid.
 
 ```
 [ERROR] Failures:
@@ -682,26 +757,7 @@ org.apache.hadoop.fs.s3a.AWSBadRequestException: Instantiate org.apache.hadoop.f
   at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3354)
   at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:474)
   at org.apache.hadoop.fs.Path.getFileSystem(Path.java:361)
-  at org.apache.hadoop.fs.s3a.ITestAssumeRole.lambda$expectFileSystemFailure$0(ITestAssumeRole.java:70)
-  at org.apache.hadoop.fs.s3a.ITestAssumeRole.lambda$interceptC$1(ITestAssumeRole.java:84)
-  at org.apache.hadoop.test.LambdaTestUtils.intercept(LambdaTestUtils.java:491)
-  at org.apache.hadoop.test.LambdaTestUtils.intercept(LambdaTestUtils.java:377)
-  at org.apache.hadoop.test.LambdaTestUtils.intercept(LambdaTestUtils.java:446)
-  at org.apache.hadoop.fs.s3a.ITestAssumeRole.interceptC(ITestAssumeRole.java:82)
-  at org.apache.hadoop.fs.s3a.ITestAssumeRole.expectFileSystemFailure(ITestAssumeRole.java:68)
-  at org.apache.hadoop.fs.s3a.ITestAssumeRole.testAssumeRoleBadSession(ITestAssumeRole.java:216)
-  at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
-  at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
-  at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
-  at java.lang.reflect.Method.invoke(Method.java:498)
-  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
-  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
-  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
-  at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
-  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
-  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
-  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
-  at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
+
 Caused by: com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException:
     1 validation error detected: Value 'Session Names cannot Hava Spaces!' at 'roleSessionName'
     failed to satisfy constraint:
@@ -742,10 +798,11 @@ Caused by: com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceExc
 ### <a name="access_denied"></a> `java.nio.file.AccessDeniedException` within a FileSystem API call
 
 If an operation fails with an `AccessDeniedException`, then the role does not have
-the permission for the S3 Operation invoked during the call
+the permission for the S3 Operation invoked during the call.
 
 ```
-java.nio.file.AccessDeniedException: s3a://bucket/readonlyDir: rename(s3a://bucket/readonlyDir, s3a://bucket/renameDest)
+java.nio.file.AccessDeniedException: s3a://bucket/readonlyDir:
+  rename(s3a://bucket/readonlyDir, s3a://bucket/renameDest)
  on s3a://bucket/readonlyDir:
   com.amazonaws.services.s3.model.AmazonS3Exception: Access Denied
   (Service: Amazon S3; Status Code: 403; Error Code: AccessDenied; Request ID: 2805F2ABF5246BB1;
@@ -795,3 +852,33 @@ check the path for the operation.
 Make sure that all the read and write permissions are allowed for any bucket/path
 to which data is being written to, and read permissions for all
 buckets read from.
+
+If the bucket is using SSE-KMS to encrypt data:
+
+1. To read data, the caller must have the `kms:Decrypt` permission.
+1. To write data, the caller needs both `kms:Decrypt` and `kms:GenerateDataKey`.
+
+Without these permissions, the request fails *and there is no explicit message indicating
+that this is an encryption-key issue*.
+
+### <a name="dynamodb_exception"></a> `AccessDeniedException` + `AmazonDynamoDBException`
+
+```
+java.nio.file.AccessDeniedException: bucket1:
+  com.amazonaws.services.dynamodbv2.model.AmazonDynamoDBException:
+  User: arn:aws:sts::980678866538:assumed-role/s3guard-test-role/test is not authorized to perform:
+  dynamodb:DescribeTable on resource: arn:aws:dynamodb:us-west-1:980678866538:table/bucket1
+   (Service: AmazonDynamoDBv2; Status Code: 400;
+```
+
+The caller is trying to access an S3 bucket which uses S3Guard, but the caller
+lacks the relevant DynamoDB access permissions.
+
+The `dynamodb:DescribeTable` operation is the first one used in S3Guard to access
+the DynamoDB table, so it is often the first to fail. It can be a sign
+that the role has no permissions at all to access the table named in the exception,
+or just that this specific permission has been omitted.
+
+If the role policy requested for the assumed role didn't ask for any DynamoDB
+permissions, this is where all attempts to work with an S3Guard-protected bucket
+will fail. Check the value of `fs.s3a.assumed.role.policy`.
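+
+As a sketch, a deliberately broad policy granting both S3 and DynamoDB access
+could be set as follows; the actions and resources should be narrowed for real
+deployments:
+
+```xml
+<property>
+  <name>fs.s3a.assumed.role.policy</name>
+  <value>
+    {
+      "Version": "2012-10-17",
+      "Statement": [{
+        "Effect": "Allow",
+        "Action": ["s3:*", "dynamodb:*"],
+        "Resource": "*"
+      }]
+    }
+  </value>
+</property>
+```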

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 7d0f67b..2dee10a 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -33,7 +33,7 @@ See also:
 * [Working with IAM Assumed Roles](./assumed_roles.html)
 * [Testing](./testing.html)
 
-##<a name="overview"></a> Overview
+## <a name="overview"></a> Overview
 
 Apache Hadoop's `hadoop-aws` module provides support for AWS integration,
 allowing applications to easily use this support.
@@ -88,7 +88,7 @@ maintain it.
    This connector is no longer available: users must migrate to the newer `s3a:` client.
 
 
-##<a name="getting_started"></a> Getting Started
+## <a name="getting_started"></a> Getting Started
 
 S3A depends upon two JARs, alongside `hadoop-common` and its dependencies.
 
@@ -1698,6 +1698,6 @@ as configured by the value `fs.s3a.multipart.size`.
 To disable checksum verification in `distcp`, use the `-skipcrccheck` option:
 
 ```bash
-hadoop distcp -update -skipcrccheck /user/alice/datasets s3a://alice-backup/datasets
+hadoop distcp -update -skipcrccheck -numListstatusThreads 40 /user/alice/datasets s3a://alice-backup/datasets
 ```
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
index aa6b5d8..3214c76 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
@@ -36,14 +36,6 @@ import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.hadoop.fs.s3a.S3ATestConstants.TEST_FS_S3A_NAME;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
 import java.io.File;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
@@ -60,6 +52,9 @@ import org.junit.rules.TemporaryFolder;
 import static org.apache.hadoop.fs.s3a.Constants.*;
 import static org.apache.hadoop.fs.s3a.S3AUtils.*;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.apache.hadoop.fs.s3a.S3ATestConstants.TEST_FS_S3A_NAME;
+import static org.junit.Assert.*;
 
 /**
  * S3A tests for configuration.
@@ -134,12 +129,26 @@ public class ITestS3AConfiguration {
     conf.setInt(Constants.PROXY_PORT, 1);
     String proxy =
         conf.get(Constants.PROXY_HOST) + ":" + conf.get(Constants.PROXY_PORT);
-    try {
-      fs = S3ATestUtils.createTestFileSystem(conf);
-      fail("Expected a connection error for proxy server at " + proxy);
-    } catch (AWSClientIOException e) {
-      // expected
-    }
+    expectFSCreateFailure(AWSClientIOException.class,
+        conf, "when using proxy " + proxy);
+  }
+
+  /**
+   * Expect a filesystem to not be created from a configuration.
+   * @param clazz expected class of the exception raised
+   * @param conf configuration used in the filesystem creation attempt
+   * @param text text to include in the failure message
+   * @return the exception intercepted
+   * @throws Exception any other exception
+   */
+  private <E extends Throwable> E expectFSCreateFailure(
+      Class<E> clazz,
+      Configuration conf,
+      String text)
+      throws Exception {
+
+    return intercept(clazz,
+        () -> {
+          fs = S3ATestUtils.createTestFileSystem(conf);
+          return "expected failure creating FS " + text + " got " + fs;
+        });
   }
 
   @Test
@@ -148,15 +157,13 @@ public class ITestS3AConfiguration {
     conf.unset(Constants.PROXY_HOST);
     conf.setInt(Constants.MAX_ERROR_RETRIES, 2);
     conf.setInt(Constants.PROXY_PORT, 1);
-    try {
-      fs = S3ATestUtils.createTestFileSystem(conf);
-      fail("Expected a proxy configuration error");
-    } catch (IllegalArgumentException e) {
-      String msg = e.toString();
-      if (!msg.contains(Constants.PROXY_HOST) &&
-          !msg.contains(Constants.PROXY_PORT)) {
-        throw e;
-      }
+    IllegalArgumentException e = expectFSCreateFailure(
+        IllegalArgumentException.class,
+        conf, "Expected a connection error for proxy server");
+    String msg = e.toString();
+    if (!msg.contains(Constants.PROXY_HOST) &&
+        !msg.contains(Constants.PROXY_PORT)) {
+      throw e;
     }
   }
 
@@ -167,19 +174,11 @@ public class ITestS3AConfiguration {
     conf.setInt(Constants.MAX_ERROR_RETRIES, 2);
     conf.set(Constants.PROXY_HOST, "127.0.0.1");
     conf.set(Constants.SECURE_CONNECTIONS, "true");
-    try {
-      fs = S3ATestUtils.createTestFileSystem(conf);
-      fail("Expected a connection error for proxy server");
-    } catch (AWSClientIOException e) {
-      // expected
-    }
+    expectFSCreateFailure(AWSClientIOException.class,
+        conf, "Expected a connection error for proxy server");
     conf.set(Constants.SECURE_CONNECTIONS, "false");
-    try {
-      fs = S3ATestUtils.createTestFileSystem(conf);
-      fail("Expected a connection error for proxy server");
-    } catch (AWSClientIOException e) {
-      // expected
-    }
+    expectFSCreateFailure(AWSClientIOException.class,
+        conf, "Expected a connection error for proxy server");
   }
 
   @Test
@@ -189,31 +188,31 @@ public class ITestS3AConfiguration {
     conf.set(Constants.PROXY_HOST, "127.0.0.1");
     conf.setInt(Constants.PROXY_PORT, 1);
     conf.set(Constants.PROXY_USERNAME, "user");
-    try {
-      fs = S3ATestUtils.createTestFileSystem(conf);
-      fail("Expected a connection error for proxy server");
-    } catch (IllegalArgumentException e) {
-      String msg = e.toString();
-      if (!msg.contains(Constants.PROXY_USERNAME) &&
-          !msg.contains(Constants.PROXY_PASSWORD)) {
-        throw e;
-      }
+    IllegalArgumentException e = expectFSCreateFailure(
+        IllegalArgumentException.class,
+        conf, "Expected a connection error for proxy server");
+    assertIsProxyUsernameError(e);
+  }
+
+  private void assertIsProxyUsernameError(final IllegalArgumentException e) {
+    String msg = e.toString();
+    if (!msg.contains(Constants.PROXY_USERNAME) &&
+        !msg.contains(Constants.PROXY_PASSWORD)) {
+      throw e;
     }
+  }
+
+  @Test
+  public void testUsernameInconsistentWithPassword2() throws Exception {
     conf = new Configuration();
     conf.setInt(Constants.MAX_ERROR_RETRIES, 2);
     conf.set(Constants.PROXY_HOST, "127.0.0.1");
     conf.setInt(Constants.PROXY_PORT, 1);
     conf.set(Constants.PROXY_PASSWORD, "password");
-    try {
-      fs = S3ATestUtils.createTestFileSystem(conf);
-      fail("Expected a connection error for proxy server");
-    } catch (IllegalArgumentException e) {
-      String msg = e.toString();
-      if (!msg.contains(Constants.PROXY_USERNAME) &&
-          !msg.contains(Constants.PROXY_PASSWORD)) {
-        throw e;
-      }
-    }
+    IllegalArgumentException e = expectFSCreateFailure(
+        IllegalArgumentException.class,
+        conf, "Expected a connection error for proxy server");
+    assertIsProxyUsernameError(e);
   }
 
   @Test
@@ -393,7 +392,7 @@ public class ITestS3AConfiguration {
       // Catch/pass standard path style access behaviour when live bucket
       // isn't in the same region as the s3 client default. See
       // http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
-      assertEquals(e.getStatusCode(), HttpStatus.SC_MOVED_PERMANENTLY);
+      assertEquals(HttpStatus.SC_MOVED_PERMANENTLY, e.getStatusCode());
     }
   }
 
@@ -428,8 +427,16 @@ public class ITestS3AConfiguration {
   public void testCloseIdempotent() throws Throwable {
     conf = new Configuration();
     fs = S3ATestUtils.createTestFileSystem(conf);
+    AWSCredentialProviderList credentials =
+        fs.shareCredentials("testCloseIdempotent");
+    credentials.close();
     fs.close();
+    assertTrue("Closing FS didn't close credentials " + credentials,
+        credentials.isClosed());
+    assertEquals("refcount not zero in " + credentials, 0, credentials.getRefCount());
     fs.close();
+    // and the numbers should not change
+    assertEquals("refcount not zero in " + credentials, 0, credentials.getRefCount());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATemporaryCredentials.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATemporaryCredentials.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATemporaryCredentials.java
index 44a2beb..afc4086 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATemporaryCredentials.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATemporaryCredentials.java
@@ -19,15 +19,14 @@
 package org.apache.hadoop.fs.s3a;
 
 import java.io.IOException;
-import java.net.URI;
 
-import com.amazonaws.auth.AWSCredentialsProvider;
-import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient;
+import com.amazonaws.services.securitytoken.AWSSecurityTokenService;
+import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder;
 import com.amazonaws.services.securitytoken.model.GetSessionTokenRequest;
 import com.amazonaws.services.securitytoken.model.GetSessionTokenResult;
 import com.amazonaws.services.securitytoken.model.Credentials;
 
-import org.apache.hadoop.fs.s3native.S3xLoginHelper;
+import org.apache.hadoop.fs.s3a.auth.STSClientFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.LambdaTestUtils;
 
@@ -55,6 +54,14 @@ public class ITestS3ATemporaryCredentials extends AbstractS3ATestBase {
 
   private static final long TEST_FILE_SIZE = 1024;
 
+  private AWSCredentialProviderList credentials;
+
+  @Override
+  public void teardown() throws Exception {
+    S3AUtils.closeAutocloseables(LOG, credentials);
+    super.teardown();
+  }
+
   /**
    * Test use of STS for requesting temporary credentials.
    *
@@ -63,7 +70,7 @@ public class ITestS3ATemporaryCredentials extends AbstractS3ATestBase {
    * S3A tests to request temporary credentials, then attempt to use those
    * credentials instead.
    *
-   * @throws IOException
+   * @throws IOException failure
    */
   @Test
   public void testSTS() throws IOException {
@@ -71,21 +78,20 @@ public class ITestS3ATemporaryCredentials extends AbstractS3ATestBase {
     if (!conf.getBoolean(TEST_STS_ENABLED, true)) {
       skip("STS functional tests disabled");
     }
-
-    S3xLoginHelper.Login login = S3AUtils.getAWSAccessKeys(
-        URI.create("s3a://foobar"), conf);
-    if (!login.hasLogin()) {
-      skip("testSTS disabled because AWS credentials not configured");
-    }
-    AWSCredentialsProvider parentCredentials = new BasicAWSCredentialsProvider(
-        login.getUser(), login.getPassword());
-
-    String stsEndpoint = conf.getTrimmed(TEST_STS_ENDPOINT, "");
-    AWSSecurityTokenServiceClient stsClient;
-    stsClient = new AWSSecurityTokenServiceClient(parentCredentials);
-    if (!stsEndpoint.isEmpty()) {
-      LOG.debug("STS Endpoint ={}", stsEndpoint);
-      stsClient.setEndpoint(stsEndpoint);
+    S3AFileSystem testFS = getFileSystem();
+    credentials = testFS.shareCredentials("testSTS");
+
+    String bucket = testFS.getBucket();
+    AWSSecurityTokenServiceClientBuilder builder = STSClientFactory.builder(
+        conf,
+        bucket,
+        credentials,
+        conf.getTrimmed(TEST_STS_ENDPOINT, ""), "");
+    AWSSecurityTokenService stsClient = builder.build();
+
+    if (!conf.getTrimmed(TEST_STS_ENDPOINT, "").isEmpty()) {
+      LOG.debug("STS Endpoint ={}", conf.getTrimmed(TEST_STS_ENDPOINT, ""));
+      stsClient.setEndpoint(conf.getTrimmed(TEST_STS_ENDPOINT, ""));
     }
     GetSessionTokenRequest sessionTokenRequest = new GetSessionTokenRequest();
     sessionTokenRequest.setDurationSeconds(900);
@@ -93,23 +99,28 @@ public class ITestS3ATemporaryCredentials extends AbstractS3ATestBase {
     sessionTokenResult = stsClient.getSessionToken(sessionTokenRequest);
     Credentials sessionCreds = sessionTokenResult.getCredentials();
 
-    String childAccessKey = sessionCreds.getAccessKeyId();
-    conf.set(ACCESS_KEY, childAccessKey);
-    String childSecretKey = sessionCreds.getSecretAccessKey();
-    conf.set(SECRET_KEY, childSecretKey);
-    String sessionToken = sessionCreds.getSessionToken();
-    conf.set(SESSION_TOKEN, sessionToken);
+    // clone configuration so changes here do not affect the base FS.
+    Configuration conf2 = new Configuration(conf);
+    S3AUtils.clearBucketOption(conf2, bucket, AWS_CREDENTIALS_PROVIDER);
+    S3AUtils.clearBucketOption(conf2, bucket, ACCESS_KEY);
+    S3AUtils.clearBucketOption(conf2, bucket, SECRET_KEY);
+    S3AUtils.clearBucketOption(conf2, bucket, SESSION_TOKEN);
+
+    conf2.set(ACCESS_KEY, sessionCreds.getAccessKeyId());
+    conf2.set(SECRET_KEY, sessionCreds.getSecretAccessKey());
+    conf2.set(SESSION_TOKEN, sessionCreds.getSessionToken());
 
-    conf.set(AWS_CREDENTIALS_PROVIDER, PROVIDER_CLASS);
+    conf2.set(AWS_CREDENTIALS_PROVIDER, PROVIDER_CLASS);
 
-    try(S3AFileSystem fs = S3ATestUtils.createTestFileSystem(conf)) {
+    // with valid credentials, we can set properties.
+    try(S3AFileSystem fs = S3ATestUtils.createTestFileSystem(conf2)) {
       createAndVerifyFile(fs, path("testSTS"), TEST_FILE_SIZE);
     }
 
     // now create an invalid set of credentials by changing the session
     // token
-    conf.set(SESSION_TOKEN, "invalid-" + sessionToken);
-    try (S3AFileSystem fs = S3ATestUtils.createTestFileSystem(conf)) {
+    conf2.set(SESSION_TOKEN, "invalid-" + sessionCreds.getSessionToken());
+    try (S3AFileSystem fs = S3ATestUtils.createTestFileSystem(conf2)) {
       createAndVerifyFile(fs, path("testSTSInvalidToken"), TEST_FILE_SIZE);
       fail("Expected an access exception, but file access to "
           + fs.getUri() + " was allowed: " + fs);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
index 763819b..a1df1a5 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.s3a;
 
 import com.amazonaws.services.s3.model.ListObjectsV2Request;
 import com.amazonaws.services.s3.model.ListObjectsV2Result;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -28,6 +29,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.apache.hadoop.fs.contract.s3a.S3AContract;
+
 import org.junit.Assume;
 import org.junit.Test;
 
@@ -37,6 +39,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
+import java.util.stream.Collectors;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.writeTextFile;
@@ -71,7 +74,9 @@ public class ITestS3GuardListConsistency extends AbstractS3ATestBase {
     // Other configs would break test assumptions
     conf.set(FAIL_INJECT_INCONSISTENCY_KEY, DEFAULT_DELAY_KEY_SUBSTRING);
     conf.setFloat(FAIL_INJECT_INCONSISTENCY_PROBABILITY, 1.0f);
-    conf.setLong(FAIL_INJECT_INCONSISTENCY_MSEC, DEFAULT_DELAY_KEY_MSEC);
+    // this is a long value to guarantee that the inconsistency holds
+    // even over long-haul connections, and in the debugger too/
+    conf.setLong(FAIL_INJECT_INCONSISTENCY_MSEC, (long) (60 * 1000));
     return new S3AContract(conf);
   }
 
@@ -524,37 +529,60 @@ public class ITestS3GuardListConsistency extends AbstractS3ATestBase {
 
     ListObjectsV2Result postDeleteDelimited = listObjectsV2(fs, key, "/");
     ListObjectsV2Result postDeleteUndelimited = listObjectsV2(fs, key, null);
-
-    assertEquals("InconsistentAmazonS3Client added back objects incorrectly " +
+    assertListSizeEqual(
+        "InconsistentAmazonS3Client added back objects incorrectly " +
             "in a non-recursive listing",
-        preDeleteDelimited.getObjectSummaries().size(),
-        postDeleteDelimited.getObjectSummaries().size()
-    );
-    assertEquals("InconsistentAmazonS3Client added back prefixes incorrectly " +
+        preDeleteDelimited.getObjectSummaries(),
+        postDeleteDelimited.getObjectSummaries());
+
+    assertListSizeEqual("InconsistentAmazonS3Client added back prefixes incorrectly " +
             "in a non-recursive listing",
-        preDeleteDelimited.getCommonPrefixes().size(),
-        postDeleteDelimited.getCommonPrefixes().size()
+        preDeleteDelimited.getCommonPrefixes(),
+        postDeleteDelimited.getCommonPrefixes()
     );
-    assertEquals("InconsistentAmazonS3Client added back objects incorrectly " +
+    assertListSizeEqual("InconsistentAmazonS3Client added back objects incorrectly " +
             "in a recursive listing",
-        preDeleteUndelimited.getObjectSummaries().size(),
-        postDeleteUndelimited.getObjectSummaries().size()
+        preDeleteUndelimited.getObjectSummaries(),
+        postDeleteUndelimited.getObjectSummaries()
     );
-    assertEquals("InconsistentAmazonS3Client added back prefixes incorrectly " +
+
+    assertListSizeEqual("InconsistentAmazonS3Client added back prefixes incorrectly " +
             "in a recursive listing",
-        preDeleteUndelimited.getCommonPrefixes().size(),
-        postDeleteUndelimited.getCommonPrefixes().size()
+        preDeleteUndelimited.getCommonPrefixes(),
+        postDeleteUndelimited.getCommonPrefixes()
     );
   }
 
   /**
-   * retrying v2 list.
-   * @param fs
-   * @param key
-   * @param delimiter
-   * @return
+   * Assert that the two list sizes match; failure message includes the lists.
+   * @param message text for the assertion
+   * @param expected expected list
+   * @param actual actual list
+   * @param <T> type of list
+   */
+  private <T> void assertListSizeEqual(String message,
+      List<T> expected,
+      List<T> actual) {
+    String leftContents = expected.stream()
+        .map(n -> n.toString())
+        .collect(Collectors.joining("\n"));
+    String rightContents = actual.stream()
+        .map(n -> n.toString())
+        .collect(Collectors.joining("\n"));
+    String summary = "\nExpected:" + leftContents
+        + "\n-----------\nActual:" + rightContents;
+    assertEquals(message + summary, expected.size(), actual.size());
+  }
+
+  /**
+   * Retrying v2 list directly through the s3 client.
+   * @param fs filesystem
+   * @param key key to list under
+   * @param delimiter any delimiter
+   * @return the listing
    * @throws IOException on error
    */
+  @Retries.RetryRaw
   private ListObjectsV2Result listObjectsV2(S3AFileSystem fs,
       String key, String delimiter) throws IOException {
     ListObjectsV2Request k = fs.createListObjectsRequest(key, delimiter)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardWriteBack.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardWriteBack.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardWriteBack.java
index c8a54b8..d5cd4d4 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardWriteBack.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardWriteBack.java
@@ -65,11 +65,12 @@ public class ITestS3GuardWriteBack extends AbstractS3ATestBase {
     // delete the existing directory (in case of last test failure)
     noS3Guard.delete(directory, true);
     // Create a directory on S3 only
-    noS3Guard.mkdirs(new Path(directory, "OnS3"));
+    Path onS3 = new Path(directory, "OnS3");
+    noS3Guard.mkdirs(onS3);
     // Create a directory on both S3 and metadata store
-    Path p = new Path(directory, "OnS3AndMS");
-    ContractTestUtils.assertPathDoesNotExist(noWriteBack, "path", p);
-    noWriteBack.mkdirs(p);
+    Path onS3AndMS = new Path(directory, "OnS3AndMS");
+    ContractTestUtils.assertPathDoesNotExist(noWriteBack, "path", onS3AndMS);
+    noWriteBack.mkdirs(onS3AndMS);
 
     FileStatus[] fsResults;
     DirListingMetadata mdResults;
@@ -83,6 +84,8 @@ public class ITestS3GuardWriteBack extends AbstractS3ATestBase {
     // Metadata store without write-back should still only contain /OnS3AndMS,
     // because newly discovered /OnS3 is not written back to metadata store
     mdResults = noWriteBack.getMetadataStore().listChildren(directory);
+    assertNotNull("No results from noWriteBack listChildren " + directory,
+        mdResults);
     assertEquals("Metadata store without write back should still only know "
             + "about /OnS3AndMS, but it has: " + mdResults,
         1, mdResults.numEntries());
@@ -102,8 +105,7 @@ public class ITestS3GuardWriteBack extends AbstractS3ATestBase {
 
     // If we don't clean this up, the next test run will fail because it will
     // have recorded /OnS3 being deleted even after it's written to noS3Guard.
-    getFileSystem().getMetadataStore().forgetMetadata(
-        new Path(directory, "OnS3"));
+    getFileSystem().getMetadataStore().forgetMetadata(onS3);
   }
 
   /**
@@ -118,26 +120,33 @@ public class ITestS3GuardWriteBack extends AbstractS3ATestBase {
 
     // Create a FileSystem that is S3-backed only
     conf = createConfiguration();
-    S3ATestUtils.disableFilesystemCaching(conf);
     String host = fsURI.getHost();
-    if (disableS3Guard) {
-      conf.set(Constants.S3_METADATA_STORE_IMPL,
-          Constants.S3GUARD_METASTORE_NULL);
-      S3AUtils.setBucketOption(conf, host,
-          S3_METADATA_STORE_IMPL,
-          S3GUARD_METASTORE_NULL);
-    } else {
-      S3ATestUtils.maybeEnableS3Guard(conf);
-      conf.setBoolean(METADATASTORE_AUTHORITATIVE, authoritativeMeta);
-      S3AUtils.setBucketOption(conf, host,
-          METADATASTORE_AUTHORITATIVE,
-          Boolean.toString(authoritativeMeta));
-      S3AUtils.setBucketOption(conf, host,
-          S3_METADATA_STORE_IMPL,
-          conf.get(S3_METADATA_STORE_IMPL));
+    String metastore;
+
+    metastore = S3GUARD_METASTORE_NULL;
+    if (!disableS3Guard) {
+      // pick up the metadata store used by the main test
+      metastore = getFileSystem().getConf().get(S3_METADATA_STORE_IMPL);
+      assertNotEquals(S3GUARD_METASTORE_NULL, metastore);
     }
-    FileSystem fs = FileSystem.get(fsURI, conf);
-    return asS3AFS(fs);
+
+    conf.set(Constants.S3_METADATA_STORE_IMPL, metastore);
+    conf.setBoolean(METADATASTORE_AUTHORITATIVE, authoritativeMeta);
+    S3AUtils.setBucketOption(conf, host,
+        METADATASTORE_AUTHORITATIVE,
+        Boolean.toString(authoritativeMeta));
+    S3AUtils.setBucketOption(conf, host,
+        S3_METADATA_STORE_IMPL, metastore);
+
+    S3AFileSystem fs = asS3AFS(FileSystem.newInstance(fsURI, conf));
+    // do a check to verify that everything got through
+    assertEquals("Metadata store should have been disabled: " + fs,
+        disableS3Guard, !fs.hasMetadataStore());
+    assertEquals("metastore option did not propagate",
+        metastore, fs.getConf().get(S3_METADATA_STORE_IMPL));
+
+    return fs;
+
   }
 
   private static S3AFileSystem asS3AFS(FileSystem fs) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3ClientFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3ClientFactory.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3ClientFactory.java
index b746bfe5..dbf228d 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3ClientFactory.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3ClientFactory.java
@@ -23,6 +23,7 @@ import static org.mockito.Mockito.*;
 import java.net.URI;
 import java.util.ArrayList;
 
+import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.services.s3.AmazonS3;
 import com.amazonaws.services.s3.model.MultipartUploadListing;
 import com.amazonaws.services.s3.model.Region;
@@ -34,8 +35,9 @@ import com.amazonaws.services.s3.model.Region;
 public class MockS3ClientFactory implements S3ClientFactory {
 
   @Override
-  public AmazonS3 createS3Client(URI name) {
-    String bucket = name.getHost();
+  public AmazonS3 createS3Client(URI name,
+      final String bucket,
+      final AWSCredentialsProvider credentialSet) {
     AmazonS3 s3 = mock(AmazonS3.class);
     when(s3.doesBucketExist(bucket)).thenReturn(true);
     // this listing is used in startup if purging is enabled, so

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
index d731ae7..b28925c 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.s3a;
 
 import java.io.IOException;
 import java.net.URI;
+import java.nio.file.AccessDeniedException;
 import java.util.Arrays;
 import java.util.List;
 
@@ -34,11 +35,15 @@ import org.junit.rules.ExpectedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider;
+import org.apache.hadoop.fs.s3a.auth.NoAuthWithAWSException;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.test.GenericTestUtils;
 
 import static org.apache.hadoop.fs.s3a.Constants.*;
 import static org.apache.hadoop.fs.s3a.S3ATestConstants.*;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
 import static org.apache.hadoop.fs.s3a.S3AUtils.*;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.junit.Assert.*;
 
 /**
@@ -221,14 +226,13 @@ public class TestS3AAWSCredentialsProvider {
   }
 
   private void expectProviderInstantiationFailure(String option,
-      String expectedErrorText) throws IOException {
+      String expectedErrorText) throws Exception {
     Configuration conf = new Configuration();
     conf.set(AWS_CREDENTIALS_PROVIDER, option);
     Path testFile = new Path(
         conf.getTrimmed(KEY_CSVTEST_FILE, DEFAULT_CSVTEST_FILE));
-    expectException(IOException.class, expectedErrorText);
-    URI uri = testFile.toUri();
-    S3AUtils.createAWSCredentialProviderSet(uri, conf);
+    intercept(IOException.class, expectedErrorText,
+        () -> S3AUtils.createAWSCredentialProviderSet(testFile.toUri(), conf));
   }
 
   /**
@@ -288,4 +292,68 @@ public class TestS3AAWSCredentialsProvider {
         authenticationContains(conf, AssumedRoleCredentialProvider.NAME));
   }
 
+  @Test
+  public void testExceptionLogic() throws Throwable {
+    AWSCredentialProviderList providers
+        = new AWSCredentialProviderList();
+    // verify you can't get credentials from it
+    NoAuthWithAWSException noAuth = intercept(NoAuthWithAWSException.class,
+        AWSCredentialProviderList.NO_AWS_CREDENTIAL_PROVIDERS,
+        () -> providers.getCredentials());
+    // but that it closes safely
+    providers.close();
+
+    S3ARetryPolicy retryPolicy = new S3ARetryPolicy(new Configuration());
+    assertEquals("Expected no retry on auth failure",
+        RetryPolicy.RetryAction.FAIL.action,
+        retryPolicy.shouldRetry(noAuth, 0, 0, true).action);
+
+    try {
+      throw S3AUtils.translateException("login", "", noAuth);
+    } catch (AccessDeniedException expected) {
+      // this is what we want; other exceptions will be passed up
+      assertEquals("Expected no retry on AccessDeniedException",
+          RetryPolicy.RetryAction.FAIL.action,
+          retryPolicy.shouldRetry(expected, 0, 0, true).action);
+    }
+
+  }
+
+  @Test
+  public void testRefCounting() throws Throwable {
+    AWSCredentialProviderList providers
+        = new AWSCredentialProviderList();
+    assertEquals("Ref count for " + providers,
+        1, providers.getRefCount());
+    AWSCredentialProviderList replicate = providers.share();
+    assertEquals(providers, replicate);
+    assertEquals("Ref count after replication for " + providers,
+        2, providers.getRefCount());
+    assertFalse("Was closed " + providers, providers.isClosed());
+    providers.close();
+    assertFalse("Was closed " + providers, providers.isClosed());
+    assertEquals("Ref count after close() for " + providers,
+        1, providers.getRefCount());
+
+    // this should now close it
+    providers.close();
+    assertTrue("Was not closed " + providers, providers.isClosed());
+    assertEquals("Ref count after close() for " + providers,
+        0, providers.getRefCount());
+    assertEquals("Ref count after second close() for " + providers,
+        0, providers.getRefCount());
+    intercept(IllegalStateException.class, "closed",
+        () -> providers.share());
+    // final call harmless
+    providers.close();
+    assertEquals("Ref count after close() for " + providers,
+        0, providers.getRefCount());
+    providers.refresh();
+
+    intercept(NoAuthWithAWSException.class,
+        AWSCredentialProviderList.CREDENTIALS_REQUESTED_WHEN_CLOSED,
+        () -> providers.getCredentials());
+  }
+
+
 }
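
The new tests pin down the reference-counting lifecycle of the provider
list. A hedged sketch of the contract they exercise:

    AWSCredentialProviderList creds = new AWSCredentialProviderList(); // refCount == 1
    AWSCredentialProviderList shared = creds.share();                  // same object, refCount == 2
    shared.close();  // refCount drops to 1; the wrapped providers stay open
    creds.close();   // refCount hits 0; the wrapped providers are closed
    assert creds.isClosed();
    creds.close();   // further close() calls are harmless no-ops
    // from here, share() throws IllegalStateException and
    // getCredentials() throws NoAuthWithAWSException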

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java
index c6985b0..7451ef1 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java
@@ -61,6 +61,7 @@ import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.*;
 import static org.apache.hadoop.fs.s3a.auth.RoleModel.*;
 import static org.apache.hadoop.fs.s3a.auth.RolePolicies.*;
 import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.forbidden;
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.apache.hadoop.test.LambdaTestUtils.*;
 
 /**
@@ -85,6 +86,24 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
    */
   private S3AFileSystem roleFS;
 
+  /**
+   * Duration range exception text on SDKs which check client-side.
+   */
+  protected static final String E_DURATION_RANGE_1
+      = "Assume Role session duration should be in the range of 15min - 1Hr";
+
+  /**
+   * Duration range too high text on SDKs which check on the server.
+   */
+  protected static final String E_DURATION_RANGE_2
+      = "Member must have value less than or equal to 43200";
+
+  /**
+   * Duration range too low text on SDKs which check on the server.
+   */
+  protected static final String E_DURATION_RANGE_3
+      = "Member must have value greater than or equal to 900";
+
   @Override
   public void setup() throws Exception {
     super.setup();
@@ -112,13 +131,14 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
    * @param clazz class of exception to expect
    * @param text text in exception
    * @param <E> type of exception as inferred from clazz
+   * @return the caught exception if it was of the expected type and contents
    * @throws Exception if the exception was the wrong class
    */
-  private <E extends Throwable> void expectFileSystemCreateFailure(
+  private <E extends Throwable> E expectFileSystemCreateFailure(
       Configuration conf,
       Class<E> clazz,
       String text) throws Exception {
-    interceptClosing(clazz,
+    return interceptClosing(clazz,
         text,
         () -> new Path(getFileSystem().getUri()).getFileSystem(conf));
   }
@@ -246,6 +266,60 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
         "Member must satisfy regular expression pattern");
   }
 
+  /**
+   * A duration >1h is forbidden client-side in AWS SDK 1.11.271;
+   * with the ability to extend durations deployed in March 2018,
+   * duration checks will need to go server-side, and, presumably,
+   * later SDKs will remove the client-side checks.
+   * This code exists to see when this happens.
+   */
+  @Test
+  public void testAssumeRoleThreeHourSessionDuration() throws Exception {
+    describe("Try to authenticate with a long session duration");
+
+    Configuration conf = createAssumedRoleConfig();
+    // add a duration of three hours
+    conf.setInt(ASSUMED_ROLE_SESSION_DURATION, 3 * 60 * 60);
+    try {
+      new Path(getFileSystem().getUri()).getFileSystem(conf).close();
+      LOG.info("Successfully created token of a duration >3h");
+    } catch (IOException ioe) {
+      assertExceptionContains(E_DURATION_RANGE_1, ioe);
+    }
+  }
+
+  /**
+   * A duration >1h is forbidden client-side in AWS SDK 1.11.271;
+   * with the ability to extend durations deployed in March 2018,
+   * the checks move server-side in later SDKs, which also drop
+   * the client-side checks.
+   * This test asks for a duration which will still be rejected, and
+   * looks for either of the error messages raised.
+   */
+  @Test
+  public void testAssumeRoleThirtySixHourSessionDuration() throws Exception {
+    describe("Try to authenticate with a long session duration");
+
+    Configuration conf = createAssumedRoleConfig();
+    conf.setInt(ASSUMED_ROLE_SESSION_DURATION, 36 * 60 * 60);
+    IOException ioe = expectFileSystemCreateFailure(conf,
+        IOException.class, null);
+    assertIsRangeException(ioe);
+  }
+
+  /**
+   * Look for either the client-side or STS-side range exception.
+   * @param e exception
+   * @throws Exception the exception, if its text doesn't match
+   */
+  private void assertIsRangeException(final Exception e) throws Exception {
+    String message = e.toString();
+    if (!message.contains(E_DURATION_RANGE_1)
+        && !message.contains(E_DURATION_RANGE_2)
+        && !message.contains(E_DURATION_RANGE_3)) {
+      throw e;
+    }
+  }
 
   /**
    * Create the assumed role configuration.
@@ -280,11 +354,11 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
     describe("Expect the constructor to fail if the session is to short");
     Configuration conf = new Configuration();
     conf.set(ASSUMED_ROLE_SESSION_DURATION, "30s");
-    interceptClosing(IllegalArgumentException.class, "",
+    Exception ex = interceptClosing(Exception.class, "",
         () -> new AssumedRoleCredentialProvider(uri, conf));
+    assertIsRangeException(ex);
   }
 
-
   @Test
   public void testAssumeRoleCreateFS() throws IOException {
     describe("Create an FS client with the role and do some basic IO");
@@ -296,24 +370,32 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
         conf.get(ACCESS_KEY), roleARN);
 
     try (FileSystem fs = path.getFileSystem(conf)) {
-      fs.getFileStatus(new Path("/"));
+      fs.getFileStatus(ROOT);
       fs.mkdirs(path("testAssumeRoleFS"));
     }
   }
 
   @Test
   public void testAssumeRoleRestrictedPolicyFS() throws Exception {
-    describe("Restrict the policy for this session; verify that reads fail");
+    describe("Restrict the policy for this session; verify that reads fail.");
 
+    // there's some special handling of S3Guard here as operations
+    // which only go to DDB don't fail the way S3 would reject them.
     Configuration conf = createAssumedRoleConfig();
     bindRolePolicy(conf, RESTRICTED_POLICY);
     Path path = new Path(getFileSystem().getUri());
+    boolean guarded = getFileSystem().hasMetadataStore();
     try (FileSystem fs = path.getFileSystem(conf)) {
-      forbidden("getFileStatus",
-          () -> fs.getFileStatus(new Path("/")));
-      forbidden("getFileStatus",
-          () -> fs.listStatus(new Path("/")));
-      forbidden("getFileStatus",
+      if (!guarded) {
+        // when S3Guard is enabled, the restricted policy still
+        // permits S3Guard record lookup, so getFileStatus calls
+        // will work iff the record is in the database.
+        forbidden("getFileStatus",
+            () -> fs.getFileStatus(ROOT));
+      }
+      forbidden("",
+          () -> fs.listStatus(ROOT));
+      forbidden("",
           () -> fs.mkdirs(path("testAssumeRoleFS")));
     }
   }
@@ -333,7 +415,11 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
     Configuration conf = createAssumedRoleConfig();
 
     bindRolePolicy(conf,
-        policy(statement(false, S3_ALL_BUCKETS, S3_GET_OBJECT_TORRENT)));
+        policy(
+            statement(false, S3_ALL_BUCKETS, S3_GET_OBJECT_TORRENT),
+            ALLOW_S3_GET_BUCKET_LOCATION,
+            STATEMENT_S3GUARD_CLIENT,
+            STATEMENT_ALLOW_SSE_KMS_RW));
     Path path = path("testAssumeRoleStillIncludesRolePerms");
     roleFS = (S3AFileSystem) path.getFileSystem(conf);
     assertTouchForbidden(roleFS, path);
@@ -342,6 +428,8 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
   /**
    * After blocking all write verbs used by S3A, try to write data (fail)
    * and read data (succeed).
+   * For S3Guard: full DDB RW access is retained.
+   * SSE-KMS key access is set to decrypt only.
    */
   @Test
   public void testReadOnlyOperations() throws Throwable {
@@ -352,7 +440,9 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
     bindRolePolicy(conf,
         policy(
             statement(false, S3_ALL_BUCKETS, S3_PATH_WRITE_OPERATIONS),
-            STATEMENT_ALL_S3, STATEMENT_ALL_DDB));
+            STATEMENT_ALL_S3,
+            STATEMENT_S3GUARD_CLIENT,
+            STATEMENT_ALLOW_SSE_KMS_READ));
     Path path = methodPath();
     roleFS = (S3AFileSystem) path.getFileSystem(conf);
     // list the root path, expect happy
@@ -399,8 +489,9 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
     Configuration conf = createAssumedRoleConfig();
 
     bindRolePolicyStatements(conf,
-        STATEMENT_ALL_DDB,
+        STATEMENT_S3GUARD_CLIENT,
         statement(true, S3_ALL_BUCKETS, S3_ROOT_READ_OPERATIONS),
+        STATEMENT_ALLOW_SSE_KMS_RW,
         new Statement(Effects.Allow)
           .addActions(S3_ALL_OPERATIONS)
           .addResources(directory(restrictedDir)));
@@ -447,7 +538,7 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
   }
 
   /**
-   * Execute a sequence of rename operations.
+   * Execute a sequence of rename operations with access locked down.
    * @param conf FS configuration
    */
   public void executeRestrictedRename(final Configuration conf)
@@ -461,7 +552,8 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
     fs.delete(basePath, true);
 
     bindRolePolicyStatements(conf,
-        STATEMENT_ALL_DDB,
+        STATEMENT_S3GUARD_CLIENT,
+        STATEMENT_ALLOW_SSE_KMS_RW,
         statement(true, S3_ALL_BUCKETS, S3_ROOT_READ_OPERATIONS),
         new Statement(Effects.Allow)
           .addActions(S3_PATH_RW_OPERATIONS)
@@ -503,6 +595,25 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
   }
 
   /**
+   * Without simulation of STS failures, and with STS overload likely to
+   * be very rare, there'll be no implicit test coverage of
+   * {@link AssumedRoleCredentialProvider#operationRetried(String, Exception, int, boolean)}.
+   * This test simply invokes the callback for both the first and second retry event.
+   *
+   * If the handler ever adds more than logging, this test ensures that things
+   * don't break.
+   */
+  @Test
+  public void testAssumedRoleRetryHandler() throws Throwable {
+    try (AssumedRoleCredentialProvider provider
+            = new AssumedRoleCredentialProvider(getFileSystem().getUri(),
+        createAssumedRoleConfig())) {
+      provider.operationRetried("retry", new IOException("failure"), 0, true);
+      provider.operationRetried("retry", new IOException("failure"), 1, true);
+    }
+  }
+
+  /**
    * Execute a sequence of rename operations where the source
    * data is read only to the client calling rename().
    * This will cause the inner delete() operations to fail, whose outcomes
@@ -534,7 +645,7 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
     touch(fs, readOnlyFile);
 
     bindRolePolicyStatements(conf,
-        STATEMENT_ALL_DDB,
+        STATEMENT_S3GUARD_CLIENT,
         statement(true, S3_ALL_BUCKETS, S3_ROOT_READ_OPERATIONS),
           new Statement(Effects.Allow)
             .addActions(S3_PATH_RW_OPERATIONS)
@@ -614,7 +725,8 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
     fs.mkdirs(readOnlyDir);
 
     bindRolePolicyStatements(conf,
-        STATEMENT_ALL_DDB,
+        STATEMENT_S3GUARD_CLIENT,
+        STATEMENT_ALLOW_SSE_KMS_RW,
         statement(true, S3_ALL_BUCKETS, S3_ROOT_READ_OPERATIONS),
         new Statement(Effects.Allow)
             .addActions(S3_PATH_RW_OPERATIONS)
@@ -752,7 +864,8 @@ public class ITestAssumeRole extends AbstractS3ATestBase {
     fs.delete(destDir, true);
 
     bindRolePolicyStatements(conf,
-        STATEMENT_ALL_DDB,
+        STATEMENT_S3GUARD_CLIENT,
+        STATEMENT_ALLOW_SSE_KMS_RW,
         statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS),
         new Statement(Effects.Deny)
             .addActions(S3_PATH_WRITE_OPERATIONS)
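
As the duration tests above show, ASSUMED_ROLE_SESSION_DURATION accepts
either a number of seconds or a value with a time suffix, and out-of-range
values are rejected client-side or server-side depending on the SDK. A
hedged sketch:

    Configuration conf = new Configuration();
    conf.setInt(ASSUMED_ROLE_SESSION_DURATION, 60 * 60); // one hour, in seconds
    // or, with a time suffix, as the constructor test above uses:
    conf.set(ASSUMED_ROLE_SESSION_DURATION, "30m");
    // durations outside 15min-1h (client-side checks) or 15min-12h
    // (server-side checks) fail with one of the E_DURATION_RANGE_* messages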

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java
index bb66268..834826e 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java
@@ -72,7 +72,8 @@ public class ITestAssumedRoleCommitOperations extends ITestCommitOperations {
     Configuration conf = newAssumedRoleConfig(getConfiguration(),
         getAssumedRoleARN());
     bindRolePolicyStatements(conf,
-        STATEMENT_ALL_DDB,
+        STATEMENT_S3GUARD_CLIENT,
+        STATEMENT_ALLOW_SSE_KMS_RW,
         statement(true, S3_ALL_BUCKETS, S3_ROOT_READ_OPERATIONS),
         new RoleModel.Statement(RoleModel.Effects.Allow)
             .addActions(S3_PATH_RW_OPERATIONS)
@@ -81,7 +82,6 @@ public class ITestAssumedRoleCommitOperations extends ITestCommitOperations {
     roleFS = (S3AFileSystem) restrictedDir.getFileSystem(conf);
   }
 
-
   @Override
   public void teardown() throws Exception {
     S3AUtils.closeAll(LOG, roleFS);
@@ -122,7 +122,6 @@ public class ITestAssumedRoleCommitOperations extends ITestCommitOperations {
     return new Path(restrictedDir, filepath);
   }
 
-
   private String getAssumedRoleARN() {
     return getContract().getConf().getTrimmed(ASSUMED_ROLE_ARN, "");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/RoleTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/RoleTestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/RoleTestUtils.java
index 9fa2600..854e7ec 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/RoleTestUtils.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/RoleTestUtils.java
@@ -58,14 +58,23 @@ public final class RoleTestUtils {
 
 
   /** Deny GET requests to all buckets. */
-  public static final Statement DENY_GET_ALL =
+  public static final Statement DENY_S3_GET_OBJECT =
       statement(false, S3_ALL_BUCKETS, S3_GET_OBJECT);
 
+  public static final Statement ALLOW_S3_GET_BUCKET_LOCATION
+      =  statement(true, S3_ALL_BUCKETS, S3_GET_BUCKET_LOCATION);
+
   /**
-   * This is AWS policy removes read access.
+   * This AWS policy removes read access from S3, but leaves S3Guard access up.
+   * This will allow clients to use S3Guard list/HEAD operations, even
+   * the ability to write records, but not actually access the underlying
+   * data.
+   * The client does need {@link RolePolicies#S3_GET_BUCKET_LOCATION} to
+   * get the bucket location.
    */
-  public static final Policy RESTRICTED_POLICY = policy(DENY_GET_ALL);
-
+  public static final Policy RESTRICTED_POLICY = policy(
+      DENY_S3_GET_OBJECT, STATEMENT_ALL_DDB, ALLOW_S3_GET_BUCKET_LOCATION
+      );
 
   /**
    * Error message to get from the AWS SDK if you can't assume the role.
@@ -145,7 +154,7 @@ public final class RoleTestUtils {
     Configuration conf = new Configuration(srcConf);
     conf.set(AWS_CREDENTIALS_PROVIDER, AssumedRoleCredentialProvider.NAME);
     conf.set(ASSUMED_ROLE_ARN, roleARN);
-    conf.set(ASSUMED_ROLE_SESSION_NAME, "valid");
+    conf.set(ASSUMED_ROLE_SESSION_NAME, "test");
     conf.set(ASSUMED_ROLE_SESSION_DURATION, "15m");
     disableFilesystemCaching(conf);
     return conf;
@@ -163,9 +172,8 @@ public final class RoleTestUtils {
       String contained,
       Callable<T> eval)
       throws Exception {
-    AccessDeniedException ex = intercept(AccessDeniedException.class, eval);
-    GenericTestUtils.assertExceptionContains(contained, ex);
-    return ex;
+    return intercept(AccessDeniedException.class,
+        contained, eval);
   }
 
 }
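
A hedged sketch of composing a similar restricted policy from the same
helpers, assuming the RoleModel and RolePolicies statics are imported:

    Policy readOnlyButGuarded = policy(
        statement(false, S3_ALL_BUCKETS, S3_GET_OBJECT), // deny object GETs everywhere
        STATEMENT_ALL_DDB,                               // retain S3Guard/DynamoDB access
        ALLOW_S3_GET_BUCKET_LOCATION);                   // bucket location lookup still works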

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
index f591e32..9185fc5 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
@@ -32,6 +32,7 @@ import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.fs.s3a.S3AUtils;
 import org.apache.hadoop.util.StopWatch;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileSystem;
@@ -51,6 +52,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.StringUtils;
 
+import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_AUTHORITATIVE;
 import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_TABLE_NAME_KEY;
 import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_METASTORE_NULL;
 import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL;
@@ -144,8 +146,11 @@ public abstract class AbstractS3GuardToolTestBase extends AbstractS3ATestBase {
 
     // Also create a "raw" fs without any MetadataStore configured
     Configuration conf = new Configuration(getConfiguration());
-    conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL);
     URI fsUri = getFileSystem().getUri();
+    conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL);
+    S3AUtils.setBucketOption(conf, fsUri.getHost(),
+        METADATASTORE_AUTHORITATIVE,
+        S3GUARD_METASTORE_NULL);
     rawFs = (S3AFileSystem) FileSystem.newInstance(fsUri, conf);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardConcurrentOps.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardConcurrentOps.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardConcurrentOps.java
index c6838a0..22a1efd 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardConcurrentOps.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardConcurrentOps.java
@@ -40,8 +40,10 @@ import org.junit.rules.Timeout;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.s3a.AWSCredentialProviderList;
 import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
 import org.apache.hadoop.fs.s3a.Constants;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
 
 import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_REGION_KEY;
 
@@ -80,81 +82,102 @@ public class ITestS3GuardConcurrentOps extends AbstractS3ATestBase {
 
   @Test
   public void testConcurrentTableCreations() throws Exception {
-    final Configuration conf = getConfiguration();
+    S3AFileSystem fs = getFileSystem();
+    final Configuration conf = fs.getConf();
     Assume.assumeTrue("Test only applies when DynamoDB is used for S3Guard",
         conf.get(Constants.S3_METADATA_STORE_IMPL).equals(
             Constants.S3GUARD_METASTORE_DYNAMO));
 
-    DynamoDBMetadataStore ms = new DynamoDBMetadataStore();
-    ms.initialize(getFileSystem());
-    DynamoDB db = ms.getDynamoDB();
-
-    String tableName = "testConcurrentTableCreations" + new Random().nextInt();
-    conf.setBoolean(Constants.S3GUARD_DDB_TABLE_CREATE_KEY, true);
-    conf.set(Constants.S3GUARD_DDB_TABLE_NAME_KEY, tableName);
+    AWSCredentialProviderList sharedCreds =
+        fs.shareCredentials("testConcurrentTableCreations");
+    // close that shared copy.
+    sharedCreds.close();
+    // this is the original reference count.
+    int originalRefCount = sharedCreds.getRefCount();
 
-    String region = conf.getTrimmed(S3GUARD_DDB_REGION_KEY);
-    if (StringUtils.isEmpty(region)) {
-      // no region set, so pick it up from the test bucket
-      conf.set(S3GUARD_DDB_REGION_KEY, getFileSystem().getBucketLocation());
-    }
-    int concurrentOps = 16;
-    int iterations = 4;
+    // now init the store; this should increment the ref count.
+    DynamoDBMetadataStore ms = new DynamoDBMetadataStore();
+    ms.initialize(fs);
 
-    failIfTableExists(db, tableName);
+    // the ref count should have gone up
+    assertEquals("Credential Ref count unchanged after initializing metastore "
+        + sharedCreds,
+        originalRefCount + 1, sharedCreds.getRefCount());
+    try {
+      DynamoDB db = ms.getDynamoDB();
 
-    for (int i = 0; i < iterations; i++) {
-      ExecutorService executor = Executors.newFixedThreadPool(
-          concurrentOps, new ThreadFactory() {
-            private AtomicInteger count = new AtomicInteger(0);
+      String tableName = "testConcurrentTableCreations" + new Random().nextInt();
+      conf.setBoolean(Constants.S3GUARD_DDB_TABLE_CREATE_KEY, true);
+      conf.set(Constants.S3GUARD_DDB_TABLE_NAME_KEY, tableName);
 
-            public Thread newThread(Runnable r) {
-              return new Thread(r,
-                  "testConcurrentTableCreations" + count.getAndIncrement());
+      String region = conf.getTrimmed(S3GUARD_DDB_REGION_KEY);
+      if (StringUtils.isEmpty(region)) {
+        // no region set, so pick it up from the test bucket
+        conf.set(S3GUARD_DDB_REGION_KEY, fs.getBucketLocation());
+      }
+      int concurrentOps = 16;
+      int iterations = 4;
+
+      failIfTableExists(db, tableName);
+
+      for (int i = 0; i < iterations; i++) {
+        ExecutorService executor = Executors.newFixedThreadPool(
+            concurrentOps, new ThreadFactory() {
+              private AtomicInteger count = new AtomicInteger(0);
+
+              public Thread newThread(Runnable r) {
+                return new Thread(r,
+                    "testConcurrentTableCreations" + count.getAndIncrement());
+              }
+            });
+        ((ThreadPoolExecutor) executor).prestartAllCoreThreads();
+        Future<Exception>[] futures = new Future[concurrentOps];
+        for (int f = 0; f < concurrentOps; f++) {
+          final int index = f;
+          futures[f] = executor.submit(new Callable<Exception>() {
+            @Override
+            public Exception call() throws Exception {
+
+              ContractTestUtils.NanoTimer timer =
+                  new ContractTestUtils.NanoTimer();
+
+              Exception result = null;
+              try (DynamoDBMetadataStore store = new DynamoDBMetadataStore()) {
+                store.initialize(conf);
+              } catch (Exception e) {
+                LOG.error(e.getClass() + ": " + e.getMessage());
+                result = e;
+              }
+
+              timer.end("Parallel DynamoDB client creation %d", index);
+              LOG.info("Parallel DynamoDB client creation {} ran from {} to {}",
+                  index, timer.getStartTime(), timer.getEndTime());
+              return result;
             }
           });
-      ((ThreadPoolExecutor) executor).prestartAllCoreThreads();
-      Future<Exception>[] futures = new Future[concurrentOps];
-      for (int f = 0; f < concurrentOps; f++) {
-        final int index = f;
-        futures[f] = executor.submit(new Callable<Exception>() {
-          @Override
-          public Exception call() throws Exception {
-
-            ContractTestUtils.NanoTimer timer =
-                new ContractTestUtils.NanoTimer();
-
-            Exception result = null;
-            try (DynamoDBMetadataStore store = new DynamoDBMetadataStore()) {
-              store.initialize(conf);
-            } catch (Exception e) {
-              LOG.error(e.getClass() + ": " + e.getMessage());
-              result = e;
-            }
-
-            timer.end("Parallel DynamoDB client creation %d", index);
-            LOG.info("Parallel DynamoDB client creation {} ran from {} to {}",
-                index, timer.getStartTime(), timer.getEndTime());
-            return result;
+        }
+        List<Exception> exceptions = new ArrayList<>(concurrentOps);
+        for (int f = 0; f < concurrentOps; f++) {
+          Exception outcome = futures[f].get();
+          if (outcome != null) {
+            exceptions.add(outcome);
           }
-        });
-      }
-      List<Exception> exceptions = new ArrayList<>(concurrentOps);
-      for (int f = 0; f < concurrentOps; f++) {
-        Exception outcome = futures[f].get();
-        if (outcome != null) {
-          exceptions.add(outcome);
+        }
+        deleteTable(db, tableName);
+        int exceptionsThrown = exceptions.size();
+        if (exceptionsThrown > 0) {
+          // at least one exception was thrown. Fail the test & nest the first
+          // exception caught
+          throw new AssertionError(exceptionsThrown + "/" + concurrentOps +
+              " threads threw exceptions while initializing on iteration " + i,
+              exceptions.get(0));
         }
       }
-      deleteTable(db, tableName);
-      int exceptionsThrown = exceptions.size();
-      if (exceptionsThrown > 0) {
-        // at least one exception was thrown. Fail the test & nest the first
-        // exception caught
-        throw new AssertionError(exceptionsThrown + "/" + concurrentOps +
-            " threads threw exceptions while initializing on iteration " + i,
-            exceptions.get(0));
-      }
+    } finally {
+      ms.close();
     }
+    assertEquals("Credential Ref count unchanged after closing metastore: "
+            + sharedCreds,
+        originalRefCount, sharedCreds.getRefCount());
   }
 }
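
The rewritten test brackets the metastore lifecycle with reference-count
assertions. A hedged sketch of the underlying pattern:

    int before = sharedCreds.getRefCount();
    DynamoDBMetadataStore ms = new DynamoDBMetadataStore();
    ms.initialize(fs);       // borrows a shared credential reference
    try {
      // ... exercise the store ...
    } finally {
      ms.close();            // must release that borrowed reference
    }
    assertEquals(before, sharedCreds.getRefCount());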


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[04/25] hadoop git commit: HADOOP-15583. Stabilize S3A Assumed Role support. Contributed by Steve Loughran.

Posted by su...@apache.org.
HADOOP-15583. Stabilize S3A Assumed Role support.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da9a39ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da9a39ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da9a39ee

Branch: refs/heads/HDFS-12943
Commit: da9a39eed138210de29b59b90c449b28da1c04f9
Parents: d81cd36
Author: Steve Loughran <st...@apache.org>
Authored: Wed Aug 8 22:57:10 2018 -0700
Committer: Steve Loughran <st...@apache.org>
Committed: Wed Aug 8 22:57:24 2018 -0700

----------------------------------------------------------------------
 .../src/main/resources/core-default.xml         |  18 +-
 .../fs/s3a/AWSCredentialProviderList.java       | 101 ++++++--
 .../org/apache/hadoop/fs/s3a/Constants.java     |  19 +-
 .../hadoop/fs/s3a/DefaultS3ClientFactory.java   | 190 ++++----------
 .../fs/s3a/InconsistentAmazonS3Client.java      |  10 +
 .../fs/s3a/InconsistentS3ClientFactory.java     |  11 +
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  35 ++-
 .../apache/hadoop/fs/s3a/S3ARetryPolicy.java    |   4 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java | 245 +++++++++++++++++--
 .../apache/hadoop/fs/s3a/S3ClientFactory.java   |   7 +-
 .../s3a/auth/AssumedRoleCredentialProvider.java |  78 +++++-
 .../fs/s3a/auth/NoAuthWithAWSException.java     |  37 +++
 .../apache/hadoop/fs/s3a/auth/RoleModel.java    |   8 +
 .../apache/hadoop/fs/s3a/auth/RolePolicies.java | 143 +++++++++--
 .../hadoop/fs/s3a/auth/STSClientFactory.java    |  78 ++++++
 .../fs/s3a/s3guard/DynamoDBClientFactory.java   |  18 +-
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   |  62 ++++-
 .../markdown/tools/hadoop-aws/assumed_roles.md  | 191 +++++++++++----
 .../src/site/markdown/tools/hadoop-aws/index.md |   6 +-
 .../hadoop/fs/s3a/ITestS3AConfiguration.java    | 117 ++++-----
 .../fs/s3a/ITestS3ATemporaryCredentials.java    |  71 +++---
 .../fs/s3a/ITestS3GuardListConsistency.java     |  68 +++--
 .../hadoop/fs/s3a/ITestS3GuardWriteBack.java    |  57 +++--
 .../hadoop/fs/s3a/MockS3ClientFactory.java      |   6 +-
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   |  76 +++++-
 .../hadoop/fs/s3a/auth/ITestAssumeRole.java     | 151 ++++++++++--
 .../auth/ITestAssumedRoleCommitOperations.java  |   5 +-
 .../hadoop/fs/s3a/auth/RoleTestUtils.java       |  24 +-
 .../s3guard/AbstractS3GuardToolTestBase.java    |   7 +-
 .../s3a/s3guard/ITestS3GuardConcurrentOps.java  | 147 ++++++-----
 30 files changed, 1461 insertions(+), 529 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 75acf48..29c2bc2 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1033,7 +1033,19 @@
   <name>fs.s3a.assumed.role.sts.endpoint</name>
   <value/>
   <description>
-    AWS Simple Token Service Endpoint. If unset, uses the default endpoint.
+    AWS Security Token Service Endpoint.
+    If unset, uses the default endpoint.
+    Only used if AssumedRoleCredentialProvider is the AWS credential provider.
+  </description>
+</property>
+
+<property>
+  <name>fs.s3a.assumed.role.sts.endpoint.region</name>
+  <value>us-west-1</value>
+  <description>
+    AWS Security Token Service Endpoint's region;
+    needed if fs.s3a.assumed.role.sts.endpoint points to an endpoint
+    other than the default one and the v4 signature is used.
     Only used if AssumedRoleCredentialProvider is the AWS credential provider.
   </description>
 </property>
@@ -1058,7 +1070,9 @@
 <property>
   <name>fs.s3a.connection.ssl.enabled</name>
   <value>true</value>
-  <description>Enables or disables SSL connections to S3.</description>
+  <description>Enables or disables SSL connections to AWS services.
+    Also sets the default port to use for the s3a proxy settings,
+    when not explicitly set in fs.s3a.proxy.port.</description>
 </property>
 
 <property>
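
The two settings travel together: a non-default STS endpoint needs its
signing region declared whenever the v4 signature is in use. A hedged
sketch (the endpoint hostname is illustrative):

    Configuration conf = new Configuration();
    conf.set("fs.s3a.assumed.role.sts.endpoint", "sts.eu-west-1.amazonaws.com");
    conf.set("fs.s3a.assumed.role.sts.endpoint.region", "eu-west-1");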

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
index 10201f0..f9052fa 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
@@ -18,25 +18,29 @@
 
 package org.apache.hadoop.fs.s3a;
 
+import java.io.Closeable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+
 import com.amazonaws.AmazonClientException;
 import com.amazonaws.auth.AWSCredentials;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.AnonymousAWSCredentials;
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.s3a.auth.NoAuthWithAWSException;
 import org.apache.hadoop.io.IOUtils;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.stream.Collectors;
-
 /**
  * A list of providers.
  *
@@ -62,10 +66,18 @@ public class AWSCredentialProviderList implements AWSCredentialsProvider,
   public static final String NO_AWS_CREDENTIAL_PROVIDERS
       = "No AWS Credential Providers";
 
+  static final String
+      CREDENTIALS_REQUESTED_WHEN_CLOSED
+      = "Credentials requested after provider list was closed";
+
   private final List<AWSCredentialsProvider> providers = new ArrayList<>(1);
   private boolean reuseLastProvider = true;
   private AWSCredentialsProvider lastProvider;
 
+  private final AtomicInteger refCount = new AtomicInteger(1);
+
+  private final AtomicBoolean closed = new AtomicBoolean(false);
+
   /**
    * Empty instance. This is not ready to be used.
    */
@@ -94,6 +106,9 @@ public class AWSCredentialProviderList implements AWSCredentialsProvider,
    */
   @Override
   public void refresh() {
+    if (isClosed()) {
+      return;
+    }
     for (AWSCredentialsProvider provider : providers) {
       provider.refresh();
     }
@@ -106,6 +121,11 @@ public class AWSCredentialProviderList implements AWSCredentialsProvider,
    */
   @Override
   public AWSCredentials getCredentials() {
+    if (isClosed()) {
+      LOG.warn(CREDENTIALS_REQUESTED_WHEN_CLOSED);
+      throw new NoAuthWithAWSException(
+          CREDENTIALS_REQUESTED_WHEN_CLOSED);
+    }
     checkNotEmpty();
     if (reuseLastProvider && lastProvider != null) {
       return lastProvider.getCredentials();
@@ -136,8 +156,7 @@ public class AWSCredentialProviderList implements AWSCredentialsProvider,
     if (lastException != null) {
       message += ": " + lastException;
     }
-    throw new AmazonClientException(message, lastException);
-
+    throw new NoAuthWithAWSException(message, lastException);
   }
 
   /**
@@ -156,7 +175,7 @@ public class AWSCredentialProviderList implements AWSCredentialsProvider,
    */
   public void checkNotEmpty() {
     if (providers.isEmpty()) {
-      throw new AmazonClientException(NO_AWS_CREDENTIAL_PROVIDERS);
+      throw new NoAuthWithAWSException(NO_AWS_CREDENTIAL_PROVIDERS);
     }
   }
 
@@ -178,8 +197,38 @@ public class AWSCredentialProviderList implements AWSCredentialsProvider,
    */
   @Override
   public String toString() {
-    return "AWSCredentialProviderList: " +
-        StringUtils.join(providers, " ");
+    return "AWSCredentialProviderList[" +
+        "refcount= " + refCount.get() + ": [" +
+        StringUtils.join(providers, ", ") + "]]";
+  }
+
+  /**
+   * Get a reference to this object with an updated reference count.
+   *
+   * @return a reference to this
+   */
+  public synchronized AWSCredentialProviderList share() {
+    Preconditions.checkState(!closed.get(), "Provider list is closed");
+    refCount.incrementAndGet();
+    return this;
+  }
+
+  /**
+   * Get the current reference count.
+   * @return the current ref count
+   */
+  @VisibleForTesting
+  public int getRefCount() {
+    return refCount.get();
+  }
+
+  /**
+   * Get the closed flag.
+   * @return true iff the list is closed.
+   */
+  @VisibleForTesting
+  public boolean isClosed() {
+    return closed.get();
   }
 
   /**
@@ -190,9 +239,29 @@ public class AWSCredentialProviderList implements AWSCredentialsProvider,
    */
   @Override
   public void close() {
-    for(AWSCredentialsProvider p: providers) {
+    synchronized (this) {
+      if (closed.get()) {
+        // already closed: no-op
+        return;
+      }
+      int remainder = refCount.decrementAndGet();
+      if (remainder != 0) {
+        // still actively used, or somehow things are
+        // now negative
+        LOG.debug("Not closing {}", this);
+        return;
+      }
+      // at this point, the closing is going to happen
+      LOG.debug("Closing {}", this);
+      closed.set(true);
+    }
+
+    // do this outside the synchronized block.
+    for (AWSCredentialsProvider p : providers) {
       if (p instanceof Closeable) {
-        IOUtils.closeStream((Closeable)p);
+        IOUtils.closeStream((Closeable) p);
+      } else if (p instanceof AutoCloseable) {
+        S3AUtils.closeAutocloseables(LOG, (AutoCloseable)p);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index c521936..a8da6ec 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -84,10 +84,27 @@ public final class Constants {
   public static final String ASSUMED_ROLE_SESSION_DURATION =
       "fs.s3a.assumed.role.session.duration";
 
-  /** Simple Token Service Endpoint. If unset, uses the default endpoint. */
+  /** Security Token Service Endpoint. If unset, uses the default endpoint. */
   public static final String ASSUMED_ROLE_STS_ENDPOINT =
       "fs.s3a.assumed.role.sts.endpoint";
 
+  /**
+   * Region for the STS endpoint; only relevant if the endpoint
+   * is set.
+   */
+  public static final String ASSUMED_ROLE_STS_ENDPOINT_REGION =
+      "fs.s3a.assumed.role.sts.endpoint.region";
+
+  /**
+   * Default value for the STS endpoint region; needed for
+   * v4 signing.
+   */
+  public static final String ASSUMED_ROLE_STS_ENDPOINT_REGION_DEFAULT =
+      "us-west-1";
+
+  /**
+   * Default duration of an assumed role.
+   */
   public static final String ASSUMED_ROLE_SESSION_DURATION_DEFAULT = "30m";
 
   /** list of providers to authenticate for the assumed role. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java
index f33b25e..ade317f 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java
@@ -18,59 +18,45 @@
 
 package org.apache.hadoop.fs.s3a;
 
+import java.io.IOException;
+import java.net.URI;
+
 import com.amazonaws.ClientConfiguration;
-import com.amazonaws.Protocol;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.services.s3.AmazonS3;
 import com.amazonaws.services.s3.AmazonS3Client;
 import com.amazonaws.services.s3.S3ClientOptions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.util.VersionInfo;
 import org.slf4j.Logger;
 
-import java.io.IOException;
-import java.net.URI;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
 
-import static org.apache.hadoop.fs.s3a.Constants.*;
-import static org.apache.hadoop.fs.s3a.S3AUtils.createAWSCredentialProviderSet;
-import static org.apache.hadoop.fs.s3a.S3AUtils.intOption;
+import static org.apache.hadoop.fs.s3a.Constants.ENDPOINT;
+import static org.apache.hadoop.fs.s3a.Constants.PATH_STYLE_ACCESS;
 
 /**
- * The default factory implementation, which calls the AWS SDK to configure
- * and create an {@link AmazonS3Client} that communicates with the S3 service.
+ * The default {@link S3ClientFactory} implementation.
+ * This calls the AWS SDK to configure and create an
+ * {@link AmazonS3Client} that communicates with the S3 service.
  */
-public class DefaultS3ClientFactory extends Configured implements
-    S3ClientFactory {
+public class DefaultS3ClientFactory extends Configured
+    implements S3ClientFactory {
 
   protected static final Logger LOG = S3AFileSystem.LOG;
 
   @Override
-  public AmazonS3 createS3Client(URI name) throws IOException {
+  public AmazonS3 createS3Client(URI name,
+      final String bucket,
+      final AWSCredentialsProvider credentials) throws IOException {
     Configuration conf = getConf();
-    AWSCredentialsProvider credentials =
-        createAWSCredentialProviderSet(name, conf);
-    final ClientConfiguration awsConf = createAwsConf(getConf());
-    AmazonS3 s3 = newAmazonS3Client(credentials, awsConf);
-    return createAmazonS3Client(s3, conf, credentials, awsConf);
+    final ClientConfiguration awsConf = S3AUtils.createAwsConf(getConf(), bucket);
+    return configureAmazonS3Client(
+        newAmazonS3Client(credentials, awsConf), conf);
   }
 
   /**
-   * Create a new {@link ClientConfiguration}.
-   * @param conf The Hadoop configuration
-   * @return new AWS client configuration
-   */
-  public static ClientConfiguration createAwsConf(Configuration conf) {
-    final ClientConfiguration awsConf = new ClientConfiguration();
-    initConnectionSettings(conf, awsConf);
-    initProxySupport(conf, awsConf);
-    initUserAgent(conf, awsConf);
-    return awsConf;
-  }
-
-  /**
-   * Wrapper around constructor for {@link AmazonS3} client.  Override this to
-   * provide an extended version of the client
+   * Wrapper around constructor for {@link AmazonS3} client.
+   * Override this to provide an extended version of the client
    * @param credentials credentials to use
    * @param awsConf  AWS configuration
    * @return  new AmazonS3 client
@@ -81,120 +67,17 @@ public class DefaultS3ClientFactory extends Configured implements
   }
 
   /**
-   * Initializes all AWS SDK settings related to connection management.
-   *
-   * @param conf Hadoop configuration
-   * @param awsConf AWS SDK configuration
-   */
-  private static void initConnectionSettings(Configuration conf,
-      ClientConfiguration awsConf) {
-    awsConf.setMaxConnections(intOption(conf, MAXIMUM_CONNECTIONS,
-        DEFAULT_MAXIMUM_CONNECTIONS, 1));
-    boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS,
-        DEFAULT_SECURE_CONNECTIONS);
-    awsConf.setProtocol(secureConnections ?  Protocol.HTTPS : Protocol.HTTP);
-    awsConf.setMaxErrorRetry(intOption(conf, MAX_ERROR_RETRIES,
-        DEFAULT_MAX_ERROR_RETRIES, 0));
-    awsConf.setConnectionTimeout(intOption(conf, ESTABLISH_TIMEOUT,
-        DEFAULT_ESTABLISH_TIMEOUT, 0));
-    awsConf.setSocketTimeout(intOption(conf, SOCKET_TIMEOUT,
-        DEFAULT_SOCKET_TIMEOUT, 0));
-    int sockSendBuffer = intOption(conf, SOCKET_SEND_BUFFER,
-        DEFAULT_SOCKET_SEND_BUFFER, 2048);
-    int sockRecvBuffer = intOption(conf, SOCKET_RECV_BUFFER,
-        DEFAULT_SOCKET_RECV_BUFFER, 2048);
-    awsConf.setSocketBufferSizeHints(sockSendBuffer, sockRecvBuffer);
-    String signerOverride = conf.getTrimmed(SIGNING_ALGORITHM, "");
-    if (!signerOverride.isEmpty()) {
-      LOG.debug("Signer override = {}", signerOverride);
-      awsConf.setSignerOverride(signerOverride);
-    }
-  }
-
-  /**
-   * Initializes AWS SDK proxy support if configured.
-   *
-   * @param conf Hadoop configuration
-   * @param awsConf AWS SDK configuration
-   * @throws IllegalArgumentException if misconfigured
-   */
-  private static void initProxySupport(Configuration conf,
-      ClientConfiguration awsConf) throws IllegalArgumentException {
-    String proxyHost = conf.getTrimmed(PROXY_HOST, "");
-    int proxyPort = conf.getInt(PROXY_PORT, -1);
-    if (!proxyHost.isEmpty()) {
-      awsConf.setProxyHost(proxyHost);
-      if (proxyPort >= 0) {
-        awsConf.setProxyPort(proxyPort);
-      } else {
-        if (conf.getBoolean(SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS)) {
-          LOG.warn("Proxy host set without port. Using HTTPS default 443");
-          awsConf.setProxyPort(443);
-        } else {
-          LOG.warn("Proxy host set without port. Using HTTP default 80");
-          awsConf.setProxyPort(80);
-        }
-      }
-      String proxyUsername = conf.getTrimmed(PROXY_USERNAME);
-      String proxyPassword = conf.getTrimmed(PROXY_PASSWORD);
-      if ((proxyUsername == null) != (proxyPassword == null)) {
-        String msg = "Proxy error: " + PROXY_USERNAME + " or " +
-            PROXY_PASSWORD + " set without the other.";
-        LOG.error(msg);
-        throw new IllegalArgumentException(msg);
-      }
-      awsConf.setProxyUsername(proxyUsername);
-      awsConf.setProxyPassword(proxyPassword);
-      awsConf.setProxyDomain(conf.getTrimmed(PROXY_DOMAIN));
-      awsConf.setProxyWorkstation(conf.getTrimmed(PROXY_WORKSTATION));
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Using proxy server {}:{} as user {} with password {} on " +
-                "domain {} as workstation {}", awsConf.getProxyHost(),
-            awsConf.getProxyPort(),
-            String.valueOf(awsConf.getProxyUsername()),
-            awsConf.getProxyPassword(), awsConf.getProxyDomain(),
-            awsConf.getProxyWorkstation());
-      }
-    } else if (proxyPort >= 0) {
-      String msg =
-          "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
-      LOG.error(msg);
-      throw new IllegalArgumentException(msg);
-    }
-  }
-
-  /**
-   * Initializes the User-Agent header to send in HTTP requests to the S3
-   * back-end.  We always include the Hadoop version number.  The user also
-   * may set an optional custom prefix to put in front of the Hadoop version
-   * number.  The AWS SDK interally appends its own information, which seems
-   * to include the AWS SDK version, OS and JVM version.
+   * Configure S3 client from the Hadoop configuration.
    *
-   * @param conf Hadoop configuration
-   * @param awsConf AWS SDK configuration
-   */
-  private static void initUserAgent(Configuration conf,
-      ClientConfiguration awsConf) {
-    String userAgent = "Hadoop " + VersionInfo.getVersion();
-    String userAgentPrefix = conf.getTrimmed(USER_AGENT_PREFIX, "");
-    if (!userAgentPrefix.isEmpty()) {
-      userAgent = userAgentPrefix + ", " + userAgent;
-    }
-    LOG.debug("Using User-Agent: {}", userAgent);
-    awsConf.setUserAgentPrefix(userAgent);
-  }
-
-  /**
-   * Creates an {@link AmazonS3Client} from the established configuration.
+   * This includes: endpoint, path-style access and possibly other
+   * options.
    *
    * @param conf Hadoop configuration
-   * @param credentials AWS credentials
-   * @param awsConf AWS SDK configuration
    * @return S3 client
    * @throws IllegalArgumentException if misconfigured
    */
-  private static AmazonS3 createAmazonS3Client(AmazonS3 s3, Configuration conf,
-      AWSCredentialsProvider credentials, ClientConfiguration awsConf)
+  private static AmazonS3 configureAmazonS3Client(AmazonS3 s3,
+      Configuration conf)
       throws IllegalArgumentException {
     String endPoint = conf.getTrimmed(ENDPOINT, "");
     if (!endPoint.isEmpty()) {
@@ -206,21 +89,29 @@ public class DefaultS3ClientFactory extends Configured implements
         throw new IllegalArgumentException(msg, e);
       }
     }
-    enablePathStyleAccessIfRequired(s3, conf);
-    return s3;
+    return applyS3ClientOptions(s3, conf);
   }
 
   /**
-   * Enables path-style access to S3 buckets if configured.  By default, the
+   * Perform any tuning of the {@code S3ClientOptions} settings based on
+   * the Hadoop configuration.
+   * This is different from the general AWS configuration creation as
+   * it is unique to S3 connections.
+   *
+   * The {@link Constants#PATH_STYLE_ACCESS} option enables path-style access
+   * to S3 buckets if configured.  By default, the
    * behavior is to use virtual hosted-style access with URIs of the form
-   * http://bucketname.s3.amazonaws.com.  Enabling path-style access and a
+   * {@code http://bucketname.s3.amazonaws.com}
+   * Enabling path-style access and a
    * region-specific endpoint switches the behavior to use URIs of the form
-   * http://s3-eu-west-1.amazonaws.com/bucketname.
-   *
+   * {@code http://s3-eu-west-1.amazonaws.com/bucketname}.
+   * It is common to use this when connecting to private S3 servers, as it
+   * avoids the need to play with DNS entries.
    * @param s3 S3 client
    * @param conf Hadoop configuration
+   * @return the S3 client
    */
-  private static void enablePathStyleAccessIfRequired(AmazonS3 s3,
+  private static AmazonS3 applyS3ClientOptions(AmazonS3 s3,
       Configuration conf) {
     final boolean pathStyleAccess = conf.getBoolean(PATH_STYLE_ACCESS, false);
     if (pathStyleAccess) {
@@ -229,5 +120,6 @@ public class DefaultS3ClientFactory extends Configured implements
           .setPathStyleAccess(true)
           .build());
     }
+    return s3;
   }
 }
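
The protected newAmazonS3Client() method is now the extension point for
alternative clients, as InconsistentS3ClientFactory below demonstrates. A
hedged sketch of a hypothetical subclass using the same hook:

    import com.amazonaws.ClientConfiguration;
    import com.amazonaws.auth.AWSCredentialsProvider;
    import com.amazonaws.services.s3.AmazonS3;

    public class AuditingS3ClientFactory extends DefaultS3ClientFactory {
      @Override
      protected AmazonS3 newAmazonS3Client(AWSCredentialsProvider credentials,
          ClientConfiguration awsConf) {
        // log, then delegate to the default client construction
        LOG.info("Creating S3 client, user agent prefix: {}",
            awsConf.getUserAgentPrefix());
        return super.newAmazonS3Client(credentials, awsConf);
      }
    }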

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentAmazonS3Client.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentAmazonS3Client.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentAmazonS3Client.java
index 99ed87d..2cd1aae 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentAmazonS3Client.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentAmazonS3Client.java
@@ -114,6 +114,16 @@ public class InconsistentAmazonS3Client extends AmazonS3Client {
   /** Map of key to delay -> time it was created. */
   private Map<String, Long> delayedPutKeys = new HashMap<>();
 
+  /**
+   * Instantiate.
+   * This subclasses a deprecated constructor of the parent
+   * {@code AmazonS3Client} class; we can't use the builder API because
+   * that only creates the consistent client.
+   * @param credentials credentials to auth.
+   * @param clientConfiguration connection settings
+   * @param conf hadoop configuration.
+   */
+  @SuppressWarnings("deprecation")
   public InconsistentAmazonS3Client(AWSCredentialsProvider credentials,
       ClientConfiguration clientConfiguration, Configuration conf) {
     super(credentials, clientConfiguration);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentS3ClientFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentS3ClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentS3ClientFactory.java
index 17d268b..932c472 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentS3ClientFactory.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentS3ClientFactory.java
@@ -21,16 +21,27 @@ package org.apache.hadoop.fs.s3a;
 import com.amazonaws.ClientConfiguration;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.services.s3.AmazonS3;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
 /**
  * S3 Client factory used for testing with eventual consistency fault injection.
+ * This client is for testing <i>only</i>; it is in the production
+ * {@code hadoop-aws} module to enable integration tests to use this
+ * just by editing the Hadoop configuration used to bring up the client.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class InconsistentS3ClientFactory extends DefaultS3ClientFactory {
 
+  /**
+   * Create the inconsistent client.
+   * Logs a warning that this is being done.
+   * @param credentials credentials to use
+   * @param awsConf  AWS configuration
+   * @return an inconsistent client.
+   */
   @Override
   protected AmazonS3 newAmazonS3Client(AWSCredentialsProvider credentials,
       ClientConfiguration awsConf) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 737d7da..72a5fde 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -77,8 +77,9 @@ import com.amazonaws.event.ProgressListener;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ListeningExecutorService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -124,9 +125,6 @@ import static org.apache.hadoop.fs.s3a.Statistic.*;
 import static org.apache.commons.lang3.StringUtils.isNotBlank;
 import static org.apache.commons.lang3.StringUtils.isNotEmpty;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 /**
  * The core S3A Filesystem implementation.
  *
@@ -205,6 +203,8 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   private boolean useListV1;
   private MagicCommitIntegration committerIntegration;
 
+  private AWSCredentialProviderList credentials;
+
   /** Add any deprecated keys. */
   @SuppressWarnings("deprecation")
   private static void addDeprecatedKeys() {
@@ -252,8 +252,10 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
       Class<? extends S3ClientFactory> s3ClientFactoryClass = conf.getClass(
           S3_CLIENT_FACTORY_IMPL, DEFAULT_S3_CLIENT_FACTORY_IMPL,
           S3ClientFactory.class);
+
+      credentials = createAWSCredentialProviderSet(name, conf);
       s3 = ReflectionUtils.newInstance(s3ClientFactoryClass, conf)
-          .createS3Client(name);
+          .createS3Client(name, bucket, credentials);
       invoker = new Invoker(new S3ARetryPolicy(getConf()), onRetry);
       s3guardInvoker = new Invoker(new S3GuardExistsRetryPolicy(getConf()),
           onRetry);
@@ -2470,12 +2472,11 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
         transfers.shutdownNow(true);
         transfers = null;
       }
-      if (metadataStore != null) {
-        metadataStore.close();
-        metadataStore = null;
-      }
-      IOUtils.closeQuietly(instrumentation);
+      S3AUtils.closeAll(LOG, metadataStore, instrumentation);
+      metadataStore = null;
       instrumentation = null;
+      closeAutocloseables(LOG, credentials);
+      credentials = null;
     }
   }
 
@@ -2885,6 +2886,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
     }
     sb.append(", boundedExecutor=").append(boundedThreadPool);
     sb.append(", unboundedExecutor=").append(unboundedThreadPool);
+    sb.append(", credentials=").append(credentials);
     sb.append(", statistics {")
         .append(statistics)
         .append("}");
@@ -3319,4 +3321,17 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
       return false;
     }
   }
+
+  /**
+   * Get a shared copy of the AWS credentials, with its reference
+   * counter updated.
+   * Caller is required to call {@code close()} on this after
+   * they have finished using it.
+   * @param purpose what the credentials are needed for; initially used for logging
+   * @return a reference to shared credentials.
+   */
+  public AWSCredentialProviderList shareCredentials(final String purpose) {
+    LOG.debug("Sharing credentials for: {}", purpose);
+    return credentials.share();
+  }
 }

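The shareCredentials() method above is reference counted: every call increments the counter, and each consumer must close() its shared reference. A hedged usage sketch, not part of the commit (the helper class and purpose string are illustrative):

    import org.apache.hadoop.fs.s3a.AWSCredentialProviderList;
    import org.apache.hadoop.fs.s3a.S3AFileSystem;

    public final class SharedCredentialsExample {
      private SharedCredentialsExample() {
      }

      /** Borrow the filesystem's credential chain for another AWS client. */
      public static void withSharedCredentials(S3AFileSystem fs) {
        AWSCredentialProviderList credentials = fs.shareCredentials("example");
        try {
          // hand the shared provider list to another AWS service client here
        } finally {
          // decrements the reference count; the underlying providers are
          // only closed once the last reference has been released
          credentials.close();
        }
      }
    }
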
http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java
index 2b361fd..e6e7895 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java
@@ -37,6 +37,7 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.auth.NoAuthWithAWSException;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.net.ConnectTimeoutException;
@@ -154,8 +155,9 @@ public class S3ARetryPolicy implements RetryPolicy {
     policyMap.put(InterruptedException.class, fail);
     // note this does not pick up subclasses (like socket timeout)
     policyMap.put(InterruptedIOException.class, fail);
-    // interesting question: should this be retried ever?
+    // Access denial and auth exceptions are not retried
     policyMap.put(AccessDeniedException.class, fail);
+    policyMap.put(NoAuthWithAWSException.class, fail);
     policyMap.put(FileNotFoundException.class, fail);
     policyMap.put(InvalidRequestException.class, fail);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index a5f7d75..9908fd1 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.fs.s3a;
 import com.amazonaws.AbortedException;
 import com.amazonaws.AmazonClientException;
 import com.amazonaws.AmazonServiceException;
+import com.amazonaws.ClientConfiguration;
+import com.amazonaws.Protocol;
 import com.amazonaws.SdkBaseException;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
@@ -44,15 +46,18 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.s3a.auth.NoAuthWithAWSException;
 import org.apache.hadoop.fs.s3native.S3xLoginHelper;
 import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.security.ProviderUtils;
+import org.apache.hadoop.util.VersionInfo;
 
 import com.google.common.collect.Lists;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
+import java.io.Closeable;
 import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -174,11 +179,17 @@ public final class S3AUtils {
        // call considered a sign of connectivity failure
         return (EOFException)new EOFException(message).initCause(exception);
       }
+      if (exception instanceof NoAuthWithAWSException) {
+        // the exception raised by AWSCredentialProvider list if the
+        // credentials were not accepted.
+        return (AccessDeniedException)new AccessDeniedException(path, null,
+            exception.toString()).initCause(exception);
+      }
       return new AWSClientIOException(message, exception);
     } else {
       if (exception instanceof AmazonDynamoDBException) {
         // special handling for dynamo DB exceptions
-        return translateDynamoDBException(message,
+        return translateDynamoDBException(path, message,
             (AmazonDynamoDBException)exception);
       }
       IOException ioe;
@@ -373,20 +384,45 @@ public final class S3AUtils {
 
   /**
    * Translate a DynamoDB exception into an IOException.
+   *
+   * @param path path in the DDB
    * @param message preformatted message for the exception
-   * @param ex exception
+   * @param ddbException exception
    * @return an exception to throw.
    */
-  public static IOException translateDynamoDBException(String message,
-      AmazonDynamoDBException ex) {
-    if (isThrottleException(ex)) {
-      return new AWSServiceThrottledException(message, ex);
+  public static IOException translateDynamoDBException(final String path,
+      final String message,
+      final AmazonDynamoDBException ddbException) {
+    if (isThrottleException(ddbException)) {
+      return new AWSServiceThrottledException(message, ddbException);
     }
-    if (ex instanceof ResourceNotFoundException) {
+    if (ddbException instanceof ResourceNotFoundException) {
       return (FileNotFoundException) new FileNotFoundException(message)
-          .initCause(ex);
+          .initCause(ddbException);
+    }
+    final int statusCode = ddbException.getStatusCode();
+    final String errorCode = ddbException.getErrorCode();
+    IOException result = null;
+    // 400 gets used a lot by DDB
+    if (statusCode == 400) {
+      switch (errorCode) {
+      case "AccessDeniedException":
+        result = (IOException) new AccessDeniedException(
+            path,
+            null,
+            ddbException.toString())
+            .initCause(ddbException);
+        break;
+
+      default:
+        result = new AWSBadRequestException(message, ddbException);
+      }
+
     }
-    return new AWSServiceIOException(message, ex);
+    if (result == null) {
+      result = new AWSServiceIOException(message, ddbException);
+    }
+    return result;
   }
 
   /**
@@ -738,6 +774,29 @@ public final class S3AUtils {
       String baseKey,
       String overrideVal)
       throws IOException {
+    return lookupPassword(bucket, conf, baseKey, overrideVal, "");
+  }
+
+  /**
+   * Get a password from a configuration, including JCEKS files, handling both
+   * the absolute key and bucket override.
+   * @param bucket bucket or "" if none known
+   * @param conf configuration
+   * @param baseKey base key to look up, e.g. "fs.s3a.secret.key"
+   * @param overrideVal override value: if non-empty this is used instead of
+   * querying the configuration.
+   * @param defVal value to return if there is no password
+   * @return a password or the value of defVal.
+   * @throws IOException on any IO problem
+   * @throws IllegalArgumentException bad arguments
+   */
+  public static String lookupPassword(
+      String bucket,
+      Configuration conf,
+      String baseKey,
+      String overrideVal,
+      String defVal)
+      throws IOException {
     String initialVal;
     Preconditions.checkArgument(baseKey.startsWith(FS_S3A_PREFIX),
         "%s does not start with $%s", baseKey, FS_S3A_PREFIX);
@@ -757,7 +816,7 @@ public final class S3AUtils {
       // no bucket, make the initial value the override value
       initialVal = overrideVal;
     }
-    return getPassword(conf, baseKey, initialVal);
+    return getPassword(conf, baseKey, initialVal, defVal);
   }
 
   /**
@@ -1059,6 +1118,134 @@ public final class S3AUtils {
     }
   }
 
+  /**
+   * Create a new AWS {@code ClientConfiguration}.
+   * All clients to AWS services <i>MUST</i> use this for consistent setup
+   * of connectivity, UA, proxy settings.
+   * @param conf The Hadoop configuration
+   * @param bucket Optional bucket to use to look up per-bucket proxy secrets
+   * @return new AWS client configuration
+   */
+  public static ClientConfiguration createAwsConf(Configuration conf,
+      String bucket)
+      throws IOException {
+    final ClientConfiguration awsConf = new ClientConfiguration();
+    initConnectionSettings(conf, awsConf);
+    initProxySupport(conf, bucket, awsConf);
+    initUserAgent(conf, awsConf);
+    return awsConf;
+  }
+
+  /**
+   * Initializes all AWS SDK settings related to connection management.
+   *
+   * @param conf Hadoop configuration
+   * @param awsConf AWS SDK configuration
+   */
+  public static void initConnectionSettings(Configuration conf,
+      ClientConfiguration awsConf) {
+    awsConf.setMaxConnections(intOption(conf, MAXIMUM_CONNECTIONS,
+        DEFAULT_MAXIMUM_CONNECTIONS, 1));
+    boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS,
+        DEFAULT_SECURE_CONNECTIONS);
+    awsConf.setProtocol(secureConnections ?  Protocol.HTTPS : Protocol.HTTP);
+    awsConf.setMaxErrorRetry(intOption(conf, MAX_ERROR_RETRIES,
+        DEFAULT_MAX_ERROR_RETRIES, 0));
+    awsConf.setConnectionTimeout(intOption(conf, ESTABLISH_TIMEOUT,
+        DEFAULT_ESTABLISH_TIMEOUT, 0));
+    awsConf.setSocketTimeout(intOption(conf, SOCKET_TIMEOUT,
+        DEFAULT_SOCKET_TIMEOUT, 0));
+    int sockSendBuffer = intOption(conf, SOCKET_SEND_BUFFER,
+        DEFAULT_SOCKET_SEND_BUFFER, 2048);
+    int sockRecvBuffer = intOption(conf, SOCKET_RECV_BUFFER,
+        DEFAULT_SOCKET_RECV_BUFFER, 2048);
+    awsConf.setSocketBufferSizeHints(sockSendBuffer, sockRecvBuffer);
+    String signerOverride = conf.getTrimmed(SIGNING_ALGORITHM, "");
+    if (!signerOverride.isEmpty()) {
+     LOG.debug("Signer override = {}", signerOverride);
+      awsConf.setSignerOverride(signerOverride);
+    }
+  }
+
+  /**
+   * Initializes AWS SDK proxy support in the AWS client configuration
+   * if the S3A settings enable it.
+   *
+   * @param conf Hadoop configuration
+   * @param bucket Optional bucket to use to look up per-bucket proxy secrets
+   * @param awsConf AWS SDK configuration to update
+   * @throws IllegalArgumentException if misconfigured
+   * @throws IOException problem getting username/secret from password source.
+   */
+  public static void initProxySupport(Configuration conf,
+      String bucket,
+      ClientConfiguration awsConf) throws IllegalArgumentException,
+      IOException {
+    String proxyHost = conf.getTrimmed(PROXY_HOST, "");
+    int proxyPort = conf.getInt(PROXY_PORT, -1);
+    if (!proxyHost.isEmpty()) {
+      awsConf.setProxyHost(proxyHost);
+      if (proxyPort >= 0) {
+        awsConf.setProxyPort(proxyPort);
+      } else {
+        if (conf.getBoolean(SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS)) {
+          LOG.warn("Proxy host set without port. Using HTTPS default 443");
+          awsConf.setProxyPort(443);
+        } else {
+          LOG.warn("Proxy host set without port. Using HTTP default 80");
+          awsConf.setProxyPort(80);
+        }
+      }
+      final String proxyUsername = lookupPassword(bucket, conf, PROXY_USERNAME,
+          null, null);
+      final String proxyPassword = lookupPassword(bucket, conf, PROXY_PASSWORD,
+          null, null);
+      if ((proxyUsername == null) != (proxyPassword == null)) {
+        String msg = "Proxy error: " + PROXY_USERNAME + " or " +
+            PROXY_PASSWORD + " set without the other.";
+        LOG.error(msg);
+        throw new IllegalArgumentException(msg);
+      }
+      awsConf.setProxyUsername(proxyUsername);
+      awsConf.setProxyPassword(proxyPassword);
+      awsConf.setProxyDomain(conf.getTrimmed(PROXY_DOMAIN));
+      awsConf.setProxyWorkstation(conf.getTrimmed(PROXY_WORKSTATION));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Using proxy server {}:{} as user {} with password {} on " +
+                "domain {} as workstation {}", awsConf.getProxyHost(),
+            awsConf.getProxyPort(),
+            String.valueOf(awsConf.getProxyUsername()),
+            awsConf.getProxyPassword(), awsConf.getProxyDomain(),
+            awsConf.getProxyWorkstation());
+      }
+    } else if (proxyPort >= 0) {
+      String msg =
+          "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
+      LOG.error(msg);
+      throw new IllegalArgumentException(msg);
+    }
+  }
+
+  /**
+   * Initializes the User-Agent header to send in HTTP requests to AWS
+   * services.  We always include the Hadoop version number.  The user also
+   * may set an optional custom prefix to put in front of the Hadoop version
+   * number.  The AWS SDK internally appends its own information, which seems
+   * to include the AWS SDK version, OS and JVM version.
+   *
+   * @param conf Hadoop configuration
+   * @param awsConf AWS SDK configuration to update
+   */
+  private static void initUserAgent(Configuration conf,
+      ClientConfiguration awsConf) {
+    String userAgent = "Hadoop " + VersionInfo.getVersion();
+    String userAgentPrefix = conf.getTrimmed(USER_AGENT_PREFIX, "");
+    if (!userAgentPrefix.isEmpty()) {
+      userAgent = userAgentPrefix + ", " + userAgent;
+    }
+    LOG.debug("Using User-Agent: {}", userAgent);
+    awsConf.setUserAgentPrefix(userAgent);
+  }
 
   /**
    * An interface for use in lambda-expressions working with
@@ -1289,18 +1476,40 @@ public final class S3AUtils {
    * @param closeables the objects to close
    */
   public static void closeAll(Logger log,
-      java.io.Closeable... closeables) {
-    for (java.io.Closeable c : closeables) {
+      Closeable... closeables) {
+    if (log == null) {
+      log = LOG;
+    }
+    for (Closeable c : closeables) {
       if (c != null) {
         try {
-          if (log != null) {
-            log.debug("Closing {}", c);
-          }
+          log.debug("Closing {}", c);
           c.close();
         } catch (Exception e) {
-          if (log != null && log.isDebugEnabled()) {
-            log.debug("Exception in closing {}", c, e);
-          }
+          log.debug("Exception in closing {}", c, e);
+        }
+      }
+    }
+  }
+  /**
+   * Close the Closeable objects and <b>ignore</b> any Exception or
+   * null pointers.
+   * (This is the SLF4J equivalent of that in {@code IOUtils}).
+   * @param log the log to log at debug level. Can be null.
+   * @param closeables the objects to close
+   */
+  public static void closeAutocloseables(Logger log,
+      AutoCloseable... closeables) {
+    if (log == null) {
+      log = LOG;
+    }
+    for (AutoCloseable c : closeables) {
+      if (c != null) {
+        try {
+          log.debug("Closing {}", c);
+          c.close();
+        } catch (Exception e) {
+          log.debug("Exception in closing {}", c, e);
         }
       }
     }

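With createAwsConf() and the five-argument lookupPassword() now public in S3AUtils, other AWS service clients can reuse the same connection, proxy and User-Agent setup. A hedged sketch, not part of the commit ("fs.s3a.proxy.password" is one of the existing per-bucket-capable options; the helper class is illustrative):

    import java.io.IOException;

    import com.amazonaws.ClientConfiguration;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.s3a.S3AUtils;

    public final class AwsConfExample {
      private AwsConfExample() {
      }

      /** Shared AWS client setup plus a defaulted secret lookup. */
      public static ClientConfiguration clientConf(Configuration conf,
          String bucket) throws IOException {
        // connection, proxy and User-Agent settings, configured in one place
        ClientConfiguration awsConf = S3AUtils.createAwsConf(conf, bucket);
        // returns the default ("" here) when neither the per-bucket nor the
        // base option is set, instead of null; shown only for the semantics
        String proxyPassword = S3AUtils.lookupPassword(bucket, conf,
            "fs.s3a.proxy.password", null, "");
        return awsConf;
      }
    }
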
http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java
index 9abb362..b237e85 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.fs.s3a;
 import java.io.IOException;
 import java.net.URI;
 
+import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.services.s3.AmazonS3;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -37,9 +38,13 @@ public interface S3ClientFactory {
    * Creates a new {@link AmazonS3} client.
    *
    * @param name raw input S3A file system URI
+   * @param bucket Optional bucket to use to look up per-bucket proxy secrets
+   * @param credentialSet credentials to use
    * @return S3 client
    * @throws IOException IO problem
    */
-  AmazonS3 createS3Client(URI name) throws IOException;
+  AmazonS3 createS3Client(URI name,
+      final String bucket,
+      final AWSCredentialsProvider credentialSet) throws IOException;
 
 }

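Factories now receive the bucket and the credential chain explicitly, so per-bucket options and shared credentials flow through to the client. A hedged sketch, not part of the commit, of a delegating factory built on the new signature (the subclass is illustrative):

    import java.io.IOException;
    import java.net.URI;

    import com.amazonaws.auth.AWSCredentialsProvider;
    import com.amazonaws.services.s3.AmazonS3;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    import org.apache.hadoop.fs.s3a.DefaultS3ClientFactory;

    public class LoggingS3ClientFactory extends DefaultS3ClientFactory {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingS3ClientFactory.class);

      @Override
      public AmazonS3 createS3Client(URI name, String bucket,
          AWSCredentialsProvider credentialSet) throws IOException {
        // record which credential chain is in use, then delegate
        LOG.debug("Creating S3 client for {} using {}", name, credentialSet);
        return super.createS3Client(name, bucket, credentialSet);
      }
    }
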
http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java
index fdaf9bd..e5a3639 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java
@@ -24,9 +24,11 @@ import java.net.URI;
 import java.util.Locale;
 import java.util.concurrent.TimeUnit;
 
+import com.amazonaws.AmazonClientException;
 import com.amazonaws.auth.AWSCredentials;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;
+import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder;
 import com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException;
 import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
@@ -37,6 +39,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.s3a.AWSCredentialProviderList;
+import org.apache.hadoop.fs.s3a.S3AUtils;
+import org.apache.hadoop.fs.s3a.Invoker;
+import org.apache.hadoop.fs.s3a.S3ARetryPolicy;
 import org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -77,17 +82,21 @@ public class AssumedRoleCredentialProvider implements AWSCredentialsProvider,
 
   private final String arn;
 
+  private final AWSCredentialProviderList credentialsToSTS;
+
+  private final Invoker invoker;
+
   /**
    * Instantiate.
    * This calls {@link #getCredentials()} to fail fast on the inner
    * role credential retrieval.
-   * @param uri URI of endpoint.
+   * @param fsUri URI of the filesystem.
    * @param conf configuration
    * @throws IOException on IO problems and some parameter checking
    * @throws IllegalArgumentException invalid parameters
    * @throws AWSSecurityTokenServiceException problems getting credentials
    */
-  public AssumedRoleCredentialProvider(URI uri, Configuration conf)
+  public AssumedRoleCredentialProvider(URI fsUri, Configuration conf)
       throws IOException {
 
     arn = conf.getTrimmed(ASSUMED_ROLE_ARN, "");
@@ -99,13 +108,14 @@ public class AssumedRoleCredentialProvider implements AWSCredentialsProvider,
     Class<?>[] awsClasses = loadAWSProviderClasses(conf,
         ASSUMED_ROLE_CREDENTIALS_PROVIDER,
         SimpleAWSCredentialsProvider.class);
-    AWSCredentialProviderList credentials = new AWSCredentialProviderList();
+    credentialsToSTS = new AWSCredentialProviderList();
     for (Class<?> aClass : awsClasses) {
       if (this.getClass().equals(aClass)) {
         throw new IOException(E_FORBIDDEN_PROVIDER);
       }
-      credentials.add(createAWSCredentialProvider(conf, aClass, uri));
+      credentialsToSTS.add(createAWSCredentialProvider(conf, aClass, fsUri));
     }
+    LOG.debug("Credentials to obtain role credentials: {}", credentialsToSTS);
 
     // then the STS binding
     sessionName = conf.getTrimmed(ASSUMED_ROLE_SESSION_NAME,
@@ -122,14 +132,27 @@ public class AssumedRoleCredentialProvider implements AWSCredentialsProvider,
       LOG.debug("Scope down policy {}", policy);
       builder.withScopeDownPolicy(policy);
     }
-    String epr = conf.get(ASSUMED_ROLE_STS_ENDPOINT, "");
-    if (StringUtils.isNotEmpty(epr)) {
-      LOG.debug("STS Endpoint: {}", epr);
-      builder.withServiceEndpoint(epr);
-    }
-    LOG.debug("Credentials to obtain role credentials: {}", credentials);
-    builder.withLongLivedCredentialsProvider(credentials);
+    String endpoint = conf.get(ASSUMED_ROLE_STS_ENDPOINT, "");
+    String region = conf.get(ASSUMED_ROLE_STS_ENDPOINT_REGION,
+        ASSUMED_ROLE_STS_ENDPOINT_REGION_DEFAULT);
+    AWSSecurityTokenServiceClientBuilder stsbuilder =
+        STSClientFactory.builder(
+          conf,
+          fsUri.getHost(),
+          credentialsToSTS,
+          endpoint,
+          region);
+    // the STS client is not tracked for a shutdown in close(), because it
+    // (currently) throws an UnsupportedOperationException in shutdown().
+    builder.withStsClient(stsbuilder.build());
+
+    //now build the provider
     stsProvider = builder.build();
+
+    // to handle STS throttling by the AWS account, we
+    // need to retry
+    invoker = new Invoker(new S3ARetryPolicy(conf), this::operationRetried);
+
     // and force in a fail-fast check just to keep the stack traces less
     // convoluted
     getCredentials();
@@ -143,7 +166,17 @@ public class AssumedRoleCredentialProvider implements AWSCredentialsProvider,
   @Override
   public AWSCredentials getCredentials() {
     try {
-      return stsProvider.getCredentials();
+      return invoker.retryUntranslated("getCredentials",
+          true,
+          stsProvider::getCredentials);
+    } catch (IOException e) {
+      // this is in the signature of retryUntranslated;
+      // it's hard to see how this could be raised, but for
+      // completeness, it is wrapped as an Amazon Client Exception
+      // and rethrown.
+      throw new AmazonClientException(
+          "getCredentials failed: " + e,
+          e);
     } catch (AWSSecurityTokenServiceException e) {
       LOG.error("Failed to get credentials for role {}",
           arn, e);
@@ -161,7 +194,7 @@ public class AssumedRoleCredentialProvider implements AWSCredentialsProvider,
    */
   @Override
   public void close() {
-    stsProvider.close();
+    S3AUtils.closeAutocloseables(LOG, stsProvider, credentialsToSTS);
   }
 
   @Override
@@ -205,4 +238,23 @@ public class AssumedRoleCredentialProvider implements AWSCredentialsProvider,
     return r.toString();
   }
 
+  /**
+   * Callback from {@link Invoker} when an operation is retried.
+   * @param text text of the operation
+   * @param ex exception
+   * @param retries number of retries
+   * @param idempotent is the method idempotent
+   */
+  public void operationRetried(
+      String text,
+      Exception ex,
+      int retries,
+      boolean idempotent) {
+    if (retries == 0) {
+      // log on the first retry attempt of the credential access.
+      // At worst, this means one log entry every intermittent renewal
+      // time.
+      LOG.info("Retried {}", text);
+    }
+  }
 }

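The provider now builds its STS client through STSClientFactory, taking the endpoint and signing region from configuration. A hedged sketch, not part of the commit, of enabling it against a regional endpoint (the key strings are assumed to be the values of the ASSUMED_ROLE_* and credential-provider constants; the ARN is a placeholder):

    import org.apache.hadoop.conf.Configuration;

    public final class AssumedRoleSetup {
      private AssumedRoleSetup() {
      }

      /** Configure role assumption against a regional STS endpoint. */
      public static Configuration withRegionalSts(Configuration conf) {
        conf.set("fs.s3a.aws.credentials.provider",
            "org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider");
        // placeholder role ARN, not a real role
        conf.set("fs.s3a.assumed.role.arn",
            "arn:aws:iam::123456789012:role/example-role");
        conf.set("fs.s3a.assumed.role.sts.endpoint",
            "sts.eu-west-1.amazonaws.com");
        conf.set("fs.s3a.assumed.role.sts.endpoint.region", "eu-west-1");
        return conf;
      }
    }
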
http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/NoAuthWithAWSException.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/NoAuthWithAWSException.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/NoAuthWithAWSException.java
new file mode 100644
index 0000000..f48e17a
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/NoAuthWithAWSException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.auth;
+
+import com.amazonaws.AmazonClientException;
+
+/**
+ * A specific subclass of {@code AmazonClientException} which can
+ * be used in the retry logic to fail fast when there is any
+ * authentication problem.
+ */
+public class NoAuthWithAWSException extends AmazonClientException {
+
+  public NoAuthWithAWSException(final String message, final Throwable t) {
+    super(message, t);
+  }
+
+  public NoAuthWithAWSException(final String message) {
+    super(message);
+  }
+}

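Paired with the S3ARetryPolicy change earlier in this patch, throwing this exception makes an authentication failure fail fast instead of being retried, and translateException() maps it to AccessDeniedException. A hedged sketch, not part of the commit, of a provider signalling such a failure (the provider class is illustrative):

    import com.amazonaws.auth.AWSCredentials;
    import com.amazonaws.auth.AWSCredentialsProvider;

    import org.apache.hadoop.fs.s3a.auth.NoAuthWithAWSException;

    public class EmptyCredentialsProvider implements AWSCredentialsProvider {
      @Override
      public AWSCredentials getCredentials() {
        // fails fast: the retry policy maps this to "fail" rather than retry
        throw new NoAuthWithAWSException("No credentials available");
      }

      @Override
      public void refresh() {
        // nothing to refresh
      }
    }
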
http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RoleModel.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RoleModel.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RoleModel.java
index ca2c993..d4568b0 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RoleModel.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RoleModel.java
@@ -205,6 +205,14 @@ public class RoleModel {
     return new Policy(statements);
   }
 
+  /**
+   * From a set of statements, create a policy.
+   * @param statements statements
+   * @return the policy
+   */
+  public static Policy policy(final List<RoleModel.Statement> statements) {
+    return new Policy(statements);
+  }
 
   /**
    * Effect options.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java
index 6711eee..34ed295 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java
@@ -29,6 +29,55 @@ public final class RolePolicies {
   private RolePolicies() {
   }
 
+  /** All KMS operations: {@value}.*/
+  public static final String KMS_ALL_OPERATIONS = "kms:*";
+
+  /** KMS encryption. This is <i>Not</i> used by SSE-KMS: {@value}. */
+  public static final String KMS_ENCRYPT = "kms:Encrypt";
+
+  /**
+   * Decrypt data encrypted with SSE-KMS: {@value}.
+   */
+  public static final String KMS_DECRYPT = "kms:Decrypt";
+
+  /**
+   * Arn for all KMS keys: {@value}.
+   */
+  public static final String KMS_ALL_KEYS = "arn:aws:kms:*";
+
+  /**
+   * This is used by S3 to generate a per-object encryption key and
+   * the encrypted value of this, the latter being what it tags
+   * the object with for later decryption: {@value}.
+   */
+  public static final String KMS_GENERATE_DATA_KEY = "kms:GenerateDataKey";
+
+  /**
+   * Actions needed to read and write SSE-KMS data.
+   */
+  private static final String[] KMS_KEY_RW =
+      new String[]{KMS_DECRYPT, KMS_GENERATE_DATA_KEY};
+
+  /**
+   * Actions needed to read SSE-KMS data.
+   */
+  private static final String[] KMS_KEY_READ =
+      new String[] {KMS_DECRYPT};
+
+  /**
+   * Statement to allow KMS R/W access, so full use of
+   * SSE-KMS.
+   */
+  public static final Statement STATEMENT_ALLOW_SSE_KMS_RW =
+      statement(true, KMS_ALL_KEYS, KMS_KEY_RW);
+
+  /**
+   * Statement to allow read access to KMS keys, so the ability
+   * to read SSE-KMS data, but not to write it.
+   */
+  public static final Statement STATEMENT_ALLOW_SSE_KMS_READ =
+      statement(true, KMS_ALL_KEYS, KMS_KEY_READ);
+
   /**
    * All S3 operations: {@value}.
    */
@@ -52,7 +101,6 @@ public final class RolePolicies {
   public static final String S3_LIST_BUCKET_MULTPART_UPLOADS =
       "s3:ListBucketMultipartUploads";
 
-
   /**
    * List multipart upload is needed for the S3A Commit protocols.
    */
@@ -97,6 +145,8 @@ public final class RolePolicies {
 
   public static final String S3_GET_OBJECT_VERSION = "s3:GetObjectVersion";
 
+  public static final String S3_GET_BUCKET_LOCATION = "s3:GetBucketLocation";
+
   public static final String S3_GET_OBJECT_VERSION_ACL
       = "s3:GetObjectVersionAcl";
 
@@ -128,7 +178,8 @@ public final class RolePolicies {
   public static final String S3_RESTORE_OBJECT = "s3:RestoreObject";
 
   /**
-   * Actions needed to read data from S3 through S3A.
+   * Actions needed to read a file in S3 through S3A, excluding
+   * S3Guard and SSE-KMS.
    */
   public static final String[] S3_PATH_READ_OPERATIONS =
       new String[]{
@@ -136,18 +187,20 @@ public final class RolePolicies {
       };
 
   /**
-   * Actions needed to read data from S3 through S3A.
+   * Base actions needed to read data from S3 through S3A,
+   * excluding SSE-KMS data and S3Guard-ed buckets.
    */
   public static final String[] S3_ROOT_READ_OPERATIONS =
       new String[]{
           S3_LIST_BUCKET,
           S3_LIST_BUCKET_MULTPART_UPLOADS,
-          S3_GET_OBJECT,
+          S3_ALL_GET,
       };
 
   /**
    * Actions needed to write data to an S3A Path.
-   * This includes the appropriate read operations.
+   * This includes the appropriate read operations, but
+   * not SSE-KMS or S3Guard support.
    */
   public static final String[] S3_PATH_RW_OPERATIONS =
       new String[]{
@@ -163,6 +216,7 @@ public final class RolePolicies {
    * This is purely the extra operations needed for writing atop
    * of the read operation set.
    * Deny these and a path is still readable, but not writeable.
+   * Excludes: SSE-KMS and S3Guard permissions.
    */
   public static final String[] S3_PATH_WRITE_OPERATIONS =
       new String[]{
@@ -173,6 +227,7 @@ public final class RolePolicies {
 
   /**
    * Actions needed for R/W IO from the root of a bucket.
+   * Excludes: SSE-KMS and S3Guard permissions.
    */
   public static final String[] S3_ROOT_RW_OPERATIONS =
       new String[]{
@@ -190,26 +245,57 @@ public final class RolePolicies {
    */
   public static final String DDB_ALL_OPERATIONS = "dynamodb:*";
 
-  public static final String DDB_ADMIN = "dynamodb:*";
+  /**
+   * Operations needed for DDB/S3Guard Admin.
+   * For now: make this {@link #DDB_ALL_OPERATIONS}.
+   */
+  public static final String DDB_ADMIN = DDB_ALL_OPERATIONS;
 
+  /**
+   * Permission for DDB describeTable() operation: {@value}.
+   * This is used during initialization.
+   */
+  public static final String DDB_DESCRIBE_TABLE = "dynamodb:DescribeTable";
 
-  public static final String DDB_BATCH_WRITE = "dynamodb:BatchWriteItem";
+  /**
+   * Permission to query the DDB table: {@value}.
+   */
+  public static final String DDB_QUERY = "dynamodb:Query";
 
   /**
-   * All DynamoDB tables: {@value}.
+   * Permission for DDB operation to get a record: {@value}.
    */
-  public static final String ALL_DDB_TABLES = "arn:aws:dynamodb:::*";
+  public static final String DDB_GET_ITEM = "dynamodb:GetItem";
 
+  /**
+   * Permission for DDB write record operation: {@value}.
+   */
+  public static final String DDB_PUT_ITEM = "dynamodb:PutItem";
 
+  /**
+   * Permission for DDB update single item operation: {@value}.
+   */
+  public static final String DDB_UPDATE_ITEM = "dynamodb:UpdateItem";
 
-  public static final String WILDCARD = "*";
+  /**
+   * Permission for DDB delete operation: {@value}.
+   */
+  public static final String DDB_DELETE_ITEM = "dynamodb:DeleteItem";
 
   /**
-   * Allow all S3 Operations.
+   * Batch read permission for DDB: {@value}.
    */
-  public static final Statement STATEMENT_ALL_S3 = statement(true,
-      S3_ALL_BUCKETS,
-      S3_ALL_OPERATIONS);
+  public static final String DDB_BATCH_GET_ITEM = "dynamodb:BatchGetItem";
+
+  /**
+   * Batch write permission for DDB: {@value}.
+   */
+  public static final String DDB_BATCH_WRITE_ITEM = "dynamodb:BatchWriteItem";
+
+  /**
+   * All DynamoDB tables: {@value}.
+   */
+  public static final String ALL_DDB_TABLES = "arn:aws:dynamodb:*";
 
   /**
    * Statement to allow all DDB access.
@@ -218,11 +304,36 @@ public final class RolePolicies {
       ALL_DDB_TABLES, DDB_ALL_OPERATIONS);
 
   /**
-   * Allow all S3 and S3Guard operations.
+   * Statement to allow all client operations needed for S3Guard,
+   * but none of the admin operations.
+   */
+  public static final Statement STATEMENT_S3GUARD_CLIENT = statement(true,
+      ALL_DDB_TABLES,
+      DDB_BATCH_GET_ITEM,
+      DDB_BATCH_WRITE_ITEM,
+      DDB_DELETE_ITEM,
+      DDB_DESCRIBE_TABLE,
+      DDB_GET_ITEM,
+      DDB_PUT_ITEM,
+      DDB_QUERY,
+      DDB_UPDATE_ITEM
+      );
+
+  /**
+   * Allow all S3 Operations.
+   * This does not cover DDB or SSE-KMS.
+   */
+  public static final Statement STATEMENT_ALL_S3 = statement(true,
+      S3_ALL_BUCKETS,
+      S3_ALL_OPERATIONS);
+
+  /**
+   * Policy for all S3 and S3Guard operations, and SSE-KMS.
    */
   public static final Policy ALLOW_S3_AND_SGUARD = policy(
       STATEMENT_ALL_S3,
-      STATEMENT_ALL_DDB
+      STATEMENT_ALL_DDB,
+      STATEMENT_ALLOW_SSE_KMS_RW
   );
 
 }

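The new statements compose through RoleModel; a hedged sketch, not part of the commit, of a read-only role that can still act as an S3Guard client and read SSE-KMS data (the helper class is illustrative):

    import org.apache.hadoop.fs.s3a.auth.RoleModel;

    import static org.apache.hadoop.fs.s3a.auth.RoleModel.policy;
    import static org.apache.hadoop.fs.s3a.auth.RoleModel.statement;
    import static org.apache.hadoop.fs.s3a.auth.RolePolicies.*;

    public final class ReadOnlyRolePolicy {
      private ReadOnlyRolePolicy() {
      }

      /** Read-only S3 access plus S3Guard client and SSE-KMS read rights. */
      public static RoleModel.Policy readOnly() {
        return policy(
            statement(true, S3_ALL_BUCKETS, S3_ROOT_READ_OPERATIONS),
            STATEMENT_S3GUARD_CLIENT,
            STATEMENT_ALLOW_SSE_KMS_READ);
      }
    }
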
http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/STSClientFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/STSClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/STSClientFactory.java
new file mode 100644
index 0000000..10bf88c
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/STSClientFactory.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.auth;
+
+import java.io.IOException;
+
+import com.amazonaws.ClientConfiguration;
+import com.amazonaws.auth.AWSCredentialsProvider;
+import com.amazonaws.client.builder.AwsClientBuilder;
+import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder;
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.s3a.S3AUtils;
+
+/**
+ * Factory for creating STS Clients.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class STSClientFactory {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(STSClientFactory.class);
+
+  /**
+   * Create the builder ready for any final configuration options.
+   * Picks up connection settings from the Hadoop configuration, including
+   * proxy secrets.
+   * @param conf Configuration to act as source of options.
+   * @param bucket Optional bucket to use to look up per-bucket proxy secrets
+   * @param credentials AWS credential chain to use
+   * @param stsEndpoint optional endpoint, e.g. "https://sts.us-west-1.amazonaws.com"
+   * @param stsRegion the region, e.g. "us-west-1"
+   * @return the builder to call {@code build()}
+   * @throws IOException problem reading proxy secrets
+   */
+  public static AWSSecurityTokenServiceClientBuilder builder(
+      final Configuration conf,
+      final String bucket,
+      final AWSCredentialsProvider credentials, final String stsEndpoint,
+      final String stsRegion) throws IOException {
+    Preconditions.checkArgument(credentials != null, "No credentials");
+    final AWSSecurityTokenServiceClientBuilder builder
+        = AWSSecurityTokenServiceClientBuilder.standard();
+    final ClientConfiguration awsConf = S3AUtils.createAwsConf(conf, bucket);
+    builder.withClientConfiguration(awsConf);
+    builder.withCredentials(credentials);
+    if (StringUtils.isNotEmpty(stsEndpoint)) {
+      LOG.debug("STS Endpoint ={}", stsEndpoint);
+      builder.withEndpointConfiguration(
+          new AwsClientBuilder.EndpointConfiguration(stsEndpoint, stsRegion));
+    }
+    return builder;
+  }
+
+}

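A hedged usage sketch of the factory, not part of the commit (a null bucket skips per-bucket proxy secrets; the endpoint/region pair is an example only):

    import java.io.IOException;

    import com.amazonaws.auth.AWSCredentialsProvider;
    import com.amazonaws.services.securitytoken.AWSSecurityTokenService;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.s3a.auth.STSClientFactory;

    public final class StsClientExample {
      private StsClientExample() {
      }

      /** Build an STS client bound to a specific regional endpoint. */
      public static AWSSecurityTokenService create(Configuration conf,
          AWSCredentialsProvider credentials) throws IOException {
        return STSClientFactory.builder(
            conf,
            null,                            // no per-bucket proxy secrets
            credentials,
            "sts.us-west-1.amazonaws.com",   // regional endpoint
            "us-west-1")                     // matching signing region
            .build();
      }
    }
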
http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
index 91e64cd..9e1d2f4 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
@@ -34,10 +34,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.s3a.DefaultS3ClientFactory;
+import org.apache.hadoop.fs.s3a.S3AUtils;
 
 import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_REGION_KEY;
-import static org.apache.hadoop.fs.s3a.S3AUtils.createAWSCredentialProviderSet;
 
 /**
  * Interface to create a DynamoDB client.
@@ -58,10 +57,14 @@ public interface DynamoDBClientFactory extends Configurable {
    * it will indicate an error.
    *
    * @param defaultRegion the default region of the AmazonDynamoDB client
+   * @param bucket Optional bucket to use to look up per-bucket proxy secrets
+   * @param credentials credentials to use for authentication.
    * @return a new DynamoDB client
    * @throws IOException if any IO error happens
    */
-  AmazonDynamoDB createDynamoDBClient(String defaultRegion) throws IOException;
+  AmazonDynamoDB createDynamoDBClient(final String defaultRegion,
+      final String bucket,
+      final AWSCredentialsProvider credentials) throws IOException;
 
   /**
    * The default implementation for creating an AmazonDynamoDB.
@@ -69,16 +72,15 @@ public interface DynamoDBClientFactory extends Configurable {
   class DefaultDynamoDBClientFactory extends Configured
       implements DynamoDBClientFactory {
     @Override
-    public AmazonDynamoDB createDynamoDBClient(String defaultRegion)
+    public AmazonDynamoDB createDynamoDBClient(String defaultRegion,
+        final String bucket,
+        final AWSCredentialsProvider credentials)
         throws IOException {
       Preconditions.checkNotNull(getConf(),
           "Should have been configured before usage");
 
       final Configuration conf = getConf();
-      final AWSCredentialsProvider credentials =
-          createAWSCredentialProviderSet(null, conf);
-      final ClientConfiguration awsConf =
-          DefaultS3ClientFactory.createAwsConf(conf);
+      final ClientConfiguration awsConf = S3AUtils.createAwsConf(conf, bucket);
 
       final String region = getRegion(conf, defaultRegion);
       LOG.debug("Creating DynamoDB client in region {}", region);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index 43849b1..ba80b88 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
@@ -22,6 +22,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.net.URI;
+import java.nio.file.AccessDeniedException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -34,6 +35,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import com.amazonaws.AmazonClientException;
+import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
 import com.amazonaws.services.dynamodbv2.document.BatchWriteItemOutcome;
 import com.amazonaws.services.dynamodbv2.document.DynamoDB;
@@ -67,6 +69,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.AWSCredentialProviderList;
 import org.apache.hadoop.fs.s3a.Constants;
 import org.apache.hadoop.fs.s3a.Invoker;
 import org.apache.hadoop.fs.s3a.Retries;
@@ -75,13 +78,14 @@ import org.apache.hadoop.fs.s3a.S3AInstrumentation;
 import org.apache.hadoop.fs.s3a.S3ARetryPolicy;
 import org.apache.hadoop.fs.s3a.S3AUtils;
 import org.apache.hadoop.fs.s3a.Tristate;
+import org.apache.hadoop.fs.s3a.auth.RolePolicies;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import static org.apache.hadoop.fs.s3a.Constants.*;
-import static org.apache.hadoop.fs.s3a.S3AUtils.translateException;
+import static org.apache.hadoop.fs.s3a.S3AUtils.*;
 import static org.apache.hadoop.fs.s3a.s3guard.PathMetadataDynamoDBTranslation.*;
 import static org.apache.hadoop.fs.s3a.s3guard.S3Guard.*;
 
@@ -207,6 +211,7 @@ public class DynamoDBMetadataStore implements MetadataStore {
       new ValueMap().withBoolean(":false", false);
 
   private DynamoDB dynamoDB;
+  private AWSCredentialProviderList credentials;
   private String region;
   private Table table;
   private String tableName;
@@ -242,10 +247,16 @@ public class DynamoDBMetadataStore implements MetadataStore {
    * A utility function to create DynamoDB instance.
    * @param conf the file system configuration
    * @param s3Region region of the associated S3 bucket (if any).
+   * @param bucket Optional bucket to use to look up per-bucket proxy secrets
+   * @param credentials credentials.
    * @return DynamoDB instance.
    * @throws IOException I/O error.
    */
-  private static DynamoDB createDynamoDB(Configuration conf, String s3Region)
+  private static DynamoDB createDynamoDB(
+      final Configuration conf,
+      final String s3Region,
+      final String bucket,
+      final AWSCredentialsProvider credentials)
       throws IOException {
     Preconditions.checkNotNull(conf);
     final Class<? extends DynamoDBClientFactory> cls = conf.getClass(
@@ -254,10 +265,18 @@ public class DynamoDBMetadataStore implements MetadataStore {
         DynamoDBClientFactory.class);
     LOG.debug("Creating DynamoDB client {} with S3 region {}", cls, s3Region);
     final AmazonDynamoDB dynamoDBClient = ReflectionUtils.newInstance(cls, conf)
-        .createDynamoDBClient(s3Region);
+        .createDynamoDBClient(s3Region, bucket, credentials);
     return new DynamoDB(dynamoDBClient);
   }
 
+  /**
+   * {@inheritDoc}.
+   * The credentials for authenticating with S3 are requested from the
+   * FS via {@link S3AFileSystem#shareCredentials(String)}; this will
+   * increment the reference counter of these credentials.
+   * @param fs {@code S3AFileSystem} associated with the MetadataStore
+   * @throws IOException on a failure
+   */
   @Override
   @Retries.OnceRaw
   public void initialize(FileSystem fs) throws IOException {
@@ -274,11 +293,23 @@ public class DynamoDBMetadataStore implements MetadataStore {
       LOG.debug("Overriding S3 region with configured DynamoDB region: {}",
           region);
     } else {
-      region = owner.getBucketLocation();
+      try {
+        region = owner.getBucketLocation();
+      } catch (AccessDeniedException e) {
+        // access denied here == can't call getBucketLocation. Report meaningfully
+        URI uri = owner.getUri();
+        LOG.error("Failed to get bucket location from S3 bucket {}",
+            uri);
+        throw (IOException)new AccessDeniedException(
+            "S3 client role lacks permission "
+                + RolePolicies.S3_GET_BUCKET_LOCATION + " for " + uri)
+            .initCause(e);
+      }
       LOG.debug("Inferring DynamoDB region from S3 bucket: {}", region);
     }
     username = owner.getUsername();
-    dynamoDB = createDynamoDB(conf, region);
+    credentials = owner.shareCredentials("s3guard");
+    dynamoDB = createDynamoDB(conf, region, bucket, credentials);
 
     // use the bucket as the DynamoDB table name if not specified in config
     tableName = conf.getTrimmed(S3GUARD_DDB_TABLE_NAME_KEY, bucket);
@@ -311,6 +342,9 @@ public class DynamoDBMetadataStore implements MetadataStore {
    * must declare the table name and region in the
    * {@link Constants#S3GUARD_DDB_TABLE_NAME_KEY} and
    * {@link Constants#S3GUARD_DDB_REGION_KEY} respectively.
+   * It also creates a new credential provider list from the configuration,
+   * using the base fs.s3a.* options, as there is no bucket to infer per-bucket
+   * settings from.
    *
    * @see #initialize(FileSystem)
    * @throws IOException if there is an error
@@ -327,7 +361,8 @@ public class DynamoDBMetadataStore implements MetadataStore {
     region = conf.getTrimmed(S3GUARD_DDB_REGION_KEY);
     Preconditions.checkArgument(!StringUtils.isEmpty(region),
         "No DynamoDB region configured");
-    dynamoDB = createDynamoDB(conf, region);
+    credentials = createAWSCredentialProviderSet(null, conf);
+    dynamoDB = createDynamoDB(conf, region, null, credentials);
 
     username = UserGroupInformation.getCurrentUser().getShortUserName();
     initDataAccessRetries(conf);
@@ -778,12 +813,17 @@ public class DynamoDBMetadataStore implements MetadataStore {
     if (instrumentation != null) {
       instrumentation.storeClosed();
     }
-    if (dynamoDB != null) {
-      LOG.debug("Shutting down {}", this);
-      dynamoDB.shutdown();
-      dynamoDB = null;
+    try {
+      if (dynamoDB != null) {
+        LOG.debug("Shutting down {}", this);
+        dynamoDB.shutdown();
+        dynamoDB = null;
+      }
+    } finally {
+      closeAutocloseables(LOG, credentials);
+      credentials = null;
     }
   }
 
   @Override
   @Retries.OnceTranslated


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[10/25] hadoop git commit: HDDS-344. Remove multibyte characters from OzoneAcl. Contributed by Takanobu Asanuma.

Posted by su...@apache.org.
HDDS-344. Remove multibyte characters from OzoneAcl. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/778369ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/778369ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/778369ea

Branch: refs/heads/HDFS-12943
Commit: 778369ea0204e75ce86fc7da3321b046f8139d9a
Parents: 3d96bc6
Author: Márton Elek <el...@apache.org>
Authored: Thu Aug 9 14:23:41 2018 +0200
Committer: Márton Elek <el...@apache.org>
Committed: Thu Aug 9 14:26:37 2018 +0200

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/ozone/OzoneAcl.java      | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/778369ea/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
index ff0ac4e..1827b23 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
@@ -25,9 +25,11 @@ import java.util.Objects;
  * OzoneACL classes define bucket ACLs used in OZONE.
  *
  * ACLs in Ozone follow this pattern.
- * • user:name:rw
- * • group:name:rw
- * • world::rw
+ * <ul>
+ * <li>user:name:rw
+ * <li>group:name:rw
+ * <li>world::rw
+ * </ul>
  */
 public class OzoneAcl {
   private OzoneACLType type;


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[25/25] hadoop git commit: Merge branch 'trunk' into HDFS-12943

Posted by su...@apache.org.
Merge branch 'trunk' into HDFS-12943


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71a35813
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71a35813
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71a35813

Branch: refs/heads/HDFS-12943
Commit: 71a358131bd7430c95426a0d1d26771a6dfd1476
Parents: cc6f80f a2a8c48
Author: Chao Sun <su...@apache.org>
Authored: Fri Aug 10 16:31:17 2018 -0700
Committer: Chao Sun <su...@apache.org>
Committed: Fri Aug 10 16:31:17 2018 -0700

----------------------------------------------------------------------
 LICENSE.txt                                     |   2 +-
 .../src/main/resources/core-default.xml         |  18 +-
 hadoop-hdds/common/pom.xml                      |  34 ++
 .../java/org/apache/hadoop/ozone/OzoneAcl.java  |   8 +-
 .../apache/hadoop/utils/HddsVersionInfo.java    | 182 +++++++
 .../main/proto/DatanodeContainerProtocol.proto  |   1 +
 .../main/resources/hdds-version-info.properties |  26 +
 .../container/common/impl/ContainerData.java    |   1 -
 .../container/keyvalue/KeyValueContainer.java   |  54 +-
 .../container/keyvalue/KeyValueHandler.java     |  39 +-
 .../container/keyvalue/helpers/KeyUtils.java    |  50 +-
 .../container/keyvalue/impl/KeyManagerImpl.java |   4 +-
 .../keyvalue/interfaces/KeyManager.java         |   3 +-
 .../keyvalue/TestKeyValueContainer.java         |  16 -
 .../container/keyvalue/TestKeyValueHandler.java |  55 +-
 .../hadoop/hdds/server/events/EventQueue.java   |   7 +-
 .../scm/container/ContainerReportHandler.java   | 107 +++-
 .../replication/ReplicationActivityStatus.java  |  86 +++
 .../ReplicationActivityStatusMXBean.java        |  28 +
 .../replication/ReplicationRequest.java         |  28 +-
 .../hadoop/hdds/scm/events/SCMEvents.java       |   9 +
 .../hdds/scm/node/states/Node2ContainerMap.java |  10 +-
 .../hdds/scm/node/states/ReportResult.java      |  18 +-
 .../scm/server/StorageContainerManager.java     |  27 +-
 .../container/TestContainerReportHandler.java   | 228 ++++++++
 .../scm/node/states/Node2ContainerMapTest.java  | 308 -----------
 .../scm/node/states/TestNode2ContainerMap.java  | 328 +++++++++++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   5 +
 ...yAliasMapProtocolClientSideTranslatorPB.java |   6 +
 .../qjournal/client/QuorumJournalManager.java   |  11 +-
 .../aliasmap/InMemoryLevelDBAliasMapServer.java |   8 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |   1 -
 .../src/main/resources/hdfs-default.xml         |  18 +
 .../impl/TestInMemoryLevelDBAliasMapClient.java |  39 ++
 hadoop-ozone/common/pom.xml                     |  35 ++
 hadoop-ozone/common/src/main/bin/ozone          |   2 +-
 .../hadoop/ozone/util/OzoneVersionInfo.java     | 213 ++++++++
 .../resources/ozone-version-info.properties     |  27 +
 .../common/impl/TestContainerPersistence.java   |   8 -
 .../ozone/scm/TestCommittedBlockLengthAPI.java  | 216 --------
 .../TestGetCommittedBlockLengthAndPutKey.java   | 254 +++++++++
 .../fs/s3a/AWSCredentialProviderList.java       | 101 +++-
 .../org/apache/hadoop/fs/s3a/Constants.java     |  19 +-
 .../hadoop/fs/s3a/DefaultS3ClientFactory.java   | 190 ++-----
 .../fs/s3a/InconsistentAmazonS3Client.java      |  10 +
 .../fs/s3a/InconsistentS3ClientFactory.java     |  11 +
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  35 +-
 .../apache/hadoop/fs/s3a/S3ARetryPolicy.java    |   4 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java | 245 ++++++++-
 .../apache/hadoop/fs/s3a/S3ClientFactory.java   |   7 +-
 .../s3a/auth/AssumedRoleCredentialProvider.java |  78 ++-
 .../fs/s3a/auth/NoAuthWithAWSException.java     |  37 ++
 .../apache/hadoop/fs/s3a/auth/RoleModel.java    |   8 +
 .../apache/hadoop/fs/s3a/auth/RolePolicies.java | 143 ++++-
 .../hadoop/fs/s3a/auth/STSClientFactory.java    |  78 +++
 .../fs/s3a/s3guard/DynamoDBClientFactory.java   |  18 +-
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   |  62 ++-
 .../markdown/tools/hadoop-aws/assumed_roles.md  | 191 +++++--
 .../src/site/markdown/tools/hadoop-aws/index.md |   6 +-
 .../hadoop/fs/s3a/ITestS3AConfiguration.java    | 117 ++--
 .../fs/s3a/ITestS3ATemporaryCredentials.java    |  71 +--
 .../fs/s3a/ITestS3GuardListConsistency.java     |  68 ++-
 .../hadoop/fs/s3a/ITestS3GuardWriteBack.java    |  57 +-
 .../hadoop/fs/s3a/MockS3ClientFactory.java      |   6 +-
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   |  76 ++-
 .../hadoop/fs/s3a/auth/ITestAssumeRole.java     | 151 +++++-
 .../auth/ITestAssumedRoleCommitOperations.java  |   5 +-
 .../hadoop/fs/s3a/auth/RoleTestUtils.java       |  24 +-
 .../s3guard/AbstractS3GuardToolTestBase.java    |   7 +-
 .../s3a/s3guard/ITestS3GuardConcurrentOps.java  | 147 ++---
 .../hadoop-yarn/hadoop-yarn-common/pom.xml      |   8 +-
 .../hadoop/yarn/webapp/view/JQueryUI.java       |   4 +-
 .../webapps/static/dt-1.10.7/css/demo_page.css  | 110 ++++
 .../webapps/static/dt-1.10.7/css/demo_table.css | 538 +++++++++++++++++++
 .../webapps/static/dt-1.10.7/css/jui-dt.css     | 322 +++++++++++
 .../static/dt-1.10.7/images/Sorting icons.psd   | Bin 0 -> 27490 bytes
 .../static/dt-1.10.7/images/back_disabled.jpg   | Bin 0 -> 612 bytes
 .../static/dt-1.10.7/images/back_enabled.jpg    | Bin 0 -> 807 bytes
 .../webapps/static/dt-1.10.7/images/favicon.ico | Bin 0 -> 894 bytes
 .../dt-1.10.7/images/forward_disabled.jpg       | Bin 0 -> 635 bytes
 .../static/dt-1.10.7/images/forward_enabled.jpg | Bin 0 -> 852 bytes
 .../static/dt-1.10.7/images/sort_asc.png        | Bin 0 -> 263 bytes
 .../dt-1.10.7/images/sort_asc_disabled.png      | Bin 0 -> 252 bytes
 .../static/dt-1.10.7/images/sort_both.png       | Bin 0 -> 282 bytes
 .../static/dt-1.10.7/images/sort_desc.png       | Bin 0 -> 260 bytes
 .../dt-1.10.7/images/sort_desc_disabled.png     | Bin 0 -> 251 bytes
 .../dt-1.10.7/js/jquery.dataTables.min.js       | 160 ++++++
 .../webapps/static/dt-1.9.4/css/demo_page.css   | 110 ----
 .../webapps/static/dt-1.9.4/css/demo_table.css  | 538 -------------------
 .../webapps/static/dt-1.9.4/css/jui-dt.css      | 322 -----------
 .../static/dt-1.9.4/images/Sorting icons.psd    | Bin 27490 -> 0 bytes
 .../static/dt-1.9.4/images/back_disabled.jpg    | Bin 612 -> 0 bytes
 .../static/dt-1.9.4/images/back_enabled.jpg     | Bin 807 -> 0 bytes
 .../webapps/static/dt-1.9.4/images/favicon.ico  | Bin 894 -> 0 bytes
 .../static/dt-1.9.4/images/forward_disabled.jpg | Bin 635 -> 0 bytes
 .../static/dt-1.9.4/images/forward_enabled.jpg  | Bin 852 -> 0 bytes
 .../webapps/static/dt-1.9.4/images/sort_asc.png | Bin 263 -> 0 bytes
 .../dt-1.9.4/images/sort_asc_disabled.png       | Bin 252 -> 0 bytes
 .../static/dt-1.9.4/images/sort_both.png        | Bin 282 -> 0 bytes
 .../static/dt-1.9.4/images/sort_desc.png        | Bin 260 -> 0 bytes
 .../dt-1.9.4/images/sort_desc_disabled.png      | Bin 251 -> 0 bytes
 .../static/dt-1.9.4/js/jquery.dataTables.min.js | 157 ------
 .../container/ContainerImpl.java                |  13 +-
 .../launcher/ContainerLaunch.java               |  12 +-
 .../launcher/ContainersLauncher.java            |  14 +-
 .../container/TestContainer.java                |  46 +-
 .../server/resourcemanager/RMAppManager.java    |  81 ++-
 .../server/resourcemanager/rmapp/RMApp.java     |   6 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |   8 +-
 .../scheduler/MutableConfigurationProvider.java |   7 +
 .../capacity/AutoCreatedLeafQueueConfig.java    |   5 +
 .../capacity/QueueManagementChange.java         |   2 +-
 .../QueueManagementDynamicEditPolicy.java       |  36 +-
 .../conf/MutableCSConfigurationProvider.java    |   5 +
 .../GuaranteedOrZeroCapacityOverTimePolicy.java |  50 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |  12 +
 .../constraint/AllocationTagsManager.java       |   5 +
 .../resourcemanager/webapp/RMWebServices.java   |  34 ++
 .../resourcemanager/webapp/dao/ConfInfo.java    |  72 +++
 .../yarn/server/resourcemanager/MockNodes.java  |   6 +-
 .../server/resourcemanager/TestAppManager.java  | 241 +++++++--
 .../resourcemanager/TestResourceManager.java    |  16 +-
 .../applicationsmanager/MockAsm.java            |  11 +
 .../server/resourcemanager/rmapp/MockRMApp.java |  20 +
 .../TestCapacitySchedulerAsyncScheduling.java   |  69 +++
 .../scheduler/capacity/TestUtils.java           |   2 +
 .../constraint/TestAllocationTagsManager.java   |  37 ++
 .../TestPlacementConstraintsUtil.java           |  51 +-
 .../TestRMWebServicesConfigurationMutation.java |  40 ++
 .../src/site/markdown/DockerContainers.md       | 194 ++++++-
 .../src/site/markdown/ResourceManagerHA.md      |   2 +-
 .../src/site/markdown/ResourceManagerRest.md    |  40 ++
 .../src/site/markdown/yarn-service/Examples.md  |   5 +-
 .../site/markdown/yarn-service/QuickStart.md    |   1 +
 134 files changed, 5438 insertions(+), 2466 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71a35813/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71a35813/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index 928d901,4faaa98..7b749f4
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@@ -136,16 -124,7 +136,14 @@@ public class QuorumJournalManager imple
      this.nsInfo = nsInfo;
      this.nameServiceId = nameServiceId;
      this.loggers = new AsyncLoggerSet(createLoggers(loggerFactory));
-     this.connectionFactory = URLConnectionFactory
-         .newDefaultURLConnectionFactory(conf);
  
 +    this.maxTxnsPerRpc =
 +        conf.getInt(QJM_RPC_MAX_TXNS_KEY, QJM_RPC_MAX_TXNS_DEFAULT);
 +    Preconditions.checkArgument(maxTxnsPerRpc > 0,
 +        "Must specify %s greater than 0!", QJM_RPC_MAX_TXNS_KEY);
 +    this.inProgressTailingEnabled = conf.getBoolean(
 +        DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY,
 +        DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_DEFAULT);
      // Configure timeouts.
      this.startSegmentTimeoutMs = conf.getInt(
          DFSConfigKeys.DFS_QJOURNAL_START_SEGMENT_TIMEOUT_KEY,
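
For operators picking up this merge, the two values read above correspond to
client-visible settings in hdfs-site.xml. A minimal sketch, assuming the key
behind DFS_HA_TAILEDITS_INPROGRESS_KEY resolves to
dfs.ha.tail-edits.in-progress as in hdfs-default.xml; the exact string behind
QJM_RPC_MAX_TXNS_KEY should be confirmed against hdfs-default.xml on this
branch before relying on it:

    <property>
      <name>dfs.ha.tail-edits.in-progress</name>
      <value>true</value>
      <!-- Let the standby tail in-progress edit log segments over RPC;
           the QJM max-txns key bounds how much each tailing RPC returns. -->
    </property>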

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71a35813/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index e002323,fd856a3..bfdda90
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@@ -1765,24 -1755,8 +1765,23 @@@ public class NameNode extends Reconfigu
      state.setState(haContext, STANDBY_STATE);
    }
  
 +  synchronized void transitionToObserver()
 +      throws ServiceFailedException, AccessControlException {
 +    namesystem.checkSuperuserPrivilege();
 +    if (!haEnabled) {
 +      throw new ServiceFailedException("HA for namenode is not enabled");
 +    }
 +    // Transition from ACTIVE to OBSERVER is forbidden.
 +    if (state == ACTIVE_STATE) {
 +      throw new ServiceFailedException(
 +          "Cannot transition from '" + ACTIVE_STATE + "' to '" +
 +              OBSERVER_STATE + "'");
 +    }
 +    state.setState(haContext, OBSERVER_STATE);
 +  }
 +
    synchronized HAServiceStatus getServiceStatus()
        throws ServiceFailedException, AccessControlException {
-     namesystem.checkSuperuserPrivilege();
      if (!haEnabled) {
        throw new ServiceFailedException("HA for namenode is not enabled");
      }
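
The new guard above means an active NameNode cannot jump straight to the
observer state; it has to pass through standby first. A minimal client-side
sketch, assuming the matching HAServiceProtocol.transitionToObserver() RPC is
wired up on this branch and using placeholder nameservice/NameNode ids
("mycluster"/"nn2"):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ha.HAServiceProtocol;
    import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
    import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;

    public class TransitionToObserverSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        NNHAServiceTarget target =
            new NNHAServiceTarget(conf, "mycluster", "nn2");
        HAServiceProtocol proxy = target.getProxy(conf, 15000);
        StateChangeRequestInfo req =
            new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER);
        proxy.transitionToStandby(req);   // ACTIVE -> OBSERVER is forbidden,
        proxy.transitionToObserver(req);  // so go through STANDBY first.
      }
    }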

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71a35813/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------




[24/25] hadoop git commit: Remove superuser privilege requirement for NameNode.getServiceStatus. Contributed by Chao Sun.

Posted by su...@apache.org.
Remove superuser privilege requirement for NameNode.getServiceStatus. Contributed by Chao Sun.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2a8c486
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2a8c486
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2a8c486

Branch: refs/heads/HDFS-12943
Commit: a2a8c486998b81d2c73804a07cc74f5269bfd904
Parents: e7951c6
Author: Chao Sun <su...@apache.org>
Authored: Fri Aug 10 15:59:39 2018 -0700
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Fri Aug 10 15:59:39 2018 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java  | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2a8c486/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 8ad5767..fd856a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -1757,7 +1757,6 @@ public class NameNode extends ReconfigurableBase implements
 
   synchronized HAServiceStatus getServiceStatus()
       throws ServiceFailedException, AccessControlException {
-    namesystem.checkSuperuserPrivilege();
     if (!haEnabled) {
       throw new ServiceFailedException("HA for namenode is not enabled");
     }
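
With the superuser check gone, HA state probes can run as ordinary service
accounts. A minimal sketch of such a probe ("mycluster"/"nn1" are placeholder
ids; the CLI equivalent is "hdfs haadmin -getServiceState <serviceId>"):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ha.HAServiceProtocol;
    import org.apache.hadoop.ha.HAServiceStatus;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;

    public class NNStateProbe {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        NNHAServiceTarget target =
            new NNHAServiceTarget(conf, "mycluster", "nn1");
        HAServiceProtocol proxy = target.getProxy(conf, 15000);
        // Succeeds for any authenticated caller after this change,
        // not only for the HDFS superuser.
        HAServiceStatus status = proxy.getServiceStatus();
        System.out.println("HA state: " + status.getState());
      }
    }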




[21/25] hadoop git commit: HDDS-339. Add block length and blockId to PutKeyResponse. Contributed by Shashikant Banerjee.

Posted by su...@apache.org.
HDDS-339. Add block length and blockId to PutKeyResponse. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/398d8955
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/398d8955
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/398d8955

Branch: refs/heads/HDFS-12943
Commit: 398d89554398a38ffa1347524286cd437f94f3ae
Parents: 15241c6
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Fri Aug 10 23:45:56 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Fri Aug 10 23:45:56 2018 +0530

----------------------------------------------------------------------
 .../main/proto/DatanodeContainerProtocol.proto  |   1 +
 .../container/keyvalue/KeyValueHandler.java     |  18 +-
 .../container/keyvalue/helpers/KeyUtils.java    |  50 +++-
 .../container/keyvalue/impl/KeyManagerImpl.java |   4 +-
 .../keyvalue/interfaces/KeyManager.java         |   3 +-
 .../ozone/scm/TestCommittedBlockLengthAPI.java  | 216 ----------------
 .../TestGetCommittedBlockLengthAndPutKey.java   | 254 +++++++++++++++++++
 7 files changed, 313 insertions(+), 233 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/398d8955/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index af06346..930f314 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -308,6 +308,7 @@ message  PutKeyRequestProto {
 }
 
 message  PutKeyResponseProto {
+  required GetCommittedBlockLengthResponseProto committedBlockLength = 1;
 }
 
 message  GetKeyRequestProto  {
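
Since committedBlockLength is a required field, a successful putKey reply now
always carries the committed length, so clients can drop the follow-up
GetCommittedBlockLength round trip. A minimal sketch of the client-side read
(the reply is whatever ContainerCommandResponseProto the caller already has):

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;

    /** Hedged sketch: read the committed length off a putKey reply. */
    final class PutKeyReplies {
      private PutKeyReplies() {
      }

      static long committedLength(
          ContainerProtos.ContainerCommandResponseProto reply) {
        // committedBlockLength is required, so it is always set on a
        // successful putKey response.
        return reply.getPutKey().getCommittedBlockLength().getBlockLength();
      }
    }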

http://git-wip-us.apache.org/repos/asf/hadoop/blob/398d8955/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index f4699dd..8364a77 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -421,6 +421,7 @@ public class KeyValueHandler extends Handler {
   ContainerCommandResponseProto handlePutKey(
       ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
 
+    long blockLength;
     if (!request.hasPutKey()) {
       LOG.debug("Malformed Put Key request. trace ID: {}",
           request.getTraceID());
@@ -433,7 +434,7 @@ public class KeyValueHandler extends Handler {
       KeyData keyData = KeyData.getFromProtoBuf(
           request.getPutKey().getKeyData());
       long numBytes = keyData.getProtoBufMessage().toByteArray().length;
-      commitKey(keyData, kvContainer);
+      blockLength = commitKey(keyData, kvContainer);
       metrics.incContainerBytesStats(Type.PutKey, numBytes);
     } catch (StorageContainerException ex) {
       return ContainerUtils.logAndReturnError(LOG, ex, request);
@@ -443,7 +444,7 @@ public class KeyValueHandler extends Handler {
           request);
     }
 
-    return KeyUtils.getKeyResponseSuccess(request);
+    return KeyUtils.putKeyResponseSuccess(request, blockLength);
   }
 
   private void commitPendingKeys(KeyValueContainer kvContainer)
@@ -456,12 +457,13 @@ public class KeyValueHandler extends Handler {
     }
   }
 
-  private void commitKey(KeyData keyData, KeyValueContainer kvContainer)
+  private long commitKey(KeyData keyData, KeyValueContainer kvContainer)
       throws IOException {
     Preconditions.checkNotNull(keyData);
-    keyManager.putKey(kvContainer, keyData);
+    long length = keyManager.putKey(kvContainer, keyData);
     //update the open key Map in containerManager
     this.openContainerBlockMap.removeFromKeyMap(keyData.getBlockID());
+    return length;
   }
   /**
    * Handle Get Key operation. Calls KeyManager to process the request.
@@ -662,8 +664,12 @@ public class KeyValueHandler extends Handler {
           request.getWriteChunk().getStage() == Stage.COMBINED) {
         metrics.incContainerBytesStats(Type.WriteChunk, request.getWriteChunk()
             .getChunkData().getLen());
-        // the openContainerBlockMap should be updated only while writing data
-        // not during COMMIT_STAGE of handling write chunk request.
+      }
+
+      if (request.getWriteChunk().getStage() == Stage.COMMIT_DATA
+          || request.getWriteChunk().getStage() == Stage.COMBINED) {
+        // the openContainerBlockMap should be updated only during
+        // COMMIT_STAGE of handling write chunk request.
         openContainerBlockMap.addChunk(blockID, chunkInfoProto);
       }
     } catch (StorageContainerException ex) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/398d8955/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java
index 2be966d..a83d298 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java
@@ -27,6 +27,10 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .GetKeyResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
+    GetCommittedBlockLengthResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
+    PutKeyResponseProto;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
@@ -123,6 +127,26 @@ public final class KeyUtils {
   }
 
   /**
+   * Returns a successful putKey response.
+   * @param msg - Request.
+   * @return Response.
+   */
+  public static ContainerCommandResponseProto putKeyResponseSuccess(
+      ContainerCommandRequestProto msg, long blockLength) {
+    GetCommittedBlockLengthResponseProto.Builder
+        committedBlockLengthResponseBuilder =
+        getCommittedBlockLengthResponseBuilder(blockLength,
+            msg.getPutKey().getKeyData().getBlockID());
+    PutKeyResponseProto.Builder putKeyResponse =
+        PutKeyResponseProto.newBuilder();
+    putKeyResponse
+        .setCommittedBlockLength(committedBlockLengthResponseBuilder);
+    ContainerProtos.ContainerCommandResponseProto.Builder builder =
+        ContainerUtils.getSuccessResponseBuilder(msg);
+    builder.setPutKey(putKeyResponse);
+    return builder.build();
+  }
+  /**
    * Returns successful keyResponse.
    * @param msg - Request.
    * @return Response.
@@ -150,18 +174,26 @@ public final class KeyUtils {
    * @param msg - Request.
    * @return Response.
    */
-  public static ContainerProtos.ContainerCommandResponseProto
-  getBlockLengthResponse(ContainerProtos.
-      ContainerCommandRequestProto msg, long blockLength) {
+  public static ContainerCommandResponseProto getBlockLengthResponse(
+          ContainerCommandRequestProto msg, long blockLength) {
+    GetCommittedBlockLengthResponseProto.Builder
+        committedBlockLengthResponseBuilder =
+        getCommittedBlockLengthResponseBuilder(blockLength,
+            msg.getGetCommittedBlockLength().getBlockID());
+    ContainerProtos.ContainerCommandResponseProto.Builder builder =
+        ContainerUtils.getSuccessResponseBuilder(msg);
+    builder.setGetCommittedBlockLength(committedBlockLengthResponseBuilder);
+    return builder.build();
+  }
+
+  private static GetCommittedBlockLengthResponseProto.Builder
+  getCommittedBlockLengthResponseBuilder(
+      long blockLength, ContainerProtos.DatanodeBlockID blockID) {
     ContainerProtos.GetCommittedBlockLengthResponseProto.Builder
         getCommittedBlockLengthResponseBuilder = ContainerProtos.
         GetCommittedBlockLengthResponseProto.newBuilder();
     getCommittedBlockLengthResponseBuilder.setBlockLength(blockLength);
-    getCommittedBlockLengthResponseBuilder
-        .setBlockID(msg.getGetCommittedBlockLength().getBlockID());
-    ContainerProtos.ContainerCommandResponseProto.Builder builder =
-        ContainerUtils.getSuccessResponseBuilder(msg);
-    builder.setGetCommittedBlockLength(getCommittedBlockLengthResponseBuilder);
-    return  builder.build();
+    getCommittedBlockLengthResponseBuilder.setBlockID(blockID);
+    return getCommittedBlockLengthResponseBuilder;
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/398d8955/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java
index 58bf1f8..6370f8e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java
@@ -67,9 +67,10 @@ public class KeyManagerImpl implements KeyManager {
    *
    * @param container - Container for which key need to be added.
    * @param data     - Key Data.
+   * @return length of the key.
    * @throws IOException
    */
-  public void putKey(Container container, KeyData data) throws IOException {
+  public long putKey(Container container, KeyData data) throws IOException {
     Preconditions.checkNotNull(data, "KeyData cannot be null for put " +
         "operation.");
     Preconditions.checkState(data.getContainerID() >= 0, "Container Id " +
@@ -87,6 +88,7 @@ public class KeyManagerImpl implements KeyManager {
 
     // Increment keycount here
     container.getContainerData().incrKeyCount();
+    return data.getSize();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/398d8955/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java
index dad688e..37871be 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java
@@ -35,9 +35,10 @@ public interface KeyManager {
    *
    * @param container - Container for which key need to be added.
    * @param data     - Key Data.
+   * @return length of the Key.
    * @throws IOException
    */
-  void putKey(Container container, KeyData data) throws IOException;
+  long putKey(Container container, KeyData data) throws IOException;
 
   /**
    * Gets an existing key.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/398d8955/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCommittedBlockLengthAPI.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCommittedBlockLengthAPI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCommittedBlockLengthAPI.java
deleted file mode 100644
index 3c6479f..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCommittedBlockLengthAPI.java
+++ /dev/null
@@ -1,216 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.scm;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.
-    ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.
-    StorageContainerException;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.
-    ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.
-    SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.scm.protocolPB.
-    StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-
-import java.util.UUID;
-
-/**
- * Test Container calls.
- */
-public class TestCommittedBlockLengthAPI {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration ozoneConfig;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-  private static XceiverClientManager xceiverClientManager;
-  private static String containerOwner = "OZONE";
-
-  @BeforeClass
-  public static void init() throws Exception {
-    ozoneConfig = new OzoneConfiguration();
-    ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
-        SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
-    cluster =
-        MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1).build();
-    cluster.waitForClusterToBeReady();
-    storageContainerLocationClient =
-        cluster.getStorageContainerLocationClient();
-    xceiverClientManager = new XceiverClientManager(ozoneConfig);
-  }
-
-  @AfterClass
-  public static void shutdown() throws InterruptedException {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-    IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
-  }
-
-  @Test
-  public void tesGetCommittedBlockLength() throws Exception {
-    ContainerProtos.GetCommittedBlockLengthResponseProto response;
-    String traceID = UUID.randomUUID().toString();
-    ContainerWithPipeline container = storageContainerLocationClient
-        .allocateContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    long containerID = container.getContainerInfo().getContainerID();
-    Pipeline pipeline = container.getPipeline();
-    XceiverClientSpi client =
-        xceiverClientManager.acquireClient(pipeline, containerID);
-    //create the container
-    ContainerProtocolCalls.createContainer(client, containerID, traceID);
-
-    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
-    byte[] data =
-        RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes();
-    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
-        ContainerTestHelper
-            .getWriteChunkRequest(container.getPipeline(), blockID,
-                data.length);
-    client.sendCommand(writeChunkRequest);
-    // Now, explicitly make a putKey request for the block.
-    ContainerProtos.ContainerCommandRequestProto putKeyRequest =
-        ContainerTestHelper
-            .getPutKeyRequest(pipeline, writeChunkRequest.getWriteChunk());
-    client.sendCommand(putKeyRequest);
-    response = ContainerProtocolCalls
-        .getCommittedBlockLength(client, blockID, traceID);
-    // make sure the block ids in the request and response are same.
-    Assert.assertTrue(
-        BlockID.getFromProtobuf(response.getBlockID()).equals(blockID));
-    Assert.assertTrue(response.getBlockLength() == data.length);
-    xceiverClientManager.releaseClient(client);
-  }
-
-  @Test
-  public void tesGetCommittedBlockLengthWithClosedContainer()
-      throws Exception {
-    String traceID = UUID.randomUUID().toString();
-    ContainerWithPipeline container = storageContainerLocationClient
-        .allocateContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    long containerID = container.getContainerInfo().getContainerID();
-    Pipeline pipeline = container.getPipeline();
-    XceiverClientSpi client =
-        xceiverClientManager.acquireClient(pipeline, containerID);
-    // create the container
-    ContainerProtocolCalls.createContainer(client, containerID, traceID);
-
-    byte[] data =
-        RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes();
-    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
-    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
-        ContainerTestHelper
-            .getWriteChunkRequest(container.getPipeline(), blockID,
-                data.length);
-    client.sendCommand(writeChunkRequest);
-    // close the container
-    ContainerProtocolCalls.closeContainer(client, containerID, traceID);
-    ContainerProtos.GetCommittedBlockLengthResponseProto response =
-        ContainerProtocolCalls
-            .getCommittedBlockLength(client, blockID, traceID);
-    // make sure the block ids in the request and response are same.
-    // This will also ensure that closing the container committed the block
-    // on the Datanodes.
-    Assert.assertTrue(
-        BlockID.getFromProtobuf(response.getBlockID()).equals(blockID));
-    Assert.assertTrue(response.getBlockLength() == data.length);
-    xceiverClientManager.releaseClient(client);
-  }
-
-  @Test
-  public void tesGetCommittedBlockLengthForInvalidBlock() throws Exception {
-    String traceID = UUID.randomUUID().toString();
-    ContainerWithPipeline container = storageContainerLocationClient
-        .allocateContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    long containerID = container.getContainerInfo().getContainerID();
-    XceiverClientSpi client = xceiverClientManager
-        .acquireClient(container.getPipeline(), containerID);
-    ContainerProtocolCalls.createContainer(client, containerID, traceID);
-
-    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
-    // move the container to closed state
-    ContainerProtocolCalls.closeContainer(client, containerID, traceID);
-    try {
-      // There is no block written inside the container. The request should
-      // fail.
-      ContainerProtocolCalls.getCommittedBlockLength(client, blockID, traceID);
-      Assert.fail("Expected exception not thrown");
-    } catch (StorageContainerException sce) {
-      Assert.assertTrue(sce.getMessage().contains("Unable to find the key"));
-    }
-    xceiverClientManager.releaseClient(client);
-  }
-
-  @Test
-  public void testGetCommittedBlockLengthForOpenBlock() throws Exception {
-    String traceID = UUID.randomUUID().toString();
-    ContainerWithPipeline container = storageContainerLocationClient
-        .allocateContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    long containerID = container.getContainerInfo().getContainerID();
-    XceiverClientSpi client = xceiverClientManager
-        .acquireClient(container.getPipeline(), containerID);
-    ContainerProtocolCalls
-        .createContainer(client, containerID, traceID);
-
-    BlockID blockID =
-        ContainerTestHelper.getTestBlockID(containerID);
-    ContainerProtos.ContainerCommandRequestProto requestProto =
-        ContainerTestHelper
-            .getWriteChunkRequest(container.getPipeline(), blockID, 1024);
-    client.sendCommand(requestProto);
-    try {
-      ContainerProtocolCalls.getCommittedBlockLength(client, blockID, traceID);
-      Assert.fail("Expected Exception not thrown");
-    } catch (StorageContainerException sce) {
-      Assert.assertEquals(ContainerProtos.Result.BLOCK_NOT_COMMITTED,
-          sce.getResult());
-    }
-    // now close the container, it should auto commit pending open blocks
-    ContainerProtocolCalls
-        .closeContainer(client, containerID, traceID);
-    ContainerProtos.GetCommittedBlockLengthResponseProto response =
-        ContainerProtocolCalls
-            .getCommittedBlockLength(client, blockID, traceID);
-    Assert.assertTrue(response.getBlockLength() == 1024);
-    xceiverClientManager.releaseClient(client);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/398d8955/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
new file mode 100644
index 0000000..f82b0d3
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
@@ -0,0 +1,254 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.scm;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.container.common.helpers.
+    ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers.
+    StorageContainerException;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms.
+    ContainerPlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms.
+    SCMContainerPlacementCapacity;
+import org.apache.hadoop.hdds.scm.protocolPB.
+    StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+
+import java.util.UUID;
+
+/**
+ * Tests for the getCommittedBlockLength and putKey container calls.
+ */
+public class TestGetCommittedBlockLengthAndPutKey {
+
+  private static MiniOzoneCluster cluster;
+  private static OzoneConfiguration ozoneConfig;
+  private static StorageContainerLocationProtocolClientSideTranslatorPB
+      storageContainerLocationClient;
+  private static XceiverClientManager xceiverClientManager;
+  private static String containerOwner = "OZONE";
+
+  @BeforeClass
+  public static void init() throws Exception {
+    ozoneConfig = new OzoneConfiguration();
+    ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
+        SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
+    cluster =
+        MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1).build();
+    cluster.waitForClusterToBeReady();
+    storageContainerLocationClient =
+        cluster.getStorageContainerLocationClient();
+    xceiverClientManager = new XceiverClientManager(ozoneConfig);
+  }
+
+  @AfterClass
+  public static void shutdown() throws InterruptedException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+    IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
+  }
+
+  @Test
+  public void tesGetCommittedBlockLength() throws Exception {
+    ContainerProtos.GetCommittedBlockLengthResponseProto response;
+    String traceID = UUID.randomUUID().toString();
+    ContainerWithPipeline container = storageContainerLocationClient
+        .allocateContainer(xceiverClientManager.getType(),
+            HddsProtos.ReplicationFactor.ONE, containerOwner);
+    long containerID = container.getContainerInfo().getContainerID();
+    Pipeline pipeline = container.getPipeline();
+    XceiverClientSpi client =
+        xceiverClientManager.acquireClient(pipeline, containerID);
+    //create the container
+    ContainerProtocolCalls.createContainer(client, containerID, traceID);
+
+    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
+    byte[] data =
+        RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes();
+    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
+        ContainerTestHelper
+            .getWriteChunkRequest(container.getPipeline(), blockID,
+                data.length);
+    client.sendCommand(writeChunkRequest);
+    // Now, explicitly make a putKey request for the block.
+    ContainerProtos.ContainerCommandRequestProto putKeyRequest =
+        ContainerTestHelper
+            .getPutKeyRequest(pipeline, writeChunkRequest.getWriteChunk());
+    client.sendCommand(putKeyRequest);
+    response = ContainerProtocolCalls
+        .getCommittedBlockLength(client, blockID, traceID);
+    // make sure the block ids in the request and response are same.
+    Assert.assertTrue(
+        BlockID.getFromProtobuf(response.getBlockID()).equals(blockID));
+    Assert.assertTrue(response.getBlockLength() == data.length);
+    xceiverClientManager.releaseClient(client);
+  }
+
+  @Test
+  public void tesGetCommittedBlockLengthWithClosedContainer()
+      throws Exception {
+    String traceID = UUID.randomUUID().toString();
+    ContainerWithPipeline container = storageContainerLocationClient
+        .allocateContainer(xceiverClientManager.getType(),
+            HddsProtos.ReplicationFactor.ONE, containerOwner);
+    long containerID = container.getContainerInfo().getContainerID();
+    Pipeline pipeline = container.getPipeline();
+    XceiverClientSpi client =
+        xceiverClientManager.acquireClient(pipeline, containerID);
+    // create the container
+    ContainerProtocolCalls.createContainer(client, containerID, traceID);
+
+    byte[] data =
+        RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes();
+    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
+    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
+        ContainerTestHelper
+            .getWriteChunkRequest(container.getPipeline(), blockID,
+                data.length);
+    client.sendCommand(writeChunkRequest);
+    // close the container
+    ContainerProtocolCalls.closeContainer(client, containerID, traceID);
+    ContainerProtos.GetCommittedBlockLengthResponseProto response =
+        ContainerProtocolCalls
+            .getCommittedBlockLength(client, blockID, traceID);
+    // make sure the block ids in the request and response are same.
+    // This will also ensure that closing the container committed the block
+    // on the Datanodes.
+    Assert.assertTrue(
+        BlockID.getFromProtobuf(response.getBlockID()).equals(blockID));
+    Assert.assertTrue(response.getBlockLength() == data.length);
+    xceiverClientManager.releaseClient(client);
+  }
+
+  @Test
+  public void tesGetCommittedBlockLengthForInvalidBlock() throws Exception {
+    String traceID = UUID.randomUUID().toString();
+    ContainerWithPipeline container = storageContainerLocationClient
+        .allocateContainer(xceiverClientManager.getType(),
+            HddsProtos.ReplicationFactor.ONE, containerOwner);
+    long containerID = container.getContainerInfo().getContainerID();
+    XceiverClientSpi client = xceiverClientManager
+        .acquireClient(container.getPipeline(), containerID);
+    ContainerProtocolCalls.createContainer(client, containerID, traceID);
+
+    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
+    // move the container to closed state
+    ContainerProtocolCalls.closeContainer(client, containerID, traceID);
+    try {
+      // There is no block written inside the container. The request should
+      // fail.
+      ContainerProtocolCalls.getCommittedBlockLength(client, blockID, traceID);
+      Assert.fail("Expected exception not thrown");
+    } catch (StorageContainerException sce) {
+      Assert.assertTrue(sce.getMessage().contains("Unable to find the key"));
+    }
+    xceiverClientManager.releaseClient(client);
+  }
+
+  @Test
+  public void testGetCommittedBlockLengthForOpenBlock() throws Exception {
+    String traceID = UUID.randomUUID().toString();
+    ContainerWithPipeline container = storageContainerLocationClient
+        .allocateContainer(xceiverClientManager.getType(),
+            HddsProtos.ReplicationFactor.ONE, containerOwner);
+    long containerID = container.getContainerInfo().getContainerID();
+    XceiverClientSpi client = xceiverClientManager
+        .acquireClient(container.getPipeline(), containerID);
+    ContainerProtocolCalls
+        .createContainer(client, containerID, traceID);
+
+    BlockID blockID =
+        ContainerTestHelper.getTestBlockID(containerID);
+    ContainerProtos.ContainerCommandRequestProto requestProto =
+        ContainerTestHelper
+            .getWriteChunkRequest(container.getPipeline(), blockID, 1024);
+    client.sendCommand(requestProto);
+    try {
+      ContainerProtocolCalls.getCommittedBlockLength(client, blockID, traceID);
+      Assert.fail("Expected Exception not thrown");
+    } catch (StorageContainerException sce) {
+      Assert.assertEquals(ContainerProtos.Result.BLOCK_NOT_COMMITTED,
+          sce.getResult());
+    }
+    // now close the container, it should auto commit pending open blocks
+    ContainerProtocolCalls
+        .closeContainer(client, containerID, traceID);
+    ContainerProtos.GetCommittedBlockLengthResponseProto response =
+        ContainerProtocolCalls
+            .getCommittedBlockLength(client, blockID, traceID);
+    Assert.assertTrue(response.getBlockLength() == 1024);
+    xceiverClientManager.releaseClient(client);
+  }
+
+  @Test
+  public void tesPutKeyResposne() throws Exception {
+    ContainerProtos.PutKeyResponseProto response;
+    String traceID = UUID.randomUUID().toString();
+    ContainerWithPipeline container = storageContainerLocationClient
+        .allocateContainer(xceiverClientManager.getType(),
+            HddsProtos.ReplicationFactor.ONE, containerOwner);
+    long containerID = container.getContainerInfo().getContainerID();
+    Pipeline pipeline = container.getPipeline();
+    XceiverClientSpi client =
+        xceiverClientManager.acquireClient(pipeline, containerID);
+    //create the container
+    ContainerProtocolCalls.createContainer(client, containerID, traceID);
+
+    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
+    byte[] data =
+        RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes();
+    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
+        ContainerTestHelper
+            .getWriteChunkRequest(container.getPipeline(), blockID,
+                data.length);
+    client.sendCommand(writeChunkRequest);
+    // Now, explicitly make a putKey request for the block.
+    ContainerProtos.ContainerCommandRequestProto putKeyRequest =
+        ContainerTestHelper
+            .getPutKeyRequest(pipeline, writeChunkRequest.getWriteChunk());
+    response = client.sendCommand(putKeyRequest).getPutKey();
+    // make sure the block ids in the request and response are same.
+    // This also verifies that the committed block length is returned
+    // directly in the putKey response.
+    Assert.assertEquals(BlockID
+        .getFromProtobuf(response.getCommittedBlockLength().getBlockID()),
+        blockID);
+    Assert.assertEquals(
+        response.getCommittedBlockLength().getBlockLength(), data.length);
+    xceiverClientManager.releaseClient(client);
+  }
+}
\ No newline at end of file

