Posted to commits@storm.apache.org by bo...@apache.org on 2016/03/22 16:32:52 UTC

[01/16] storm git commit: STORM-1617: Release Specific Documentation 0.9.x

Repository: storm
Updated Branches:
  refs/heads/0.10.x-branch 3a61f87e9 -> 7e9598325


http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/assets/js/owl.carousel.min.js
----------------------------------------------------------------------
diff --git a/docs/assets/js/owl.carousel.min.js b/docs/assets/js/owl.carousel.min.js
new file mode 100644
index 0000000..394505e
--- /dev/null
+++ b/docs/assets/js/owl.carousel.min.js
@@ -0,0 +1,47 @@
+"function"!==typeof Object.create&&(Object.create=function(f){function g(){}g.prototype=f;return new g});
+(function(f,g,k){var l={init:function(a,b){this.$elem=f(b);this.options=f.extend({},f.fn.owlCarousel.options,this.$elem.data(),a);this.userOptions=a;this.loadContent()},loadContent:function(){function a(a){var d,e="";if("function"===typeof b.options.jsonSuccess)b.options.jsonSuccess.apply(this,[a]);else{for(d in a.owl)a.owl.hasOwnProperty(d)&&(e+=a.owl[d].item);b.$elem.html(e)}b.logIn()}var b=this,e;"function"===typeof b.options.beforeInit&&b.options.beforeInit.apply(this,[b.$elem]);"string"===typeof b.options.jsonPath?
+(e=b.options.jsonPath,f.getJSON(e,a)):b.logIn()},logIn:function(){this.$elem.data("owl-originalStyles",this.$elem.attr("style"));this.$elem.data("owl-originalClasses",this.$elem.attr("class"));this.$elem.css({opacity:0});this.orignalItems=this.options.items;this.checkBrowser();this.wrapperWidth=0;this.checkVisible=null;this.setVars()},setVars:function(){if(0===this.$elem.children().length)return!1;this.baseClass();this.eventTypes();this.$userItems=this.$elem.children();this.itemsAmount=this.$userItems.length;
+this.wrapItems();this.$owlItems=this.$elem.find(".owl-item");this.$owlWrapper=this.$elem.find(".owl-wrapper");this.playDirection="next";this.prevItem=0;this.prevArr=[0];this.currentItem=0;this.customEvents();this.onStartup()},onStartup:function(){this.updateItems();this.calculateAll();this.buildControls();this.updateControls();this.response();this.moveEvents();this.stopOnHover();this.owlStatus();!1!==this.options.transitionStyle&&this.transitionTypes(this.options.transitionStyle);!0===this.options.autoPlay&&
+(this.options.autoPlay=5E3);this.play();this.$elem.find(".owl-wrapper").css("display","block");this.$elem.is(":visible")?this.$elem.css("opacity",1):this.watchVisibility();this.onstartup=!1;this.eachMoveUpdate();"function"===typeof this.options.afterInit&&this.options.afterInit.apply(this,[this.$elem])},eachMoveUpdate:function(){!0===this.options.lazyLoad&&this.lazyLoad();!0===this.options.autoHeight&&this.autoHeight();this.onVisibleItems();"function"===typeof this.options.afterAction&&this.options.afterAction.apply(this,
+[this.$elem])},updateVars:function(){"function"===typeof this.options.beforeUpdate&&this.options.beforeUpdate.apply(this,[this.$elem]);this.watchVisibility();this.updateItems();this.calculateAll();this.updatePosition();this.updateControls();this.eachMoveUpdate();"function"===typeof this.options.afterUpdate&&this.options.afterUpdate.apply(this,[this.$elem])},reload:function(){var a=this;g.setTimeout(function(){a.updateVars()},0)},watchVisibility:function(){var a=this;if(!1===a.$elem.is(":visible"))a.$elem.css({opacity:0}),
+g.clearInterval(a.autoPlayInterval),g.clearInterval(a.checkVisible);else return!1;a.checkVisible=g.setInterval(function(){a.$elem.is(":visible")&&(a.reload(),a.$elem.animate({opacity:1},200),g.clearInterval(a.checkVisible))},500)},wrapItems:function(){this.$userItems.wrapAll('<div class="owl-wrapper">').wrap('<div class="owl-item"></div>');this.$elem.find(".owl-wrapper").wrap('<div class="owl-wrapper-outer">');this.wrapperOuter=this.$elem.find(".owl-wrapper-outer");this.$elem.css("display","block")},
+baseClass:function(){var a=this.$elem.hasClass(this.options.baseClass),b=this.$elem.hasClass(this.options.theme);a||this.$elem.addClass(this.options.baseClass);b||this.$elem.addClass(this.options.theme)},updateItems:function(){var a,b;if(!1===this.options.responsive)return!1;if(!0===this.options.singleItem)return this.options.items=this.orignalItems=1,this.options.itemsCustom=!1,this.options.itemsDesktop=!1,this.options.itemsDesktopSmall=!1,this.options.itemsTablet=!1,this.options.itemsTabletSmall=
+!1,this.options.itemsMobile=!1;a=f(this.options.responsiveBaseWidth).width();a>(this.options.itemsDesktop[0]||this.orignalItems)&&(this.options.items=this.orignalItems);if(!1!==this.options.itemsCustom)for(this.options.itemsCustom.sort(function(a,b){return a[0]-b[0]}),b=0;b<this.options.itemsCustom.length;b+=1)this.options.itemsCustom[b][0]<=a&&(this.options.items=this.options.itemsCustom[b][1]);else a<=this.options.itemsDesktop[0]&&!1!==this.options.itemsDesktop&&(this.options.items=this.options.itemsDesktop[1]),
+a<=this.options.itemsDesktopSmall[0]&&!1!==this.options.itemsDesktopSmall&&(this.options.items=this.options.itemsDesktopSmall[1]),a<=this.options.itemsTablet[0]&&!1!==this.options.itemsTablet&&(this.options.items=this.options.itemsTablet[1]),a<=this.options.itemsTabletSmall[0]&&!1!==this.options.itemsTabletSmall&&(this.options.items=this.options.itemsTabletSmall[1]),a<=this.options.itemsMobile[0]&&!1!==this.options.itemsMobile&&(this.options.items=this.options.itemsMobile[1]);this.options.items>this.itemsAmount&&
+!0===this.options.itemsScaleUp&&(this.options.items=this.itemsAmount)},response:function(){var a=this,b,e;if(!0!==a.options.responsive)return!1;e=f(g).width();a.resizer=function(){f(g).width()!==e&&(!1!==a.options.autoPlay&&g.clearInterval(a.autoPlayInterval),g.clearTimeout(b),b=g.setTimeout(function(){e=f(g).width();a.updateVars()},a.options.responsiveRefreshRate))};f(g).resize(a.resizer)},updatePosition:function(){this.jumpTo(this.currentItem);!1!==this.options.autoPlay&&this.checkAp()},appendItemsSizes:function(){var a=
+this,b=0,e=a.itemsAmount-a.options.items;a.$owlItems.each(function(c){var d=f(this);d.css({width:a.itemWidth}).data("owl-item",Number(c));if(0===c%a.options.items||c===e)c>e||(b+=1);d.data("owl-roundPages",b)})},appendWrapperSizes:function(){this.$owlWrapper.css({width:this.$owlItems.length*this.itemWidth*2,left:0});this.appendItemsSizes()},calculateAll:function(){this.calculateWidth();this.appendWrapperSizes();this.loops();this.max()},calculateWidth:function(){this.itemWidth=Math.round(this.$elem.width()/
+this.options.items)},max:function(){var a=-1*(this.itemsAmount*this.itemWidth-this.options.items*this.itemWidth);this.options.items>this.itemsAmount?this.maximumPixels=a=this.maximumItem=0:(this.maximumItem=this.itemsAmount-this.options.items,this.maximumPixels=a);return a},min:function(){return 0},loops:function(){var a=0,b=0,e,c;this.positionsInArray=[0];this.pagesInArray=[];for(e=0;e<this.itemsAmount;e+=1)b+=this.itemWidth,this.positionsInArray.push(-b),!0===this.options.scrollPerPage&&(c=f(this.$owlItems[e]),
+c=c.data("owl-roundPages"),c!==a&&(this.pagesInArray[a]=this.positionsInArray[e],a=c))},buildControls:function(){if(!0===this.options.navigation||!0===this.options.pagination)this.owlControls=f('<div class="owl-controls"/>').toggleClass("clickable",!this.browser.isTouch).appendTo(this.$elem);!0===this.options.pagination&&this.buildPagination();!0===this.options.navigation&&this.buildButtons()},buildButtons:function(){var a=this,b=f('<div class="owl-buttons"/>');a.owlControls.append(b);a.buttonPrev=
+f("<div/>",{"class":"owl-prev",html:a.options.navigationText[0]||""});a.buttonNext=f("<div/>",{"class":"owl-next",html:a.options.navigationText[1]||""});b.append(a.buttonPrev).append(a.buttonNext);b.on("touchstart.owlControls mousedown.owlControls",'div[class^="owl"]',function(a){a.preventDefault()});b.on("touchend.owlControls mouseup.owlControls",'div[class^="owl"]',function(b){b.preventDefault();f(this).hasClass("owl-next")?a.next():a.prev()})},buildPagination:function(){var a=this;a.paginationWrapper=
+f('<div class="owl-pagination"/>');a.owlControls.append(a.paginationWrapper);a.paginationWrapper.on("touchend.owlControls mouseup.owlControls",".owl-page",function(b){b.preventDefault();Number(f(this).data("owl-page"))!==a.currentItem&&a.goTo(Number(f(this).data("owl-page")),!0)})},updatePagination:function(){var a,b,e,c,d,g;if(!1===this.options.pagination)return!1;this.paginationWrapper.html("");a=0;b=this.itemsAmount-this.itemsAmount%this.options.items;for(c=0;c<this.itemsAmount;c+=1)0===c%this.options.items&&
+(a+=1,b===c&&(e=this.itemsAmount-this.options.items),d=f("<div/>",{"class":"owl-page"}),g=f("<span></span>",{text:!0===this.options.paginationNumbers?a:"","class":!0===this.options.paginationNumbers?"owl-numbers":""}),d.append(g),d.data("owl-page",b===c?e:c),d.data("owl-roundPages",a),this.paginationWrapper.append(d));this.checkPagination()},checkPagination:function(){var a=this;if(!1===a.options.pagination)return!1;a.paginationWrapper.find(".owl-page").each(function(){f(this).data("owl-roundPages")===
+f(a.$owlItems[a.currentItem]).data("owl-roundPages")&&(a.paginationWrapper.find(".owl-page").removeClass("active"),f(this).addClass("active"))})},checkNavigation:function(){if(!1===this.options.navigation)return!1;!1===this.options.rewindNav&&(0===this.currentItem&&0===this.maximumItem?(this.buttonPrev.addClass("disabled"),this.buttonNext.addClass("disabled")):0===this.currentItem&&0!==this.maximumItem?(this.buttonPrev.addClass("disabled"),this.buttonNext.removeClass("disabled")):this.currentItem===
+this.maximumItem?(this.buttonPrev.removeClass("disabled"),this.buttonNext.addClass("disabled")):0!==this.currentItem&&this.currentItem!==this.maximumItem&&(this.buttonPrev.removeClass("disabled"),this.buttonNext.removeClass("disabled")))},updateControls:function(){this.updatePagination();this.checkNavigation();this.owlControls&&(this.options.items>=this.itemsAmount?this.owlControls.hide():this.owlControls.show())},destroyControls:function(){this.owlControls&&this.owlControls.remove()},next:function(a){if(this.isTransition)return!1;
+this.currentItem+=!0===this.options.scrollPerPage?this.options.items:1;if(this.currentItem>this.maximumItem+(!0===this.options.scrollPerPage?this.options.items-1:0))if(!0===this.options.rewindNav)this.currentItem=0,a="rewind";else return this.currentItem=this.maximumItem,!1;this.goTo(this.currentItem,a)},prev:function(a){if(this.isTransition)return!1;this.currentItem=!0===this.options.scrollPerPage&&0<this.currentItem&&this.currentItem<this.options.items?0:this.currentItem-(!0===this.options.scrollPerPage?
+this.options.items:1);if(0>this.currentItem)if(!0===this.options.rewindNav)this.currentItem=this.maximumItem,a="rewind";else return this.currentItem=0,!1;this.goTo(this.currentItem,a)},goTo:function(a,b,e){var c=this;if(c.isTransition)return!1;"function"===typeof c.options.beforeMove&&c.options.beforeMove.apply(this,[c.$elem]);a>=c.maximumItem?a=c.maximumItem:0>=a&&(a=0);c.currentItem=c.owl.currentItem=a;if(!1!==c.options.transitionStyle&&"drag"!==e&&1===c.options.items&&!0===c.browser.support3d)return c.swapSpeed(0),
+!0===c.browser.support3d?c.transition3d(c.positionsInArray[a]):c.css2slide(c.positionsInArray[a],1),c.afterGo(),c.singleItemTransition(),!1;a=c.positionsInArray[a];!0===c.browser.support3d?(c.isCss3Finish=!1,!0===b?(c.swapSpeed("paginationSpeed"),g.setTimeout(function(){c.isCss3Finish=!0},c.options.paginationSpeed)):"rewind"===b?(c.swapSpeed(c.options.rewindSpeed),g.setTimeout(function(){c.isCss3Finish=!0},c.options.rewindSpeed)):(c.swapSpeed("slideSpeed"),g.setTimeout(function(){c.isCss3Finish=!0},
+c.options.slideSpeed)),c.transition3d(a)):!0===b?c.css2slide(a,c.options.paginationSpeed):"rewind"===b?c.css2slide(a,c.options.rewindSpeed):c.css2slide(a,c.options.slideSpeed);c.afterGo()},jumpTo:function(a){"function"===typeof this.options.beforeMove&&this.options.beforeMove.apply(this,[this.$elem]);a>=this.maximumItem||-1===a?a=this.maximumItem:0>=a&&(a=0);this.swapSpeed(0);!0===this.browser.support3d?this.transition3d(this.positionsInArray[a]):this.css2slide(this.positionsInArray[a],1);this.currentItem=
+this.owl.currentItem=a;this.afterGo()},afterGo:function(){this.prevArr.push(this.currentItem);this.prevItem=this.owl.prevItem=this.prevArr[this.prevArr.length-2];this.prevArr.shift(0);this.prevItem!==this.currentItem&&(this.checkPagination(),this.checkNavigation(),this.eachMoveUpdate(),!1!==this.options.autoPlay&&this.checkAp());"function"===typeof this.options.afterMove&&this.prevItem!==this.currentItem&&this.options.afterMove.apply(this,[this.$elem])},stop:function(){this.apStatus="stop";g.clearInterval(this.autoPlayInterval)},
+checkAp:function(){"stop"!==this.apStatus&&this.play()},play:function(){var a=this;a.apStatus="play";if(!1===a.options.autoPlay)return!1;g.clearInterval(a.autoPlayInterval);a.autoPlayInterval=g.setInterval(function(){a.next(!0)},a.options.autoPlay)},swapSpeed:function(a){"slideSpeed"===a?this.$owlWrapper.css(this.addCssSpeed(this.options.slideSpeed)):"paginationSpeed"===a?this.$owlWrapper.css(this.addCssSpeed(this.options.paginationSpeed)):"string"!==typeof a&&this.$owlWrapper.css(this.addCssSpeed(a))},
+addCssSpeed:function(a){return{"-webkit-transition":"all "+a+"ms ease","-moz-transition":"all "+a+"ms ease","-o-transition":"all "+a+"ms ease",transition:"all "+a+"ms ease"}},removeTransition:function(){return{"-webkit-transition":"","-moz-transition":"","-o-transition":"",transition:""}},doTranslate:function(a){return{"-webkit-transform":"translate3d("+a+"px, 0px, 0px)","-moz-transform":"translate3d("+a+"px, 0px, 0px)","-o-transform":"translate3d("+a+"px, 0px, 0px)","-ms-transform":"translate3d("+
+a+"px, 0px, 0px)",transform:"translate3d("+a+"px, 0px,0px)"}},transition3d:function(a){this.$owlWrapper.css(this.doTranslate(a))},css2move:function(a){this.$owlWrapper.css({left:a})},css2slide:function(a,b){var e=this;e.isCssFinish=!1;e.$owlWrapper.stop(!0,!0).animate({left:a},{duration:b||e.options.slideSpeed,complete:function(){e.isCssFinish=!0}})},checkBrowser:function(){var a=k.createElement("div");a.style.cssText="  -moz-transform:translate3d(0px, 0px, 0px); -ms-transform:translate3d(0px, 0px, 0px); -o-transform:translate3d(0px, 0px, 0px); -webkit-transform:translate3d(0px, 0px, 0px); transform:translate3d(0px, 0px, 0px)";
+a=a.style.cssText.match(/translate3d\(0px, 0px, 0px\)/g);this.browser={support3d:null!==a&&1===a.length,isTouch:"ontouchstart"in g||g.navigator.msMaxTouchPoints}},moveEvents:function(){if(!1!==this.options.mouseDrag||!1!==this.options.touchDrag)this.gestures(),this.disabledEvents()},eventTypes:function(){var a=["s","e","x"];this.ev_types={};!0===this.options.mouseDrag&&!0===this.options.touchDrag?a=["touchstart.owl mousedown.owl","touchmove.owl mousemove.owl","touchend.owl touchcancel.owl mouseup.owl"]:
+!1===this.options.mouseDrag&&!0===this.options.touchDrag?a=["touchstart.owl","touchmove.owl","touchend.owl touchcancel.owl"]:!0===this.options.mouseDrag&&!1===this.options.touchDrag&&(a=["mousedown.owl","mousemove.owl","mouseup.owl"]);this.ev_types.start=a[0];this.ev_types.move=a[1];this.ev_types.end=a[2]},disabledEvents:function(){this.$elem.on("dragstart.owl",function(a){a.preventDefault()});this.$elem.on("mousedown.disableTextSelect",function(a){return f(a.target).is("input, textarea, select, option")})},
+gestures:function(){function a(a){if(void 0!==a.touches)return{x:a.touches[0].pageX,y:a.touches[0].pageY};if(void 0===a.touches){if(void 0!==a.pageX)return{x:a.pageX,y:a.pageY};if(void 0===a.pageX)return{x:a.clientX,y:a.clientY}}}function b(a){"on"===a?(f(k).on(d.ev_types.move,e),f(k).on(d.ev_types.end,c)):"off"===a&&(f(k).off(d.ev_types.move),f(k).off(d.ev_types.end))}function e(b){b=b.originalEvent||b||g.event;d.newPosX=a(b).x-h.offsetX;d.newPosY=a(b).y-h.offsetY;d.newRelativeX=d.newPosX-h.relativePos;
+"function"===typeof d.options.startDragging&&!0!==h.dragging&&0!==d.newRelativeX&&(h.dragging=!0,d.options.startDragging.apply(d,[d.$elem]));(8<d.newRelativeX||-8>d.newRelativeX)&&!0===d.browser.isTouch&&(void 0!==b.preventDefault?b.preventDefault():b.returnValue=!1,h.sliding=!0);(10<d.newPosY||-10>d.newPosY)&&!1===h.sliding&&f(k).off("touchmove.owl");d.newPosX=Math.max(Math.min(d.newPosX,d.newRelativeX/5),d.maximumPixels+d.newRelativeX/5);!0===d.browser.support3d?d.transition3d(d.newPosX):d.css2move(d.newPosX)}
+function c(a){a=a.originalEvent||a||g.event;var c;a.target=a.target||a.srcElement;h.dragging=!1;!0!==d.browser.isTouch&&d.$owlWrapper.removeClass("grabbing");d.dragDirection=0>d.newRelativeX?d.owl.dragDirection="left":d.owl.dragDirection="right";0!==d.newRelativeX&&(c=d.getNewPosition(),d.goTo(c,!1,"drag"),h.targetElement===a.target&&!0!==d.browser.isTouch&&(f(a.target).on("click.disable",function(a){a.stopImmediatePropagation();a.stopPropagation();a.preventDefault();f(a.target).off("click.disable")}),
+a=f._data(a.target,"events").click,c=a.pop(),a.splice(0,0,c)));b("off")}var d=this,h={offsetX:0,offsetY:0,baseElWidth:0,relativePos:0,position:null,minSwipe:null,maxSwipe:null,sliding:null,dragging:null,targetElement:null};d.isCssFinish=!0;d.$elem.on(d.ev_types.start,".owl-wrapper",function(c){c=c.originalEvent||c||g.event;var e;if(3===c.which)return!1;if(!(d.itemsAmount<=d.options.items)){if(!1===d.isCssFinish&&!d.options.dragBeforeAnimFinish||!1===d.isCss3Finish&&!d.options.dragBeforeAnimFinish)return!1;
+!1!==d.options.autoPlay&&g.clearInterval(d.autoPlayInterval);!0===d.browser.isTouch||d.$owlWrapper.hasClass("grabbing")||d.$owlWrapper.addClass("grabbing");d.newPosX=0;d.newRelativeX=0;f(this).css(d.removeTransition());e=f(this).position();h.relativePos=e.left;h.offsetX=a(c).x-e.left;h.offsetY=a(c).y-e.top;b("on");h.sliding=!1;h.targetElement=c.target||c.srcElement}})},getNewPosition:function(){var a=this.closestItem();a>this.maximumItem?a=this.currentItem=this.maximumItem:0<=this.newPosX&&(this.currentItem=
+a=0);return a},closestItem:function(){var a=this,b=!0===a.options.scrollPerPage?a.pagesInArray:a.positionsInArray,e=a.newPosX,c=null;f.each(b,function(d,g){e-a.itemWidth/20>b[d+1]&&e-a.itemWidth/20<g&&"left"===a.moveDirection()?(c=g,a.currentItem=!0===a.options.scrollPerPage?f.inArray(c,a.positionsInArray):d):e+a.itemWidth/20<g&&e+a.itemWidth/20>(b[d+1]||b[d]-a.itemWidth)&&"right"===a.moveDirection()&&(!0===a.options.scrollPerPage?(c=b[d+1]||b[b.length-1],a.currentItem=f.inArray(c,a.positionsInArray)):
+(c=b[d+1],a.currentItem=d+1))});return a.currentItem},moveDirection:function(){var a;0>this.newRelativeX?(a="right",this.playDirection="next"):(a="left",this.playDirection="prev");return a},customEvents:function(){var a=this;a.$elem.on("owl.next",function(){a.next()});a.$elem.on("owl.prev",function(){a.prev()});a.$elem.on("owl.play",function(b,e){a.options.autoPlay=e;a.play();a.hoverStatus="play"});a.$elem.on("owl.stop",function(){a.stop();a.hoverStatus="stop"});a.$elem.on("owl.goTo",function(b,e){a.goTo(e)});
+a.$elem.on("owl.jumpTo",function(b,e){a.jumpTo(e)})},stopOnHover:function(){var a=this;!0===a.options.stopOnHover&&!0!==a.browser.isTouch&&!1!==a.options.autoPlay&&(a.$elem.on("mouseover",function(){a.stop()}),a.$elem.on("mouseout",function(){"stop"!==a.hoverStatus&&a.play()}))},lazyLoad:function(){var a,b,e,c,d;if(!1===this.options.lazyLoad)return!1;for(a=0;a<this.itemsAmount;a+=1)b=f(this.$owlItems[a]),"loaded"!==b.data("owl-loaded")&&(e=b.data("owl-item"),c=b.find(".lazyOwl"),"string"!==typeof c.data("src")?
+b.data("owl-loaded","loaded"):(void 0===b.data("owl-loaded")&&(c.hide(),b.addClass("loading").data("owl-loaded","checked")),(d=!0===this.options.lazyFollow?e>=this.currentItem:!0)&&e<this.currentItem+this.options.items&&c.length&&this.lazyPreload(b,c)))},lazyPreload:function(a,b){function e(){a.data("owl-loaded","loaded").removeClass("loading");b.removeAttr("data-src");"fade"===d.options.lazyEffect?b.fadeIn(400):b.show();"function"===typeof d.options.afterLazyLoad&&d.options.afterLazyLoad.apply(this,
+[d.$elem])}function c(){f+=1;d.completeImg(b.get(0))||!0===k?e():100>=f?g.setTimeout(c,100):e()}var d=this,f=0,k;"DIV"===b.prop("tagName")?(b.css("background-image","url("+b.data("src")+")"),k=!0):b[0].src=b.data("src");c()},autoHeight:function(){function a(){var a=f(e.$owlItems[e.currentItem]).height();e.wrapperOuter.css("height",a+"px");e.wrapperOuter.hasClass("autoHeight")||g.setTimeout(function(){e.wrapperOuter.addClass("autoHeight")},0)}function b(){d+=1;e.completeImg(c.get(0))?a():100>=d?g.setTimeout(b,
+100):e.wrapperOuter.css("height","")}var e=this,c=f(e.$owlItems[e.currentItem]).find("img"),d;void 0!==c.get(0)?(d=0,b()):a()},completeImg:function(a){return!a.complete||"undefined"!==typeof a.naturalWidth&&0===a.naturalWidth?!1:!0},onVisibleItems:function(){var a;!0===this.options.addClassActive&&this.$owlItems.removeClass("active");this.visibleItems=[];for(a=this.currentItem;a<this.currentItem+this.options.items;a+=1)this.visibleItems.push(a),!0===this.options.addClassActive&&f(this.$owlItems[a]).addClass("active");
+this.owl.visibleItems=this.visibleItems},transitionTypes:function(a){this.outClass="owl-"+a+"-out";this.inClass="owl-"+a+"-in"},singleItemTransition:function(){var a=this,b=a.outClass,e=a.inClass,c=a.$owlItems.eq(a.currentItem),d=a.$owlItems.eq(a.prevItem),f=Math.abs(a.positionsInArray[a.currentItem])+a.positionsInArray[a.prevItem],g=Math.abs(a.positionsInArray[a.currentItem])+a.itemWidth/2;a.isTransition=!0;a.$owlWrapper.addClass("owl-origin").css({"-webkit-transform-origin":g+"px","-moz-perspective-origin":g+
+"px","perspective-origin":g+"px"});d.css({position:"relative",left:f+"px"}).addClass(b).on("webkitAnimationEnd oAnimationEnd MSAnimationEnd animationend",function(){a.endPrev=!0;d.off("webkitAnimationEnd oAnimationEnd MSAnimationEnd animationend");a.clearTransStyle(d,b)});c.addClass(e).on("webkitAnimationEnd oAnimationEnd MSAnimationEnd animationend",function(){a.endCurrent=!0;c.off("webkitAnimationEnd oAnimationEnd MSAnimationEnd animationend");a.clearTransStyle(c,e)})},clearTransStyle:function(a,
+b){a.css({position:"",left:""}).removeClass(b);this.endPrev&&this.endCurrent&&(this.$owlWrapper.removeClass("owl-origin"),this.isTransition=this.endCurrent=this.endPrev=!1)},owlStatus:function(){this.owl={userOptions:this.userOptions,baseElement:this.$elem,userItems:this.$userItems,owlItems:this.$owlItems,currentItem:this.currentItem,prevItem:this.prevItem,visibleItems:this.visibleItems,isTouch:this.browser.isTouch,browser:this.browser,dragDirection:this.dragDirection}},clearEvents:function(){this.$elem.off(".owl owl mousedown.disableTextSelect");
+f(k).off(".owl owl");f(g).off("resize",this.resizer)},unWrap:function(){0!==this.$elem.children().length&&(this.$owlWrapper.unwrap(),this.$userItems.unwrap().unwrap(),this.owlControls&&this.owlControls.remove());this.clearEvents();this.$elem.attr("style",this.$elem.data("owl-originalStyles")||"").attr("class",this.$elem.data("owl-originalClasses"))},destroy:function(){this.stop();g.clearInterval(this.checkVisible);this.unWrap();this.$elem.removeData()},reinit:function(a){a=f.extend({},this.userOptions,
+a);this.unWrap();this.init(a,this.$elem)},addItem:function(a,b){var e;if(!a)return!1;if(0===this.$elem.children().length)return this.$elem.append(a),this.setVars(),!1;this.unWrap();e=void 0===b||-1===b?-1:b;e>=this.$userItems.length||-1===e?this.$userItems.eq(-1).after(a):this.$userItems.eq(e).before(a);this.setVars()},removeItem:function(a){if(0===this.$elem.children().length)return!1;a=void 0===a||-1===a?-1:a;this.unWrap();this.$userItems.eq(a).remove();this.setVars()}};f.fn.owlCarousel=function(a){return this.each(function(){if(!0===
+f(this).data("owl-init"))return!1;f(this).data("owl-init",!0);var b=Object.create(l);b.init(a,this);f.data(this,"owlCarousel",b)})};f.fn.owlCarousel.options={items:5,itemsCustom:!1,itemsDesktop:[1199,4],itemsDesktopSmall:[979,3],itemsTablet:[768,2],itemsTabletSmall:!1,itemsMobile:[479,1],singleItem:!1,itemsScaleUp:!1,slideSpeed:200,paginationSpeed:800,rewindSpeed:1E3,autoPlay:!1,stopOnHover:!1,navigation:!1,navigationText:["prev","next"],rewindNav:!0,scrollPerPage:!1,pagination:!0,paginationNumbers:!1,
+responsive:!0,responsiveRefreshRate:200,responsiveBaseWidth:g,baseClass:"owl-carousel",theme:"owl-theme",lazyLoad:!1,lazyFollow:!0,lazyEffect:"fade",autoHeight:!1,jsonPath:!1,jsonSuccess:!1,dragBeforeAnimFinish:!0,mouseDrag:!0,touchDrag:!0,addClassActive:!1,transitionStyle:!1,beforeUpdate:!1,afterUpdate:!1,beforeInit:!1,afterInit:!1,beforeMove:!1,afterMove:!1,afterAction:!1,startDragging:!1,afterLazyLoad:!1}})(jQuery,window,document);
\ No newline at end of file
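
This file exposes the standard jQuery plugin entry point, $.fn.owlCarousel, with the defaults listed at the end of the blob (items: 5, slideSpeed: 200, pagination: true, and so on). A minimal usage sketch, assuming jQuery and this script are already on the page and that an element with the illustrative id "logo-carousel" wraps the slide markup:

    // Initialize the carousel once the DOM is ready; option names come
    // from the plugin defaults above, the values here are illustrative.
    $(document).ready(function () {
        $("#logo-carousel").owlCarousel({
            items: 5,                         // slides visible at the widest breakpoint
            itemsDesktop: [1199, 4],          // [max viewport width, items to show]
            navigation: true,                 // render the prev/next buttons
            navigationText: ["prev", "next"],
            pagination: true,                 // render the dot pagination
            autoPlay: 5000                    // auto-advance every 5000 ms
        });
    });

The site's own usage appears in docs/assets/js/storm.js below, which initializes #owl-example with items: 8 for the client logo list.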

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/assets/js/storm.js
----------------------------------------------------------------------
diff --git a/docs/assets/js/storm.js b/docs/assets/js/storm.js
new file mode 100644
index 0000000..1aab7e1
--- /dev/null
+++ b/docs/assets/js/storm.js
@@ -0,0 +1,67 @@
+$(document).ready(function() {
+	//Scroll to Top
+	$(".totop").hide();
+	$(window).scroll(function(){
+		if ($(this).scrollTop() > 300) {
+			$('.totop').fadeIn();
+		} else {
+			$('.totop').fadeOut();
+		}
+	});
+	$(".totop a").click(function(e) {
+		e.preventDefault();
+		$("html, body").animate({ scrollTop: 0 }, "slow");
+		return false;
+	});
+	
+    //Fixed Navigation
+    $('.navbar').affix({
+        offset: {
+            top: $('header').height()
+        }
+    });
+
+    //Owl Carousel For Client List
+	$("#owl-example").owlCarousel({
+		items: 8
+	});	
+
+	$(".navbar li a").each(function() {
+		if(document.URL.indexOf(this.getAttribute('href'))>0)
+		{
+			$(".navbar li a").removeClass('current');
+			$(this).addClass('current');
+		}
+		if(document.URL.indexOf('/documentation/')>0)
+		{
+			$(".navbar li a").removeClass('current');
+			$('#documentation').addClass('current');
+		}
+		if(document.URL.indexOf('/contribute/')>0)
+		{
+			$(".navbar li a").removeClass('current');
+			$('#contribute').addClass('current');
+		}
+
+	});
+	$(".news li a").each(function() {
+		if(document.URL.indexOf(this.getAttribute('href'))>0 && !($("#news-list li a").hasClass('current'))){
+			$("#news-list li a").removeClass('current');
+			$(this).addClass('current');
+		}
+	});
+
+});
+
+function isMobile() {
+	    if (sessionStorage.desktop)
+	        return false;
+	    else if (localStorage.mobile)
+	        $('#twitter_widget').hide();
+
+	    var mobile = ['iphone','ipad','android','blackberry','nokia','opera mini','windows mobile','windows phone','iemobile']; 
+	    for (var i in mobile) 
+	    	if (navigator.userAgent.toLowerCase().indexOf(mobile[i].toLowerCase()) > 0) 
+	    		$('#twitter_widget').hide();
+	    return false;
+	}
\ No newline at end of file
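
Note that the isMobile helper above always returns false; hiding #twitter_widget is its only observable effect, and its indexOf(...) > 0 test would also miss a match at position 0. For comparison, a sketch of the conventional shape of this detector, assuming the same user-agent list (the name isMobileSketch and the boolean return are illustrative, not part of the committed file):

    function isMobileSketch() {
        // Honor explicit overrides stashed in web storage, as the original does.
        if (sessionStorage.desktop) return false;
        if (localStorage.mobile) return true;
        // Same substring-based user-agent sniff as the committed list.
        var mobile = ['iphone', 'ipad', 'android', 'blackberry', 'nokia',
                      'opera mini', 'windows mobile', 'windows phone', 'iemobile'];
        var ua = navigator.userAgent.toLowerCase();
        for (var i = 0; i < mobile.length; i += 1) {
            if (ua.indexOf(mobile[i]) !== -1) return true;
        }
        return false;
    }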

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/css/style.css
----------------------------------------------------------------------
diff --git a/docs/css/style.css b/docs/css/style.css
new file mode 100644
index 0000000..0302cb7
--- /dev/null
+++ b/docs/css/style.css
@@ -0,0 +1,553 @@
+/*
+	Theme: Apache Storm
+*/
+
+/* Fonts */
+@import url(http://fonts.googleapis.com/css?family=Lato:300,400,700,300italic,400italic,700italic);
+@import url(http://fonts.googleapis.com/css?family=Oswald:400,300);
+
+/* Generic */
+body {
+	font-family: "Lato", sans-serif;
+	font-weight: 400;
+	font-size: 14px;
+}
+header {
+	background: #fdfbfb url(../images/header-bg.png);
+	border-top: 3px #328fbf solid;
+}
+footer {
+	background: #222 url(../images/footer-bg.png);
+	padding-top: 10px;
+	color: #ddd;
+	border-top: 3px #328fbf solid;
+	font-size: 12px;
+}
+p {
+	line-height: 24px;
+}
+h1, h2, h3, h4, h5, h6{
+	font-family: "Oswald", sans-serif;
+	font-weight: 400;
+}
+
+/* Bootstrap Extended */
+.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 {
+	padding-top: 15px;
+	padding-bottom: 15px;
+}
+.navbar{
+	background: #328fbf;
+	border-top:1px solid #235693;
+	border-bottom:1px solid #235693;
+	border-radius:0;
+	margin-bottom:10px; 
+}
+.navbar.affix {
+    position: fixed;
+    top: 0;
+    width: 100%;
+    z-index:10;
+}
+.nav .open>a, .nav .open>a:hover, .nav .open>a:focus {
+   border-color: #235693;
+}
+.navbar button{
+	background:#29a1c4;  
+}
+.navbar button:hover{
+	background:#29a1c4;
+}
+.navbar-toggle .icon-bar {
+  background: #fff;
+}
+.navbar .nav{
+	border-right:1px solid #235693;
+}
+.navbar .nav > li > a{
+	color: #ffffff;
+	background: #328fbf !important; 
+	border-left:1px solid #235693;
+	font-weight: 400;
+	
+	-webkit-transition:background 1s ease;
+	-moz-transition:background 1s ease;
+	-o-transition:background 1s ease;	
+	transition:background 1s ease;
+}
+.navbar .nav > li > a:hover, .navbar .nav > li > a.current {
+	background:#235693 !important;
+}
+.navbar .nav .active > a,
+.navbar .nav .active > a:hover,
+.navbar .nav .active > a:focus {
+	background: #235693;
+}
+.dropdown-toggle{
+	background: #328fbf;
+}
+.dropdown-menu{
+	padding:0px;
+	background:#328fbf;
+	border:1px solid #235693;
+}
+.dropdown-menu a{
+	padding: 8px 10px !important;
+	color:#ffffff !important;
+	background:#328fbf;
+	border-bottom:1px solid #235693;
+	
+	-webkit-transition:background 1s ease;
+	-moz-transition:background 1s ease;
+	-o-transition:background 1s ease;
+	transition:background 1s ease;
+}
+.dropdown-menu a:hover,.dropdown-menu a:focus{
+	background:#235693 !important;
+}
+
+/* Theme Style */
+.row-margin-bottom {
+	margin-bottom: 25px;
+}
+.remove-custom-padding {
+	padding-top: 0px;
+	padding-bottom: 0px;
+}
+.no-margin-top {
+	margin-top: 0px;
+}
+.logo {
+	margin-top: 20px;
+	margin-bottom: 20px;
+}
+.page-title {
+	margin-top: 0;
+	margin-bottom: 20px;
+	padding-bottom: 5px;
+	color: #235693;
+	border-bottom: 2px solid #e99941;
+	font-weight: 300;
+}
+.page-title span {
+	font-family: "Lato", sans-serif;
+	font-weight: 400;
+	font-size: 14px;
+	color: #328fbf;
+	margin-top: 5px;
+}
+.btn-std {
+	background-color: #328fbf;
+	border-bottom: 2px solid #235693;
+	color: #ffffff;
+	padding: 4px 12px;
+	font-size: 12px;
+	text-align: center;
+	border-radius: 5px;
+}
+.btn-std:hover {
+	background-color: #235693;
+	color: #ffffff;
+	text-decoration: none;
+}
+.btn-download {
+	background-color: #32bf61;
+	border-bottom: 2px solid #239329;
+	font-size: 16px;
+	font-weight: 700;
+	margin-top: 35px;
+	margin-bottom: 35px;
+	padding: 12px 36px;	
+}
+.btn-download:hover {
+	background-color: #239329;
+}
+.box-primary {
+	padding: 8px 10px;
+	background: #fbfbfb;
+	border: 1px solid #ccc;
+	border-bottom: 3px solid #ccc;
+	border-radius: 10px;
+}
+.box-primary h4 {
+	border-bottom: 1px solid #ccc;
+	padding-bottom: 10px;
+}
+.box-warning {
+	padding: 8px 10px;
+	background: #FFFAF3;
+	border: 1px solid #EB9A35;
+	border-bottom: 3px solid #EB9A35;
+	border-radius: 10px;
+}
+.box-warning h4 {
+	border-bottom: 1px solid #EB9A35;
+	padding-bottom: 10px;
+}
+.box-info {
+	padding: 8px 10px;
+	background: #F8FFFF;
+	border: 1px solid #328fbf;
+	border-bottom: 3px solid #328fbf;
+	/*border-radius: 10px;*/
+}
+.box-info h4 {
+	border-bottom: 1px solid #328fbf;
+	padding-bottom: 10px;
+}
+
+/* Footer */
+footer hr {
+	margin: 0;
+	border-top: 1px solid #555;
+	border-bottom: 1px solid #111;
+}
+.footer-widget h5 {
+	padding-bottom: 10px;
+	border-bottom: 1px solid #555;
+	margin-bottom: 10px;
+}
+.footer-list {
+	list-style: none;
+	padding-left: 20px;
+}
+.footer-list li {
+	line-height: 32px;
+}
+.footer-list li:before {
+	content: "\f105";
+	font-family: 'FontAwesome';
+	margin-right: 10px;
+}
+.footer-list li a {
+	color: #ddd;
+}
+
+.tweet {margin-bottom: 10px;}
+
+.social a, .social a:visited, .social a:hover{
+	color:#fff;
+	text-decoration:none;
+}
+.social i{
+	display:inline-block;
+	height:30px;
+	width:30px;
+	font-size:15px;
+	text-align:center;
+	line-height:30px;
+	-webkit-transition:background 1s ease;
+	-moz-transition:background 1s ease;
+	-o-transition:background 1s ease;
+	transition:background 1s ease;
+	border-radius:30px;
+	margin-right:5px;
+}
+.social i:hover{
+	-webkit-transition:background 1s ease;
+	-moz-transition:background 1s ease;
+	-o-transition:background 1s ease;
+	transition:background 1s ease;
+}
+.facebook { background: #1e78ae !important; }
+.twitter { background: #1ba1e2 !important; }
+.google-plus { background: #f22d0c !important; }
+.linkedin { background: #2d93cf !important; }
+.pinterest { background:	#dd1617 !important; }
+
+.facebook:hover, .twitter:hover, .google-plus:hover, .linkedin:hover, .pinterest:hover { background: #666 !important; }
+
+/* Scroll to top */
+.totop {
+	position: fixed;
+	bottom: 10px;
+	right: 10px;
+	z-index: 104400;
+}
+.totop a i{
+	display: block;
+	width: 40px;
+	height: 40px;
+	line-height: 40px;
+	text-align: center;
+	font-size: 25px;
+	background: rgba(60,60,60,0.7);
+}
+.totop a:hover i { background: rgba(60,60,60,1); }
+.totop a, .totop a:visited{
+	color: #fff;
+}
+.totop a:hover {
+	color: #eee;
+	text-decoration: none;
+}
+
+/* Page Specific - Home */
+.latest-news {
+	list-style: none;
+	padding-left: 20px;
+}
+.latest-news li {
+	line-height: 28px;
+}
+.latest-news li:before {
+	content: "\f101";
+	font-family: 'FontAwesome';
+	margin-right: 10px;
+}
+.latest-news li span.small {
+	font-size:10px;
+}
+
+/* Page Specific - Download */
+.download-block {
+	margin-bottom: 15px;
+	border-bottom: 1px solid #eee;
+}
+.download-block h5 {
+	background: #328fbf;
+	color: #fff;
+	padding: 10px;
+}
+.download-info {
+}
+.arrow-list {
+	list-style: none;
+	padding-left: 20px;
+}
+.arrow-list li {
+	line-height: 28px;
+}
+.arrow-list li:before {
+	content: "\f105";
+	font-family: 'FontAwesome';
+	margin-right: 10px;
+}
+
+/* Page Specific - News */
+.news {
+	list-style: none;
+	padding-left: 0px;
+}
+.news li a {
+	display: block;
+	padding: 7px;
+	margin-bottom: 2px;
+	font-family: "Oswald", sans-serif;
+	letter-spacing: 1px;
+	color: #328fbf;
+	border: 1px #328fbf solid;
+	-webkit-transition:background 1s ease;
+	-moz-transition:background 1s ease;
+	-o-transition:background 1s ease;	
+	transition:background 1s ease;
+}
+.news li a:hover, .news li a.current {
+	text-decoration: none;
+	border: 1px #235693 solid;
+	background-color: #328fbf;
+	color:white;
+}
+.news-title {
+	color: #235693;
+	margin-top: 0;
+}
+.news-meta {
+	padding: 5px 0;
+	margin-bottom: 20px;
+	border-top: 1px #328fbf solid;
+	border-bottom: 1px #328fbf solid;
+	color: #235693;
+	font-weight: 400;
+}
+.news-meta .fa-user {
+	margin-left: 25px;
+}
+
+/* Page Specific - FAQ */
+.faq .nav-tabs {
+	border-bottom: 1px solid #328fbf;
+}
+.faq .nav-tabs > li > a {
+	margin-right: 2px;
+	font-family: "Oswald", sans-serif;
+	font-size: 18px;
+	letter-spacing: 1px;
+	border: none;
+	border-radius: 0;
+}
+.faq .nav-tabs > li > a:hover {
+	border-bottom-color: #328fbf;
+	background-color: #FFF0DF;
+}
+.faq .nav-tabs > li.active > a, 
+.faq .nav-tabs > li.active > a:hover, 
+.faq .nav-tabs > li.active > a:focus {
+	color: #235693;
+	cursor: default;
+	background-color: #fff;
+	border: 1px solid #328fbf;
+	border-bottom-color: transparent;
+}
+.faq .tab-content {
+	padding: 15px;
+	border: 1px #328fbf solid;
+	border-top: none;
+}
+
+/* Syntax Highlighting Styles */
+.highlight {background: #fff;}
+.highlight .c {color: #998; font-style: italic;}
+.highlight .err {color: #a61717; background-color: #e3d2d2;}
+.highlight .k {font-weight: bold;}
+.highlight .o {font-weight: bold;}
+.highlight .cm {color: #998; font-style: italic;}
+.highlight .cp {color: #999; font-weight: bold;}
+.highlight .c1 {color: #998; font-style: italic;}
+.highlight .cs {color: #999; font-weight: bold; font-style: italic;}
+.highlight .gd {color: #000; background-color: #fdd;}
+.highlight .gd .x {color: #000; background-color: #faa;}
+.highlight .ge {font-style: italic;}
+.highlight .gr {color: #a00;}
+.highlight .gh {color: #999;}
+.highlight .gi {color: #000; background-color: #dfd;}
+.highlight .gi .x {color: #000; background-color: #afa;}
+.highlight .go {color: #888;}
+.highlight .gp {color: #555;}
+.highlight .gs {font-weight: bold;}
+.highlight .gu {color: #aaa;}
+.highlight .gt {color: #a00;}
+.highlight .kc {font-weight: bold;}
+.highlight .kd {font-weight: bold;}
+.highlight .kp {font-weight: bold;}
+.highlight .kr {font-weight: bold;}
+.highlight .kt {color: #458; font-weight: bold;}
+.highlight .m {color: #099;}
+.highlight .s {color: #d14;}
+.highlight .na {color: #008080;}
+.highlight .nb {color: #0086B3;}
+.highlight .nc {color: #458; font-weight: bold;}
+.highlight .no {color: #008080;}
+.highlight .ni {color: #800080;}
+.highlight .ne {color: #900; font-weight: bold;}
+.highlight .nf {color: #900; font-weight: bold;}
+.highlight .nn {color: #555;}
+.highlight .nt {color: #000080;}
+.highlight .nv {color: #008080;}
+.highlight .ow {font-weight: bold;}
+.highlight .w {color: #bbb;}
+.highlight .mf {color: #099;}
+.highlight .mh {color: #099;}
+.highlight .mi {color: #099;}
+.highlight .mo {color: #099;}
+.highlight .sb {color: #d14;}
+.highlight .sc {color: #d14;}
+.highlight .sd {color: #d14;}
+.highlight .s2 {color: #d14;}
+.highlight .se {color: #d14;}
+.highlight .sh {color: #d14;}
+.highlight .si {color: #d14;}
+.highlight .sx {color: #d14;}
+.highlight .sr {color: #009926;}
+.highlight .s1 {color: #d14;}
+.highlight .ss {color: #990073;}
+.highlight .bp {color: #999;}
+.highlight .vc {color: #008080;}
+.highlight .vg {color: #008080;}
+.highlight .vi {color: #008080;}
+.highlight .il {color: #099;}
+
+/* Page Specific - Documentation Index */
+.documentation-list {
+	list-style: none;
+	padding-left: 0px;
+}
+.documentation-list li {
+	line-height: 28px;
+}
+.documentation-list li:before {
+	content: "\f101";
+	font-family: 'FontAwesome';
+	margin-right: 5px;
+	color: #337ab7;
+}
+
+
+/* Responsive */
+@media screen and (max-width: 992px) {
+	.logo, .btn-download {margin-top:0; margin-bottom:0;}
+}
+
+.brick{
+	width: 370px;
+    border: solid 1px #CCCCCC;
+    margin: 20px;
+}
+
+.brick h3{
+	border-bottom: 1px solid #CCCCCC;
+    margin-bottom: 20px;
+}
+.brickSS {
+    border: solid 1px #CCCCCC;
+    margin: 20px;
+}
+
+.brickSS .row{
+    padding: 15px;
+}
+
+.brickSS iframe{
+    margin: 15px;
+}
+
+.resources .nav-tabs {
+	border-bottom: 1px solid #328fbf;
+}
+.resources .nav-tabs > li > a {
+	margin-right: 2px;
+	font-family: "Oswald", sans-serif;
+	font-size: 18px;
+	letter-spacing: 1px;
+	border: none;
+	border-radius: 0;
+}
+.resources .nav-tabs > li > a:hover {
+	border-bottom-color: #328fbf;
+	background-color: #FFF0DF;
+}
+.resources .nav-tabs > li.active > a, 
+.resources .nav-tabs > li.active > a:hover, 
+.resources .nav-tabs > li.active > a:focus {
+	color: #235693;
+	cursor: default;
+	background-color: #fff;
+	border: 1px solid #328fbf;
+	border-bottom-color: transparent;
+}
+.resources .tab-content {
+	padding: 15px;
+	border: 1px #328fbf solid;
+	border-top: none;
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/favicon.ico
----------------------------------------------------------------------
diff --git a/docs/favicon.ico b/docs/favicon.ico
new file mode 100644
index 0000000..7149e2e
Binary files /dev/null and b/docs/favicon.ico differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/ack_tree.png
----------------------------------------------------------------------
diff --git a/docs/images/ack_tree.png b/docs/images/ack_tree.png
new file mode 100644
index 0000000..2134cc8
Binary files /dev/null and b/docs/images/ack_tree.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/batched-stream.png
----------------------------------------------------------------------
diff --git a/docs/images/batched-stream.png b/docs/images/batched-stream.png
new file mode 100644
index 0000000..1e6aa01
Binary files /dev/null and b/docs/images/batched-stream.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/drpc-workflow.png
----------------------------------------------------------------------
diff --git a/docs/images/drpc-workflow.png b/docs/images/drpc-workflow.png
new file mode 100644
index 0000000..9905648
Binary files /dev/null and b/docs/images/drpc-workflow.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/eclipse-project-properties.png
----------------------------------------------------------------------
diff --git a/docs/images/eclipse-project-properties.png b/docs/images/eclipse-project-properties.png
new file mode 100644
index 0000000..62f8d32
Binary files /dev/null and b/docs/images/eclipse-project-properties.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/example-of-a-running-topology.png
----------------------------------------------------------------------
diff --git a/docs/images/example-of-a-running-topology.png b/docs/images/example-of-a-running-topology.png
new file mode 100644
index 0000000..1462b21
Binary files /dev/null and b/docs/images/example-of-a-running-topology.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/footer-bg.png
----------------------------------------------------------------------
diff --git a/docs/images/footer-bg.png b/docs/images/footer-bg.png
new file mode 100644
index 0000000..e72d575
Binary files /dev/null and b/docs/images/footer-bg.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/grouping.png
----------------------------------------------------------------------
diff --git a/docs/images/grouping.png b/docs/images/grouping.png
new file mode 100644
index 0000000..3911286
Binary files /dev/null and b/docs/images/grouping.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/header-bg.png
----------------------------------------------------------------------
diff --git a/docs/images/header-bg.png b/docs/images/header-bg.png
new file mode 100644
index 0000000..01a291e
Binary files /dev/null and b/docs/images/header-bg.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/ld-library-path-eclipse-linux.png
----------------------------------------------------------------------
diff --git a/docs/images/ld-library-path-eclipse-linux.png b/docs/images/ld-library-path-eclipse-linux.png
new file mode 100644
index 0000000..b6fbbd9
Binary files /dev/null and b/docs/images/ld-library-path-eclipse-linux.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/loading.gif
----------------------------------------------------------------------
diff --git a/docs/images/loading.gif b/docs/images/loading.gif
new file mode 100644
index 0000000..06f47af
Binary files /dev/null and b/docs/images/loading.gif differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logo.png
----------------------------------------------------------------------
diff --git a/docs/images/logo.png b/docs/images/logo.png
new file mode 100644
index 0000000..570276e
Binary files /dev/null and b/docs/images/logo.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/aeris.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/aeris.jpg b/docs/images/logos/aeris.jpg
new file mode 100644
index 0000000..adc2e18
Binary files /dev/null and b/docs/images/logos/aeris.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/alibaba.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/alibaba.jpg b/docs/images/logos/alibaba.jpg
index 52c6eab..658a003 100644
Binary files a/docs/images/logos/alibaba.jpg and b/docs/images/logos/alibaba.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/bai.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/bai.jpg b/docs/images/logos/bai.jpg
new file mode 100644
index 0000000..1bde805
Binary files /dev/null and b/docs/images/logos/bai.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/cerner.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/cerner.jpg b/docs/images/logos/cerner.jpg
new file mode 100644
index 0000000..9a18cb6
Binary files /dev/null and b/docs/images/logos/cerner.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/flipboard.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/flipboard.jpg b/docs/images/logos/flipboard.jpg
new file mode 100644
index 0000000..4d1eac1
Binary files /dev/null and b/docs/images/logos/flipboard.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/fullcontact.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/fullcontact.jpg b/docs/images/logos/fullcontact.jpg
new file mode 100644
index 0000000..cc21610
Binary files /dev/null and b/docs/images/logos/fullcontact.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/groupon.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/groupon.jpg b/docs/images/logos/groupon.jpg
index 3acad43..97ae2c5 100644
Binary files a/docs/images/logos/groupon.jpg and b/docs/images/logos/groupon.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/health-market-science.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/health-market-science.jpg b/docs/images/logos/health-market-science.jpg
new file mode 100644
index 0000000..06ce608
Binary files /dev/null and b/docs/images/logos/health-market-science.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/images.png
----------------------------------------------------------------------
diff --git a/docs/images/logos/images.png b/docs/images/logos/images.png
new file mode 100644
index 0000000..801cfce
Binary files /dev/null and b/docs/images/logos/images.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/infochimp.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/infochimp.jpg b/docs/images/logos/infochimp.jpg
new file mode 100644
index 0000000..9b6e89f
Binary files /dev/null and b/docs/images/logos/infochimp.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/klout.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/klout.jpg b/docs/images/logos/klout.jpg
new file mode 100644
index 0000000..69cdd3d
Binary files /dev/null and b/docs/images/logos/klout.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/loggly.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/loggly.jpg b/docs/images/logos/loggly.jpg
new file mode 100644
index 0000000..3f0eb81
Binary files /dev/null and b/docs/images/logos/loggly.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/ooyala.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/ooyala.jpg b/docs/images/logos/ooyala.jpg
new file mode 100644
index 0000000..6a9480f
Binary files /dev/null and b/docs/images/logos/ooyala.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/parc.png
----------------------------------------------------------------------
diff --git a/docs/images/logos/parc.png b/docs/images/logos/parc.png
index 3b5ea4c..13a591e 100644
Binary files a/docs/images/logos/parc.png and b/docs/images/logos/parc.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/premise.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/premise.jpg b/docs/images/logos/premise.jpg
new file mode 100644
index 0000000..13f8760
Binary files /dev/null and b/docs/images/logos/premise.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/qiy.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/qiy.jpg b/docs/images/logos/qiy.jpg
new file mode 100644
index 0000000..ad7dce4
Binary files /dev/null and b/docs/images/logos/qiy.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/quicklizard.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/quicklizard.jpg b/docs/images/logos/quicklizard.jpg
new file mode 100644
index 0000000..65328c6
Binary files /dev/null and b/docs/images/logos/quicklizard.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/rocketfuel.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/rocketfuel.jpg b/docs/images/logos/rocketfuel.jpg
new file mode 100644
index 0000000..b169a87
Binary files /dev/null and b/docs/images/logos/rocketfuel.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/rubicon.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/rubicon.jpg b/docs/images/logos/rubicon.jpg
new file mode 100644
index 0000000..da01e0a
Binary files /dev/null and b/docs/images/logos/rubicon.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/spider.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/spider.jpg b/docs/images/logos/spider.jpg
new file mode 100644
index 0000000..69aab3e
Binary files /dev/null and b/docs/images/logos/spider.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/spotify.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/spotify.jpg b/docs/images/logos/spotify.jpg
new file mode 100644
index 0000000..8d6253f
Binary files /dev/null and b/docs/images/logos/spotify.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/taobao.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/taobao.jpg b/docs/images/logos/taobao.jpg
new file mode 100644
index 0000000..54272af
Binary files /dev/null and b/docs/images/logos/taobao.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/the-weather-channel.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/the-weather-channel.jpg b/docs/images/logos/the-weather-channel.jpg
new file mode 100644
index 0000000..f9d68f4
Binary files /dev/null and b/docs/images/logos/the-weather-channel.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/twitter.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/twitter.jpg b/docs/images/logos/twitter.jpg
new file mode 100644
index 0000000..fb50bdb
Binary files /dev/null and b/docs/images/logos/twitter.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/verisign.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/verisign.jpg b/docs/images/logos/verisign.jpg
new file mode 100644
index 0000000..2a0dc70
Binary files /dev/null and b/docs/images/logos/verisign.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/webmd.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/webmd.jpg b/docs/images/logos/webmd.jpg
index e477769..cec11ed 100644
Binary files a/docs/images/logos/webmd.jpg and b/docs/images/logos/webmd.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/wego.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/wego.jpg b/docs/images/logos/wego.jpg
new file mode 100644
index 0000000..27c62c4
Binary files /dev/null and b/docs/images/logos/wego.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/yahoo-japan.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/yahoo-japan.jpg b/docs/images/logos/yahoo-japan.jpg
new file mode 100644
index 0000000..ef213a7
Binary files /dev/null and b/docs/images/logos/yahoo-japan.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/yahoo.png
----------------------------------------------------------------------
diff --git a/docs/images/logos/yahoo.png b/docs/images/logos/yahoo.png
new file mode 100755
index 0000000..659e6de
Binary files /dev/null and b/docs/images/logos/yahoo.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/logos/yelp.jpg
----------------------------------------------------------------------
diff --git a/docs/images/logos/yelp.jpg b/docs/images/logos/yelp.jpg
new file mode 100644
index 0000000..9e6b6e4
Binary files /dev/null and b/docs/images/logos/yelp.jpg differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/relationships-worker-processes-executors-tasks.png
----------------------------------------------------------------------
diff --git a/docs/images/relationships-worker-processes-executors-tasks.png b/docs/images/relationships-worker-processes-executors-tasks.png
new file mode 100644
index 0000000..ef6f3fd
Binary files /dev/null and b/docs/images/relationships-worker-processes-executors-tasks.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/spout-vs-state.png
----------------------------------------------------------------------
diff --git a/docs/images/spout-vs-state.png b/docs/images/spout-vs-state.png
new file mode 100644
index 0000000..b6b06b3
Binary files /dev/null and b/docs/images/spout-vs-state.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/storm-cluster.png
----------------------------------------------------------------------
diff --git a/docs/images/storm-cluster.png b/docs/images/storm-cluster.png
new file mode 100644
index 0000000..df2ddb8
Binary files /dev/null and b/docs/images/storm-cluster.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/storm-flow.png
----------------------------------------------------------------------
diff --git a/docs/images/storm-flow.png b/docs/images/storm-flow.png
new file mode 100644
index 0000000..45df814
Binary files /dev/null and b/docs/images/storm-flow.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/topology-tasks.png
----------------------------------------------------------------------
diff --git a/docs/images/topology-tasks.png b/docs/images/topology-tasks.png
new file mode 100644
index 0000000..0affaba
Binary files /dev/null and b/docs/images/topology-tasks.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/transactional-batches.png
----------------------------------------------------------------------
diff --git a/docs/images/transactional-batches.png b/docs/images/transactional-batches.png
new file mode 100644
index 0000000..db2716b
Binary files /dev/null and b/docs/images/transactional-batches.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/transactional-commit-flow.png
----------------------------------------------------------------------
diff --git a/docs/images/transactional-commit-flow.png b/docs/images/transactional-commit-flow.png
new file mode 100644
index 0000000..25131b0
Binary files /dev/null and b/docs/images/transactional-commit-flow.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/transactional-design-2.png
----------------------------------------------------------------------
diff --git a/docs/images/transactional-design-2.png b/docs/images/transactional-design-2.png
new file mode 100644
index 0000000..a0e0ebf
Binary files /dev/null and b/docs/images/transactional-design-2.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/transactional-spout-structure.png
----------------------------------------------------------------------
diff --git a/docs/images/transactional-spout-structure.png b/docs/images/transactional-spout-structure.png
new file mode 100644
index 0000000..ecf7def
Binary files /dev/null and b/docs/images/transactional-spout-structure.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/trident-to-storm1.png
----------------------------------------------------------------------
diff --git a/docs/images/trident-to-storm1.png b/docs/images/trident-to-storm1.png
new file mode 100644
index 0000000..b022776
Binary files /dev/null and b/docs/images/trident-to-storm1.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/trident-to-storm2.png
----------------------------------------------------------------------
diff --git a/docs/images/trident-to-storm2.png b/docs/images/trident-to-storm2.png
new file mode 100644
index 0000000..6aa0fc5
Binary files /dev/null and b/docs/images/trident-to-storm2.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/tuple-dag.png
----------------------------------------------------------------------
diff --git a/docs/images/tuple-dag.png b/docs/images/tuple-dag.png
new file mode 100644
index 0000000..34611d4
Binary files /dev/null and b/docs/images/tuple-dag.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/images/tuple_tree.png
----------------------------------------------------------------------
diff --git a/docs/images/tuple_tree.png b/docs/images/tuple_tree.png
new file mode 100644
index 0000000..b14f558
Binary files /dev/null and b/docs/images/tuple_tree.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/index.md
----------------------------------------------------------------------
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000..55bf828
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,69 @@
+---
+layout: documentation
+---
+Storm is a distributed realtime computation system. Similar to how Hadoop provides a set of general primitives for doing batch processing, Storm provides a set of general primitives for doing realtime computation. Storm is simple, can be used with any programming language, [is used by many companies](/documentation/Powered-By.html), and is a lot of fun to use!
+
+### Read these first
+
+* [Rationale](Rationale.html)
+* [Tutorial](Tutorial.html)
+* [Setting up development environment](Setting-up-development-environment.html)
+* [Creating a new Storm project](Creating-a-new-Storm-project.html)
+
+### Documentation
+
+* [Manual](Documentation.html)
+* [Javadoc](javadocs/index.html)
+* [FAQ](FAQ.html)
+* [SECURITY](SECURITY.html)
+* [REST API](STORM-UI-REST-API.html)
+
+### Getting help
+
+__NOTE:__ The Google Groups account storm-user@googlegroups.com is now officially deprecated in favor of the Apache-hosted user/dev mailing lists.
+
+#### Storm Users
+Storm users should send messages and subscribe to [user@storm.apache.org](mailto:user@storm.apache.org).
+
+You can subscribe to this list by sending an email to [user-subscribe@storm.apache.org](mailto:user-subscribe@storm.apache.org). Likewise, you can cancel a subscription by sending an email to [user-unsubscribe@storm.apache.org](mailto:user-unsubscribe@storm.apache.org).
+
+You can view the archives of the mailing list [here](http://mail-archives.apache.org/mod_mbox/storm-user/).
+
+#### Storm Developers
+Storm developers should send messages and subscribe to [dev@storm.apache.org](mailto:dev@storm.apache.org).
+
+You can subscribe to this list by sending an email to [dev-subscribe@storm.apache.org](mailto:dev-subscribe@storm.apache.org). Likewise, you can cancel a subscription by sending an email to [dev-unsubscribe@storm.apache.org](mailto:dev-unsubscribe@storm.apache.org).
+
+You can view the archives of the mailing list [here](http://mail-archives.apache.org/mod_mbox/storm-dev/).
+
+#### Which list should I send/subscribe to?
+If you are using a pre-built binary distribution of Storm, then chances are you should send questions, comments, Storm-related announcements, etc. to [user@storm.apache.org](mailto:user@storm.apache.org).
+
+If you are building Storm from source, developing new features, or otherwise hacking on the Storm source code, then [dev@storm.apache.org](mailto:dev@storm.apache.org) is more appropriate.
+
+#### What will happen with storm-user@googlegroups.com?
+All existing messages will remain archived there, and can be accessed/searched [here](https://groups.google.com/forum/#!forum/storm-user).
+
+New messages sent to storm-user@googlegroups.com will either be rejected/bounced or replied to with a message directing the sender to the appropriate Apache-hosted list.
+
+#### IRC
+You can also come to the #storm-user room on [freenode](http://freenode.net/). You can usually find a Storm developer there to help you out.
+
+
+
+### Related projects
+
+* [storm-contrib](https://github.com/nathanmarz/storm-contrib)
+* [storm-deploy](http://github.com/nathanmarz/storm-deploy): One click deploys for Storm clusters on AWS
+* [Spout implementations](Spout-implementations.html)
+* [DSLs and multilang adapters](DSLs-and-multilang-adapters.html)
+* [Serializers](Serializers.html)
+
+### Contributing to Storm
+
+* [Contributing to Storm](Contributing-to-Storm.html)
+* [Project ideas](Project-ideas.html)
+
+### Powered by Storm
+
+[Companies and projects powered by Storm](Powered-By.html)


[07/16] storm git commit: STORM-1617: Release Specific Documentation 0.9.x

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Trident-tutorial.md
----------------------------------------------------------------------
diff --git a/docs/Trident-tutorial.md b/docs/Trident-tutorial.md
new file mode 100644
index 0000000..862dd8b
--- /dev/null
+++ b/docs/Trident-tutorial.md
@@ -0,0 +1,253 @@
+---
+layout: documentation
+---
+# Trident tutorial
+
+Trident is a high-level abstraction for doing realtime computing on top of Storm. It allows you to seamlessly intermix high throughput (millions of messages per second), stateful stream processing with low latency distributed querying. If you're familiar with high level batch processing tools like Pig or Cascading, the concepts of Trident will be very familiar – Trident has joins, aggregations, grouping, functions, and filters. In addition to these, Trident adds primitives for doing stateful, incremental processing on top of any database or persistence store. Trident has consistent, exactly-once semantics, so it is easy to reason about Trident topologies.
+
+## Illustrative example
+
+Let's look at an illustrative example of Trident. This example will do two things:
+
+1. Compute streaming word count from an input stream of sentences
+2. Implement queries to get the sum of the counts for a list of words
+
+For the purposes of illustration, this example will read an infinite stream of sentences from the following source:
+
+```java
+FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
+               new Values("the cow jumped over the moon"),
+               new Values("the man went to the store and bought some candy"),
+               new Values("four score and seven years ago"),
+               new Values("how many apples can you eat"));
+spout.setCycle(true);
+```
+
+This spout cycles through that set of sentences over and over to produce the sentence stream. Here's the code to do the streaming word count part of the computation:
+
+```java
+TridentTopology topology = new TridentTopology();        
+TridentState wordCounts =
+     topology.newStream("spout1", spout)
+       .each(new Fields("sentence"), new Split(), new Fields("word"))
+       .groupBy(new Fields("word"))
+       .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))                
+       .parallelismHint(6);
+```
+
+Let's go through the code line by line. First a TridentTopology object is created, which exposes the interface for constructing Trident computations. TridentTopology has a method called newStream that creates a new stream of data in the topology reading from an input source. In this case, the input source is just the FixedBatchSpout defined from before. Input sources can also be queue brokers like Kestrel or Kafka. Trident keeps track of a small amount of state for each input source (metadata about what it has consumed) in Zookeeper, and the "spout1" string here specifies the node in Zookeeper where Trident should keep that metadata.
+
+Trident processes the stream as small batches of tuples. For example, the incoming stream of sentences might be divided into batches like so:
+
+![Batched stream](images/batched-stream.png)
+
+Generally the size of those small batches will be on the order of thousands or millions of tuples, depending on your incoming throughput.
+
+Trident provides a fully fledged batch processing API to process those small batches. The API is very similar to what you see in high level abstractions for Hadoop like Pig or Cascading: you can do group by's, joins, aggregations, run functions, run filters, and so on. Of course, processing each small batch in isolation isn't that interesting, so Trident provides functions for doing aggregations across batches and persistently storing those aggregations – whether in memory, in Memcached, in Cassandra, or some other store. Finally, Trident has first-class functions for querying sources of realtime state. That state could be updated by Trident (like in this example), or it could be an independent source of state.
+
+Back to the example, the spout emits a stream containing one field called "sentence". The next line of the topology definition applies the Split function to each tuple in the stream, taking the "sentence" field and splitting it into words. Each sentence tuple creates potentially many word tuples – for instance, the sentence "the cow jumped over the moon" creates six "word" tuples. Here's the definition of Split:
+
+```java
+public class Split extends BaseFunction {
+   public void execute(TridentTuple tuple, TridentCollector collector) {
+       String sentence = tuple.getString(0);
+       for(String word: sentence.split(" ")) {
+           collector.emit(new Values(word));                
+       }
+   }
+}
+```
+
+As you can see, it's really simple. It simply grabs the sentence, splits it on whitespace, and emits a tuple for each word.
+
+The rest of the topology computes word count and keeps the results persistently stored. First the stream is grouped by the "word" field. Then, each group is persistently aggregated using the Count aggregator. The persistentAggregate function knows how to store and update the results of the aggregation in a source of state. In this example, the word counts are kept in memory, but this can be trivially swapped to use Memcached, Cassandra, or any other persistent store. Swapping this topology to store counts in Memcached is as simple as replacing the persistentAggregate line with this (using [trident-memcached](https://github.com/nathanmarz/trident-memcached)), where "serverLocations" is a list of host/ports for the Memcached cluster:
+
+```java
+.persistentAggregate(MemcachedState.transactional(serverLocations), new Count(), new Fields("count"))
+```
+
+The values stored by persistentAggregate represent the aggregation of all batches ever emitted by the stream.
+
+One of the cool things about Trident is that it has fully fault-tolerant, exactly-once processing semantics. This makes it easy to reason about your realtime processing. Trident persists state in a way so that if failures occur and retries are necessary, it won't perform multiple updates to the database for the same source data.
+
+The persistentAggregate method transforms a Stream into a TridentState object. In this case the TridentState object represents all the word counts. We will use this TridentState object to implement the distributed query portion of the computation.
+
+The next part of the topology implements a low latency distributed query on the word counts. The query takes as input a whitespace separated list of words and returns the sum of the counts for those words. These queries are executed just like normal RPC calls, except they are parallelized in the background. Here's an example of how you might invoke one of these queries:
+
+```java
+DRPCClient client = new DRPCClient("drpc.server.location", 3772);
+System.out.println(client.execute("words", "cat dog the man"));
+// prints the JSON-encoded result, e.g.: "[[5078]]"
+```
+
+As you can see, it looks just like a regular remote procedure call (RPC), except it's executing in parallel across a Storm cluster. The latency for small queries like this is typically around 10ms. More intense DRPC queries can take longer of course, although the latency largely depends on how many resources you have allocated for the computation.
+
+The implementation of the distributed query portion of the topology looks like this:
+
+```java
+topology.newDRPCStream("words")
+       .each(new Fields("args"), new Split(), new Fields("word"))
+       .groupBy(new Fields("word"))
+       .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
+       .each(new Fields("count"), new FilterNull())
+       .aggregate(new Fields("count"), new Sum(), new Fields("sum"));
+```
+
+The same TridentTopology object is used to create the DRPC stream, and the function is named "words". The function name corresponds to the function name given in the first argument of execute when using a DRPCClient.
+
+Each DRPC request is treated as its own little batch processing job that takes as input a single tuple representing the request. The tuple contains one field called "args" that contains the argument provided by the client. In this case, the argument is a whitespace separated list of words.
+
+First, the Split function is used to split the arguments for the request into its constituent words. The stream is grouped by "word", and the stateQuery operator is used to query the TridentState object that the first part of the topology generated. stateQuery takes in a source of state – in this case, the word counts computed by the other portion of the topology – and a function for querying that state. In this case, the MapGet function is invoked, which gets the count for each word. Since the DRPC stream is grouped the exact same way as the TridentState was (by the "word" field), each word query is routed to the exact partition of the TridentState object that manages updates for that word.
+
+Next, words that didn't have a count are filtered out via the FilterNull filter and the counts are summed using the Sum aggregator to get the result. Then, Trident automatically sends the result back to the waiting client.
+
+Trident is intelligent about how it executes a topology to maximize performance. There are two interesting things happening automatically in this topology:
+
+1. Operations that read from or write to state (like persistentAggregate and stateQuery) automatically batch operations to that state. So if there are 20 updates that need to be made to the database for the current batch of processing, rather than do 20 read requests and 20 write requests to the database, Trident will automatically batch up the reads and writes, doing only 1 read request and 1 write request (and in many cases, you can use caching in your State implementation to eliminate the read request). So you get the best of both worlds: convenience – being able to express your computation in terms of what should be done with each tuple – and performance. A sketch of such a batch-oriented state interface follows this list.
+2. Trident aggregators are heavily optimized. Rather than transfer all tuples for a group to the same machine and then run the aggregator, Trident will do partial aggregations when possible before sending tuples over the network. For example, the Count aggregator computes the count on each partition, sends the partial count over the network, and then sums together all the partial counts to get the total count. This technique is similar to the use of combiners in MapReduce.
+
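+Point 1 can be made concrete with a sketch. The block below is a hypothetical in-memory implementation of Trident's `IBackingMap` interface; the class name and backing store are illustrative, but the shape of the API is the point: Trident hands the state all of the keys for a batch in one call, which is what lets it turn many per-tuple updates into a single bulk read and a single bulk write.
+
+```java
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import storm.trident.state.map.IBackingMap;
+
+// Hypothetical in-memory store. multiGet/multiPut receive every key for
+// the current batch at once, so a real implementation could issue one
+// bulk query and one bulk write per batch instead of one per key.
+public class InMemoryBackingMap implements IBackingMap<Long> {
+    private final Map<List<Object>, Long> db = new HashMap<List<Object>, Long>();
+
+    public List<Long> multiGet(List<List<Object>> keys) {
+        List<Long> ret = new ArrayList<Long>(keys.size());
+        for (List<Object> key : keys) {
+            ret.add(db.get(key)); // a real store would do one bulk read here
+        }
+        return ret;
+    }
+
+    public void multiPut(List<List<Object>> keys, List<Long> vals) {
+        for (int i = 0; i < keys.size(); i++) {
+            db.put(keys.get(i), vals.get(i)); // likewise, one bulk write
+        }
+    }
+}
+```
+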
+Let's look at another example of Trident.
+
+## Reach
+
+The next example is a pure DRPC topology that computes the reach of a URL on demand. Reach is the number of unique people exposed to a URL on Twitter. To compute reach, you need to fetch all the people who ever tweeted a URL, fetch all the followers of all those people, unique that set of followers, and then count the uniqued set. Computing reach is too intense for a single machine – it can require thousands of database calls and tens of millions of tuples. With Storm and Trident, you can parallelize the computation of each step across a cluster.
+
+This topology will read from two sources of state. One database maps URLs to a list of people who tweeted that URL. The other database maps a person to a list of followers for that person. The topology definition looks like this:
+
+```java
+TridentState urlToTweeters =
+       topology.newStaticState(getUrlToTweetersState());
+TridentState tweetersToFollowers =
+       topology.newStaticState(getTweeterToFollowersState());
+
+topology.newDRPCStream("reach")
+       .stateQuery(urlToTweeters, new Fields("args"), new MapGet(), new Fields("tweeters"))
+       .each(new Fields("tweeters"), new ExpandList(), new Fields("tweeter"))
+       .shuffle()
+       .stateQuery(tweetersToFollowers, new Fields("tweeter"), new MapGet(), new Fields("followers"))
+       .parallelismHint(200)
+       .each(new Fields("followers"), new ExpandList(), new Fields("follower"))
+       .groupBy(new Fields("follower"))
+       .aggregate(new One(), new Fields("one"))
+       .parallelismHint(20)
+       .aggregate(new Count(), new Fields("reach"));
+```
+
+The topology creates TridentState objects representing each external database using the newStaticState method. These can then be queried in the topology. Like all sources of state, queries to these databases will be automatically batched for maximum efficiency.
+
+The topology definition is straightforward – it's just a simple batch processing job. First, the urlToTweeters database is queried to get the list of people who tweeted the URL for this request. That returns a list, so the ExpandList function is invoked to create a tuple for each tweeter.
+
+Next, the followers for each tweeter must be fetched. It's important that this step be parallelized, so shuffle is invoked to evenly distribute the tweeters among all workers for the topology. Then, the followers database is queried to get the list of followers for each tweeter. You can see that this portion of the topology is given a large parallelism since this is the most intense portion of the computation.
+
+Next, the set of followers is uniqued and counted. This is done in two steps. First a "group by" is done on the batch by "follower", running the "One" aggregator on each group. The "One" aggregator simply emits a single tuple containing the number one for each group. Then, the ones are summed together to get the unique count of the followers set. Here's the definition of the "One" aggregator:
+
+```java
+public class One implements CombinerAggregator<Integer> {
+   public Integer init(TridentTuple tuple) {
+       return 1;
+   }
+
+   public Integer combine(Integer val1, Integer val2) {
+       return 1;
+   }
+
+   public Integer zero() {
+       return 1;
+   }        
+}
+```
+
+This is a "combiner aggregator", which knows how to do partial aggregations before transferring tuples over the network to maximize efficiency. Sum is also defined as a combiner aggregator, so the global sum done at the end of the topology will be very efficient.
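+
+For illustration, here is what a Sum-style combiner aggregator could look like. This is a simplified sketch over longs; the real built-in Sum handles arbitrary Numbers.
+
+```java
+public class SimpleSum implements CombinerAggregator<Long> {
+   public Long init(TridentTuple tuple) {
+       return tuple.getLong(0);  // seed a partial sum with the tuple's value
+   }
+
+   public Long combine(Long val1, Long val2) {
+       return val1 + val2;       // partial sums can be merged in any order
+   }
+
+   public Long zero() {
+       return 0L;                // identity value for empty partitions
+   }
+}
+```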
+
+Let's now look at Trident in more detail.
+
+## Fields and tuples
+
+The Trident data model is the TridentTuple, which is a named list of values. As a topology executes, tuples are incrementally built up through a sequence of operations. Operations generally take in a set of input fields and emit a set of "function fields". The input fields are used to select a subset of the tuple as input to the operation, while the "function fields" name the fields the operation emits.
+
+Consider this example. Suppose you have a stream called "stream" that contains the fields "x", "y", and "z". To run a filter MyFilter that takes in "y" as input, you would say:
+
+```java
+stream.each(new Fields("y"), new MyFilter())
+```
+
+Suppose the implementation of MyFilter is this:
+
+```java
+public class MyFilter extends BaseFilter {
+   public boolean isKeep(TridentTuple tuple) {
+       return tuple.getInteger(0) < 10;
+   }
+}
+```
+
+This will keep all tuples whose "y" field is less than 10. The TridentTuple given as input to MyFilter will only contain the "y" field. Note that Trident is able to project a subset of a tuple extremely efficiently when selecting the input fields: the projection is essentially free.
+
+Let's now look at how "function fields" work. Suppose you had this function:
+
+```java
+public class AddAndMultiply extends BaseFunction {
+   public void execute(TridentTuple tuple, TridentCollector collector) {
+       int i1 = tuple.getInteger(0);
+       int i2 = tuple.getInteger(1);
+       collector.emit(new Values(i1 + i2, i1 * i2));
+   }
+}
+```
+
+This function takes two numbers as input and emits two new values: the addition of the numbers and the multiplication of the numbers. Suppose you had a stream with the fields "x", "y", and "z". You would use this function like this:
+
+```java
+stream.each(new Fields("x", "y"), new AddAndMultiply(), new Fields("added", "multiplied"));
+```
+
+The output of functions is additive: the fields are added to the input tuple. So the output of this each call would contain tuples with the five fields "x", "y", "z", "added", and "multiplied". "added" corresponds to the first value emitted by AddAndMultiply, while "multiplied" corresponds to the second value.
+
+With aggregators, on the other hand, the function fields replace the input tuples. So if you had a stream containing the fields "val1" and "val2", and you did this:
+
+```java
+stream.aggregate(new Fields("val2"), new Sum(), new Fields("sum"))
+```
+
+The output stream would only contain a single tuple with a single field called "sum", representing the sum of all "val2" fields in that batch.
+
+With grouped streams, the output will contain the grouping fields followed by the fields emitted by the aggregator. For example:
+
+```java
+stream.groupBy(new Fields("val1"))
+     .aggregate(new Fields("val2"), new Sum(), new Fields("sum"))
+```
+
+In this example, the output will contain the fields "val1" and "sum".
+
+## State
+
+A key problem to solve with realtime computation is how to manage state so that updates are idempotent in the face of failures and retries. It's impossible to eliminate failures, so when a node dies or something else goes wrong, batches need to be retried. The question is – how do you do state updates (whether external databases or state internal to the topology) so that it's as if each message was processed only once?
+
+This is a tricky problem, and can be illustrated with the following example. Suppose that you're doing a count aggregation of your stream and want to store the running count in a database. If you store only the count in the database and it's time to apply a state update for a batch, there's no way to know if you applied that state update before. The batch could have been attempted before, succeeded in updating the database, and then failed at a later step. Or the batch could have been attempted before and failed to update the database. You just don't know.
+
+Trident solves this problem by doing two things:
+
+1. Each batch is given a unique id called the "transaction id". If a batch is retried it will have the exact same transaction id.
+2. State updates are ordered among batches. That is, the state updates for batch 3 won't be applied until the state updates for batch 2 have succeeded.
+
+With these two primitives, you can achieve exactly-once semantics with your state updates. Rather than store just the count in the database, what you can do instead is store the transaction id with the count in the database as an atomic value. Then, when updating the count, you can just compare the transaction id in the database with the transaction id for the current batch. If they're the same, you skip the update – because of the strong ordering, you know for sure that the value in the database incorporates the current batch. If they're different, you increment the count.
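+
+Here is a sketch of that update rule in plain Java, with a hypothetical value class and an in-memory map standing in for the database (none of these names are part of the Trident API):
+
+```java
+import java.util.Map;
+
+public class TransactionalCountUpdater {
+    // The txid and count are stored together so they can be read and
+    // written as one atomic value.
+    public static class TxCount {
+        long txid;
+        long count;
+    }
+
+    public void addToCount(Map<String, TxCount> db, String key, long batchTxid, long delta) {
+        TxCount stored = db.get(key);
+        if (stored != null && stored.txid == batchTxid) {
+            // Same txid: strong ordering guarantees the stored count already
+            // incorporates this batch, so skip the update.
+            return;
+        }
+        TxCount updated = new TxCount();
+        updated.txid = batchTxid;
+        updated.count = (stored == null ? 0 : stored.count) + delta;
+        db.put(key, updated); // must be an atomic write in a real store
+    }
+}
+```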
+
+Of course, you don't have to do this logic manually in your topologies. This logic is wrapped by the State abstraction and done automatically. Nor is your State object required to implement the transaction id trick: if you don't want to pay the cost of storing the transaction id in the database, you don't have to. In that case the State will have at-least-once processing semantics in the case of failures (which may be fine for your application). You can read more about how to implement a State and the various fault-tolerance tradeoffs possible [in this doc](Trident-state.html).
+
+A State is allowed to use whatever strategy it wants to store state. So it could store state in an external database or it could keep the state in-memory but backed by HDFS (like how HBase works). States are not required to hold onto state forever. For example, you could have an in-memory State implementation that only keeps the last X hours of data available and drops anything older. Take a look at the implementation of the [Memcached integration](https://github.com/nathanmarz/trident-memcached/blob/master/src/jvm/trident/memcached/MemcachedState.java) for an example State implementation.
+
+## Execution of Trident topologies
+
+Trident topologies compile down to a Storm topology that is as efficient as possible. Tuples are only sent over the network when a repartitioning of the data is required, such as if you do a groupBy or a shuffle. So if you had this Trident topology:
+
+![Compiling Trident to Storm 1](images/trident-to-storm1.png)
+
+It would compile into Storm spouts/bolts like this:
+
+![Compiling Trident to Storm 2](images/trident-to-storm2.png)
+
+## Conclusion
+
+Trident makes realtime computation elegant. You've seen how high throughput stream processing, state manipulation, and low-latency querying can be seamlessly intermixed via Trident's API. Trident lets you express your realtime computations in a natural way while still getting maximal performance.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Troubleshooting.md
----------------------------------------------------------------------
diff --git a/docs/Troubleshooting.md b/docs/Troubleshooting.md
new file mode 100644
index 0000000..c9df298
--- /dev/null
+++ b/docs/Troubleshooting.md
@@ -0,0 +1,144 @@
+---
+layout: documentation
+---
+## Troubleshooting
+
+This page lists issues people have run into when using Storm along with their solutions.
+
+### Worker processes are crashing on startup with no stack trace
+
+Possible symptoms:
+ 
+ * Topologies work with one node, but workers crash with multiple nodes
+
+Solutions:
+
+ * You may have a misconfigured subnet, where nodes can't locate other nodes based on their hostname. ZeroMQ sometimes crashes the process when it can't resolve a host. There are two solutions:
+  * Make a mapping from hostname to IP address in /etc/hosts (an example follows this list)
+  * Set up an internal DNS so that nodes can locate each other based on hostname.
+  
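+For example, the /etc/hosts entries on each node might look like this (addresses and hostnames are illustrative):
+
+```
+10.0.0.1   nimbus-host
+10.0.0.2   supervisor-1
+10.0.0.3   supervisor-2
+```
+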
+### Nodes are unable to communicate with each other
+
+Possible symptoms:
+
+ * Every spout tuple is failing
+ * Processing is not working
+
+Solutions:
+
+ * Storm doesn't work with IPv6. You can force IPv4 by adding `-Djava.net.preferIPv4Stack=true` to the supervisor child options and restarting the supervisor (see the sketch below).
+ * You may have a misconfigured subnet. See the solutions for `Worker processes are crashing on startup with no stack trace`
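+
+A minimal `storm.yaml` sketch of that setting, assuming you manage the supervisor's JVM options through `supervisor.childopts`:
+
+```
+supervisor.childopts: "-Djava.net.preferIPv4Stack=true"
+```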
+
+### Topology stops processing tuples after a while
+
+Symptoms:
+
+ * Processing works fine for a while, and then suddenly stops and spout tuples start failing en masse.
+ 
+Solutions:
+
+ * This is a known issue with ZeroMQ 2.1.10. Downgrade to ZeroMQ 2.1.7.
+ 
+### Not all supervisors appear in Storm UI
+
+Symptoms:
+ 
+ * Some supervisor processes are missing from the Storm UI
+ * List of supervisors in Storm UI changes on refreshes
+
+Solutions:
+
+ * Make sure the supervisor local dirs are independent (e.g., not sharing a local dir over NFS)
+ * Try deleting the local dirs for the supervisors and restarting the daemons. Supervisors create a unique id for themselves and store it locally. When that id is copied to other nodes, Storm gets confused. 
+
+### "Multiple defaults.yaml found" error
+
+Symptoms:
+
+ * When deploying a topology with "storm jar", you get this error
+
+Solution:
+
+ * You're most likely including the Storm jars inside your topology jar. When packaging your topology jar, don't include the Storm jars as Storm will put those on the classpath for you.
+
+### "NoSuchMethodError" when running storm jar
+
+Symptoms:
+
+ * When running storm jar, you get a cryptic "NoSuchMethodError"
+
+Solution:
+
+ * You're deploying your topology with a different version of Storm than you built it against. Make sure the storm client you use to deploy comes from the same Storm version you compiled your topology against.
+
+
+### Kryo ConcurrentModificationException
+
+Symptoms:
+
+ * At runtime, you get a stack trace like the following:
+
+```
+java.lang.RuntimeException: java.util.ConcurrentModificationException
+	at backtype.storm.utils.DisruptorQueue.consumeBatchToCursor(DisruptorQueue.java:84)
+	at backtype.storm.utils.DisruptorQueue.consumeBatchWhenAvailable(DisruptorQueue.java:55)
+	at backtype.storm.disruptor$consume_batch_when_available.invoke(disruptor.clj:56)
+	at backtype.storm.disruptor$consume_loop_STAR_$fn__1597.invoke(disruptor.clj:67)
+	at backtype.storm.util$async_loop$fn__465.invoke(util.clj:377)
+	at clojure.lang.AFn.run(AFn.java:24)
+	at java.lang.Thread.run(Thread.java:679)
+Caused by: java.util.ConcurrentModificationException
+	at java.util.LinkedHashMap$LinkedHashIterator.nextEntry(LinkedHashMap.java:390)
+	at java.util.LinkedHashMap$EntryIterator.next(LinkedHashMap.java:409)
+	at java.util.LinkedHashMap$EntryIterator.next(LinkedHashMap.java:408)
+	at java.util.HashMap.writeObject(HashMap.java:1016)
+	at sun.reflect.GeneratedMethodAccessor17.invoke(Unknown Source)
+	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
+	at java.lang.reflect.Method.invoke(Method.java:616)
+	at java.io.ObjectStreamClass.invokeWriteObject(ObjectStreamClass.java:959)
+	at java.io.ObjectOutputStream.writeSerialData(ObjectOutputStream.java:1480)
+	at java.io.ObjectOutputStream.writeOrdinaryObject(ObjectOutputStream.java:1416)
+	at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1174)
+	at java.io.ObjectOutputStream.writeObject(ObjectOutputStream.java:346)
+	at backtype.storm.serialization.SerializableSerializer.write(SerializableSerializer.java:21)
+	at com.esotericsoftware.kryo.Kryo.writeClassAndObject(Kryo.java:554)
+	at com.esotericsoftware.kryo.serializers.CollectionSerializer.write(CollectionSerializer.java:77)
+	at com.esotericsoftware.kryo.serializers.CollectionSerializer.write(CollectionSerializer.java:18)
+	at com.esotericsoftware.kryo.Kryo.writeObject(Kryo.java:472)
+	at backtype.storm.serialization.KryoValuesSerializer.serializeInto(KryoValuesSerializer.java:27)
+```
+
+Solution: 
+
+ * This means that you're emitting a mutable object as an output tuple. Everything you emit into the output collector must be immutable. What's happening is that your bolt is modifying the object while it is being serialized to be sent over the network. One way to avoid this is sketched below.
+
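+The following sketch, with illustrative names, emits an immutable snapshot instead of the live object:
+
+```java
+// Instead of emitting the mutable map the bolt keeps updating...
+// _collector.emit(tuple, new Values(liveCounts));
+// ...emit an immutable defensive copy of it:
+Map<String, Integer> snapshot =
+        Collections.unmodifiableMap(new HashMap<String, Integer>(liveCounts));
+_collector.emit(tuple, new Values(snapshot));
+```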
+
+### NullPointerException from deep inside Storm
+
+Symptoms:
+
+ * You get a NullPointerException that looks something like:
+
+```
+java.lang.RuntimeException: java.lang.NullPointerException
+    at backtype.storm.utils.DisruptorQueue.consumeBatchToCursor(DisruptorQueue.java:84)
+    at backtype.storm.utils.DisruptorQueue.consumeBatchWhenAvailable(DisruptorQueue.java:55)
+    at backtype.storm.disruptor$consume_batch_when_available.invoke(disruptor.clj:56)
+    at backtype.storm.disruptor$consume_loop_STAR_$fn__1596.invoke(disruptor.clj:67)
+    at backtype.storm.util$async_loop$fn__465.invoke(util.clj:377)
+    at clojure.lang.AFn.run(AFn.java:24)
+    at java.lang.Thread.run(Thread.java:662)
+Caused by: java.lang.NullPointerException
+    at backtype.storm.serialization.KryoTupleSerializer.serialize(KryoTupleSerializer.java:24)
+    at backtype.storm.daemon.worker$mk_transfer_fn$fn__4126$fn__4130.invoke(worker.clj:99)
+    at backtype.storm.util$fast_list_map.invoke(util.clj:771)
+    at backtype.storm.daemon.worker$mk_transfer_fn$fn__4126.invoke(worker.clj:99)
+    at backtype.storm.daemon.executor$start_batch_transfer__GT_worker_handler_BANG_$fn__3904.invoke(executor.clj:205)
+    at backtype.storm.disruptor$clojure_handler$reify__1584.onEvent(disruptor.clj:43)
+    at backtype.storm.utils.DisruptorQueue.consumeBatchToCursor(DisruptorQueue.java:81)
+    ... 6 more
+```
+
+Solution:
+
+ * This is caused by having multiple threads call methods on the `OutputCollector`. All emits, acks, and fails must happen on the same thread. One subtle way this can happen is if you make an `IBasicBolt` that emits on a separate thread. `IBasicBolt`s automatically ack after `execute` is called, so this would cause multiple threads to use the `OutputCollector`, leading to this exception. When using a basic bolt, all emits must happen in the same thread that runs `execute`.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Tutorial.md
----------------------------------------------------------------------
diff --git a/docs/Tutorial.md b/docs/Tutorial.md
new file mode 100644
index 0000000..73bf9a4
--- /dev/null
+++ b/docs/Tutorial.md
@@ -0,0 +1,310 @@
+---
+layout: documentation
+---
+In this tutorial, you'll learn how to create Storm topologies and deploy them to a Storm cluster. Java will be the main language used, but a few examples will use Python to illustrate Storm's multi-language capabilities.
+
+## Preliminaries
+
+This tutorial uses examples from the [storm-starter](http://github.com/nathanmarz/storm-starter) project. It's recommended that you clone the project and follow along with the examples. Read [Setting up a development environment](Setting-up-development-environment.html) and [Creating a new Storm project](Creating-a-new-Storm-project.html) to get your machine set up. 
+
+## Components of a Storm cluster
+
+A Storm cluster is superficially similar to a Hadoop cluster. Whereas on Hadoop you run "MapReduce jobs", on Storm you run "topologies". "Jobs" and "topologies" themselves are very different -- one key difference is that a MapReduce job eventually finishes, whereas a topology processes messages forever (or until you kill it).
+
+There are two kinds of nodes on a Storm cluster: the master node and the worker nodes. The master node runs a daemon called "Nimbus" that is similar to Hadoop's "JobTracker". Nimbus is responsible for distributing code around the cluster, assigning tasks to machines, and monitoring for failures.
+
+Each worker node runs a daemon called the "Supervisor". The supervisor listens for work assigned to its machine and starts and stops worker processes as necessary based on what Nimbus has assigned to it. Each worker process executes a subset of a topology; a running topology consists of many worker processes spread across many machines.
+
+![Storm cluster](images/storm-cluster.png)
+
+All coordination between Nimbus and the Supervisors is done through a [Zookeeper](http://zookeeper.apache.org/) cluster. Additionally, the Nimbus daemon and Supervisor daemons are fail-fast and stateless; all state is kept in Zookeeper or on local disk. This means you can kill -9 Nimbus or the Supervisors and they'll start back up like nothing happened. This design leads to Storm clusters being incredibly stable.
+
+## Topologies
+
+To do realtime computation on Storm, you create what are called "topologies". A topology is a graph of computation. Each node in a topology contains processing logic, and links between nodes indicate how data should be passed around between nodes.
+
+Running a topology is straightforward. First, you package all your code and dependencies into a single jar. Then, you run a command like the following:
+
+```
+storm jar all-my-code.jar backtype.storm.MyTopology arg1 arg2
+```
+
+This runs the class `backtype.storm.MyTopology` with the arguments `arg1` and `arg2`. The main function of the class defines the topology and submits it to Nimbus. The `storm jar` part takes care of connecting to Nimbus and uploading the jar.
+
+Since topology definitions are just Thrift structs, and Nimbus is a Thrift service, you can create and submit topologies using any programming language. The above example is the easiest way to do it from a JVM-based language. See [Running topologies on a production cluster](Running-topologies-on-a-production-cluster.html) for more information on starting and stopping topologies.
+
+## Streams
+
+The core abstraction in Storm is the "stream". A stream is an unbounded sequence of tuples. Storm provides the primitives for transforming a stream into a new stream in a distributed and reliable way. For example, you may transform a stream of tweets into a stream of trending topics.
+
+The basic primitives Storm provides for doing stream transformations are "spouts" and "bolts". Spouts and bolts have interfaces that you implement to run your application-specific logic.
+
+A spout is a source of streams. For example, a spout may read tuples off of a [Kestrel](http://github.com/nathanmarz/storm-kestrel) queue and emit them as a stream. Or a spout may connect to the Twitter API and emit a stream of tweets.
+
+A bolt consumes any number of input streams, does some processing, and possibly emits new streams. Complex stream transformations, like computing a stream of trending topics from a stream of tweets, require multiple steps and thus multiple bolts. Bolts can do anything from running functions and filtering tuples to doing streaming aggregations, doing streaming joins, talking to databases, and more.
+
+Networks of spouts and bolts are packaged into a "topology" which is the top-level abstraction that you submit to Storm clusters for execution. A topology is a graph of stream transformations where each node is a spout or bolt. Edges in the graph indicate which bolts are subscribing to which streams. When a spout or bolt emits a tuple to a stream, it sends the tuple to every bolt that subscribed to that stream.
+
+![A Storm topology](images/topology.png)
+
+Links between nodes in your topology indicate how tuples should be passed around. For example, if there is a link between Spout A and Bolt B, a link from Spout A to Bolt C, and a link from Bolt B to Bolt C, then every time Spout A emits a tuple, it will send the tuple to both Bolt B and Bolt C. All of Bolt B's output tuples will go to Bolt C as well.
+
+Each node in a Storm topology executes in parallel. In your topology, you can specify how much parallelism you want for each node, and then Storm will spawn that number of threads across the cluster to do the execution.
+
+A topology runs forever, or until you kill it. Storm will automatically reassign any failed tasks. Additionally, Storm guarantees that there will be no data loss, even if machines go down and messages are dropped.
+
+## Data model
+
+Storm uses tuples as its data model. A tuple is a named list of values, and a field in a tuple can be an object of any type. Out of the box, Storm supports all the primitive types, strings, and byte arrays as tuple field values. To use an object of another type, you just need to implement [a serializer](Serialization.html) for the type.
+
+Every node in a topology must declare the output fields for the tuples it emits. For example, this bolt declares that it emits 2-tuples with the fields "double" and "triple":
+
+```java
+public class DoubleAndTripleBolt extends BaseRichBolt {
+    private OutputCollectorBase _collector;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, OutputCollectorBase collector) {
+        _collector = collector;
+    }
+
+    @Override
+    public void execute(Tuple input) {
+        int val = input.getInteger(0);        
+        _collector.emit(input, new Values(val*2, val*3));
+        _collector.ack(input);
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("double", "triple"));
+    }    
+}
+```
+
+The `declareOutputFields` function declares the output fields `["double", "triple"]` for the component. The rest of the bolt will be explained in the upcoming sections.
+
+## A simple topology
+
+Let's take a look at a simple topology to explore the concepts more and see how the code shapes up. Let's look at the `ExclamationTopology` definition from storm-starter:
+
+```java
+TopologyBuilder builder = new TopologyBuilder();        
+builder.setSpout("words", new TestWordSpout(), 10);        
+builder.setBolt("exclaim1", new ExclamationBolt(), 3)
+        .shuffleGrouping("words");
+builder.setBolt("exclaim2", new ExclamationBolt(), 2)
+        .shuffleGrouping("exclaim1");
+```
+
+This topology contains a spout and two bolts. The spout emits words, and each bolt appends the string "!!!" to its input. The nodes are arranged in a line: the spout emits to the first bolt which then emits to the second bolt. If the spout emits the tuples ["bob"] and ["john"], then the second bolt will emit the words ["bob!!!!!!"] and ["john!!!!!!"].
+
+This code defines the nodes using the `setSpout` and `setBolt` methods. These methods take as input a user-specified id, an object containing the processing logic, and the amount of parallelism you want for the node. In this example, the spout is given id "words" and the bolts are given ids "exclaim1" and "exclaim2". 
+
+The object containing the processing logic implements the [IRichSpout](javadocs/backtype/storm/topology/IRichSpout.html) interface for spouts and the [IRichBolt](javadocs/backtype/storm/topology/IRichBolt.html) interface for bolts. 
+
+The last parameter, how much parallelism you want for the node, is optional. It indicates how many threads should execute that component across the cluster. If you omit it, Storm will only allocate one thread for that node.
+
+`setBolt` returns an [InputDeclarer](javadocs/backtype/storm/topology/InputDeclarer.html) object that is used to define the inputs to the Bolt. Here, component "exclaim1" declares that it wants to read all the tuples emitted by component "words" using a shuffle grouping, and component "exclaim2" declares that it wants to read all the tuples emitted by component "exclaim1" using a shuffle grouping. "shuffle grouping" means that tuples should be randomly distributed from the input tasks to the bolt's tasks. There are many ways to group data between components. These will be explained in a few sections.
+
+If you wanted component "exclaim2" to read all the tuples emitted by both component "words" and component "exclaim1", you would write component "exclaim2"'s definition like this:
+
+```java
+builder.setBolt("exclaim2", new ExclamationBolt(), 5)
+            .shuffleGrouping("words")
+            .shuffleGrouping("exclaim1");
+```
+
+As you can see, input declarations can be chained to specify multiple sources for the Bolt.
+
+Let's dig into the implementations of the spouts and bolts in this topology. Spouts are responsible for emitting new messages into the topology. `TestWordSpout` in this topology emits a random word from the list ["nathan", "mike", "jackson", "golda", "bertels"] as a 1-tuple every 100ms. The implementation of `nextTuple()` in TestWordSpout looks like this:
+
+```java
+public void nextTuple() {
+    Utils.sleep(100);
+    final String[] words = new String[] {"nathan", "mike", "jackson", "golda", "bertels"};
+    final Random rand = new Random();
+    final String word = words[rand.nextInt(words.length)];
+    _collector.emit(new Values(word));
+}
+```
+
+As you can see, the implementation is very straightforward.
+
+`ExclamationBolt` appends the string "!!!" to its input. Let's take a look at the full implementation for `ExclamationBolt`:
+
+```java
+public static class ExclamationBolt implements IRichBolt {
+    OutputCollector _collector;
+
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+        _collector = collector;
+    }
+
+    public void execute(Tuple tuple) {
+        _collector.emit(tuple, new Values(tuple.getString(0) + "!!!"));
+        _collector.ack(tuple);
+    }
+
+    public void cleanup() {
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("word"));
+    }
+    
+    public Map getComponentConfiguration() {
+        return null;
+    }
+}
+```
+
+The `prepare` method provides the bolt with an `OutputCollector` that is used for emitting tuples from this bolt. Tuples can be emitted at any time from the bolt -- in the `prepare`, `execute`, or `cleanup` methods, or even asynchronously in another thread. This `prepare` implementation simply saves the `OutputCollector` as an instance variable to be used later on in the `execute` method.
+
+The `execute` method receives a tuple from one of the bolt's inputs. The `ExclamationBolt` grabs the first field from the tuple and emits a new tuple with the string "!!!" appended to it. If you implement a bolt that subscribes to multiple input sources, you can find out which component the [Tuple](javadocs/backtype/storm/tuple/Tuple.html) came from by using the `Tuple#getSourceComponent` method.
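+
+For example, here is a sketch of an `execute` method that branches on its input source (using the component ids from this topology):
+
+```java
+public void execute(Tuple tuple) {
+    if ("words".equals(tuple.getSourceComponent())) {
+        // tuple came directly from the "words" spout
+    } else if ("exclaim1".equals(tuple.getSourceComponent())) {
+        // tuple came from the "exclaim1" bolt
+    }
+    _collector.ack(tuple);
+}
+```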
+
+There are a few other things going on in the `execute` method, namely that the input tuple is passed as the first argument to `emit` and the input tuple is acked on the final line. These are part of Storm's reliability API for guaranteeing no data loss and will be explained later in this tutorial.
+
+The `cleanup` method is called when a Bolt is being shut down and should clean up any resources that were opened. There's no guarantee that this method will be called on the cluster: for example, if the machine the task is running on blows up, there's no way to invoke the method. The `cleanup` method is intended for when you run topologies in [local mode](Local-mode.html) (where a Storm cluster is simulated in process), and you want to be able to run and kill many topologies without suffering any resource leaks.
+
+The `declareOutputFields` method declares that the `ExclamationBolt` emits 1-tuples with one field called "word".
+
+The `getComponentConfiguration` method allows you to configure various aspects of how this component runs. This is a more advanced topic that is explained further on [Configuration](Configuration.html).
+
+Methods like `cleanup` and `getComponentConfiguration` are often not needed in a bolt implementation. You can define bolts more succinctly by using a base class that provides default implementations where appropriate. `ExclamationBolt` can be written more succinctly by extending `BaseRichBolt`, like so:
+
+```java
+public static class ExclamationBolt extends BaseRichBolt {
+    OutputCollector _collector;
+
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+        _collector = collector;
+    }
+
+    public void execute(Tuple tuple) {
+        _collector.emit(tuple, new Values(tuple.getString(0) + "!!!"));
+        _collector.ack(tuple);
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("word"));
+    }    
+}
+```
+
+## Running ExclamationTopology in local mode
+
+Let's see how to run the `ExclamationTopology` in local mode and see that it's working.
+
+Storm has two modes of operation: local mode and distributed mode. In local mode, Storm executes completely in process by simulating worker nodes with threads. Local mode is useful for testing and development of topologies. When you run the topologies in storm-starter, they'll run in local mode and you'll be able to see what messages each component is emitting. You can read more about running topologies in local mode on [Local mode](Local-mode.html).
+
+In distributed mode, Storm operates as a cluster of machines. When you submit a topology to the master, you also submit all the code necessary to run the topology. The master will take care of distributing your code and allocating workers to run your topology. If workers go down, the master will reassign them somewhere else. You can read more about running topologies on a cluster on [Running topologies on a production cluster](Running-topologies-on-a-production-cluster.html).
+
+Here's the code that runs `ExclamationTopology` in local mode:
+
+```java
+Config conf = new Config();
+conf.setDebug(true);
+conf.setNumWorkers(2);
+
+LocalCluster cluster = new LocalCluster();
+cluster.submitTopology("test", conf, builder.createTopology());
+Utils.sleep(10000);
+cluster.killTopology("test");
+cluster.shutdown();
+```
+
+First, the code defines an in-process cluster by creating a `LocalCluster` object. Submitting topologies to this virtual cluster is identical to submitting topologies to distributed clusters. It submits a topology to the `LocalCluster` by calling `submitTopology`, which takes as arguments a name for the running topology, a configuration for the topology, and then the topology itself.
+
+The name is used to identify the topology so that you can kill it later on. A topology will run indefinitely until you kill it.
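+
+On an actual cluster, you would kill a running topology by name with the storm command line client:
+
+```
+storm kill test
+```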
+
+The configuration is used to tune various aspects of the running topology. The two configurations specified here are very common:
+
+1. **TOPOLOGY_WORKERS** (set with `setNumWorkers`) specifies how many _processes_ you want allocated around the cluster to execute the topology. Each component in the topology executes as some number of _threads_. The number of threads allocated to a given component is configured through the `setBolt` and `setSpout` methods. Those _threads_ exist within worker _processes_. Each worker _process_ contains within it some number of _threads_ for some number of components. For instance, you may have 300 threads specified across all your components and 50 worker processes specified in your config. Each worker process will execute 6 threads, each of which could belong to a different component. You tune the performance of Storm topologies by tweaking the parallelism for each component and the number of worker processes those threads should run within.
+2. **TOPOLOGY_DEBUG** (set with `setDebug`), when set to true, tells Storm to log every message emitted by a component. This is useful in local mode when testing topologies, but you probably want to keep this turned off when running topologies on the cluster.
+
+There are many other configurations you can set for the topology. The various configurations are detailed on [the Javadoc for Config](javadocs/backtype/storm/Config.html).
+
+To learn about how to set up your development environment so that you can run topologies in local mode (such as in Eclipse), see [Creating a new Storm project](Creating-a-new-Storm-project.html).
+
+## Stream groupings
+
+A stream grouping tells a topology how to send tuples between two components. Remember, spouts and bolts execute in parallel as many tasks across the cluster. If you look at how a topology is executing at the task level, it looks something like this:
+
+![Tasks in a topology](images/topology-tasks.png)
+
+When a task for Bolt A emits a tuple to Bolt B, which task should it send the tuple to?
+
+A "stream grouping" answers this question by telling Storm how to send tuples between sets of tasks. Before we dig into the different kinds of stream groupings, let's take a look at another topology from [storm-starter](http://github.com/nathanmarz/storm-starter). This [WordCountTopology](https://github.com/nathanmarz/storm-starter/blob/master/src/jvm/storm/starter/WordCountTopology.java) reads sentences off of a spout and streams out of `WordCountBolt` the total number of times it has seen that word before:
+
+```java
+TopologyBuilder builder = new TopologyBuilder();
+        
+builder.setSpout("sentences", new RandomSentenceSpout(), 5);        
+builder.setBolt("split", new SplitSentence(), 8)
+        .shuffleGrouping("sentences");
+builder.setBolt("count", new WordCount(), 12)
+        .fieldsGrouping("split", new Fields("word"));
+```
+
+`SplitSentence` emits a tuple for each word in each sentence it receives, and `WordCount` keeps a map in memory from word to count. Each time `WordCount` receives a word, it updates its state and emits the new word count.
+
+There are a few different kinds of stream groupings.
+
+The simplest kind of grouping is called a "shuffle grouping" which sends the tuple to a random task. A shuffle grouping is used in the `WordCountTopology` to send tuples from `RandomSentenceSpout` to the `SplitSentence` bolt. It has the effect of evenly distributing the work of processing the tuples across all of `SplitSentence` bolt's tasks.
+
+A more interesting kind of grouping is the "fields grouping". A fields grouping is used between the `SplitSentence` bolt and the `WordCount` bolt. It is critical for the functioning of the `WordCount` bolt that the same word always go to the same task. Otherwise, more than one task will see the same word, and they'll each emit incorrect values for the count since each has incomplete information. A fields grouping lets you group a stream by a subset of its fields. This causes equal values for that subset of fields to go to the same task. Since `WordCount` subscribes to `SplitSentence`'s output stream using a fields grouping on the "word" field, the same word always goes to the same task and the bolt produces the correct output.
+
+Fields groupings are the basis of implementing streaming joins and streaming aggregations as well as a plethora of other use cases. Underneath the hood, fields groupings are implemented using mod hashing.
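+
+Conceptually, the mod hashing looks something like the following. This is an illustrative sketch, not Storm's internal code:
+
+```java
+// Tuples with equal values for the grouping fields hash to the same
+// index, so they always land on the same consumer task.
+int chooseTask(List<Object> groupingFieldValues, int numConsumerTasks) {
+    return (groupingFieldValues.hashCode() & Integer.MAX_VALUE) % numConsumerTasks;
+}
+```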
+
+There are a few other kinds of stream groupings. You can read more about them on [Concepts](Concepts.html).
+
+## Defining Bolts in other languages
+
+Bolts can be defined in any language. Bolts written in another language are executed as subprocesses, and Storm communicates with those subprocesses with JSON messages over stdin/stdout. The communication protocol just requires an ~100 line adapter library, and Storm ships with adapter libraries for Ruby, Python, and Fancy. 
+
+Here's the definition of the `SplitSentence` bolt from `WordCountTopology`:
+
+```java
+public static class SplitSentence extends ShellBolt implements IRichBolt {
+    public SplitSentence() {
+        super("python", "splitsentence.py");
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("word"));
+    }
+}
+```
+
+`SplitSentence` extends `ShellBolt` and declares it as running using `python` with the argument `splitsentence.py`. Here's the implementation of `splitsentence.py`:
+
+```python
+import storm
+
+class SplitSentenceBolt(storm.BasicBolt):
+    def process(self, tup):
+        words = tup.values[0].split(" ")
+        for word in words:
+          storm.emit([word])
+
+SplitSentenceBolt().run()
+```
+
+For more information on writing spouts and bolts in other languages, and to learn about how to create topologies in other languages (and avoid the JVM completely), see [Using non-JVM languages with Storm](Using-non-JVM-languages-with-Storm.html).
+
+## Guaranteeing message processing
+
+Earlier on in this tutorial, we skipped over a few aspects of how tuples are emitted. Those aspects were part of Storm's reliability API: how Storm guarantees that every message coming off a spout will be fully processed. See [Guaranteeing message processing](Guaranteeing-message-processing.html) for information on how this works and what you have to do as a user to take advantage of Storm's reliability capabilities.
+
+## Transactional topologies
+
+Storm guarantees that every message will be played through the topology at least once. A common question asked is "how do you do things like counting on top of Storm? Won't you overcount?" Storm has a feature called transactional topologies that lets you achieve exactly-once messaging semantics for most computations. Read more about transactional topologies [here](Transactional-topologies.html).
+
+## Distributed RPC
+
+This tutorial showed how to do basic stream processing on top of Storm. There are lots more things you can do with Storm's primitives. One of the most interesting applications of Storm is Distributed RPC, where you parallelize the computation of intense functions on the fly. Read more about Distributed RPC [here](Distributed-RPC.html).
+
+## Conclusion
+
+This tutorial gave a broad overview of developing, testing, and deploying Storm topologies. The rest of the documentation dives deeper into all the aspects of using Storm.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Understanding-the-parallelism-of-a-Storm-topology.md
----------------------------------------------------------------------
diff --git a/docs/Understanding-the-parallelism-of-a-Storm-topology.md b/docs/Understanding-the-parallelism-of-a-Storm-topology.md
new file mode 100644
index 0000000..adc4c41
--- /dev/null
+++ b/docs/Understanding-the-parallelism-of-a-Storm-topology.md
@@ -0,0 +1,121 @@
+---
+layout: documentation
+---
+# What makes a running topology: worker processes, executors and tasks
+
+Storm distinguishes between the following three main entities that are used to actually run a topology in a Storm cluster:
+
+1. Worker processes
+2. Executors (threads)
+3. Tasks
+
+Here is a simple illustration of their relationships:
+
+![The relationships of worker processes, executors (threads) and tasks in Storm](images/relationships-worker-processes-executors-tasks.png)
+
+A _worker process_ executes a subset of a topology. A worker process belongs to a specific topology and may run one or more executors for one or more components (spouts or bolts) of this topology. A running topology consists of many such processes running on many machines within a Storm cluster.
+
+An _executor_ is a thread that is spawned by a worker process. It may run one or more tasks for the same component (spout or bolt).
+
+A _task_ performs the actual data processing: each spout or bolt that you implement in your code executes as one or more tasks across the cluster. The number of tasks for a component is always the same throughout the lifetime of a topology, but the number of executors (threads) for a component can change over time. This means that the following condition holds true: ``#threads ≤ #tasks``. By default, the number of tasks is set to be the same as the number of executors, i.e. Storm will run one task per thread.
+
+# Configuring the parallelism of a topology
+
+Note that in Storm’s terminology "parallelism" is specifically used to describe the so-called _parallelism hint_, which means the initial number of executors (threads) of a component. In this document though we use the term "parallelism" in a more general sense to describe how you can configure not only the number of executors but also the number of worker processes and the number of tasks of a Storm topology. We will specifically call out when "parallelism" is used in the normal, narrow definition of Storm.
+
+The following sections give an overview of the various configuration options and how to set them in your code. There is more than one way of setting these options though, and the sections below list only some of them. Storm currently has the following [order of precedence for configuration settings](Configuration.html): ``defaults.yaml`` < ``storm.yaml`` < topology-specific configuration < internal component-specific configuration < external component-specific configuration.
+
+## Number of worker processes
+
+* Description: How many worker processes to create _for the topology_ across machines in the cluster.
+* Configuration option: [TOPOLOGY_WORKERS](javadocs/backtype/storm/Config.html#TOPOLOGY_WORKERS)
+* How to set in your code (examples):
+    * [Config#setNumWorkers](javadocs/backtype/storm/Config.html)
+
+## Number of executors (threads)
+
+* Description: How many executors to spawn _per component_.
+* Configuration option: None (pass the ``parallelism_hint`` parameter to ``setSpout()`` or ``setBolt()``)
+* How to set in your code (examples):
+    * [TopologyBuilder#setSpout()](javadocs/backtype/storm/topology/TopologyBuilder.html)
+    * [TopologyBuilder#setBolt()](javadocs/backtype/storm/topology/TopologyBuilder.html)
+    * Note that as of Storm 0.8 the ``parallelism_hint`` parameter now specifies the initial number of executors (not tasks!) for that component.
+
+## Number of tasks
+
+* Description: How many tasks to create _per component_.
+* Configuration option: [TOPOLOGY_TASKS](javadocs/backtype/storm/Config.html#TOPOLOGY_TASKS)
+* How to set in your code (examples):
+    * [ComponentConfigurationDeclarer#setNumTasks()](javadocs/backtype/storm/topology/ComponentConfigurationDeclarer.html)
+
+
+Here is an example code snippet to show these settings in practice:
+
+```java
+topologyBuilder.setBolt("green-bolt", new GreenBolt(), 2)
+               .setNumTasks(4)
+               .shuffleGrouping("blue-spout");
+```
+
+In the above code we configured Storm to run the bolt ``GreenBolt`` with an initial number of two executors and four associated tasks. Storm will run two tasks per executor (thread). If you do not explicitly configure the number of tasks, Storm will run one task per executor by default.
+
+# Example of a running topology
+
+The following illustration shows what a simple topology looks like in operation. The topology consists of three components: one spout called ``BlueSpout`` and two bolts called ``GreenBolt`` and ``YellowBolt``. The components are linked such that ``BlueSpout`` sends its output to ``GreenBolt``, which in turn sends its own output to ``YellowBolt``.
+
+![Example of a running topology in Storm](images/example-of-a-running-topology.png)
+
+The ``GreenBolt`` was configured as per the code snippet above, whereas ``BlueSpout`` and ``YellowBolt`` only set the parallelism hint (number of executors). Here is the relevant code:
+
+```java
+Config conf = new Config();
+conf.setNumWorkers(2); // use two worker processes
+
+topologyBuilder.setSpout("blue-spout", new BlueSpout(), 2); // set parallelism hint to 2
+
+topologyBuilder.setBolt("green-bolt", new GreenBolt(), 2)
+               .setNumTasks(4)
+               .shuffleGrouping("blue-spout");
+
+topologyBuilder.setBolt("yellow-bolt", new YellowBolt(), 6)
+               .shuffleGrouping("green-bolt");
+
+StormSubmitter.submitTopology(
+        "mytopology",
+        conf,
+        topologyBuilder.createTopology()
+    );
+```
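+
+Putting the numbers together, the topology above runs 2 + 2 + 6 = 10 executor threads and 2 + 4 + 6 = 12 tasks, spread across 2 worker processes (on average 5 executors per worker).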
+
+And of course Storm comes with additional configuration settings to control the parallelism of a topology, including:
+
+* [TOPOLOGY_MAX_TASK_PARALLELISM](javadocs/backtype/storm/Config.html#TOPOLOGY_MAX_TASK_PARALLELISM): This setting puts a ceiling on the number of executors that can be spawned for a single component. It is typically used during testing to limit the number of threads spawned when running a topology in local mode. You can set this option via e.g. [Config#setMaxTaskParallelism()](javadocs/backtype/storm/Config.html).
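+
+For example, here is a minimal sketch of capping parallelism while testing in local mode (reusing the ``topologyBuilder`` from the snippets above):
+
+```java
+Config conf = new Config();
+conf.setMaxTaskParallelism(3); // no component in this topology will get more than 3 executors
+
+LocalCluster cluster = new LocalCluster();
+cluster.submitTopology("mytopology", conf, topologyBuilder.createTopology());
+```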
+
+# How to change the parallelism of a running topology
+
+A nifty feature of Storm is that you can increase or decrease the number of worker processes and/or executors without being required to restart the cluster or the topology. The act of doing so is called rebalancing.
+
+You have two options to rebalance a topology:
+
+1. Use the Storm web UI to rebalance the topology.
+2. Use the CLI tool ``storm rebalance`` as described below.
+
+Here is an example of using the CLI tool:
+
+```
+# Reconfigure the topology "mytopology" to use 5 worker processes,
+# the spout "blue-spout" to use 3 executors and
+# the bolt "yellow-bolt" to use 10 executors.
+
+$ storm rebalance mytopology -n 5 -e blue-spout=3 -e yellow-bolt=10
+```
+
+# References for this article
+
+* [Concepts](Concepts.html)
+* [Configuration](Configuration.html)
+* [Running topologies on a production cluster](Running-topologies-on-a-production-cluster.html)
+* [Local mode](Local-mode.html)
+* [Tutorial](Tutorial.html)
+* [Storm API documentation](javadocs/), most notably the class ``Config``
+

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Using-non-JVM-languages-with-Storm.md
----------------------------------------------------------------------
diff --git a/docs/Using-non-JVM-languages-with-Storm.md b/docs/Using-non-JVM-languages-with-Storm.md
new file mode 100644
index 0000000..7b2a2f2
--- /dev/null
+++ b/docs/Using-non-JVM-languages-with-Storm.md
@@ -0,0 +1,52 @@
+---
+layout: documentation
+---
+- two pieces: creating topologies and implementing spouts and bolts in other languages
+- creating topologies in another language is easy since topologies are just thrift structures (link to storm.thrift)
+- implementing spouts and bolts in another language is called a "multilang component" or "shelling"
+   - Here's a specification of the protocol: [Multilang protocol](Multilang-protocol.html)
+   - the thrift structure lets you define multilang components explicitly as a program and a script (e.g., python and the file implementing your bolt)
+   - In Java, you override ShellBolt or ShellSpout to create multilang components
+       - note that output field declarations happen in the thrift structure, so in Java you create multilang components like the following:
+            - declare fields in java, processing code in the other language by specifying it in constructor of shellbolt
+   - multilang uses json messages over stdin/stdout to communicate with the subprocess
+   - storm comes with ruby, python, and fancy adapters that implement the protocol. show an example of python
+      - python supports emitting, anchoring, acking, and logging
+- "storm shell" command makes constructing jar and uploading to nimbus easy
+  - makes jar and uploads it
+  - calls your program with host/port of nimbus and the jarfile id
+
+## Notes on implementing a DSL in a non-JVM language
+
+The right place to start is src/storm.thrift. Since Storm topologies are just Thrift structures, and Nimbus is a Thrift daemon, you can create and submit topologies in any language.
+
+When you create the Thrift structs for spouts and bolts, the code for the spout or bolt is specified in the ComponentObject struct:
+
+```
+union ComponentObject {
+  1: binary serialized_java;
+  2: ShellComponent shell;
+  3: JavaObject java_object;
+}
+```
+
+For a non-JVM DSL, you would want to make use of "2" and "3". ShellComponent lets you specify a script to run that component (e.g., your python code). And JavaObject lets you specify native java spouts and bolts for the component (and Storm will use reflection to create that spout or bolt).
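+
+For reference, the ShellComponent struct in storm.thrift looks roughly like this (see src/storm.thrift for the authoritative definition):
+
+```
+struct ShellComponent {
+  1: string execution_command;
+  2: string script;
+}
+```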
+
+There's a "storm shell" command that will help with submitting a topology. Its usage is like this:
+
+```
+storm shell resources/ python topology.py arg1 arg2
+```
+
+storm shell will then package resources/ into a jar, upload the jar to Nimbus, and call your topology.py script like this:
+
+```
+python topology.py arg1 arg2 {nimbus-host} {nimbus-port} {uploaded-jar-location}
+```
+
+Then you can connect to Nimbus using the Thrift API and submit the topology, passing {uploaded-jar-location} into the submitTopology method. For reference, here's the submitTopology definition:
+
+```
+void submitTopology(1: string name, 2: string uploadedJarLocation, 3: string jsonConf, 4: StormTopology topology)
+    throws (1: AlreadyAliveException e, 2: InvalidTopologyException ite);
+```

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/_config.yml
----------------------------------------------------------------------
diff --git a/docs/_config.yml b/docs/_config.yml
index 14aaa5d..b05bcef 100644
--- a/docs/_config.yml
+++ b/docs/_config.yml
@@ -8,4 +8,11 @@ github_username:  apache/storm
 # Build settings
 markdown: redcarpet
 redcarpet:
-  extensions: ["no_intra_emphasis", "fenced_code_blocks", "autolink", "tables", "with_toc_data"]
\ No newline at end of file
+  extensions: ["no_intra_emphasis", "fenced_code_blocks", "autolink", "tables", "with_toc_data"]
+
+keep_files:   [".git", ".svn"]
+encoding:     "utf-8"
+exclude: 
+  - README.md
+
+storm_release_only: true

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/_includes/footer.html
----------------------------------------------------------------------
diff --git a/docs/_includes/footer.html b/docs/_includes/footer.html
index bf766f4..1696720 100644
--- a/docs/_includes/footer.html
+++ b/docs/_includes/footer.html
@@ -1,16 +1,55 @@
-<hr/>
-<div id="footer" class="container text-center">
-	
-            <p class="text-muted credit"><p>
-Copyright © 2014 <a href="http://www.apache.org">Apache Software Foundation</a>. All Rights Reserved. Apache Storm, Apache, the Apache feather logo, and the Apache Storm project logos are trademarks of The Apache Software Foundation. All other marks mentioned may be trademarks or registered trademarks of their respective owners.</p>
-
-</div>
-
-    <!-- Bootstrap core JavaScript
-    ================================================== -->
-    <!-- Placed at the end of the document so the pages load faster -->
-    <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
-    <script src="/assets/js/bootstrap.min.js"></script>
-    <script src="/assets/js/docs.min.js"></script>
-    <!-- IE10 viewport hack for Surface/desktop Windows 8 bug -->
-    <script src="/assets/js/ie10-viewport-bug-workaround.js"></script>
\ No newline at end of file
+<footer>
+    <div class="container-fluid">
+        <div class="row">
+            <div class="col-md-3">
+                <div class="footer-widget">
+                    <h5>Meetups</h5>
+                    <ul class="latest-news">
+                        {% for meetup in site.data.meetups %}
+                        <li><a href="{{meetup.url}}">{{meetup.title}}</a> <span class="small">({{meetup.location}})</span></li>
+                        {% endfor %}
+                        <!-- <li><a href="http://www.meetup.com/Apache-Storm-Kafka-Users/">Seattle, WA</a> <span class="small">(27 Jun 2015)</span></li> -->
+                    </ul>
+                </div>
+            </div>
+            <div class="col-md-3">
+                <div class="footer-widget">
+                    <h5>About Storm</h5>
+                    <p>Storm integrates with any queueing system and any database system. Storm's spout abstraction makes it easy to integrate a new queuing system. Likewise, integrating Storm with database systems is easy.</p>
+               </div>
+            </div>
+            <div class="col-md-3">
+                <div class="footer-widget">
+                    <h5>First Look</h5>
+                    <ul class="footer-list">
+                        <li><a href="/releases/current/Rationale.html">Rationale</a></li>
+                        <li><a href="/releases/current/tutorial.html">Tutorial</a></li>
+                        <li><a href="/releases/current/Setting-up-development-environment.html">Setting up development environment</a></li>
+                        <li><a href="/releases/current/Creating-a-new-Storm-project.html">Creating a new Storm project</a></li>
+                    </ul>
+                </div>
+            </div>
+            <div class="col-md-3">
+                <div class="footer-widget">
+                    <h5>Documentation</h5>
+                    <ul class="footer-list">
+                        <li><a href="/releases/current/index.html">Index</a></li>
+                        <li><a href="/releases/current/javadocs/index.html">Javadoc</a></li>
+                        <li><a href="/releases/current/FAQ.html">FAQ</a></li>
+                    </ul>
+                </div>
+            </div>
+        </div>
+        <hr/>
+        <div class="row">   
+            <div class="col-md-12">
+                <p align="center">Copyright © 2015 <a href="http://www.apache.org">Apache Software Foundation</a>. All Rights Reserved. 
+                    <br>Apache Storm, Apache, the Apache feather logo, and the Apache Storm project logos are trademarks of The Apache Software Foundation. 
+                    <br>All other marks mentioned may be trademarks or registered trademarks of their respective owners.</p>
+            </div>
+        </div>
+    </div>
+</footer>
+<!--Footer End-->
+<!-- Scroll to top -->
+<span class="totop"><a href="#"><i class="fa fa-angle-up"></i></a></span> 

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/_includes/head.html
----------------------------------------------------------------------
diff --git a/docs/_includes/head.html b/docs/_includes/head.html
index 781492c..8f51c94 100644
--- a/docs/_includes/head.html
+++ b/docs/_includes/head.html
@@ -2,9 +2,9 @@
     <meta charset="utf-8">
     <meta http-equiv="X-UA-Compatible" content="IE=edge">
     <meta name="viewport" content="width=device-width, initial-scale=1">
-    <meta name="description" content="">
-    <meta name="author" content="">
-    <link rel="icon" href="/assets/favicon.ico">
+
+    <link rel="shortcut icon" href="/favicon.ico" type="image/x-icon">
+    <link rel="icon" href="/favicon.ico" type="image/x-icon">
 
     <title>{% if page.title %}{{ page.title }}{% else %}{{ site.title }}{% endif %}</title>
 
@@ -14,14 +14,17 @@
     <link href="/assets/css/bootstrap-theme.min.css" rel="stylesheet">
 
     <!-- Custom styles for this template -->
-    <link href="/assets/css/theme.css" rel="stylesheet">
-	
-	<link href="/css/main.css" rel="stylesheet">
-
+    <link rel="stylesheet" href="http://fortawesome.github.io/Font-Awesome/assets/font-awesome/css/font-awesome.css">
+    <link href="/css/style.css" rel="stylesheet">
+    <link href="/assets/css/owl.theme.css" rel="stylesheet">
+    <link href="/assets/css/owl.carousel.css" rel="stylesheet">
+    <script type="text/javascript" src="/assets/js/jquery.min.js"></script>
+    <script type="text/javascript" src="/assets/js/bootstrap.min.js"></script>
+    <script type="text/javascript" src="/assets/js/owl.carousel.min.js"></script>
+    <script type="text/javascript" src="/assets/js/storm.js"></script>
     <!-- Just for debugging purposes. Don't actually copy these 2 lines! -->
     <!--[if lt IE 9]><script src="../../assets/js/ie8-responsive-file-warning.js"></script><![endif]-->
-    <script src="/assets/js/ie-emulation-modes-warning.js"></script>
-
+    
     <!-- HTML5 shim and Respond.js for IE8 support of HTML5 elements and media queries -->
     <!--[if lt IE 9]>
       <script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script>

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/_includes/header.html
----------------------------------------------------------------------
diff --git a/docs/_includes/header.html b/docs/_includes/header.html
index 78b0ff1..b9a2b03 100644
--- a/docs/_includes/header.html
+++ b/docs/_includes/header.html
@@ -1,32 +1,59 @@
-    <!-- Fixed navbar -->
-    <nav class="navbar navbar-default navbar-fixed-top" role="navigation">
-      <div class="container">
-        <div class="navbar-header">
-          <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false" aria-controls="navbar">
-            <span class="sr-only">Toggle navigation</span>
-            <span class="icon-bar"></span>
-            <span class="icon-bar"></span>
-            <span class="icon-bar"></span>
-          </button>
-          <a class="navbar-brand" href="/">Apache Storm<!-- <img alt="Apache Storm" src="/images/storm_logo_tagline_color.png"></a> -->
+<header>
+  <div class="container-fluid">
+     <div class="row">
+          <div class="col-md-5">
+            <a href="/index.html"><img src="/images/logo.png" class="logo" /></a>
+          </div>
+          <div class="col-md-5">
+            {% if page.version %}
+              <h1>Version: {{page.version}}</h1>
+            {% endif %}
+          </div>
+          <div class="col-md-2">
+            <a href="/releases.html" class="btn-std btn-block btn-download">Download</a>
+          </div>
         </div>
-        <div id="navbar" class="navbar-collapse collapse">
+    </div>
+</header>
+<!--Header End-->
+<!--Navigation Begin-->
+<div class="navbar" role="banner">
+  <div class="container-fluid">
+      <div class="navbar-header">
+          <button class="navbar-toggle" type="button" data-toggle="collapse" data-target=".bs-navbar-collapse">
+                <span class="icon-bar"></span>
+                <span class="icon-bar"></span>
+                <span class="icon-bar"></span>
+            </button>
+        </div>
+        <nav class="collapse navbar-collapse bs-navbar-collapse" role="navigation">
           <ul class="nav navbar-nav">
-            <li><a href="/">Home</a></li>
-            <li class="dropdown">
-              <a href="#" class="dropdown-toggle" data-toggle="dropdown" role="button" aria-expanded="false">About<span class="caret"></span></a>
-              <ul class="dropdown-menu" role="menu">
-                <li><a href="/about/integrates.html">About</a></li>
-                <li><a href="/documentation/Contributing-to-Storm.html">Contributing</a></li>
-                <li><a href="/documentation/BYLAWS.html">Bylaws</a></li>
-              </ul>
-            </li>
-            <li><a href="/downloads.html">Download</a></li>
-            <li><a href="/documentation/Home.html">Documentation</a></li>
-            <li><a href="/news.html">News</a></li>
-          </ul>
-        </div><!--/.nav-collapse -->
-      </div>
-    </nav>
+              <li><a href="/index.html" id="home">Home</a></li>
+                <li><a href="/getting-help.html" id="getting-help">Getting Help</a></li>
+                <li><a href="/about/integrates.html" id="project-info">Project Information</a></li>
+                <li class="dropdown">
+                    <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="documentation">Documentation <b class="caret"></b></a>
+                    <ul class="dropdown-menu">
+                      {% for r in site.data.releases %}
+                        {% if r.documented %}
+                          <li><a href="/releases/{{r.name}}/index.html">{{r.name}}</a></li>
+                        {% endif %}
+                      {% endfor %}
+                    </ul>
+                </li>
+                <li><a href="/talksAndVideos.html">Talks and Slideshows</a></li>
+                <li class="dropdown">
+                    <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="contribute">Community <b class="caret"></b></a>
+                    <ul class="dropdown-menu">
+                        <li><a href="/contribute/Contributing-to-Storm.html">Contributing</a></li>
+                        <li><a href="/contribute/People.html">People</a></li>
+                        <li><a href="/contribute/BYLAWS.html">ByLaws</a></li>
+                    </ul>
+                </li>
+                <li><a href="{{ site.posts[0].url }}" id="news">News</a></li>
+            </ul>
+        </nav>
+    </div>
+</div>
 
 

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/_layouts/about.html
----------------------------------------------------------------------
diff --git a/docs/_layouts/about.html b/docs/_layouts/about.html
index 28b958b..7ca3e79 100644
--- a/docs/_layouts/about.html
+++ b/docs/_layouts/about.html
@@ -1,11 +1,8 @@
 ---
 layout: default
-title: About Storm
+title: Project Information
 items:
     - 
-      - "/about/integrates.html"
-      - "Integrates"
-    - 
       - "/about/simple-api.html"
       - "Simple API"
     - 
@@ -27,14 +24,20 @@ items:
       - "/about/free-and-open-source.html"
       - "Free and open source"
 ---
+<div class="download-block">
+    <div class="row">
+          <div class="col-md-3">
+          <ul class="news">
+          {% for post in page.items %}
+                      <li>
+                      <a href="{{ post[0] }}">{{ post[1] }}</a>
+                      </li>
+          {% endfor %}
+          </ul>
+          </div>
+          <div class="col-md-9">
+                    {{ content }}
 
-<div id="sidebar">
-  <ul>
-  {% for post in page.items %}
-<li><a class="{% if page.url == post[0] then %}current{% else %}{% endif %}" href="{{ post[0] }}">{{ post[1] }}</a></li>
-  {% endfor %}
-  </ul>
-</div>
-<div id="aboutcontent">
-{{ content }}
-</div>
+        </div>
+    </div>
+</div>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/_layouts/default.html
----------------------------------------------------------------------
diff --git a/docs/_layouts/default.html b/docs/_layouts/default.html
index de13299..80b404e 100644
--- a/docs/_layouts/default.html
+++ b/docs/_layouts/default.html
@@ -1,19 +1,18 @@
 <!DOCTYPE html>
 <html>
-
   {% include head.html %}
-
   <body>
-    <div class="container">
     {% include header.html %}
-
-      
-        {{ content }}
-      
-
-    {% include footer.html %}
-    </div>
-	
-  </body>
+    <div class="container-fluid">
+    <h1 class="page-title">{{ page.title }}</h1>
+          <div class="row">
+           	<div class="col-md-12">
+	             {{ content }}
+	          </div>
+	       </div>
+	  </div>
+{% include footer.html %}
+</body>
 
 </html>
+

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/_layouts/documentation.html
----------------------------------------------------------------------
diff --git a/docs/_layouts/documentation.html b/docs/_layouts/documentation.html
index 6d175a3..81cc09f 100644
--- a/docs/_layouts/documentation.html
+++ b/docs/_layouts/documentation.html
@@ -2,15 +2,8 @@
 layout: default
 ---
 <!-- Documentation -->
-<!-- <div class="container"> -->
 
-  <header class="post-header">
-    <h1 class="post-title">{{ page.title }}</h1>
-    <p class="post-meta">{{ page.date | date: "%b %-d, %Y" }}{% if page.author %} by {{ page.author }}{% endif %}{% if page.meta %} • {{ page.meta }}{% endif %}</p>
-  </header>
+<p class="post-meta">{{ page.date | date: "%b %-d, %Y" }}{% if page.author %} by {{ page.author }}{% endif %}{% if page.meta %} • {{ page.meta }}{% endif %}</p>
 
-  <article class="post-content">
-    {{ content }}
-  </article>
+{{ content }}
 
-<!-- </div> -->

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/_layouts/page.html
----------------------------------------------------------------------
diff --git a/docs/_layouts/page.html b/docs/_layouts/page.html
index 74c1a11..e230861 100644
--- a/docs/_layouts/page.html
+++ b/docs/_layouts/page.html
@@ -1,14 +1,5 @@
 ---
 layout: default
 ---
-<div class="post">
-
-  <header class="post-header">
-    <h1 class="post-title">{{ page.title }}</h1>
-  </header>
-
-  <article class="post-content">
     {{ content }}
-  </article>
 
-</div>

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/_layouts/post.html
----------------------------------------------------------------------
diff --git a/docs/_layouts/post.html b/docs/_layouts/post.html
index 932ce28..5080868 100644
--- a/docs/_layouts/post.html
+++ b/docs/_layouts/post.html
@@ -1,15 +1,61 @@
----
-layout: default
----
-<div class="container">
+<!DOCTYPE html>
+<html>
 
-  <header class="post-header">
-    <h1 class="post-title">{{ page.title }}</h1>
-    <p class="text-muted credit">Posted on {{ page.date | date: "%b %-d, %Y" }}{% if page.author %} by {{ page.author }}{% endif %}{% if page.meta %} • {{ page.meta }}{% endif %}</p>
-  </header>
+  {% include head.html %}
 
-  <article class="post-content">
-    {{ content }}
-  </article>
+    <body>
+
+    {% include header.html %}
+    <div class="container-fluid">
+        <div class="row">
+            <div class="col-md-12">
+                <div class="row">
+                    <div class="col-md-3">
+                        <ul class="news" id="news-list">
+                            {% for post in site.posts %}
+                      		<li><a href="{{ post.url }}">{{ post.title }}</a></li>
+                    		{% endfor %}
+                        </ul>
+                    </div>
+                    <div class="col-md-9" id="news-content">
+                            <h1 class="page-title">
+                               {{ page.title }}
+                            </h1>
+                                
+                            <div class="row" style="margin: -15px;">
+                                <div class="col-md-12">
+                                    <p class="text-muted credit pull-left">Posted on {{ page.date | date: "%b %-d, %Y" }}{% if page.author %} by {{ page.author }}{% endif %}{% if page.meta %} • {{ page.meta }}{% endif %}</p>
+                                    <div class="pull-right">
+                                        <a 
+                                                href="https://twitter.com/share" 
+                                                class="twitter-share-button"
+                                                data-count=none
+                                        >Tweet</a>
+                                        <script> !function(d,s,id){
+                                                var js,
+                                                fjs=d.getElementsByTagName(s)[0],
+                                                p=/^http:/.test(d.location)?'http':'https';
+                                                if(!d.getElementById(id)){
+                                                    js=d.createElement(s);
+                                                    js.id=id;
+                                                    js.src=p+'://platform.twitter.com/widgets.js';
+                                                    fjs.parentNode.insertBefore(js,fjs);
+                                                }
+                                            }(document, 'script', 'twitter-wjs');
+                                        </script>
+                                    </div>
+                                </div>
+                            </div>
+                        <div>
+                	        {{ content }}
+                	    </div>
+                    </div>
+                </div>
+            </div>
+        </div>
+    </div>
+    {% include footer.html %}
+    </body>
+
+</html>
 
-</div>

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/_plugins/releases.rb
----------------------------------------------------------------------
diff --git a/docs/_plugins/releases.rb b/docs/_plugins/releases.rb
new file mode 100644
index 0000000..f28ccd2
--- /dev/null
+++ b/docs/_plugins/releases.rb
@@ -0,0 +1,84 @@
+module Releases
+  class Generator < Jekyll::Generator
+    def dir_to_releasename(dir)
+      ret = nil
+      splitdir = dir.split("/").select{ |a| a != ""};
+      if (splitdir[0] == 'releases')
+        ret = splitdir[1]
+        if (ret == 'current')
+          ret = File.readlink(splitdir.join("/")).split("/")[-1]
+        end
+      end
+      return ret
+    end
+
+    def set_if_unset(hash, key, value)
+      hash[key] = hash[key] || value;
+    end
+
+    def parse_version(version_string)
+      return version_string.split('.').map{|e| e.to_i}
+    end
+
+    def release_from_pom()
+      text= `mvn -f ../pom.xml help:evaluate -Dexpression=project.version`
+      return text.split("\n").select{|a| !a.start_with?('[')}[0]
+    end
+
+    def branch_from_git()
+      # strip the trailing newline so the branch name can be embedded in URLs
+      return `git rev-parse --abbrev-ref HEAD`.strip
+    end
+
+    def generate(site)
+      if site.config['storm_release_only']
+        release_name = release_from_pom()
+        puts "release: #{release_name}"
+        git_branch = branch_from_git()
+        puts "branch: #{git_branch}"
+        for page in site.pages do
+          page.data['version'] = release_name;
+          page.data['git-tree-base'] = "http://github.com/apache/storm/tree/#{git_branch}"
+          page.data['git-blob-base'] = "http://github.com/apache/storm/blob/#{git_branch}"
+        end
+        return
+      end
+
+      releases = Hash.new
+      if (site.data['releases'])
+        for rel_data in site.data['releases'] do
+          releases[rel_data['name']] = rel_data
+        end
+      end
+
+      for page in site.pages do
+        release_name = dir_to_releasename(page.dir)
+        if (release_name != nil)
+          if !releases.has_key?(release_name)
+            releases[release_name] = {'name' => release_name};
+          end
+          releases[release_name]['documented'] = true
+        end
+      end
+
+      releases.each { |release_name, release_data|
+          set_if_unset(release_data, 'git-tag-or-branch', "v#{release_data['name']}")
+          set_if_unset(release_data, 'git-tree-base', "http://github.com/apache/storm/tree/#{release_data['git-tag-or-branch']}")
+          set_if_unset(release_data, 'git-blob-base', "http://github.com/apache/storm/blob/#{release_data['git-tag-or-branch']}")
+          set_if_unset(release_data, 'base-name', "apache-storm-#{release_data['name']}")
+          set_if_unset(release_data, 'has-download', !release_name.end_with?('-SNAPSHOT'))
+      }
+
+      for page in site.pages do
+        release_name = dir_to_releasename(page.dir)
+        if (release_name != nil)
+          release_data = releases[release_name] 
+          page.data['version'] = release_name;
+          page.data['git-tree-base'] = release_data['git-tree-base'];
+          page.data['git-blob-base'] = release_data['git-blob-base'];
+        end
+      end
+      site.data['releases'] = releases.values.sort{|x,y| parse_version(y['name']) <=>
+                                                         parse_version(x['name'])};
+    end
+  end
+end


[14/16] storm git commit: STORM-1617: 0.10.x release docs

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Trident-API-Overview.md
----------------------------------------------------------------------
diff --git a/docs/Trident-API-Overview.md b/docs/Trident-API-Overview.md
index 3b68645..4dec1e3 100644
--- a/docs/Trident-API-Overview.md
+++ b/docs/Trident-API-Overview.md
@@ -1,7 +1,8 @@
 ---
+title: Trident API Overview
 layout: documentation
+documentation: true
 ---
-# Trident API overview
 
 The core data model in Trident is the "Stream", processed as a series of batches. A stream is partitioned among the nodes in the cluster, and operations applied to a stream are applied in parallel across each partition.
 
@@ -58,7 +59,7 @@ The resulting tuples would have fields ["a", "b", "c", "d"] and look like this:
 Filters take in a tuple as input and decide whether or not to keep that tuple. Suppose you had this filter:
 
 ```java
-public class MyFilter extends BaseFunction {
+public class MyFilter extends BaseFilter {
     public boolean isKeep(TridentTuple tuple) {
         return tuple.getInteger(0) == 1 && tuple.getInteger(1) == 2;
     }
@@ -76,14 +77,227 @@ Now suppose you had these tuples with fields ["a", "b", "c"]:
 If you ran this code:
 
 ```java
-mystream.each(new Fields("b", "a"), new MyFilter())
+mystream.filter(new MyFilter())
 ```
 
 The resulting tuples would be:
 
 ```
-[2, 1, 1]
+[1, 2, 3]
+```
+
+### map and flatMap
+
+`map` returns a stream consisting of the result of applying the given mapping function to the tuples of the stream. This
+can be used to apply a one-to-one transformation to the tuples.
+
+For example, if there is a stream of words and you wanted to convert it to a stream of upper case words,
+you could define a mapping function as follows,
+
+```java
+public class UpperCase implements MapFunction {
+    @Override
+    public Values execute(TridentTuple input) {
+        return new Values(input.getString(0).toUpperCase());
+    }
+}
+```
+
+The mapping function can then be applied on the stream to produce a stream of uppercase words.
+
+```java
+mystream.map(new UpperCase())
+```
+
+`flatMap` is similar to `map` but has the effect of applying a one-to-many transformation to the values of the stream,
+and then flattening the resulting elements into a new stream.
+
+For example, if there is a stream of sentences and you wanted to convert it to a stream of words,
+you could define a flatMap function as follows,
+
+```java
+public class Split implements FlatMapFunction {
+  @Override
+  public Iterable<Values> execute(TridentTuple input) {
+    List<Values> valuesList = new ArrayList<>();
+    for (String word : input.getString(0).split(" ")) {
+      valuesList.add(new Values(word));
+    }
+    return valuesList;
+  }
+}
+```
+
+The flatMap function can then be applied on the stream of sentences to produce a stream of words,
+
+```java
+mystream.flatMap(new Split())
+```
+
+Of course these operations can be chained, so a stream of uppercase words can be obtained from a stream of sentences as follows,
+
+```java
+mystream.flatMap(new Split()).map(new UpperCase())
+```
+### peek
+`peek` can be used to perform an additional action on each trident tuple as it flows through the stream.
+This can be useful for debugging, to see the tuples as they flow past a certain point in a pipeline.
+
+For example, the code below would print the result of converting the words to uppercase before they are passed to `groupBy`:
+```java
+ mystream.flatMap(new Split()).map(new UpperCase())
+         .peek(new Consumer() {
+                @Override
+                public void accept(TridentTuple input) {
+                  System.out.println(input.getString(0));
+                }
+         })
+         .groupBy(new Fields("word"))
+         .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
+```
+
+### min and minBy
+The `min` and `minBy` operations return the minimum value on each partition of a batch of tuples in a trident stream.
+
+Suppose, a trident stream contains fields ["device-id", "count"] and the following partitions of tuples
+
+```
+Partition 0:
+[123, 2]
+[113, 54]
+[23,  28]
+[237, 37]
+[12,  23]
+[62,  17]
+[98,  42]
+
+Partition 1:
+[64,  18]
+[72,  54]
+[2,   28]
+[742, 71]
+[98,  45]
+[62,  12]
+[19,  174]
+
+
+Partition 2:
+[27,  94]
+[82,  23]
+[9,   86]
+[53,  71]
+[74,  37]
+[51,  49]
+[37,  98]
+
+```
+
+The `minBy` operation can be applied on the above stream of tuples as shown below, which results in emitting the tuple with the minimum value of the `count` field in each partition.
+
+``` java
+  mystream.minBy(new Fields("count"))
 ```
+The result of the above code on the mentioned partitions is:
+
+```
+Partition 0:
+[123, 2]
+
+
+Partition 1:
+[62,  12]
+
+
+Partition 2:
+[82,  23]
+
+```
+
+You can look at the other `min` and `minBy` overloads on Stream:
+``` java
+      public <T> Stream minBy(String inputFieldName, Comparator<T> comparator) 
+      public Stream min(Comparator<TridentTuple> comparator) 
+```
+The example below shows how these APIs can be used to find the minimum using the respective Comparators on a tuple.
+
+``` java
+
+        FixedBatchSpout spout = new FixedBatchSpout(allFields, 10, Vehicle.generateVehicles(20));
+
+        TridentTopology topology = new TridentTopology();
+        Stream vehiclesStream = topology.newStream("spout1", spout).
+                each(allFields, new Debug("##### vehicles"));
+                
+        Stream slowVehiclesStream =
+                vehiclesStream
+                        .min(new SpeedComparator()) // Comparator w.r.t speed on received tuple.
+                        .each(vehicleField, new Debug("#### slowest vehicle"));
+
+        vehiclesStream
+                .minBy(Vehicle.FIELD_NAME, new EfficiencyComparator()) // Comparator w.r.t efficiency on received tuple.
+                .each(vehicleField, new Debug("#### least efficient vehicle"));
+
+```
+Example applications of these APIs can be located at [TridentMinMaxOfDevicesTopology](https://github.com/apache/storm/blob/master/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfDevicesTopology.java) and [TridentMinMaxOfVehiclesTopology](https://github.com/apache/storm/blob/master/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfVehiclesTopology.java) 
+
+### max and maxBy
+The `max` and `maxBy` operations return the maximum value on each partition of a batch of tuples in a trident stream.
+
+Suppose, a trident stream contains fields ["device-id", "count"] as mentioned in the above section.
+
+The `max` and `maxBy` operations can be applied on the above stream of tuples as shown below, which results in emitting the tuple with the maximum value of the `count` field for each partition.
+
+``` java
+  mystream.maxBy(new Fields("count"))
+```
+The result of the above code on the mentioned partitions is:
+
+```
+Partition 0:
+[113, 54]
+
+
+Partition 1:
+[19,  174]
+
+
+Partition 2:
+[37,  98]
+
+```
+
+You can look at the other `max` and `maxBy` overloads on Stream:
+
+``` java
+
+      public <T> Stream maxBy(String inputFieldName, Comparator<T> comparator) 
+      public Stream max(Comparator<TridentTuple> comparator) 
+      
+```
+
+The example below shows how these APIs can be used to find the maximum using the respective Comparators on a tuple.
+
+``` java
+
+        FixedBatchSpout spout = new FixedBatchSpout(allFields, 10, Vehicle.generateVehicles(20));
+
+        TridentTopology topology = new TridentTopology();
+        Stream vehiclesStream = topology.newStream("spout1", spout).
+                each(allFields, new Debug("##### vehicles"));
+
+        vehiclesStream
+                .max(new SpeedComparator()) // Comparator w.r.t speed on received tuple.
+                .each(vehicleField, new Debug("#### fastest vehicle"))
+                .project(driverField)
+                .each(driverField, new Debug("##### fastest driver"));
+        
+        vehiclesStream
+                .maxBy(Vehicle.FIELD_NAME, new EfficiencyComparator()) // Comparator w.r.t efficiency on received tuple.
+                .each(vehicleField, new Debug("#### most efficient vehicle"));
+
+```
+
+Example applications of these APIs can be located at [TridentMinMaxOfDevicesTopology](https://github.com/apache/storm/blob/master/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfDevicesTopology.java) and [TridentMinMaxOfVehiclesTopology](https://github.com/apache/storm/blob/master/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfVehiclesTopology.java) 
 
 ### partitionAggregate
 
@@ -153,7 +367,7 @@ public class Count implements CombinerAggregator<Long> {
 }
 ```
 
-The benefits of CombinerAggregators are seen when you use the with the aggregate method instead of partitionAggregate. In that case, Trident automatically optimizes the computation by doing partial aggregations before transferring tuples over the network.
+The benefits of CombinerAggregators are seen when you use them with the aggregate method instead of partitionAggregate. In that case, Trident automatically optimizes the computation by doing partial aggregations before transferring tuples over the network.
 
 A ReducerAggregator has the following interface:
 
@@ -269,7 +483,7 @@ mystream.aggregate(new Count(), new Fields("count"))
 
 Like partitionAggregate, aggregators for aggregate can be chained. However, if you chain a CombinerAggregator with a non-CombinerAggregator, Trident is unable to do the partial aggregation optimization.
 
-You can read more about how to use persistentAggregate in the [Trident state doc](https://github.com/apache/incubator-storm/wiki/Trident-state).
+You can read more about how to use persistentAggregate in the [Trident state doc](Trident-state.html).
 
 ## Operations on grouped streams
 
@@ -277,7 +491,7 @@ The groupBy operation repartitions the stream by doing a partitionBy on the spec
 
 ![Grouping](images/grouping.png)
 
-If you run aggregators on a grouped stream, the aggregation will be run within each group instead of against the whole batch. persistentAggregate can also be run on a GroupedStream, in which case the results will be stored in a [MapState](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/storm/trident/state/map/MapState.java) with the key being the grouping fields. You can read more about persistentAggregate in the [Trident state doc](Trident-state.html).
+If you run aggregators on a grouped stream, the aggregation will be run within each group instead of against the whole batch. persistentAggregate can also be run on a GroupedStream, in which case the results will be stored in a [MapState]({{page.git-blob-base}}/storm-core/src/jvm/storm/trident/state/map/MapState.java) with the key being the grouping fields. You can read more about persistentAggregate in the [Trident state doc](Trident-state.html).
 
 Like regular streams, aggregators on grouped streams can be chained.
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Trident-spouts.md
----------------------------------------------------------------------
diff --git a/docs/Trident-spouts.md b/docs/Trident-spouts.md
index 92330a7..ab7de89 100644
--- a/docs/Trident-spouts.md
+++ b/docs/Trident-spouts.md
@@ -1,5 +1,7 @@
 ---
+title: Trident Spouts
 layout: documentation
+documentation: true
 ---
 # Trident spouts
 
@@ -32,10 +34,10 @@ Even while processing multiple batches simultaneously, Trident will order any st
 
 Here are the spout APIs available:
 
-1. [ITridentSpout](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/storm/trident/spout/ITridentSpout.java): The most general API that can support transactional or opaque transactional semantics. Generally you'll use one of the partitioned flavors of this API rather than this one directly.
-2. [IBatchSpout](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/storm/trident/spout/IBatchSpout.java): A non-transactional spout that emits batches of tuples at a time
-3. [IPartitionedTridentSpout](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/storm/trident/spout/IPartitionedTridentSpout.java): A transactional spout that reads from a partitioned data source (like a cluster of Kafka servers)
-4. [IOpaquePartitionedTridentSpout](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/storm/trident/spout/IOpaquePartitionedTridentSpout.java): An opaque transactional spout that reads from a partitioned data source
+1. [ITridentSpout]({{page.git-blob-base}}/storm-core/src/jvm/storm/trident/spout/ITridentSpout.java): The most general API that can support transactional or opaque transactional semantics. Generally you'll use one of the partitioned flavors of this API rather than this one directly.
+2. [IBatchSpout]({{page.git-blob-base}}/storm-core/src/jvm/storm/trident/spout/IBatchSpout.java): A non-transactional spout that emits batches of tuples at a time
+3. [IPartitionedTridentSpout]({{page.git-blob-base}}/storm-core/src/jvm/storm/trident/spout/IPartitionedTridentSpout.java): A transactional spout that reads from a partitioned data source (like a cluster of Kafka servers)
+4. [IOpaquePartitionedTridentSpout]({{page.git-blob-base}}/storm-core/src/jvm/storm/trident/spout/IOpaquePartitionedTridentSpout.java): An opaque transactional spout that reads from a partitioned data source
 
 And, as mentioned at the beginning of this tutorial, you can use regular IRichSpouts as well.
  

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Trident-state.md
----------------------------------------------------------------------
diff --git a/docs/Trident-state.md b/docs/Trident-state.md
index 2ace8c8..af779e4 100644
--- a/docs/Trident-state.md
+++ b/docs/Trident-state.md
@@ -1,7 +1,8 @@
 ---
+title: Trident State
 layout: documentation
 ---
-# State in Trident
+
 
 Trident has first-class abstractions for reading from and writing to stateful sources. The state can either be internal to the topology – e.g., kept in-memory and backed by HDFS – or externally stored in a database like Memcached or Cassandra. There's no difference in the Trident API for either case.
 
@@ -27,7 +28,7 @@ Remember, Trident processes tuples as small batches with each batch being given
 2. There's no overlap between batches of tuples (tuples are in one batch or another, never multiple).
 3. Every tuple is in a batch (no tuples are skipped)
 
-This is a pretty easy type of spout to understand, the stream is divided into fixed batches that never change. storm-contrib has [an implementation of a transactional spout](https://github.com/nathanmarz/storm-contrib/blob/{{page.version}}/storm-kafka/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java) for Kafka.
+This is a pretty easy type of spout to understand, the stream is divided into fixed batches that never change. storm-contrib has [an implementation of a transactional spout]({{page.git-tree-base}}/external/storm-kafka/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java) for Kafka.
 
 You might be wondering – why wouldn't you just always use a transactional spout? They're simple and easy to understand. One reason you might not use one is because they're not necessarily very fault-tolerant. For example, the way TransactionalTridentKafkaSpout works is the batch for a txid will contain tuples from all the Kafka partitions for a topic. Once a batch has been emitted, any time that batch is re-emitted in the future the exact same set of tuples must be emitted to meet the semantics of transactional spouts. Now suppose a batch is emitted from TransactionalTridentKafkaSpout, the batch fails to process, and at the same time one of the Kafka nodes goes down. You're now incapable of replaying the same batch as you did before (since the node is down and some partitions for the topic are now unavailable), and processing will halt. 
 
@@ -71,7 +72,7 @@ As described before, an opaque transactional spout cannot guarantee that the bat
 
 1. Every tuple is *successfully* processed in exactly one batch. However, it's possible for a tuple to fail to process in one batch and then succeed to process in a later batch.
 
-[OpaqueTridentKafkaSpout](https://github.com/nathanmarz/storm-contrib/blob/{{page.version}}/storm-kafka/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java) is a spout that has this property and is fault-tolerant to losing Kafka nodes. Whenever it's time for OpaqueTridentKafkaSpout to emit a batch, it emits tuples starting from where the last batch finished emitting. This ensures that no tuple is ever skipped or successfully processed by multiple batches.
+[OpaqueTridentKafkaSpout]({{page.git-tree-base}}/external/storm-kafka/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java) is a spout that has this property and is fault-tolerant to losing Kafka nodes. Whenever it's time for OpaqueTridentKafkaSpout to emit a batch, it emits tuples starting from where the last batch finished emitting. This ensures that no tuple is ever skipped or successfully processed by multiple batches.
 
 With opaque transactional spouts, it's no longer possible to use the trick of skipping state updates if the transaction id in the database is the same as the transaction id for the current batch. This is because the batch may have changed between state updates.
 
@@ -308,7 +309,7 @@ public interface Snapshottable<T> extends State {
 }
 ```
 
-[MemoryMapState](https://github.com/apache/incubator-storm/blob/{{page.version}}/storm-core/src/jvm/storm/trident/testing/MemoryMapState.java) and [MemcachedState](https://github.com/nathanmarz/trident-memcached/blob/master/src/jvm/trident/memcached/MemcachedState.java) each implement both of these interfaces.
+[MemoryMapState]({{page.git-blob-base}}/storm-core/src/jvm/storm/trident/testing/MemoryMapState.java) and [MemcachedState](https://github.com/nathanmarz/trident-memcached/blob/{{page.version}}/src/jvm/trident/memcached/MemcachedState.java) each implement both of these interfaces.
 
 ## Implementing Map States
 
@@ -321,10 +322,10 @@ public interface IBackingMap<T> {
 }
 ```
 
-OpaqueMap's will call multiPut with [OpaqueValue](https://github.com/apache/incubator-storm/blob/{{page.version}}/storm-core/src/jvm/storm/trident/state/OpaqueValue.java)'s for the vals, TransactionalMap's will give [TransactionalValue](https://github.com/apache/incubator-storm/blob/{{page.version}}/storm-core/src/jvm/storm/trident/state/TransactionalValue.java)'s for the vals, and NonTransactionalMaps will just pass the objects from the topology through.
+OpaqueMap's will call multiPut with [OpaqueValue]({{page.git-blob-base}}/storm-core/src/jvm/storm/trident/state/OpaqueValue.java)'s for the vals, TransactionalMap's will give [TransactionalValue]({{page.git-blob-base}}/storm-core/src/jvm/storm/trident/state/TransactionalValue.java)'s for the vals, and NonTransactionalMaps will just pass the objects from the topology through.
 
-Trident also provides the [CachedMap](https://github.com/apache/incubator-storm/blob/{{page.version}}/storm-core/src/jvm/storm/trident/state/map/CachedMap.java) class to do automatic LRU caching of map key/vals.
+Trident also provides the [CachedMap]({{page.git-blob-base}}/storm-core/src/jvm/storm/trident/state/map/CachedMap.java) class to do automatic LRU caching of map key/vals.
 
-Finally, Trident provides the [SnapshottableMap](https://github.com/apache/incubator-storm/blob/{{page.version}}/storm-core/src/jvm/storm/trident/state/map/SnapshottableMap.java) class that turns a MapState into a Snapshottable object, by storing global aggregations into a fixed key.
+Finally, Trident provides the [SnapshottableMap]({{page.git-blob-base}}/storm-core/src/jvm/storm/trident/state/map/SnapshottableMap.java) class that turns a MapState into a Snapshottable object, by storing global aggregations into a fixed key.
 
 Take a look at the implementation of [MemcachedState](https://github.com/nathanmarz/trident-memcached/blob/master/src/jvm/trident/memcached/MemcachedState.java) to see how all these utilities can be put together to make a high performance MapState implementation. MemcachedState allows you to choose between opaque transactional, transactional, and non-transactional semantics.

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Trident-tutorial.md
----------------------------------------------------------------------
diff --git a/docs/Trident-tutorial.md b/docs/Trident-tutorial.md
index 862dd8b..6ad9103 100644
--- a/docs/Trident-tutorial.md
+++ b/docs/Trident-tutorial.md
@@ -1,7 +1,8 @@
 ---
+title: Trident Tutorial
 layout: documentation
+documentation: true
 ---
-# Trident tutorial
 
 Trident is a high-level abstraction for doing realtime computing on top of Storm. It allows you to seamlessly intermix high throughput (millions of messages per second), stateful stream processing with low latency distributed querying. If you're familiar with high level batch processing tools like Pig or Cascading, the concepts of Trident will be very familiar – Trident has joins, aggregations, grouping, functions, and filters. In addition to these, Trident adds primitives for doing stateful, incremental processing on top of any database or persistence store. Trident has consistent, exactly-once semantics, so it is easy to reason about Trident topologies.
 
@@ -234,7 +235,7 @@ Trident solves this problem by doing two things:
 
 With these two primitives, you can achieve exactly-once semantics with your state updates. Rather than store just the count in the database, what you can do instead is store the transaction id with the count in the database as an atomic value. Then, when updating the count, you can just compare the transaction id in the database with the transaction id for the current batch. If they're the same, you skip the update – because of the strong ordering, you know for sure that the value in the database incorporates the current batch. If they're different, you increment the count.
 
-Of course, you don't have to do this logic manually in your topologies. This logic is wrapped by the State abstraction and done automatically. Nor is your State object required to implement the transaction id trick: if you don't want to pay the cost of storing the transaction id in the database, you don't have to. In that case the State will have at-least-once-processing semantics in the case of failures (which may be fine for your application). You can read more about how to implement a State and the various fault-tolerance tradeoffs possible [in this doc](Trident-state.html).
+Of course, you don't have to do this logic manually in your topologies. This logic is wrapped by the State abstraction and done automatically. Nor is your State object required to implement the transaction id trick: if you don't want to pay the cost of storing the transaction id in the database, you don't have to. In that case the State will have at-least-once-processing semantics in the case of failures (which may be fine for your application). You can read more about how to implement a State and the various fault-tolerance tradeoffs possible [in this doc](/documentation/Trident-state.html).
 
 A State is allowed to use whatever strategy it wants to store state. So it could store state in an external database or it could keep the state in-memory but backed by HDFS (like how HBase works). States are not required to hold onto state forever. For example, you could have an in-memory State implementation that only keeps the last X hours of data available and drops anything older. Take a look at the implementation of the [Memcached integration](https://github.com/nathanmarz/trident-memcached/blob/master/src/jvm/trident/memcached/MemcachedState.java) for an example State implementation.
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Troubleshooting.md
----------------------------------------------------------------------
diff --git a/docs/Troubleshooting.md b/docs/Troubleshooting.md
index c9df298..83a7d6b 100644
--- a/docs/Troubleshooting.md
+++ b/docs/Troubleshooting.md
@@ -1,7 +1,8 @@
 ---
+title: Troubleshooting
 layout: documentation
+documentation: true
 ---
-## Troubleshooting
 
 This page lists issues people have run into when using Storm along with their solutions.
 
@@ -139,6 +140,43 @@ Caused by: java.lang.NullPointerException
     ... 6 more
 ```
 
+or 
+
+```
+java.lang.RuntimeException: java.lang.NullPointerException
+        at
+backtype.storm.utils.DisruptorQueue.consumeBatchToCursor(DisruptorQueue.java:128)
+~[storm-core-0.9.3.jar:0.9.3]
+        at
+backtype.storm.utils.DisruptorQueue.consumeBatchWhenAvailable(DisruptorQueue.java:99)
+~[storm-core-0.9.3.jar:0.9.3]
+        at
+backtype.storm.disruptor$consume_batch_when_available.invoke(disruptor.clj:80)
+~[storm-core-0.9.3.jar:0.9.3]
+        at
+backtype.storm.disruptor$consume_loop_STAR_$fn__759.invoke(disruptor.clj:94)
+~[storm-core-0.9.3.jar:0.9.3]
+        at backtype.storm.util$async_loop$fn__458.invoke(util.clj:463)
+~[storm-core-0.9.3.jar:0.9.3]
+        at clojure.lang.AFn.run(AFn.java:24) [clojure-1.5.1.jar:na]
+        at java.lang.Thread.run(Thread.java:745) [na:1.7.0_65]
+Caused by: java.lang.NullPointerException: null
+        at clojure.lang.RT.intCast(RT.java:1087) ~[clojure-1.5.1.jar:na]
+        at
+backtype.storm.daemon.worker$mk_transfer_fn$fn__3548.invoke(worker.clj:129)
+~[storm-core-0.9.3.jar:0.9.3]
+        at
+backtype.storm.daemon.executor$start_batch_transfer__GT_worker_handler_BANG_$fn__3282.invoke(executor.clj:258)
+~[storm-core-0.9.3.jar:0.9.3]
+        at
+backtype.storm.disruptor$clojure_handler$reify__746.onEvent(disruptor.clj:58)
+~[storm-core-0.9.3.jar:0.9.3]
+        at
+backtype.storm.utils.DisruptorQueue.consumeBatchToCursor(DisruptorQueue.java:125)
+~[storm-core-0.9.3.jar:0.9.3]
+        ... 6 common frames omitted
+```
+
 Solution:
 
 * This is caused by having multiple threads issue methods on the `OutputCollector`. All emits, acks, and fails must happen on the same thread. One subtle way this can happen is if you make an `IBasicBolt` that emits on a separate thread. `IBasicBolt`s automatically ack after `execute` is called, so this would cause multiple threads to use the `OutputCollector`, leading to this exception. When using a basic bolt, all emits must happen in the same thread that runs `execute`.
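+
+As a hedged illustration of the anti-pattern (the `transform` helper is invented for this example):
+
+```java
+// BROKEN: BasicBoltExecutor acks the input tuple on the executor thread as soon
+// as execute() returns, while this code emits from a second thread -- so two
+// threads end up issuing methods on the underlying OutputCollector.
+public void execute(final Tuple tuple, final BasicOutputCollector collector) {
+    new Thread(new Runnable() {
+        public void run() {
+            collector.emit(new Values(transform(tuple.getString(0))));
+        }
+    }).start();
+}
+```
+
+The fix is to keep all emits on the thread that runs `execute`.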

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Tutorial.md
----------------------------------------------------------------------
diff --git a/docs/Tutorial.md b/docs/Tutorial.md
index 73bf9a4..cc05504 100644
--- a/docs/Tutorial.md
+++ b/docs/Tutorial.md
@@ -1,11 +1,13 @@
 ---
+title: Tutorial
 layout: documentation
+documentation: true
 ---
 In this tutorial, you'll learn how to create Storm topologies and deploy them to a Storm cluster. Java will be the main language used, but a few examples will use Python to illustrate Storm's multi-language capabilities.
 
 ## Preliminaries
 
-This tutorial uses examples from the [storm-starter](http://github.com/nathanmarz/storm-starter) project. It's recommended that you clone the project and follow along with the examples. Read [Setting up a development environment](Setting-up-development-environment.html) and [Creating a new Storm project](Creating-a-new-Storm-project.html) to get your machine set up. 
+This tutorial uses examples from the [storm-starter]({{page.git-blob-base}}/examples/storm-starter) project. It's recommended that you clone the project and follow along with the examples. Read [Setting up a development environment](Setting-up-development-environment.html) and [Creating a new Storm project](Creating-a-new-Storm-project.html) to get your machine set up.
 
 ## Components of a Storm cluster
 
@@ -101,7 +103,7 @@ This topology contains a spout and two bolts. The spout emits words, and each bo
 
 This code defines the nodes using the `setSpout` and `setBolt` methods. These methods take as input a user-specified id, an object containing the processing logic, and the amount of parallelism you want for the node. In this example, the spout is given id "words" and the bolts are given ids "exclaim1" and "exclaim2". 
 
-The object containing the processing logic implements the [IRichSpout](javadocs/backtype/storm/topology/IRichSpout.html) interface for spouts and the [IRichBolt](javadocs/backtype/storm/topology/IRichBolt.html) interface for bolts. 
+The object containing the processing logic implements the [IRichSpout](javadocs/backtype/storm/topology/IRichSpout.html) interface for spouts and the [IRichBolt](javadocs/backtype/storm/topology/IRichBolt.html) interface for bolts.
 
 The last parameter, how much parallelism you want for the node, is optional. It indicates how many threads should execute that component across the cluster. If you omit it, Storm will only allocate one thread for that node.
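+
+For instance, a minimal sketch (condensed from the wiring described above, not the exact storm-starter code):
+
+```java
+// Ask Storm for five ExclamationBolt threads across the cluster;
+// omitting the last argument would default the component to one thread.
+builder.setBolt("exclaim2", new ExclamationBolt(), 5)
+       .shuffleGrouping("exclaim1");
+```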
 
@@ -137,23 +139,28 @@ As you can see, the implementation is very straightforward.
 public static class ExclamationBolt implements IRichBolt {
     OutputCollector _collector;
 
+    @Override
     public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
         _collector = collector;
     }
 
+    @Override
     public void execute(Tuple tuple) {
         _collector.emit(tuple, new Values(tuple.getString(0) + "!!!"));
         _collector.ack(tuple);
     }
 
+    @Override
     public void cleanup() {
     }
 
+    @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
         declarer.declare(new Fields("word"));
     }
     
-    public Map getComponentConfiguration() {
+    @Override
+    public Map<String, Object> getComponentConfiguration() {
         return null;
     }
 }
@@ -161,9 +168,9 @@ public static class ExclamationBolt implements IRichBolt {
 
 The `prepare` method provides the bolt with an `OutputCollector` that is used for emitting tuples from this bolt. Tuples can be emitted at any time from the bolt -- in the `prepare`, `execute`, or `cleanup` methods, or even asynchronously in another thread. This `prepare` implementation simply saves the `OutputCollector` as an instance variable to be used later on in the `execute` method.
 
-The `execute` method receives a tuple from one of the bolt's inputs. The `ExclamationBolt` grabs the first field from the tuple and emits a new tuple with the string "!!!" appended to it. If you implement a bolt that subscribes to multiple input sources, you can find out which component the [Tuple](javadocs/backtype/storm/tuple/Tuple.html) came from by using the `Tuple#getSourceComponent` method.
+The `execute` method receives a tuple from one of the bolt's inputs. The `ExclamationBolt` grabs the first field from the tuple and emits a new tuple with the string "!!!" appended to it. If you implement a bolt that subscribes to multiple input sources, you can find out which component the [Tuple](/javadoc/apidocs/backtype/storm/tuple/Tuple.html) came from by using the `Tuple#getSourceComponent` method.
 
-There's a few other things going in in the `execute` method, namely that the input tuple is passed as the first argument to `emit` and the input tuple is acked on the final line. These are part of Storm's reliability API for guaranteeing no data loss and will be explained later in this tutorial. 
+There are a few other things going on in the `execute` method: the input tuple is passed as the first argument to `emit`, and the input tuple is acked on the final line. These are part of Storm's reliability API for guaranteeing no data loss and will be explained later in this tutorial. 
 
 The `cleanup` method is called when a Bolt is being shut down and should clean up any resources that were opened. There's no guarantee that this method will be called on the cluster: for example, if the machine the task is running on blows up, there's no way to invoke the method. The `cleanup` method is intended for when you run topologies in [local mode](Local-mode.html) (where a Storm cluster is simulated in process), and you want to be able to run and kill many topologies without suffering any resource leaks.
 
@@ -177,15 +184,18 @@ Methods like `cleanup` and `getComponentConfiguration` are often not needed in a
 public static class ExclamationBolt extends BaseRichBolt {
     OutputCollector _collector;
 
+    @Override
     public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
         _collector = collector;
     }
 
+    @Override
     public void execute(Tuple tuple) {
         _collector.emit(tuple, new Values(tuple.getString(0) + "!!!"));
         _collector.ack(tuple);
     }
 
+    @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
         declarer.declare(new Fields("word"));
     }    
@@ -235,7 +245,7 @@ A stream grouping tells a topology how to send tuples between two components. Re
 
 When a task for Bolt A emits a tuple to Bolt B, which task should it send the tuple to?
 
-A "stream grouping" answers this question by telling Storm how to send tuples between sets of tasks. Before we dig into the different kinds of stream groupings, let's take a look at another topology from [storm-starter](http://github.com/nathanmarz/storm-starter). This [WordCountTopology](https://github.com/nathanmarz/storm-starter/blob/master/src/jvm/storm/starter/WordCountTopology.java) reads sentences off of a spout and streams out of `WordCountBolt` the total number of times it has seen that word before:
+A "stream grouping" answers this question by telling Storm how to send tuples between sets of tasks. Before we dig into the different kinds of stream groupings, let's take a look at another topology from [storm-starter](http://github.com/apache/storm/blob/{{page.version}}/examples/storm-starter). This [WordCountTopology]({{page.git-blob-base}}/examples/storm-starter/src/jvm/storm/starter/WordCountTopology.java) reads sentences off of a spout and streams out of `WordCountBolt` the total number of times it has seen that word before:
 
 ```java
 TopologyBuilder builder = new TopologyBuilder();

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Understanding-the-parallelism-of-a-Storm-topology.md
----------------------------------------------------------------------
diff --git a/docs/Understanding-the-parallelism-of-a-Storm-topology.md b/docs/Understanding-the-parallelism-of-a-Storm-topology.md
index adc4c41..21d74ef 100644
--- a/docs/Understanding-the-parallelism-of-a-Storm-topology.md
+++ b/docs/Understanding-the-parallelism-of-a-Storm-topology.md
@@ -1,7 +1,9 @@
 ---
+title: Understanding the Parallelism of a Storm Topology
 layout: documentation
+documentation: true
 ---
-# What makes a running topology: worker processes, executors and tasks
+## What makes a running topology: worker processes, executors and tasks
 
 Storm distinguishes between the following three main entities that are used to actually run a topology in a Storm cluster:
 
@@ -19,29 +21,29 @@ An _executor_ is a thread that is spawned by a worker process. It may run one or
 
 A _task_ performs the actual data processing — each spout or bolt that you implement in your code executes as many tasks across the cluster. The number of tasks for a component is always the same throughout the lifetime of a topology, but the number of executors (threads) for a component can change over time. This means that the following condition holds true: ``#threads ≤ #tasks``. By default, the number of tasks is set to be the same as the number of executors, i.e. Storm will run one task per thread.
 
-# Configuring the parallelism of a topology
+## Configuring the parallelism of a topology
 
 Note that in Storm’s terminology "parallelism" is specifically used to describe the so-called _parallelism hint_, which means the initial number of executors (threads) of a component. In this document though we use the term "parallelism" in a more general sense to describe how you can configure not only the number of executors but also the number of worker processes and the number of tasks of a Storm topology. We will specifically call out when "parallelism" is used in the normal, narrow definition of Storm.
 
 The following sections give an overview of the various configuration options and how to set them in your code. There is more than one way of setting these options though, and the table lists only some of them. Storm currently has the following [order of precedence for configuration settings](Configuration.html): ``defaults.yaml`` < ``storm.yaml`` < topology-specific configuration < internal component-specific configuration < external component-specific configuration.
 
-## Number of worker processes
+### Number of worker processes
 
 * Description: How many worker processes to create _for the topology_ across machines in the cluster.
 * Configuration option: [TOPOLOGY_WORKERS](javadocs/backtype/storm/Config.html#TOPOLOGY_WORKERS)
 * How to set in your code (examples):
     * [Config#setNumWorkers](javadocs/backtype/storm/Config.html)
 
-## Number of executors (threads)
+### Number of executors (threads)
 
 * Description: How many executors to spawn _per component_.
-* Configuration option: ?
+* Configuration option: None (pass ``parallelism_hint`` parameter to ``setSpout`` or ``setBolt``)
 * How to set in your code (examples):
     * [TopologyBuilder#setSpout()](javadocs/backtype/storm/topology/TopologyBuilder.html)
     * [TopologyBuilder#setBolt()](javadocs/backtype/storm/topology/TopologyBuilder.html)
     * Note that as of Storm 0.8 the ``parallelism_hint`` parameter now specifies the initial number of executors (not tasks!) for that bolt.
 
-## Number of tasks
+### Number of tasks
 
 * Description: How many tasks to create _per component_.
 * Configuration option: [TOPOLOGY_TASKS](javadocs/backtype/storm/Config.html#TOPOLOGY_TASKS)
@@ -54,12 +56,12 @@ Here is an example code snippet to show these settings in practice:
 ```java
 topologyBuilder.setBolt("green-bolt", new GreenBolt(), 2)
                .setNumTasks(4)
-               .shuffleGrouping("blue-spout);
+               .shuffleGrouping("blue-spout");
 ```
 
 In the above code we configured Storm to run the bolt ``GreenBolt`` with an initial number of two executors and four associated tasks. Storm will run two tasks per executor (thread). If you do not explicitly configure the number of tasks, Storm will run by default one task per executor.
 
-# Example of a running topology
+## Example of a running topology
 
 The following illustration shows how a simple topology would look in operation. The topology consists of three components: one spout called ``BlueSpout`` and two bolts called ``GreenBolt`` and ``YellowBolt``. The components are linked such that ``BlueSpout`` sends its output to ``GreenBolt``, which in turn sends its own output to ``YellowBolt``.
 
@@ -89,9 +91,9 @@ StormSubmitter.submitTopology(
 
 And of course Storm comes with additional configuration settings to control the parallelism of a topology, including:
 
-* [TOPOLOGY_MAX_TASK_PARALLELISM](javadocs/backtype/storm/Config.html#TOPOLOGY_MAX_TASK_PARALLELISM): This setting puts a ceiling on the number of executors that can be spawned for a single component. It is typically used during testing to limit the number of threads spawned when running a topology in local mode. You can set this option via e.g. [Config#setMaxTaskParallelism()](javadocs/backtype/storm/Config.html).
+* [TOPOLOGY_MAX_TASK_PARALLELISM](javadocs/backtype/storm/Config.html#TOPOLOGY_MAX_TASK_PARALLELISM): This setting puts a ceiling on the number of executors that can be spawned for a single component. It is typically used during testing to limit the number of threads spawned when running a topology in local mode. You can set this option via e.g. [Config#setMaxTaskParallelism()](javadocs/backtype/storm/Config.html#setMaxTaskParallelism(int)).
 
-# How to change the parallelism of a running topology
+## How to change the parallelism of a running topology
 
 A nifty feature of Storm is that you can increase or decrease the number of worker processes and/or executors without being required to restart the cluster or the topology. The act of doing so is called rebalancing.
 
@@ -103,14 +105,14 @@ You have two options to rebalance a topology:
 Here is an example of using the CLI tool:
 
 ```
-# Reconfigure the topology "mytopology" to use 5 worker processes,
-# the spout "blue-spout" to use 3 executors and
-# the bolt "yellow-bolt" to use 10 executors.
+## Reconfigure the topology "mytopology" to use 5 worker processes,
+## the spout "blue-spout" to use 3 executors and
+## the bolt "yellow-bolt" to use 10 executors.
 
 $ storm rebalance mytopology -n 5 -e blue-spout=3 -e yellow-bolt=10
 ```
 
-# References for this article
+## References
 
 * [Concepts](Concepts.html)
 * [Configuration](Configuration.html)

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Using-non-JVM-languages-with-Storm.md
----------------------------------------------------------------------
diff --git a/docs/Using-non-JVM-languages-with-Storm.md b/docs/Using-non-JVM-languages-with-Storm.md
index 7b2a2f2..1b3ae45 100644
--- a/docs/Using-non-JVM-languages-with-Storm.md
+++ b/docs/Using-non-JVM-languages-with-Storm.md
@@ -1,4 +1,5 @@
 ---
+title: Using non-JVM Languages with Storm
 layout: documentation
 ---
 - two pieces: creating topologies and implementing spouts and bolts in other languages

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/flux.md
----------------------------------------------------------------------
diff --git a/docs/flux.md b/docs/flux.md
new file mode 100644
index 0000000..8f2b264
--- /dev/null
+++ b/docs/flux.md
@@ -0,0 +1,835 @@
+---
+title: Flux
+layout: documentation
+documentation: true
+---
+
+A framework for creating and deploying Apache Storm streaming computations with less friction.
+
+## Definition
+**flux** |fləks| _noun_
+
+1. The action or process of flowing or flowing out
+2. Continuous change
+3. In physics, the rate of flow of a fluid, radiant energy, or particles across a given area
+4. A substance mixed with a solid to lower its melting point
+
+## Rationale
+Bad things happen when configuration is hard-coded. No one should have to recompile or repackage an application in
+order to change configuration.
+
+## About
+Flux is a framework and set of utilities that make defining and deploying Apache Storm topologies less painful and
+developer-intensive.
+
+Have you ever found yourself repeating this pattern?
+
+```java
+
+public static void main(String[] args) throws Exception {
+    // logic to determine if we're running locally or not...
+    // create necessary config options...
+    boolean runLocal = shouldRunLocal();
+    if(runLocal){
+        LocalCluster cluster = new LocalCluster();
+        cluster.submitTopology(name, conf, topology);
+    } else {
+        StormSubmitter.submitTopology(name, conf, topology);
+    }
+}
+```
+
+Wouldn't something like this be easier?
+
+```bash
+storm jar mytopology.jar org.apache.storm.flux.Flux --local config.yaml
+```
+
+or:
+
+```bash
+storm jar mytopology.jar org.apache.storm.flux.Flux --remote config.yaml
+```
+
+Another common pain point is that the wiring for a topology graph is often tied up in Java code, and
+any changes require recompilation and repackaging of the topology jar file. Flux aims to alleviate that
+pain by allowing you to package all your Storm components in a single jar, and use an external text file to define
+the layout and configuration of your topologies.
+
+## Features
+
+ * Easily configure and deploy Storm topologies (both Storm core and micro-batch API) without embedding configuration
+   in your topology code
+ * Support for existing topology code (see below)
+ * Define Storm Core API (Spouts/Bolts) using a flexible YAML DSL
+ * YAML DSL support for most Storm components (storm-kafka, storm-hdfs, storm-hbase, etc.)
+ * Convenient support for multi-lang components
+ * External property substitution/filtering for easily switching between configurations/environments (similar to Maven-style
+   `${variable.name}` substitution)
+
+## Usage
+
+To use Flux, add it as a dependency and package all your Storm components in a fat jar, then create a YAML document
+to define your topology (see below for YAML configuration options).
+
+### Building from Source
+The easiest way to use Flux is to add it as a Maven dependency to your project as described below.
+
+If you would like to build Flux from source and run the unit/integration tests, you will need the following installed
+on your system:
+
+* Python 2.6.x or later
+* Node.js 0.10.x or later
+
+#### Building with unit tests enabled:
+
+```
+mvn clean install
+```
+
+#### Building with unit tests disabled:
+If you would like to build Flux without installing Python or Node.js, you can simply skip the unit tests:
+
+```
+mvn clean install -DskipTests=true
+```
+
+Note that if you plan on using Flux to deploy topologies to a remote cluster, you will still need to have Python
+installed since it is required by Apache Storm.
+
+
+#### Building with integration tests enabled:
+
+```
+mvn clean install -DskipIntegration=false
+```
+
+
+### Packaging with Maven
+To enable Flux for your Storm components, you need to add it as a dependency such that it's included in the Storm
+topology jar. This can be accomplished with the Maven shade plugin (preferred) or the Maven assembly plugin (not
+recommended).
+
+#### Flux Maven Dependency
+The current version of Flux is available in Maven Central at the following coordinates:
+```xml
+<dependency>
+    <groupId>org.apache.storm</groupId>
+    <artifactId>flux-core</artifactId>
+    <version>${storm.version}</version>
+</dependency>
+```
+
+#### Creating a Flux-Enabled Topology JAR
+The example below illustrates Flux usage with the Maven shade plugin:
+
+ ```xml
+<!-- include Flux and user dependencies in the shaded jar -->
+<dependencies>
+    <!-- Flux include -->
+    <dependency>
+        <groupId>org.apache.storm</groupId>
+        <artifactId>flux-core</artifactId>
+        <version>${storm.version}</version>
+    </dependency>
+
+    <!-- add user dependencies here... -->
+
+</dependencies>
+<!-- create a fat jar that includes all dependencies -->
+<build>
+    <plugins>
+        <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-shade-plugin</artifactId>
+            <version>1.4</version>
+            <configuration>
+                <createDependencyReducedPom>true</createDependencyReducedPom>
+            </configuration>
+            <executions>
+                <execution>
+                    <phase>package</phase>
+                    <goals>
+                        <goal>shade</goal>
+                    </goals>
+                    <configuration>
+                        <transformers>
+                            <transformer
+                                    implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
+                            <transformer
+                                    implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                                <mainClass>org.apache.storm.flux.Flux</mainClass>
+                            </transformer>
+                        </transformers>
+                    </configuration>
+                </execution>
+            </executions>
+        </plugin>
+    </plugins>
+</build>
+ ```
+
+### Deploying and Running a Flux Topology
+Once your topology components are packaged with the Flux dependency, you can run different topologies either locally
+or remotely using the `storm jar` command. For example, if your fat jar is named `myTopology-0.1.0-SNAPSHOT.jar` you
+could run it locally with the command:
+
+
+```bash
+storm jar myTopology-0.1.0-SNAPSHOT.jar org.apache.storm.flux.Flux --local my_config.yaml
+
+```
+
+### Command line options
+```
+usage: storm jar <my_topology_uber_jar.jar> org.apache.storm.flux.Flux
+             [options] <topology-config.yaml>
+ -d,--dry-run                 Do not run or deploy the topology. Just
+                              build, validate, and print information about
+                              the topology.
+ -e,--env-filter              Perform environment variable substitution.
+                              Keys identified with `${ENV-[NAME]}` will be
+                              replaced with the corresponding `NAME`
+                              environment variable's value.
+ -f,--filter <file>           Perform property substitution. Use the
+                              specified file as a source of properties,
+                              and replace keys identified with {$[property
+                              name]} with the value defined in the
+                              properties file.
+ -i,--inactive                Deploy the topology, but do not activate it.
+ -l,--local                   Run the topology in local mode.
+ -n,--no-splash               Suppress the printing of the splash screen.
+ -q,--no-detail               Suppress the printing of topology details.
+ -r,--remote                  Deploy the topology to a remote cluster.
+ -R,--resource                Treat the supplied path as a classpath
+                              resource instead of a file.
+ -s,--sleep <ms>              When running locally, the amount of time to
+                              sleep (in ms.) before killing the topology
+                              and shutting down the local cluster.
+ -z,--zookeeper <host:port>   When running in local mode, use the
+                              ZooKeeper at the specified <host>:<port>
+                              instead of the in-process ZooKeeper.
+                              (requires Storm 0.9.3 or later)
+```
+
+**NOTE:** Flux tries to avoid command line switch collision with the `storm` command, and allows any other command line
+switches to pass through to the `storm` command.
+
+For example, you can use the `storm` command switch `-c` to override a topology configuration property. The following
+example command will run Flux and override the `nimbus.seeds` configuration:
+
+```bash
+storm jar myTopology-0.1.0-SNAPSHOT.jar org.apache.storm.flux.Flux --remote my_config.yaml -c 'nimbus.seeds=["localhost"]'
+```
+
+### Sample output
+```
+███████╗██╗     ██╗   ██╗██╗  ██╗
+██╔════╝██║     ██║   ██║╚██╗██╔╝
+█████╗  ██║     ██║   ██║ ╚███╔╝
+██╔══╝  ██║     ██║   ██║ ██╔██╗
+██║     ███████╗╚██████╔╝██╔╝ ██╗
+╚═╝     ╚══════╝ ╚═════╝ ╚═╝  ╚═╝
++-         Apache Storm        -+
++-  data FLow User eXperience  -+
+Version: 0.3.0
+Parsing file: /Users/hsimpson/Projects/donut_domination/storm/shell_test.yaml
+---------- TOPOLOGY DETAILS ----------
+Name: shell-topology
+--------------- SPOUTS ---------------
+sentence-spout[1](org.apache.storm.flux.spouts.GenericShellSpout)
+---------------- BOLTS ---------------
+splitsentence[1](org.apache.storm.flux.bolts.GenericShellBolt)
+log[1](org.apache.storm.flux.wrappers.bolts.LogInfoBolt)
+count[1](backtype.storm.testing.TestWordCounter)
+--------------- STREAMS ---------------
+sentence-spout --SHUFFLE--> splitsentence
+splitsentence --FIELDS--> count
+count --SHUFFLE--> log
+--------------------------------------
+Submitting topology: 'shell-topology' to remote cluster...
+```
+
+## YAML Configuration
+Flux topologies are defined in a YAML file. A Flux topology
+definition consists of the following:
+
+  1. A topology name
+  2. A list of topology "components" (named Java objects that will be made available in the environment)
+  3. **EITHER** (A DSL topology definition):
+      * A list of spouts, each identified by a unique ID
+      * A list of bolts, each identified by a unique ID
+      * A list of "stream" objects representing a flow of tuples between spouts and bolts
+  4. **OR** (A JVM class that can produce a `backtype.storm.generated.StormTopology` instance):
+      * A `topologySource` definition.
+
+
+
+For example, here is a simple definition of a wordcount topology using the YAML DSL:
+
+```yaml
+name: "yaml-topology"
+config:
+  topology.workers: 1
+
+# spout definitions
+spouts:
+  - id: "spout-1"
+    className: "backtype.storm.testing.TestWordSpout"
+    parallelism: 1
+
+# bolt definitions
+bolts:
+  - id: "bolt-1"
+    className: "backtype.storm.testing.TestWordCounter"
+    parallelism: 1
+  - id: "bolt-2"
+    className: "org.apache.storm.flux.wrappers.bolts.LogInfoBolt"
+    parallelism: 1
+
+#stream definitions
+streams:
+  - name: "spout-1 --> bolt-1" # name isn't used (placeholder for logging, UI, etc.)
+    from: "spout-1"
+    to: "bolt-1"
+    grouping:
+      type: FIELDS
+      args: ["word"]
+
+  - name: "bolt-1 --> bolt2"
+    from: "bolt-1"
+    to: "bolt-2"
+    grouping:
+      type: SHUFFLE
+
+
+```
+## Property Substitution/Filtering
+It's common for developers to want to easily switch between configurations, for example switching deployment between
+a development environment and a production environment. This can be accomplished by using separate YAML configuration
+files, but that approach would lead to unnecessary duplication, especially in situations where the Storm topology
+does not change, but configuration settings such as host names, ports, and parallelism parameters do.
+
+For this case, Flux offers properties filtering to allow you to externalize values to a `.properties` file and have
+them substituted before the `.yaml` file is parsed.
+
+To enable property filtering, use the `--filter` command line option and specify a `.properties` file. For example,
+if you invoked Flux like so:
+
+```bash
+storm jar myTopology-0.1.0-SNAPSHOT.jar org.apache.storm.flux.Flux --local my_config.yaml --filter dev.properties
+```
+With the following `dev.properties` file:
+
+```properties
+kafka.zookeeper.hosts: localhost:2181
+```
+
+You would then be able to reference those properties by key in your `.yaml` file using `${}` syntax:
+
+```yaml
+  - id: "zkHosts"
+    className: "storm.kafka.ZkHosts"
+    constructorArgs:
+      - "${kafka.zookeeper.hosts}"
+```
+
+In this case, Flux would replace `${kafka.zookeeper.hosts}` with `localhost:2181` before parsing the YAML contents.
+
+### Environment Variable Substitution/Filtering
+Flux also allows environment variable substitution. For example, if an environment variable named `ZK_HOSTS` is defined,
+you can reference it in a Flux YAML file with the following syntax:
+
+```
+${ENV-ZK_HOSTS}
+```
+
+## Components
+Components are essentially named object instances that are made available as configuration options for spouts and
+bolts. If you are familiar with the Spring framework, components are roughly analogous to Spring beans.
+
+Every component is identified, at a minimum, by a unique identifier (String) and a class name (String). For example,
+the following will make an instance of the `storm.kafka.StringScheme` class available as a reference under the key
+`"stringScheme"` . This assumes the `storm.kafka.StringScheme` has a default constructor.
+
+```yaml
+components:
+  - id: "stringScheme"
+    className: "storm.kafka.StringScheme"
+```
+
+### Constructor Arguments, References, Properties and Configuration Methods
+
+#### Constructor Arguments
+Arguments to a class constructor can be configured by adding a `constructorArgs` element to a component definition.
+`constructorArgs` is a list of objects that will be passed to the class' constructor. The following example creates an
+object by calling the constructor that takes a single string as an argument:
+
+```yaml
+  - id: "zkHosts"
+    className: "storm.kafka.ZkHosts"
+    constructorArgs:
+      - "localhost:2181"
+```
+
+#### References
+Each component instance is identified by a unique id that allows it to be used/reused by other components. To
+reference an existing component, you specify the id of the component with the `ref` tag.
+
+In the following example, a component with the id `"stringScheme"` is created, and later referenced as an
+argument to another component's constructor:
+
+```yaml
+components:
+  - id: "stringScheme"
+    className: "storm.kafka.StringScheme"
+
+  - id: "stringMultiScheme"
+    className: "backtype.storm.spout.SchemeAsMultiScheme"
+    constructorArgs:
+      - ref: "stringScheme" # component with id "stringScheme" must be declared above.
+```
+**N.B.:** References can only be used after (below) the object they point to has been declared.
+
+#### Properties
+In addition to calling constructors with different arguments, Flux also allows you to configure components using
+JavaBean-like setter methods and fields declared as `public`:
+
+```yaml
+  - id: "spoutConfig"
+    className: "storm.kafka.SpoutConfig"
+    constructorArgs:
+      # brokerHosts
+      - ref: "zkHosts"
+      # topic
+      - "myKafkaTopic"
+      # zkRoot
+      - "/kafkaSpout"
+      # id
+      - "myId"
+    properties:
+      - name: "forceFromStart"
+        value: true
+      - name: "scheme"
+        ref: "stringMultiScheme"
+```
+
+In the example above, the `properties` declaration will cause Flux to look for a public method in the `SpoutConfig` with
+the signature `setForceFromStart(boolean b)` and attempt to invoke it. If a setter method is not found, Flux will then
+look for a public instance variable with the name `forceFromStart` and attempt to set its value.
+
+References may also be used as property values.
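+
+For illustration, a class shaped like the following sketch (hypothetical, not part of storm-kafka) would satisfy both lookup strategies:
+
+```java
+import java.io.Serializable;
+import backtype.storm.spout.MultiScheme;
+
+public class ExampleComponent implements Serializable {
+    // Found first: a JavaBean-style setter matching the property name.
+    private boolean forceFromStart;
+    public void setForceFromStart(boolean forceFromStart) {
+        this.forceFromStart = forceFromStart;
+    }
+
+    // Fallback: a public instance variable with the property's name,
+    // set directly when no matching setter exists.
+    public MultiScheme scheme;
+}
+```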
+
+#### Configuration Methods
+Conceptually, configuration methods are similar to Properties and Constructor Args -- they allow you to invoke an
+arbitrary method on an object after it is constructed. Configuration methods are useful for working with classes that
+don't expose JavaBean methods or have constructors that can fully configure the object. Common examples include classes
+that use the builder pattern for configuration/composition.
+
+The following YAML example creates a bolt and configures it by calling several methods:
+
+```yaml
+bolts:
+  - id: "bolt-1"
+    className: "org.apache.storm.flux.test.TestBolt"
+    parallelism: 1
+    configMethods:
+      - name: "withFoo"
+        args:
+          - "foo"
+      - name: "withBar"
+        args:
+          - "bar"
+      - name: "withFooBar"
+        args:
+          - "foo"
+          - "bar"
+```
+
+The signatures of the corresponding methods are as follows:
+
+```java
+    public void withFoo(String foo);
+    public void withBar(String bar);
+    public void withFooBar(String foo, String bar);
+```
+
+Arguments passed to configuration methods work much the same way as constructor arguments, and support references as
+well.
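+
+For instance, a bolt compatible with the `configMethods` section above might look like the following sketch (a hypothetical class, not the actual `org.apache.storm.flux.test.TestBolt`):
+
+```java
+import backtype.storm.topology.BasicOutputCollector;
+import backtype.storm.topology.OutputFieldsDeclarer;
+import backtype.storm.topology.base.BaseBasicBolt;
+import backtype.storm.tuple.Fields;
+import backtype.storm.tuple.Tuple;
+import backtype.storm.tuple.Values;
+
+public class ConfigurableBolt extends BaseBasicBolt {
+    private String foo;
+    private String bar;
+
+    // Invoked by Flux after construction, per the configMethods declarations.
+    public void withFoo(String foo) { this.foo = foo; }
+    public void withBar(String bar) { this.bar = bar; }
+    public void withFooBar(String foo, String bar) {
+        this.foo = foo;
+        this.bar = bar;
+    }
+
+    @Override
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+        collector.emit(new Values(foo + tuple.getString(0) + bar));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("decorated"));
+    }
+}
+```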
+
+### Using Java `enum`s in Constructor Arguments, References, Properties and Configuration Methods
+You can easily use Java `enum` values as arguments in a Flux YAML file, simply by referencing the name of the `enum`.
+
+For example, Storm's HDFS module includes the following `enum` definition (simplified for brevity):
+
+```java
+public static enum Units {
+    KB, MB, GB, TB
+}
+```
+
+And the `org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy` class has the following constructor:
+
+```java
+public FileSizeRotationPolicy(float count, Units units)
+
+```
+The following Flux `component` definition could be used to call the constructor:
+
+```yaml
+  - id: "rotationPolicy"
+    className: "org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy"
+    constructorArgs:
+      - 5.0
+      - MB
+```
+
+The above definition is functionally equivalent to the following Java code:
+
+```java
+// rotate files when they reach 5MB
+FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
+```
+
+## Topology Config
+The `config` section is simply a map of Storm topology configuration parameters that will be passed to the
+`backtype.storm.StormSubmitter` as an instance of the `backtype.storm.Config` class:
+
+```yaml
+config:
+  topology.workers: 4
+  topology.max.spout.pending: 1000
+  topology.message.timeout.secs: 30
+```
+
+# Existing Topologies
+If you have existing Storm topologies, you can still use Flux to deploy/run/test them. This feature allows you to
+leverage Flux Constructor Arguments, References, Properties, and Topology Config declarations for existing topology
+classes.
+
+The easiest way to use an existing topology class is to define
+a `getTopology()` instance method with one of the following signatures:
+
+```java
+public StormTopology getTopology(Map<String, Object> config)
+```
+or:
+
+```java
+public StormTopology getTopology(Config config)
+```
+
+You could then use the following YAML to configure your topology:
+
+```yaml
+name: "existing-topology"
+topologySource:
+  className: "org.apache.storm.flux.test.SimpleTopology"
+```
+
+If the class you would like to use as a topology source has a different method name (i.e. not `getTopology`), you can
+override it:
+
+```yaml
+name: "existing-topology"
+topologySource:
+  className: "org.apache.storm.flux.test.SimpleTopology"
+  methodName: "getTopologyWithDifferentMethodName"
+```
+
+__N.B.:__ The specified method must accept a single argument of type `java.util.Map<String, Object>` or
+`backtype.storm.Config`, and return a `backtype.storm.generated.StormTopology` object.
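+
+For example, a minimal topology source compatible with the YAML above could look like the following sketch (the actual `SimpleTopology` test class may differ):
+
+```java
+import backtype.storm.Config;
+import backtype.storm.generated.StormTopology;
+import backtype.storm.testing.TestWordSpout;
+import backtype.storm.topology.TopologyBuilder;
+import org.apache.storm.flux.wrappers.bolts.LogInfoBolt;
+
+public class SimpleTopology {
+    // Flux calls this with the topology config and submits the returned topology.
+    public StormTopology getTopology(Config config) {
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout("sentence-spout", new TestWordSpout(), 1);
+        builder.setBolt("log-bolt", new LogInfoBolt(), 1)
+               .shuffleGrouping("sentence-spout");
+        return builder.createTopology();
+    }
+}
+```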
+
+# YAML DSL
+## Spouts and Bolts
+Spouts and bolts are configured in their own respective sections of the YAML configuration. Spout and bolt definitions
+are extensions to the `component` definition that add a `parallelism` parameter, which sets the parallelism of a
+component when the topology is deployed.
+
+Because spout and bolt definitions extend `component` they support constructor arguments, references, and properties as
+well.
+
+Shell spout example:
+
+```yaml
+spouts:
+  - id: "sentence-spout"
+    className: "org.apache.storm.flux.spouts.GenericShellSpout"
+    # shell spout constructor takes 2 arguments: String[], String[]
+    constructorArgs:
+      # command line
+      - ["node", "randomsentence.js"]
+      # output fields
+      - ["word"]
+    parallelism: 1
+```
+
+Kafka spout example:
+
+```yaml
+components:
+  - id: "stringScheme"
+    className: "storm.kafka.StringScheme"
+
+  - id: "stringMultiScheme"
+    className: "backtype.storm.spout.SchemeAsMultiScheme"
+    constructorArgs:
+      - ref: "stringScheme"
+
+  - id: "zkHosts"
+    className: "storm.kafka.ZkHosts"
+    constructorArgs:
+      - "localhost:2181"
+
+# Alternative kafka config
+#  - id: "kafkaConfig"
+#    className: "storm.kafka.KafkaConfig"
+#    constructorArgs:
+#      # brokerHosts
+#      - ref: "zkHosts"
+#      # topic
+#      - "myKafkaTopic"
+#      # clientId (optional)
+#      - "myKafkaClientId"
+
+  - id: "spoutConfig"
+    className: "storm.kafka.SpoutConfig"
+    constructorArgs:
+      # brokerHosts
+      - ref: "zkHosts"
+      # topic
+      - "myKafkaTopic"
+      # zkRoot
+      - "/kafkaSpout"
+      # id
+      - "myId"
+    properties:
+      - name: "forceFromStart"
+        value: true
+      - name: "scheme"
+        ref: "stringMultiScheme"
+
+config:
+  topology.workers: 1
+
+# spout definitions
+spouts:
+  - id: "kafka-spout"
+    className: "storm.kafka.KafkaSpout"
+    constructorArgs:
+      - ref: "spoutConfig"
+
+```
+
+Bolt Examples:
+
+```yaml
+# bolt definitions
+bolts:
+  - id: "splitsentence"
+    className: "org.apache.storm.flux.bolts.GenericShellBolt"
+    constructorArgs:
+      # command line
+      - ["python", "splitsentence.py"]
+      # output fields
+      - ["word"]
+    parallelism: 1
+    # ...
+
+  - id: "log"
+    className: "org.apache.storm.flux.wrappers.bolts.LogInfoBolt"
+    parallelism: 1
+    # ...
+
+  - id: "count"
+    className: "backtype.storm.testing.TestWordCounter"
+    parallelism: 1
+    # ...
+```
+## Streams and Stream Groupings
+Streams in Flux are represented as a list of connections (graph edges, data flow, etc.) between the Spouts and Bolts in
+a topology, with an associated Grouping definition.
+
+A Stream definition has the following properties:
+
+**`name`:** A name for the connection (optional, currently unused)
+
+**`from`:** The `id` of a Spout or Bolt that is the source (publisher)
+
+**`to`:** The `id` of a Spout or Bolt that is the destination (subscriber)
+
+**`grouping`:** The stream grouping definition for the Stream
+
+A Grouping definition has the following properties:
+
+**`type`:** The type of grouping. One of `ALL`,`CUSTOM`,`DIRECT`,`SHUFFLE`,`LOCAL_OR_SHUFFLE`,`FIELDS`,`GLOBAL`, or `NONE`.
+
+**`streamId`:** The Storm stream ID (Optional. If unspecified will use the default stream)
+
+**`args`:** For the `FIELDS` grouping, a list of field names.
+
+**`customClass`:** For the `CUSTOM` grouping, a definition of a custom grouping class instance
+
+The `streams` definition example below sets up a topology with the following wiring:
+
+```
+    kafka-spout --> splitsentence --> count --> log
+```
+
+
+```yaml
+#stream definitions
+# stream definitions define connections between spouts and bolts.
+# note that such connections can be cyclical
+# custom stream groupings are also supported
+
+streams:
+  - name: "kafka --> split" # name isn't used (placeholder for logging, UI, etc.)
+    from: "kafka-spout"
+    to: "splitsentence"
+    grouping:
+      type: SHUFFLE
+
+  - name: "split --> count"
+    from: "splitsentence"
+    to: "count"
+    grouping:
+      type: FIELDS
+      args: ["word"]
+
+  - name: "count --> log"
+    from: "count"
+    to: "log"
+    grouping:
+      type: SHUFFLE
+```
+
+### Custom Stream Groupings
+Custom stream groupings are defined by setting the grouping type to `CUSTOM` and defining a `customClass` parameter
+that tells Flux how to instantiate the custom class. The `customClass` definition extends `component`, so it supports
+constructor arguments, references, and properties as well.
+
+The example below creates a Stream with an instance of the `backtype.storm.testing.NGrouping` custom stream grouping
+class.
+
+```yaml
+  - name: "bolt-1 --> bolt2"
+    from: "bolt-1"
+    to: "bolt-2"
+    grouping:
+      type: CUSTOM
+      customClass:
+        className: "backtype.storm.testing.NGrouping"
+        constructorArgs:
+          - 1
+```
+
+## Includes and Overrides
+Flux allows you to include the contents of other YAML files, and have them treated as though they were defined in the
+same file. Includes may be either files, or classpath resources.
+
+Includes are specified as a list of maps:
+
+```yaml
+includes:
+  - resource: false
+    file: "src/test/resources/configs/shell_test.yaml"
+    override: false
+```
+
+If the `resource` property is set to `true`, the include will be loaded as a classpath resource from the value of the
+`file` attribute, otherwise it will be treated as a regular file.
+
+The `override` property controls how includes affect the values defined in the current file. If `override` is set to
+`true`, values in the included file will replace values in the current file being parsed. If `override` is set to
+`false`, values in the current file being parsed will take precedence, and the parser will refuse to replace them.
+
+**N.B.:** Includes are not yet recursive. Includes from included files will be ignored.
+
+
+## Basic Word Count Example
+
+This example uses a spout implemented in JavaScript, a bolt implemented in Python, and a bolt implemented in Java.
+
+Topology YAML config:
+
+```yaml
+---
+name: "shell-topology"
+config:
+  topology.workers: 1
+
+# spout definitions
+spouts:
+  - id: "sentence-spout"
+    className: "org.apache.storm.flux.spouts.GenericShellSpout"
+    # shell spout constructor takes 2 arguments: String[], String[]
+    constructorArgs:
+      # command line
+      - ["node", "randomsentence.js"]
+      # output fields
+      - ["word"]
+    parallelism: 1
+
+# bolt definitions
+bolts:
+  - id: "splitsentence"
+    className: "org.apache.storm.flux.bolts.GenericShellBolt"
+    constructorArgs:
+      # command line
+      - ["python", "splitsentence.py"]
+      # output fields
+      - ["word"]
+    parallelism: 1
+
+  - id: "log"
+    className: "org.apache.storm.flux.wrappers.bolts.LogInfoBolt"
+    parallelism: 1
+
+  - id: "count"
+    className: "backtype.storm.testing.TestWordCounter"
+    parallelism: 1
+
+#stream definitions
+# stream definitions define connections between spouts and bolts.
+# note that such connections can be cyclical
+# custom stream groupings are also supported
+
+streams:
+  - name: "spout --> split" # name isn't used (placeholder for logging, UI, etc.)
+    from: "sentence-spout"
+    to: "splitsentence"
+    grouping:
+      type: SHUFFLE
+
+  - name: "split --> count"
+    from: "splitsentence"
+    to: "count"
+    grouping:
+      type: FIELDS
+      args: ["word"]
+
+  - name: "count --> log"
+    from: "count"
+    to: "log"
+    grouping:
+      type: SHUFFLE
+```
+
+
+## Micro-Batching (Trident) API Support
+Currently, the Flux YAML DSL only supports the Core Storm API, but support for Storm's micro-batching API is planned.
+
+To use Flux with a Trident topology, define a topology getter method and reference it in your YAML config:
+
+```yaml
+name: "my-trident-topology"
+
+config:
+  topology.workers: 1
+
+topologySource:
+  className: "org.apache.storm.flux.test.TridentTopologySource"
+  # Flux will look for "getTopology" by default; this overrides that.
+  methodName: "getTopologyWithDifferentMethodName"
+```

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/images/topology.png
----------------------------------------------------------------------
diff --git a/docs/images/topology.png b/docs/images/topology.png
index 3b29bfb..a45c25c 100644
Binary files a/docs/images/topology.png and b/docs/images/topology.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/index.md
----------------------------------------------------------------------
diff --git a/docs/index.md b/docs/index.md
index 55bf828..6a83b7a 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,69 +1,65 @@
 ---
+title: Documentation
 layout: documentation
+documentation: true
 ---
-Storm is a distributed realtime computation system. Similar to how Hadoop provides a set of general primitives for doing batch processing, Storm provides a set of general primitives for doing realtime computation. Storm is simple, can be used with any programming language, [is used by many companies](/documentation/Powered-By.html), and is a lot of fun to use!
+### Basics of Storm
 
-### Read these first
-
-* [Rationale](Rationale.html)
-* [Tutorial](Tutorial.html)
-* [Setting up development environment](Setting-up-development-environment.html)
-* [Creating a new Storm project](Creating-a-new-Storm-project.html)
-
-### Documentation
-
-* [Manual](Documentation.html)
 * [Javadoc](javadocs/index.html)
-* [FAQ](FAQ.html)
-* [SECURITY](SECURITY.html)
+* [Concepts](Concepts.html)
+* [Configuration](Configuration.html)
+* [Guaranteeing message processing](Guaranteeing-message-processing.html)
+* [Daemon Fault Tolerance](Daemon-Fault-Tolerance.html)
+* [Command line client](Command-line-client.html)
 * [REST API](STORM-UI-REST-API.html)
+* [Understanding the parallelism of a Storm topology](Understanding-the-parallelism-of-a-Storm-topology.html)
+* [FAQ](FAQ.html)
 
-### Getting help
-
-__NOTE:__ The google groups account storm-user@googlegroups.com is now officially deprecated in favor of the Apache-hosted user/dev mailing lists.
-
-#### Storm Users
-Storm users should send messages and subscribe to [user@storm.apache.org](mailto:user@storm.apache.org).
-
-You can subscribe to this list by sending an email to [user-subscribe@storm.apache.org](mailto:user-subscribe@storm.apache.org). Likewise, you can cancel a subscription by sending an email to [user-unsubscribe@storm.apache.org](mailto:user-unsubscribe@storm.apache.org).
-
-You can view the archives of the mailing list [here](http://mail-archives.apache.org/mod_mbox/storm-user/).
-
-#### Storm Developers
-Storm developers should send messages and subscribe to [dev@storm.apache.org](mailto:dev@storm.apache.org).
-
-You can subscribe to this list by sending an email to [dev-subscribe@storm.apache.org](mailto:dev-subscribe@storm.apache.org). Likewise, you can cancel a subscription by sending an email to [dev-unsubscribe@storm.apache.org](mailto:dev-unsubscribe@storm.apache.org).
-
-You can view the archives of the mailing list [here](http://mail-archives.apache.org/mod_mbox/storm-dev/).
-
-#### Which list should I send/subscribe to?
-If you are using a pre-built binary distribution of Storm, then chances are you should send questions, comments, storm-related announcements, etc. to [user@storm.apache.org](user@storm.apache.org). 
-
-If you are building storm from source, developing new features, or otherwise hacking storm source code, then [dev@storm.apache.org](dev@storm.apache.org) is more appropriate. 
-
-#### What will happen with storm-user@googlegroups.com?
-All existing messages will remain archived there, and can be accessed/searched [here](https://groups.google.com/forum/#!forum/storm-user).
-
-New messages sent to storm-user@googlegroups.com will either be rejected/bounced or replied to with a message to direct the email to the appropriate Apache-hosted group.
-
-#### IRC
-You can also come to the #storm-user room on [freenode](http://freenode.net/). You can usually find a Storm developer there to help you out.
-
-
-
-### Related projects
-
-* [storm-contrib](https://github.com/nathanmarz/storm-contrib)
-* [storm-deploy](http://github.com/nathanmarz/storm-deploy): One click deploys for Storm clusters on AWS
-* [Spout implementations](Spout-implementations.html)
-* [DSLs and multilang adapters](DSLs-and-multilang-adapters.html)
-* [Serializers](Serializers.html)
-
-### Contributing to Storm
-
-* [Contributing to Storm](Contributing-to-Storm.html)
-* [Project ideas](Project-ideas.html)
-
-### Powered by Storm
+### Trident
+
+Trident is an alternative interface to Storm. It provides exactly-once processing, "transactional" datastore persistence, and a set of common stream analytics operations.
+
+* [Trident Tutorial](Trident-tutorial.html)     -- basic concepts and walkthrough
+* [Trident API Overview](Trident-API-Overview.html) -- operations for transforming and orchestrating data
+* [Trident State](Trident-state.html)        -- exactly-once processing and fast, persistent aggregation
+* [Trident spouts](Trident-spouts.html)       -- transactional and non-transactional data intake
+
+### Setup and deploying
+
+* [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html)
+* [Local mode](Local-mode.html)
+* [Troubleshooting](Troubleshooting.html)
+* [Running topologies on a production cluster](Running-topologies-on-a-production-cluster.html)
+* [Building Storm](Maven.html) with Maven
+* [Setting up a Secure Cluster](SECURITY.html)
+
+### Intermediate
+
+* [Serialization](Serialization.html)
+* [Common patterns](Common-patterns.html)
+* [Clojure DSL](Clojure-DSL.html)
+* [Using non-JVM languages with Storm](Using-non-JVM-languages-with-Storm.html)
+* [Distributed RPC](Distributed-RPC.html)
+* [Transactional topologies](Transactional-topologies.html)
+* [Direct groupings](Direct-groupings.html)
+* [Hooks](Hooks.html)
+* [Metrics](Metrics.html)
+* Lifecycle of a trident tuple
+
+### Integration With External Systems, and Other Libraries
+* [Flux Data Driven Topology Builder](flux.html)
+* [Event Hubs Integration](storm-eventhubs.html)
+* [Apache HBase Integration](storm-hbase.html)
+* [Apache HDFS Integration](storm-hdfs.html)
+* [Apache Hive Integration](storm-hive.html)
+* [JDBC Integration](storm-jdbc.html)
+* [Apache Kafka Integration](storm-kafka.html)
+* [Redis Integration](storm-redis.html)
+* [Kestrel and Storm](Kestrel-and-Storm.html)
+
+### Advanced
+
+* [Defining a non-JVM language DSL for Storm](Defining-a-non-jvm-language-dsl-for-storm.html)
+* [Multilang protocol](Multilang-protocol.html) (how to provide support for another language)
+* [Implementation docs](Implementation-docs.html)
 
-[Companies and projects powered by Storm](Powered-By.html)

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/storm-eventhubs.md
----------------------------------------------------------------------
diff --git a/docs/storm-eventhubs.md b/docs/storm-eventhubs.md
new file mode 100644
index 0000000..4af8c43
--- /dev/null
+++ b/docs/storm-eventhubs.md
@@ -0,0 +1,40 @@
+---
+title: Azure Event Hubs Integration
+layout: documentation
+documentation: true
+---
+
+Storm spout and bolt implementations for Microsoft Azure Event Hubs.
+
+### build ###
+	mvn clean package
+
+### run sample topology ###
+To run the sample topology, you need to modify the config.properties file with
+the eventhubs configurations. Here is an example:
+
+	eventhubspout.username = [username: policy name in EventHubs Portal]
+	eventhubspout.password = [password: shared access key in EventHubs Portal]
+	eventhubspout.namespace = [namespace]
+	eventhubspout.entitypath = [entitypath]
+	eventhubspout.partitions.count = [partitioncount]
+
+	# if not provided, will use storm's zookeeper settings
+	# zookeeper.connectionstring=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181
+
+	eventhubspout.checkpoint.interval = 10
+	eventhub.receiver.credits = 1024
+
+Then you can use storm.cmd to submit the sample topology:
+
+	storm jar {jarfile} com.microsoft.eventhubs.samples.EventCount {topologyname} {spoutconffile}
+
+where {jarfile} should be `eventhubs-storm-spout-{version}-jar-with-dependencies.jar`.
+
+### Run EventHubSendClient ###
+We have included a simple EventHubs send client for testing purposes. You can run the client like this:
+
+	java -cp .\target\eventhubs-storm-spout-{version}-jar-with-dependencies.jar com.microsoft.eventhubs.client.EventHubSendClient
+	[username] [password] [entityPath] [partitionId] [messageSize] [messageCount]
+
+If you want to send messages to all partitions, use "-1" as partitionId.
+
+### Windows Azure Eventhubs ###
+	http://azure.microsoft.com/en-us/services/event-hubs/
+

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/storm-hbase.md
----------------------------------------------------------------------
diff --git a/docs/storm-hbase.md b/docs/storm-hbase.md
new file mode 100644
index 0000000..7f4fb62
--- /dev/null
+++ b/docs/storm-hbase.md
@@ -0,0 +1,241 @@
+---
+title: Storm HBase Integration
+layout: documentation
+documentation: true
+---
+
+Storm/Trident integration for [Apache HBase](https://hbase.apache.org)
+
+## Usage
+The main API for interacting with HBase is the `org.apache.storm.hbase.bolt.mapper.HBaseMapper`
+interface:
+
+```java
+public interface HBaseMapper extends Serializable {
+    byte[] rowKey(Tuple tuple);
+
+    ColumnList columns(Tuple tuple);
+}
+```
+
+The `rowKey()` method is straightforward: given a Storm tuple, return a byte array representing the
+row key.
+
+The `columns()` method defines what will be written to an HBase row. The `ColumnList` class allows you
+to add both standard HBase columns as well as HBase counter columns.
+
+To add a standard column, use one of the `addColumn()` methods:
+
+```java
+ColumnList cols = new ColumnList();
+cols.addColumn(this.columnFamily, field.getBytes(), toBytes(tuple.getValueByField(field)));
+```
+
+To add a counter column, use one of the `addCounter()` methods:
+
+```java
+ColumnList cols = new ColumnList();
+cols.addCounter(this.columnFamily, field.getBytes(), toLong(tuple.getValueByField(field)));
+```
+
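+Putting these together, a minimal custom `HBaseMapper` might look like the sketch below. It is
+illustrative only: it assumes tuples with `word` and `count` fields and a hard-coded `cf` column
+family, and the `Tuple` import follows the 0.10.x `backtype.storm` layout for core classes:
+
+```java
+import backtype.storm.tuple.Tuple; // org.apache.storm.tuple.Tuple on Storm 1.x
+import org.apache.storm.hbase.bolt.mapper.HBaseMapper;
+import org.apache.storm.hbase.common.ColumnList;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class WordCountMapper implements HBaseMapper {
+    private static final byte[] CF = Bytes.toBytes("cf"); // assumed column family
+
+    @Override
+    public byte[] rowKey(Tuple tuple) {
+        // Use the "word" field as the row key.
+        return Bytes.toBytes(tuple.getStringByField("word"));
+    }
+
+    @Override
+    public ColumnList columns(Tuple tuple) {
+        ColumnList cols = new ColumnList();
+        // Standard column holding the word itself.
+        cols.addColumn(CF, Bytes.toBytes("word"), Bytes.toBytes(tuple.getStringByField("word")));
+        // Counter column that HBase will increment by this amount.
+        cols.addCounter(CF, Bytes.toBytes("count"), tuple.getLongByField("count"));
+        return cols;
+    }
+}
+```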
+When the remote HBase cluster is security enabled, a Kerberos keytab and the corresponding principal name need to be
+provided for the storm-hbase connector. Specifically, the Config object passed into the topology should contain
+{("storm.keytab.file", "$keytab"), ("storm.kerberos.principal", "$principal")}. Example:
+
+```java
+Config config = new Config();
+...
+config.put("storm.keytab.file", "$keytab");
+config.put("storm.kerberos.principal", "$principal");
+StormSubmitter.submitTopology("$topologyName", config, builder.createTopology());
+```
+
+## Working with Secure HBase using delegation tokens
+If your topology is going to interact with secure HBase, your bolts/states need to be authenticated by HBase.
+The approach described above requires that all potential worker hosts have "storm.keytab.file" on them. If you have
+multiple topologies on a cluster, each running as a different HBase user, you will have to create multiple keytabs and
+distribute them to all workers. Instead of doing that you can use the following approach:
+
+Your administrator can configure nimbus to automatically get delegation tokens on behalf of the topology submitter user.
+Nimbus needs to start with the following configuration:
+
+nimbus.autocredential.plugins.classes : ["org.apache.storm.hbase.security.AutoHBase"]
+nimbus.credential.renewers.classes : ["org.apache.storm.hbase.security.AutoHBase"]
+hbase.keytab.file: "/path/to/keytab/on/nimbus" (This is the keytab of the HBase superuser that can impersonate other users.)
+hbase.kerberos.principal: "superuser@EXAMPLE.com"
+nimbus.credential.renewers.freq.secs : 518400 (6 days; HBase tokens expire every 7 days by default and cannot be renewed.
+If you have a custom setting for hbase.auth.token.max.lifetime in hbase-site.xml, you should ensure this value is
+at least 1 hour less than that.)
+
+Your topology configuration should have:
+topology.auto-credentials : ["org.apache.storm.hbase.security.AutoHBase"]
+
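+In Java, an equivalent sketch of the topology-side setting (assuming `Config.TOPOLOGY_AUTO_CREDENTIALS`,
+which maps to `topology.auto-credentials`) would be:
+
+```java
+import java.util.Arrays;
+
+Config config = new Config();
+// Ask nimbus to push HBase delegation tokens to this topology's workers.
+config.put(Config.TOPOLOGY_AUTO_CREDENTIALS,
+        Arrays.asList("org.apache.storm.hbase.security.AutoHBase"));
+```
+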
+If nimbus did not already have the above configuration, you need to add it and then restart it. Ensure the HBase configuration
+files (core-site.xml, hdfs-site.xml and hbase-site.xml) and the storm-hbase jar with all its dependencies are present in nimbus's classpath.
+Nimbus will use the keytab and principal specified in the config to authenticate with HBase. From then on, for every
+topology submission, nimbus will impersonate the topology submitter user and acquire delegation tokens on their behalf.
+If the topology was started with topology.auto-credentials set to AutoHBase, nimbus will push the
+delegation tokens to all the workers for your topology and the hbase bolt/state will authenticate with these tokens.
+
+As nimbus impersonates the topology submitter user, you need to ensure the user specified in hbase.kerberos.principal
+has permissions to acquire tokens on behalf of other users. To achieve this you need to follow the configuration directions
+listed at this link:
+
+http://hbase.apache.org/book/security.html#security.rest.gateway
+
+You can read about setting up secure HBase here: http://hbase.apache.org/book/security.html
+
+### SimpleHBaseMapper
+`storm-hbase` includes a general purpose `HBaseMapper` implementation called `SimpleHBaseMapper` that can map Storm
+tuples to both regular HBase columns and counter columns.
+
+To use `SimpleHBaseMapper`, you simply tell it which fields to map to which types of columns.
+
+The following code creates a `SimpleHBaseMapper` instance that:
+
+1. Uses the `word` tuple value as a row key.
+2. Adds a standard HBase column for the tuple field `word`.
+3. Adds an HBase counter column for the tuple field `count`.
+4. Writes values to the `cf` column family.
+
+```java
+SimpleHBaseMapper mapper = new SimpleHBaseMapper() 
+        .withRowKeyField("word")
+        .withColumnFields(new Fields("word"))
+        .withCounterFields(new Fields("count"))
+        .withColumnFamily("cf");
+```
+### HBaseBolt
+To use the `HBaseBolt`, construct it with the name of the table to write to, and an `HBaseMapper` implementation:
+
+```java
+HBaseBolt hbase = new HBaseBolt("WordCount", mapper);
+```
+
+The `HBaseBolt` will delegate to the `mapper` instance to figure out how to persist tuple data to HBase.
+
+### HBaseValueMapper
+This class allows you to transform the HBase lookup result into Storm `Values` that will be emitted by the `HBaseLookupBolt`.
+
+```java
+public interface HBaseValueMapper extends Serializable {
+    public List<Values> toTuples(Result result) throws Exception;
+    void declareOutputFields(OutputFieldsDeclarer declarer);
+}
+```
+
+The `toTuples` method takes in an HBase `Result` instance and returns a list of `Values`.
+Each `Values` object returned by this function will be emitted by the `HBaseLookupBolt`.
+
+The `declareOutputFields` method should be used to declare the output fields of the `HBaseLookupBolt`.
+
+There is an example implementation in the `src/test/java` directory.
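+
+As an illustration only (not the bundled test class), a mapper that emits one tuple per cell of the
+result might look like the sketch below; the output fields "columnName" and "columnValue" are assumed
+names, and the core-class imports follow the 0.10.x `backtype.storm` layout:
+
+```java
+import java.util.ArrayList;
+import java.util.List;
+
+import backtype.storm.topology.OutputFieldsDeclarer;
+import backtype.storm.tuple.Fields;
+import backtype.storm.tuple.Values;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.storm.hbase.bolt.mapper.HBaseValueMapper;
+
+public class CellsToValuesMapper implements HBaseValueMapper {
+    @Override
+    public List<Values> toTuples(Result result) throws Exception {
+        List<Values> values = new ArrayList<Values>();
+        // Emit one tuple per cell: (qualifier, value as string).
+        if (result.listCells() != null) {
+            for (Cell cell : result.listCells()) {
+                values.add(new Values(
+                        Bytes.toString(CellUtil.cloneQualifier(cell)),
+                        Bytes.toString(CellUtil.cloneValue(cell))));
+            }
+        }
+        return values;
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("columnName", "columnValue"));
+    }
+}
+```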
+
+### HBaseProjectionCriteria
+This class allows you to specify the projection criteria for your HBase Get operation. This is an optional parameter
+for the lookup bolt; if you do not specify it, all columns will be returned by the `HBaseLookupBolt`.
+
+```java
+public class HBaseProjectionCriteria implements Serializable {
+    public HBaseProjectionCriteria addColumnFamily(String columnFamily);
+    public HBaseProjectionCriteria addColumn(ColumnMetaData column);
+}
+```
+`addColumnFamily` takes in a column family. Setting this parameter means all columns for this family will be included
+in the projection.
+
+`addColumn` takes in a `ColumnMetaData` instance. Setting this parameter means only this column from the column family
+will be part of your projection.
+
+The following code creates a projection criteria that:
+
+1. includes the count column from column family cf.
+2. includes all columns from column family cf2.
+
+```java
+HBaseProjectionCriteria projectionCriteria = new HBaseProjectionCriteria()
+    .addColumn(new HBaseProjectionCriteria.ColumnMetaData("cf", "count"))
+    .addColumnFamily("cf2");
+```
+
+### HBaseLookupBolt
+To use the `HBaseLookupBolt`, construct it with the name of the table to look up, an implementation of `HBaseMapper`,
+and an implementation of `HBaseValueMapper`. You can optionally specify an `HBaseProjectionCriteria`.
+
+The `HBaseLookupBolt` will use the mapper to get the rowKey to look up. It will use the `HBaseProjectionCriteria` to
+figure out which columns to include in the result, and it will leverage the `HBaseValueMapper` to get the
+values to be emitted by the bolt.
+
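+A minimal construction sketch, reusing the `mapper` and `projectionCriteria` instances from the sections
+above together with the illustrative `CellsToValuesMapper` (the variable names are assumptions, not API):
+
+```java
+HBaseValueMapper rowToTupleMapper = new CellsToValuesMapper(); // any HBaseValueMapper implementation
+
+HBaseLookupBolt lookupBolt = new HBaseLookupBolt("WordCount", mapper, rowToTupleMapper)
+        .withProjectionCriteria(projectionCriteria);
+```
+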
+You can look at an example topology LookupWordCount.java under `src/test/java`.
+
+## Example: Persistent Word Count
+A runnable example can be found in the `src/test/java` directory.
+
+### Setup
+The following steps assume you are running HBase locally, or there is an `hbase-site.xml` on the
+classpath pointing to your HBase cluster.
+
+Use the `hbase shell` command to create the schema:
+
+```
+> create 'WordCount', 'cf'
+```
+
+### Execution
+Run the `org.apache.storm.hbase.topology.PersistentWordCount` class (it will run the topology for 10 seconds, then exit).
+
+After (or while) the word count topology is running, run the `org.apache.storm.hbase.topology.WordCountClient` class
+to view the counter values stored in HBase. You should see something like the following:
+
+```
+Word: 'apple', Count: 6867
+Word: 'orange', Count: 6645
+Word: 'pineapple', Count: 6954
+Word: 'banana', Count: 6787
+Word: 'watermelon', Count: 6806
+```
+
+For reference, the sample topology is listed below:
+
+```java
+public class PersistentWordCount {
+    private static final String WORD_SPOUT = "WORD_SPOUT";
+    private static final String COUNT_BOLT = "COUNT_BOLT";
+    private static final String HBASE_BOLT = "HBASE_BOLT";
+
+
+    public static void main(String[] args) throws Exception {
+        Config config = new Config();
+
+        WordSpout spout = new WordSpout();
+        WordCounter bolt = new WordCounter();
+
+        SimpleHBaseMapper mapper = new SimpleHBaseMapper()
+                .withRowKeyField("word")
+                .withColumnFields(new Fields("word"))
+                .withCounterFields(new Fields("count"))
+                .withColumnFamily("cf");
+
+        HBaseBolt hbase = new HBaseBolt("WordCount", mapper);
+
+
+        // wordSpout ==> countBolt ==> HBaseBolt
+        TopologyBuilder builder = new TopologyBuilder();
+
+        builder.setSpout(WORD_SPOUT, spout, 1);
+        builder.setBolt(COUNT_BOLT, bolt, 1).shuffleGrouping(WORD_SPOUT);
+        builder.setBolt(HBASE_BOLT, hbase, 1).fieldsGrouping(COUNT_BOLT, new Fields("word"));
+
+
+        if (args.length == 0) {
+            LocalCluster cluster = new LocalCluster();
+            cluster.submitTopology("test", config, builder.createTopology());
+            Thread.sleep(10000);
+            cluster.killTopology("test");
+            cluster.shutdown();
+            System.exit(0);
+        } else {
+            config.setNumWorkers(3);
+            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
+        }
+    }
+}
+```
+


[05/16] storm git commit: STORM-1617: Release Specific Documentation 0.9.x

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/assets/css/bootstrap.css.map
----------------------------------------------------------------------
diff --git a/docs/assets/css/bootstrap.css.map b/docs/assets/css/bootstrap.css.map
index a02f6ba..9f60ed2 100644
--- a/docs/assets/css/bootstrap.css.map
+++ b/docs/assets/css/bootstrap.css.map
 T;AKptEC;EACE,gBAAA;EACA,YAAA;ELstEH;AKptEC;EAA0B,gBAAA;ELutE3B;AKttEC;EAAgC,gBAAA;ELytEjC;AkBxrEC;;;EAGE,qBAAA;EACA,2BAAA;EACA,YAAA;ElB0rEH;AkBtrEC;EACE,cAAA;ElBwrEH;AkB5qED;EACE,0BAAA;ElB8qED;AkB7oED;EArBE;;;;IAIE,mBAAA;IlBqqED;EkBnqED;;;;IAIE,mBAAA;IlBqqED;EkBnqED;;;;IAIE,mBAAA;IlBqqED;EACF;AkB5pED;EACE,qBAAA;ElB8pED;AkBtpED;;EAEE,oBAAA;EACA,gBAAA;EACA,kBAAA;EACA,qBAAA;ElBwpED;AkB7pED;;EAQI,kBAAA;EACA,oBAAA;EACA,kBAAA;EACA,qBAAA;EACA,iBAAA;ElBypEH;AkBtpED;;;;EAIE,oBAAA;EACA,oBAAA;EACA,oBAAA;ElBwpED;AkBrpED;;EAEE,kBAAA;ElBupED;AkBnpED;;EAEE,uBAAA;EACA,oBAAA;EACA,kBAAA;EACA,wBAAA;EACA,qBAAA;EACA,iBAAA;ElBqpED;AkBnpED;;EAEE,eAAA;EACA,mBAAA;ElBqpED;AkB5oEC;;;;;;EAGE,qBAAA;ElBipEH;AkB3oEC;;;;EAEE,qBAAA;ElB+oEH;AkBzoEC;;;;EAGI,qBAAA;ElB4oEL;AkBjoED;EAEE,kBAAA;EACA,qBAAA;EAEA,kBAAA;ElBioED;AkB/nEC;;EAEE,iBAAA;EACA,kBAAA;ElBioEH;AkBvnED;;ECnPE,cAAA;EACA,mBAAA;EACA,iBAAA;EACA,kBAAA;EACA,oBAAA;EnB82ED;AmB52EC;;EACE,cAAA;EACA,mBAAA;EnB+2EH;AmB52EC;;;;EAEE,cAAA;EnBg3EH;AkBroED;;ECxPE,cAAA;EA
 CA,oBAAA;EACA,iBAAA;EACA,mBAAA;EACA,oBAAA;EnBi4ED;AmB/3EC;;EACE,cAAA;EACA,mBAAA;EnBk4EH;AmB/3EC;;;;EAEE,cAAA;EnBm4EH;AkB9oED;EAEE,oBAAA;ElB+oED;AkBjpED;EAMI,uBAAA;ElB8oEH;AkB1oED;EACE,oBAAA;EACA,QAAA;EACA,UAAA;EACA,YAAA;EACA,gBAAA;EACA,aAAA;EACA,cAAA;EACA,mBAAA;EACA,oBAAA;EACA,sBAAA;ElB4oED;AkB1oED;EACE,aAAA;EACA,cAAA;EACA,mBAAA;ElB4oED;AkB1oED;EACE,aAAA;EACA,cAAA;EACA,mBAAA;ElB4oED;AkBxoED;;;;;;;;;;ECxVI,gBAAA;EnB4+EH;AkBppED;ECpVI,uBAAA;Ed+CF,0DAAA;EACQ,kDAAA;EL67ET;AmB3+EG;EACE,uBAAA;Ed4CJ,2EAAA;EACQ,mEAAA;ELk8ET;AkB9pED;EC1UI,gBAAA;EACA,uBAAA;EACA,2BAAA;EnB2+EH;AkBnqED;ECpUI,gBAAA;EnB0+EH;AkBnqED;;;;;;;;;;EC3VI,gBAAA;EnB0gFH;AkB/qED;ECvVI,uBAAA;Ed+CF,0DAAA;EACQ,kDAAA;EL29ET;AmBzgFG;EACE,uBAAA;Ed4CJ,2EAAA;EACQ,mEAAA;ELg+ET;AkBzrED;EC7UI,gBAAA;EACA,uBAAA;EACA,2BAAA;EnBygFH;AkB9rED;ECvUI,gBAAA;EnBwgFH;AkB9rED;;;;;;;;;;EC9VI,gBAAA;EnBwiFH;AkB1sED;EC1VI,uBAAA;Ed+CF,0DAAA;EACQ,kDAAA;ELy/ET;AmBviFG;EACE,uBAAA;Ed4CJ,2EAAA;EACQ,mEAAA;EL8/ET;AkBptED;EChVI,gBAAA;EACA,uBAAA;EACA,2BAAA;EnBui
 FH;AkBztED;EC1UI,gBAAA;EnBsiFH;AkBrtEC;EACG,WAAA;ElButEJ;AkBrtEC;EACG,QAAA;ElButEJ;AkB7sED;EACE,gBAAA;EACA,iBAAA;EACA,qBAAA;EACA,gBAAA;ElB+sED;AkB3nED;EAAA;IA/DM,uBAAA;IACA,kBAAA;IACA,wBAAA;IlB8rEH;EkBjoEH;IAxDM,uBAAA;IACA,aAAA;IACA,wBAAA;IlB4rEH;EkBtoEH;IAjDM,uBAAA;IlB0rEH;EkBzoEH;IA7CM,uBAAA;IACA,wBAAA;IlByrEH;EkB7oEH;;;IAvCQ,aAAA;IlByrEL;EkBlpEH;IAjCM,aAAA;IlBsrEH;EkBrpEH;IA7BM,kBAAA;IACA,wBAAA;IlBqrEH;EkBzpEH;;IApBM,uBAAA;IACA,eAAA;IACA,kBAAA;IACA,wBAAA;IlBirEH;EkBhqEH;;IAdQ,iBAAA;IlBkrEL;EkBpqEH;;IATM,oBAAA;IACA,gBAAA;IlBirEH;EkBzqEH;IAHM,QAAA;IlB+qEH;EACF;AkBrqED;;;;EASI,eAAA;EACA,kBAAA;EACA,kBAAA;ElBkqEH;AkB7qED;;EAiBI,kBAAA;ElBgqEH;AkBjrED;EJrdE,oBAAA;EACA,qBAAA;EdyoFD;AkBlpEC;EAAA;IANI,mBAAA;IACA,kBAAA;IACA,kBAAA;IlB4pEH;EACF;AkB5rED;EAwCI,aAAA;ElBupEH;AkB1oEC;EAAA;IAHM,qBAAA;IlBipEL;EACF;AkBxoEC;EAAA;IAHM,kBAAA;IlB+oEL;EACF;AoBrqFD;EACE,uBAAA;EACA,kBAAA;EACA,qBAAA;EACA,oBAAA;EACA,wBAAA;EACA,gCAAA;MAAA,4BAAA;EACA,iBAAA;EACA,wBAAA;EACA,+BAAA;EACA,qBAAA;EC6BA,mBAAA;EACA,iBAAA
 ;EACA,yBAAA;EACA,oBAAA;EhB4KA,2BAAA;EACG,wBAAA;EACC,uBAAA;EACI,mBAAA;ELg+ET;AoBxqFG;;;;;;EdrBF,sBAAA;EAEA,4CAAA;EACA,sBAAA;ENosFD;AoB5qFC;;;EAGE,gBAAA;EACA,uBAAA;EpB8qFH;AoB3qFC;;EAEE,YAAA;EACA,wBAAA;Ef2BF,0DAAA;EACQ,kDAAA;ELmpFT;AoB3qFC;;;EAGE,qBAAA;EACA,sBAAA;EE9CF,eAAA;EAGA,2BAAA;EjB8DA,0BAAA;EACQ,kBAAA;EL6pFT;AoBvqFD;ECrDE,gBAAA;EACA,2BAAA;EACA,uBAAA;ErB+tFD;AqB7tFC;;;;;;EAME,gBAAA;EACA,2BAAA;EACI,uBAAA;ErB+tFP;AqB7tFC;;;EAGE,wBAAA;ErB+tFH;AqB1tFG;;;;;;;;;;;;;;;;;;EAME,2BAAA;EACI,uBAAA;ErBwuFT;AoBhtFD;ECnBI,gBAAA;EACA,2BAAA;ErBsuFH;AoBjtFD;ECxDE,gBAAA;EACA,2BAAA;EACA,uBAAA;ErB4wFD;AqB1wFC;;;;;;EAME,gBAAA;EACA,2BAAA;EACI,uBAAA;ErB4wFP;AqB1wFC;;;EAGE,wBAAA;ErB4wFH;AqBvwFG;;;;;;;;;;;;;;;;;;EAME,2BAAA;EACI,uBAAA;ErBqxFT;AoB1vFD;ECtBI,gBAAA;EACA,2BAAA;ErBmxFH;AoB1vFD;EC5DE,gBAAA;EACA,2BAAA;EACA,uBAAA;ErByzFD;AqBvzFC;;;;;;EAME,gBAAA;EACA,2BAAA;EACI,uBAAA;ErByzFP;AqBvzFC;;;EAGE,wBAAA;ErByzFH;AqBpzFG;;;;;;;;;;;;;;;;;;EAME,2BAAA;EACI,uBAAA;ErBk0FT;AoBnyFD;EC1BI,gBAAA;EACA,2BAAA;ErBg0FH;A
 oBnyFD;EChEE,gBAAA;EACA,2BAAA;EACA,uBAAA;ErBs2FD;AqBp2FC;;;;;;EAME,gBAAA;EACA,2BAAA;EACI,uBAAA;ErBs2FP;AqBp2FC;;;EAGE,wBAAA;ErBs2FH;AqBj2FG;;;;;;;;;;;;;;;;;;EAME,2BAAA;EACI,uBAAA;ErB+2FT;AoB50FD;EC9BI,gBAAA;EACA,2BAAA;ErB62FH;AoB50FD;ECpEE,gBAAA;EACA,2BAAA;EACA,uBAAA;ErBm5FD;AqBj5FC;;;;;;EAME,gBAAA;EACA,2BAAA;EACI,uBAAA;ErBm5FP;AqBj5FC;;;EAGE,wBAAA;ErBm5FH;AqB94FG;;;;;;;;;;;;;;;;;;EAME,2BAAA;EACI,uBAAA;ErB45FT;AoBr3FD;EClCI,gBAAA;EACA,2BAAA;ErB05FH;AoBr3FD;ECxEE,gBAAA;EACA,2BAAA;EACA,uBAAA;ErBg8FD;AqB97FC;;;;;;EAME,gBAAA;EACA,2BAAA;EACI,uBAAA;ErBg8FP;AqB97FC;;;EAGE,wBAAA;ErBg8FH;AqB37FG;;;;;;;;;;;;;;;;;;EAME,2BAAA;EACI,uBAAA;ErBy8FT;AoB95FD;ECtCI,gBAAA;EACA,2BAAA;ErBu8FH;AoBz5FD;EACE,gBAAA;EACA,qBAAA;EACA,kBAAA;EpB25FD;AoBz5FC;;;;;EAKE,+BAAA;Ef7BF,0BAAA;EACQ,kBAAA;ELy7FT;AoB15FC;;;;EAIE,2BAAA;EpB45FH;AoB15FC;;EAEE,gBAAA;EACA,4BAAA;EACA,+BAAA;EpB45FH;AoBx5FG;;;;EAEE,gBAAA;EACA,uBAAA;EpB45FL;AoBn5FD;;EC/EE,oBAAA;EACA,iBAAA;EACA,mBAAA;EACA,oBAAA;ErBs+FD;AoBt5FD;;ECnFE,mBAAA;EACA,iBAAA;
 EACA,kBAAA;EACA,oBAAA;ErB6+FD;AoBz5FD;;ECvFE,kBAAA;EACA,iBAAA;EACA,kBAAA;EACA,oBAAA;ErBo/FD;AoBx5FD;EACE,gBAAA;EACA,aAAA;EpB05FD;AoBt5FD;EACE,iBAAA;EpBw5FD;AoBj5FC;;;EACE,aAAA;EpBq5FH;AuBziGD;EACE,YAAA;ElBoLA,0CAAA;EACK,qCAAA;EACG,kCAAA;ELw3FT;AuB5iGC;EACE,YAAA;EvB8iGH;AuB1iGD;EACE,eAAA;EACA,oBAAA;EvB4iGD;AuB1iGC;EAAY,gBAAA;EAAgB,qBAAA;EvB8iG7B;AuB7iGC;EAAY,oBAAA;EvBgjGb;AuB/iGC;EAAY,0BAAA;EvBkjGb;AuB/iGD;EACE,oBAAA;EACA,WAAA;EACA,kBAAA;ElBsKA,iDAAA;EACQ,4CAAA;KAAA,yCAAA;EAOR,oCAAA;EACQ,+BAAA;KAAA,4BAAA;EAGR,0CAAA;EACQ,qCAAA;KAAA,kCAAA;ELo4FT;AwB9kGD;EACE,uBAAA;EACA,UAAA;EACA,WAAA;EACA,kBAAA;EACA,wBAAA;EACA,uBAAA;EACA,qCAAA;EACA,oCAAA;ExBglGD;AwB5kGD;EACE,oBAAA;ExB8kGD;AwB1kGD;EACE,YAAA;ExB4kGD;AwBxkGD;EACE,oBAAA;EACA,WAAA;EACA,SAAA;EACA,eAAA;EACA,eAAA;EACA,aAAA;EACA,kBAAA;EACA,gBAAA;EACA,iBAAA;EACA,kBAAA;EACA,iBAAA;EACA,kBAAA;EACA,2BAAA;EACA,2BAAA;EACA,uCAAA;EACA,oBAAA;EnBwBA,qDAAA;EACQ,6CAAA;EmBvBR,sCAAA;UAAA,8BAAA;ExB2kGD;AwBtkGC;EACE,UAAA;EACA,YAAA;ExBwkGH;AwBjmGD;ECvBE,aAAA;EAC
 A,eAAA;EACA,kBAAA;EACA,2BAAA;EzB2nGD;AwBvmGD;EAmCI,gBAAA;EACA,mBAAA;EACA,aAAA;EACA,qBAAA;EACA,yBAAA;EACA,gBAAA;EACA,qBAAA;ExBukGH;AwBjkGC;;EAEE,uBAAA;EACA,gBAAA;EACA,2BAAA;ExBmkGH;AwB7jGC;;;EAGE,gBAAA;EACA,uBAAA;EACA,YAAA;EACA,2BAAA;ExB+jGH;AwBtjGC;;;EAGE,gBAAA;ExBwjGH;AwBpjGC;;EAEE,uBAAA;EACA,+BAAA;EACA,wBAAA;EEzGF,qEAAA;EF2GE,qBAAA;ExBsjGH;AwBjjGD;EAGI,gBAAA;ExBijGH;AwBpjGD;EAQI,YAAA;ExB+iGH;AwBviGD;EACE,YAAA;EACA,UAAA;ExByiGD;AwBjiGD;EACE,SAAA;EACA,aAAA;ExBmiGD;AwB/hGD;EACE,gBAAA;EACA,mBAAA;EACA,iBAAA;EACA,yBAAA;EACA,gBAAA;EACA,qBAAA;ExBiiGD;AwB7hGD;EACE,iBAAA;EACA,SAAA;EACA,UAAA;EACA,WAAA;EACA,QAAA;EACA,cAAA;ExB+hGD;AwB3hGD;EACE,UAAA;EACA,YAAA;ExB6hGD;AwBrhGD;;EAII,eAAA;EACA,0BAAA;EACA,aAAA;ExBqhGH;AwB3hGD;;EAUI,WAAA;EACA,cAAA;EACA,oBAAA;ExBqhGH;AwBhgGD;EAXE;IAnEA,YAAA;IACA,UAAA;IxBklGC;EwBhhGD;IAzDA,SAAA;IACA,aAAA;IxB4kGC;EACF;A2B1tGD;;EAEE,oBAAA;EACA,uBAAA;EACA,wBAAA;E3B4tGD;A2BhuGD;;EAMI,oBAAA;EACA,aAAA;E3B8tGH;A2B5tGG;;;;;;;;EAIE,YAAA;E3BkuGL;A2B5tGD;;;;EAKI,mBAAA;E3B6tGH;A2
 BxtGD;EACE,mBAAA;E3B0tGD;A2B3tGD;;EAMI,aAAA;E3BytGH;A2B/tGD;;;EAWI,kBAAA;E3BytGH;A2BrtGD;EACE,kBAAA;E3ButGD;A2BntGD;EACE,gBAAA;E3BqtGD;A2BptGC;ECjDA,+BAAA;EACG,4BAAA;E5BwwGJ;A2BntGD;;EC9CE,8BAAA;EACG,2BAAA;E5BqwGJ;A2BltGD;EACE,aAAA;E3BotGD;A2BltGD;EACE,kBAAA;E3BotGD;A2BltGD;;EClEE,+BAAA;EACG,4BAAA;E5BwxGJ;A2BjtGD;EChEE,8BAAA;EACG,2BAAA;E5BoxGJ;A2BhtGD;;EAEE,YAAA;E3BktGD;A2BjsGD;EACE,mBAAA;EACA,oBAAA;E3BmsGD;A2BjsGD;EACE,oBAAA;EACA,qBAAA;E3BmsGD;A2B9rGD;EtB9CE,0DAAA;EACQ,kDAAA;EL+uGT;A2B9rGC;EtBlDA,0BAAA;EACQ,kBAAA;ELmvGT;A2B3rGD;EACE,gBAAA;E3B6rGD;A2B1rGD;EACE,yBAAA;EACA,wBAAA;E3B4rGD;A2BzrGD;EACE,yBAAA;E3B2rGD;A2BprGD;;;EAII,gBAAA;EACA,aAAA;EACA,aAAA;EACA,iBAAA;E3BqrGH;A2B5rGD;EAcM,aAAA;E3BirGL;A2B/rGD;;;;EAsBI,kBAAA;EACA,gBAAA;E3B+qGH;A2B1qGC;EACE,kBAAA;E3B4qGH;A2B1qGC;EACE,8BAAA;ECnKF,+BAAA;EACC,8BAAA;E5Bg1GF;A2B3qGC;EACE,gCAAA;EC/KF,4BAAA;EACC,2BAAA;E5B61GF;A2B3qGD;EACE,kBAAA;E3B6qGD;A2B3qGD;;EC9KE,+BAAA;EACC,8BAAA;E5B61GF;A2B1qGD;EC5LE,4BAAA;EACC,2BAAA;E5By2GF;A2BtqGD;EACE,gBAA
 A;EACA,aAAA;EACA,qBAAA;EACA,2BAAA;E3BwqGD;A2B5qGD;;EAOI,aAAA;EACA,qBAAA;EACA,WAAA;E3ByqGH;A2BlrGD;EAYI,aAAA;E3ByqGH;A2BrrGD;EAgBI,YAAA;E3BwqGH;A2BvpGD;;;;EAKM,oBAAA;EACA,wBAAA;EACA,sBAAA;E3BwpGL;A6Bj4GD;EACE,oBAAA;EACA,gBAAA;EACA,2BAAA;E7Bm4GD;A6Bh4GC;EACE,aAAA;EACA,iBAAA;EACA,kBAAA;E7Bk4GH;A6B34GD;EAeI,oBAAA;EACA,YAAA;EAKA,aAAA;EAEA,aAAA;EACA,kBAAA;E7B03GH;A6Bj3GD;;;EV8BE,cAAA;EACA,oBAAA;EACA,iBAAA;EACA,mBAAA;EACA,oBAAA;EnBw1GD;AmBt1GC;;;EACE,cAAA;EACA,mBAAA;EnB01GH;AmBv1GC;;;;;;EAEE,cAAA;EnB61GH;A6Bn4GD;;;EVyBE,cAAA;EACA,mBAAA;EACA,iBAAA;EACA,kBAAA;EACA,oBAAA;EnB+2GD;AmB72GC;;;EACE,cAAA;EACA,mBAAA;EnBi3GH;AmB92GC;;;;;;EAEE,cAAA;EnBo3GH;A6Bj5GD;;;EAGE,qBAAA;E7Bm5GD;A6Bj5GC;;;EACE,kBAAA;E7Bq5GH;A6Bj5GD;;EAEE,WAAA;EACA,qBAAA;EACA,wBAAA;E7Bm5GD;A6B94GD;EACE,mBAAA;EACA,iBAAA;EACA,qBAAA;EACA,gBAAA;EACA,gBAAA;EACA,oBAAA;EACA,2BAAA;EACA,2BAAA;EACA,oBAAA;E7Bg5GD;A6B74GC;EACE,mBAAA;EACA,iBAAA;EACA,oBAAA;E7B+4GH;A6B74GC;EACE,oBAAA;EACA,iBAAA;EACA,oBAAA;E7B+4GH;A6Bn6GD;;EA0BI,eAAA;E7B64GH;A6B
 x4GD;;;;;;;EDhGE,+BAAA;EACG,4BAAA;E5Bi/GJ;A6Bz4GD;EACE,iBAAA;E7B24GD;A6Bz4GD;;;;;;;EDpGE,8BAAA;EACG,2BAAA;E5Bs/GJ;A6B14GD;EACE,gBAAA;E7B44GD;A6Bv4GD;EACE,oBAAA;EAGA,cAAA;EACA,qBAAA;E7Bu4GD;A6B54GD;EAUI,oBAAA;E7Bq4GH;A6B/4GD;EAYM,mBAAA;E7Bs4GL;A6Bn4GG;;;EAGE,YAAA;E7Bq4GL;A6Bh4GC;;EAGI,oBAAA;E7Bi4GL;A6B93GC;;EAGI,mBAAA;E7B+3GL;A8BzhHD;EACE,kBAAA;EACA,iBAAA;EACA,kBAAA;E9B2hHD;A8B9hHD;EAOI,oBAAA;EACA,gBAAA;E9B0hHH;A8BliHD;EAWM,oBAAA;EACA,gBAAA;EACA,oBAAA;E9B0hHL;A8BzhHK;;EAEE,uBAAA;EACA,2BAAA;E9B2hHP;A8BthHG;EACE,gBAAA;E9BwhHL;A8BthHK;;EAEE,gBAAA;EACA,uBAAA;EACA,+BAAA;EACA,qBAAA;E9BwhHP;A8BjhHG;;;EAGE,2BAAA;EACA,uBAAA;E9BmhHL;A8B5jHD;ELHE,aAAA;EACA,eAAA;EACA,kBAAA;EACA,2BAAA;EzBkkHD;A8BlkHD;EA0DI,iBAAA;E9B2gHH;A8BlgHD;EACE,kCAAA;E9BogHD;A8BrgHD;EAGI,aAAA;EAEA,qBAAA;E9BogHH;A8BzgHD;EASM,mBAAA;EACA,yBAAA;EACA,+BAAA;EACA,4BAAA;E9BmgHL;A8BlgHK;EACE,uCAAA;E9BogHP;A8B9/GK;;;EAGE,gBAAA;EACA,2BAAA;EACA,2BAAA;EACA,kCAAA;EACA,iBAAA;E9BggHP;A8B3/GC;EAqDA,aAAA;EA8BA,kBAAA;E9B46GD;A8B//GC;EAwDE,aAAA
 ;E9B08GH;A8BlgHC;EA0DI,oBAAA;EACA,oBAAA;E9B28GL;A8BtgHC;EAgEE,WAAA;EACA,YAAA;E9By8GH;A8B77GD;EAAA;IAPM,qBAAA;IACA,WAAA;I9Bw8GH;E8Bl8GH;IAJQ,kBAAA;I9By8GL;EACF;A8BnhHC;EAuFE,iBAAA;EACA,oBAAA;E9B+7GH;A8BvhHC;;;EA8FE,2BAAA;E9B87GH;A8Bh7GD;EAAA;IATM,kCAAA;IACA,4BAAA;I9B67GH;E8Br7GH;;;IAHM,8BAAA;I9B67GH;EACF;A8B9hHD;EAEI,aAAA;E9B+hHH;A8BjiHD;EAMM,oBAAA;E9B8hHL;A8BpiHD;EASM,kBAAA;E9B8hHL;A8BzhHK;;;EAGE,gBAAA;EACA,2BAAA;E9B2hHP;A8BnhHD;EAEI,aAAA;E9BohHH;A8BthHD;EAIM,iBAAA;EACA,gBAAA;E9BqhHL;A8BzgHD;EACE,aAAA;E9B2gHD;A8B5gHD;EAII,aAAA;E9B2gHH;A8B/gHD;EAMM,oBAAA;EACA,oBAAA;E9B4gHL;A8BnhHD;EAYI,WAAA;EACA,YAAA;E9B0gHH;A8B9/GD;EAAA;IAPM,qBAAA;IACA,WAAA;I9BygHH;E8BngHH;IAJQ,kBAAA;I9B0gHL;EACF;A8BlgHD;EACE,kBAAA;E9BogHD;A8BrgHD;EAKI,iBAAA;EACA,oBAAA;E9BmgHH;A8BzgHD;;;EAYI,2BAAA;E9BkgHH;A8Bp/GD;EAAA;IATM,kCAAA;IACA,4BAAA;I9BigHH;E8Bz/GH;;;IAHM,8BAAA;I9BigHH;EACF;A8Bx/GD;EAEI,eAAA;EACA,oBAAA;E9By/GH;A8B5/GD;EAMI,gBAAA;EACA,qBAAA;E9By/GH;A8Bh/GD;EAEE,kBAAA;EF7OA,4BAAA;EACC,2BAAA;E5B+tHF;A+BztHD;EACE
 ,oBAAA;EACA,kBAAA;EACA,qBAAA;EACA,+BAAA;E/B2tHD;A+BntHD;EAAA;IAFI,oBAAA;I/BytHD;EACF;A+B1sHD;EAAA;IAFI,aAAA;I/BgtHD;EACF;A+BlsHD;EACE,qBAAA;EACA,qBAAA;EACA,oBAAA;EACA,mCAAA;EACA,4DAAA;UAAA,oDAAA;EAEA,mCAAA;E/BmsHD;A+BjsHC;EACE,kBAAA;E/BmsHH;A+BtqHD;EAAA;IAzBI,aAAA;IACA,eAAA;IACA,0BAAA;YAAA,kBAAA;I/BmsHD;E+BjsHC;IACE,2BAAA;IACA,gCAAA;IACA,yBAAA;IACA,mBAAA;IACA,8BAAA;I/BmsHH;E+BhsHC;IACE,qBAAA;I/BksHH;E+B7rHC;;;IAGE,iBAAA;IACA,kBAAA;I/B+rHH;EACF;A+B3rHD;;EAGI,mBAAA;E/B4rHH;A+BvrHC;EAAA;;IAFI,mBAAA;I/B8rHH;EACF;A+BrrHD;;;;EAII,qBAAA;EACA,oBAAA;E/BurHH;A+BjrHC;EAAA;;;;IAHI,iBAAA;IACA,gBAAA;I/B2rHH;EACF;A+B/qHD;EACE,eAAA;EACA,uBAAA;E/BirHD;A+B5qHD;EAAA;IAFI,kBAAA;I/BkrHD;EACF;A+B9qHD;;EAEE,iBAAA;EACA,UAAA;EACA,SAAA;EACA,eAAA;E/BgrHD;A+B1qHD;EAAA;;IAFI,kBAAA;I/BirHD;EACF;A+B/qHD;EACE,QAAA;EACA,uBAAA;E/BirHD;A+B/qHD;EACE,WAAA;EACA,kBAAA;EACA,uBAAA;E/BirHD;A+B3qHD;EACE,aAAA;EACA,oBAAA;EACA,iBAAA;EACA,mBAAA;EACA,cAAA;E/B6qHD;A+B3qHC;;EAEE,uBAAA;E/B6qHH;A+BtrHD;EAaI,gBAAA;E/B4qHH;A+BnqHD;EALI
 ;;IAEE,oBAAA;I/B2qHH;EACF;A+BjqHD;EACE,oBAAA;EACA,cAAA;EACA,oBAAA;EACA,mBAAA;EC/LA,iBAAA;EACA,oBAAA;EDgMA,+BAAA;EACA,wBAAA;EACA,+BAAA;EACA,oBAAA;E/BoqHD;A+BhqHC;EACE,YAAA;E/BkqHH;A+BhrHD;EAmBI,gBAAA;EACA,aAAA;EACA,aAAA;EACA,oBAAA;E/BgqHH;A+BtrHD;EAyBI,iBAAA;E/BgqHH;A+B1pHD;EAAA;IAFI,eAAA;I/BgqHD;EACF;A+BvpHD;EACE,qBAAA;E/BypHD;A+B1pHD;EAII,mBAAA;EACA,sBAAA;EACA,mBAAA;E/BypHH;A+B9nHC;EAAA;IArBI,kBAAA;IACA,aAAA;IACA,aAAA;IACA,eAAA;IACA,+BAAA;IACA,WAAA;IACA,0BAAA;YAAA,kBAAA;I/BupHH;E+BxoHD;;IAZM,4BAAA;I/BwpHL;E+B5oHD;IATM,mBAAA;I/BwpHL;E+BvpHK;;IAEE,wBAAA;I/BypHP;EACF;A+BvoHD;EAAA;IAXI,aAAA;IACA,WAAA;I/BspHD;E+B5oHH;IAPM,aAAA;I/BspHH;E+B/oHH;IALQ,mBAAA;IACA,sBAAA;I/BupHL;EACF;A+B5oHD;EACE,oBAAA;EACA,qBAAA;EACA,oBAAA;EACA,mCAAA;EACA,sCAAA;E1B/NA,8FAAA;EACQ,sFAAA;E2B/DR,iBAAA;EACA,oBAAA;EhC86HD;AkBz9GD;EAAA;IA/DM,uBAAA;IACA,kBAAA;IACA,wBAAA;IlB4hHH;EkB/9GH;IAxDM,uBAAA;IACA,aAAA;IACA,wBAAA;IlB0hHH;EkBp+GH;IAjDM,uBAAA;IlBwhHH;EkBv+GH;IA7CM,uBAAA;IACA,wBAAA;IlBuhHH;EkB3+GH;;;IAvCQ,aAAA;IlBu
 hHL;EkBh/GH;IAjCM,aAAA;IlBohHH;EkBn/GH;IA7BM,kBAAA;IACA,wBAAA;IlBmhHH;EkBv/GH;;IApBM,uBAAA;IACA,eAAA;IACA,kBAAA;IACA,wBAAA;IlB+gHH;EkB9/GH;;IAdQ,iBAAA;IlBghHL;EkBlgHH;;IATM,oBAAA;IACA,gBAAA;IlB+gHH;EkBvgHH;IAHM,QAAA;IlB6gHH;EACF;A+BrrHC;EAAA;IANI,oBAAA;I/B+rHH;E+B7rHG;IACE,kBAAA;I/B+rHL;EACF;A+B9qHD;EAAA;IARI,aAAA;IACA,WAAA;IACA,gBAAA;IACA,iBAAA;IACA,gBAAA;IACA,mBAAA;I1B1PF,0BAAA;IACQ,kBAAA;ILq7HP;EACF;A+BprHD;EACE,eAAA;EHrUA,4BAAA;EACC,2BAAA;E5B4/HF;A+BprHD;EHzUE,8BAAA;EACC,6BAAA;EAOD,+BAAA;EACC,8BAAA;E5B0/HF;A+BhrHD;EChVE,iBAAA;EACA,oBAAA;EhCmgID;A+BjrHC;ECnVA,kBAAA;EACA,qBAAA;EhCugID;A+BlrHC;ECtVA,kBAAA;EACA,qBAAA;EhC2gID;A+B5qHD;EChWE,kBAAA;EACA,qBAAA;EhC+gID;A+BxqHD;EAAA;IAJI,aAAA;IACA,mBAAA;IACA,oBAAA;I/BgrHD;EACF;A+BvpHD;EAZE;IExWA,wBAAA;IjC+gIC;E+BtqHD;IE5WA,yBAAA;IF8WE,qBAAA;I/BwqHD;E+B1qHD;IAKI,iBAAA;I/BwqHH;EACF;A+B/pHD;EACE,2BAAA;EACA,uBAAA;E/BiqHD;A+BnqHD;EAKI,gBAAA;E/BiqHH;A+BhqHG;;EAEE,gBAAA;EACA,+BAAA;E/BkqHL;A+B3qHD;EAcI,gBAAA;E/BgqHH;A+B9qHD;EAmBM,gBAAA;E/B8pHL;A+B
 5pHK;;EAEE,gBAAA;EACA,+BAAA;E/B8pHP;A+B1pHK;;;EAGE,gBAAA;EACA,2BAAA;E/B4pHP;A+BxpHK;;;EAGE,gBAAA;EACA,+BAAA;E/B0pHP;A+BlsHD;EA8CI,uBAAA;E/BupHH;A+BtpHG;;EAEE,2BAAA;E/BwpHL;A+BzsHD;EAoDM,2BAAA;E/BwpHL;A+B5sHD;;EA0DI,uBAAA;E/BspHH;A+B/oHK;;;EAGE,2BAAA;EACA,gBAAA;E/BipHP;A+BhnHC;EAAA;IAzBQ,gBAAA;I/B6oHP;E+B5oHO;;IAEE,gBAAA;IACA,+BAAA;I/B8oHT;E+B1oHO;;;IAGE,gBAAA;IACA,2BAAA;I/B4oHT;E+BxoHO;;;IAGE,gBAAA;IACA,+BAAA;I/B0oHT;EACF;A+B5uHD;EA8GI,gBAAA;E/BioHH;A+BhoHG;EACE,gBAAA;E/BkoHL;A+BlvHD;EAqHI,gBAAA;E/BgoHH;A+B/nHG;;EAEE,gBAAA;E/BioHL;A+B7nHK;;;;EAEE,gBAAA;E/BioHP;A+BznHD;EACE,2BAAA;EACA,uBAAA;E/B2nHD;A+B7nHD;EAKI,gBAAA;E/B2nHH;A+B1nHG;;EAEE,gBAAA;EACA,+BAAA;E/B4nHL;A+BroHD;EAcI,gBAAA;E/B0nHH;A+BxoHD;EAmBM,gBAAA;E/BwnHL;A+BtnHK;;EAEE,gBAAA;EACA,+BAAA;E/BwnHP;A+BpnHK;;;EAGE,gBAAA;EACA,2BAAA;E/BsnHP;A+BlnHK;;;EAGE,gBAAA;EACA,+BAAA;E/BonHP;A+B5pHD;EA+CI,uBAAA;E/BgnHH;A+B/mHG;;EAEE,2BAAA;E/BinHL;A+BnqHD;EAqDM,2BAAA;E/BinHL;A+BtqHD;;EA2DI,uBAAA;E/B+mHH;A+BzmHK;;;EAGE,2BAAA;EACA,gBAAA;E/B2mHP
 ;A+BpkHC;EAAA;IA/BQ,uBAAA;I/BumHP;E+BxkHD;IA5BQ,2BAAA;I/BumHP;E+B3kHD;IAzBQ,gBAAA;I/BumHP;E+BtmHO;;IAEE,gBAAA;IACA,+BAAA;I/BwmHT;E+BpmHO;;;IAGE,gBAAA;IACA,2BAAA;I/BsmHT;E+BlmHO;;;IAGE,gBAAA;IACA,+BAAA;I/BomHT;EACF;A+B5sHD;EA+GI,gBAAA;E/BgmHH;A+B/lHG;EACE,gBAAA;E/BimHL;A+BltHD;EAsHI,gBAAA;E/B+lHH;A+B9lHG;;EAEE,gBAAA;E/BgmHL;A+B5lHK;;;;EAEE,gBAAA;E/BgmHP;AkC1uID;EACE,mBAAA;EACA,qBAAA;EACA,kBAAA;EACA,2BAAA;EACA,oBAAA;ElC4uID;AkCjvID;EAQI,uBAAA;ElC4uIH;AkCpvID;EAWM,mBAAA;EACA,gBAAA;EACA,gBAAA;ElC4uIL;AkCzvID;EAkBI,gBAAA;ElC0uIH;AmC9vID;EACE,uBAAA;EACA,iBAAA;EACA,gBAAA;EACA,oBAAA;EnCgwID;AmCpwID;EAOI,iBAAA;EnCgwIH;AmCvwID;;EAUM,oBAAA;EACA,aAAA;EACA,mBAAA;EACA,yBAAA;EACA,uBAAA;EACA,gBAAA;EACA,2BAAA;EACA,2BAAA;EACA,mBAAA;EnCiwIL;AmC/vIG;;EAGI,gBAAA;EPXN,gCAAA;EACG,6BAAA;E5B4wIJ;AmC9vIG;;EPvBF,iCAAA;EACG,8BAAA;E5ByxIJ;AmCzvIG;;;;EAEE,gBAAA;EACA,2BAAA;EACA,uBAAA;EnC6vIL;AmCvvIG;;;;;;EAGE,YAAA;EACA,gBAAA;EACA,2BAAA;EACA,uBAAA;EACA,iBAAA;EnC4vIL;AmClzID;;;;;;EAiEM,gBAAA;EACA,2BAAA;EACA,uBAAA;E
 ACA,qBAAA;EnCyvIL;AmChvID;;EC1EM,oBAAA;EACA,iBAAA;EpC8zIL;AoC5zIG;;ERMF,gCAAA;EACG,6BAAA;E5B0zIJ;AoC3zIG;;ERRF,iCAAA;EACG,8BAAA;E5Bu0IJ;AmC1vID;;EC/EM,mBAAA;EACA,iBAAA;EpC60IL;AoC30IG;;ERMF,gCAAA;EACG,6BAAA;E5By0IJ;AoC10IG;;ERRF,iCAAA;EACG,8BAAA;E5Bs1IJ;AqCz1ID;EACE,iBAAA;EACA,gBAAA;EACA,kBAAA;EACA,oBAAA;ErC21ID;AqC/1ID;EAOI,iBAAA;ErC21IH;AqCl2ID;;EAUM,uBAAA;EACA,mBAAA;EACA,2BAAA;EACA,2BAAA;EACA,qBAAA;ErC41IL;AqC12ID;;EAmBM,uBAAA;EACA,2BAAA;ErC21IL;AqC/2ID;;EA2BM,cAAA;ErCw1IL;AqCn3ID;;EAkCM,aAAA;ErCq1IL;AqCv3ID;;;;EA2CM,gBAAA;EACA,2BAAA;EACA,qBAAA;ErCk1IL;AsCh4ID;EACE,iBAAA;EACA,yBAAA;EACA,gBAAA;EACA,mBAAA;EACA,gBAAA;EACA,gBAAA;EACA,oBAAA;EACA,qBAAA;EACA,0BAAA;EACA,sBAAA;EtCk4ID;AsC93IG;;EAEE,gBAAA;EACA,uBAAA;EACA,iBAAA;EtCg4IL;AsC33IC;EACE,eAAA;EtC63IH;AsCz3IC;EACE,oBAAA;EACA,WAAA;EtC23IH;AsCp3ID;ECtCE,2BAAA;EvC65ID;AuC15IG;;EAEE,2BAAA;EvC45IL;AsCv3ID;EC1CE,2BAAA;EvCo6ID;AuCj6IG;;EAEE,2BAAA;EvCm6IL;AsC13ID;EC9CE,2BAAA;EvC26ID;AuCx6IG;;EAEE,2BAAA;EvC06IL;AsC73ID;EClDE,2BAAA;EvCk7ID;
 AuC/6IG;;EAEE,2BAAA;EvCi7IL;AsCh4ID;ECtDE,2BAAA;EvCy7ID;AuCt7IG;;EAEE,2BAAA;EvCw7IL;AsCn4ID;EC1DE,2BAAA;EvCg8ID;AuC77IG;;EAEE,2BAAA;EvC+7IL;AwCj8ID;EACE,uBAAA;EACA,iBAAA;EACA,kBAAA;EACA,iBAAA;EACA,mBAAA;EACA,gBAAA;EACA,gBAAA;EACA,0BAAA;EACA,qBAAA;EACA,oBAAA;EACA,2BAAA;EACA,qBAAA;ExCm8ID;AwCh8IC;EACE,eAAA;ExCk8IH;AwC97IC;EACE,oBAAA;EACA,WAAA;ExCg8IH;AwC97IC;EACE,QAAA;EACA,kBAAA;ExCg8IH;AwC37IG;;EAEE,gBAAA;EACA,uBAAA;EACA,iBAAA;ExC67IL;AwCx7IC;;EAEE,gBAAA;EACA,2BAAA;ExC07IH;AwCx7IC;EACE,cAAA;ExC07IH;AwCx7IC;EACE,mBAAA;ExC07IH;AwCx7IC;EACE,kBAAA;ExC07IH;AyC/+ID;EACE,oBAAA;EACA,qBAAA;EACA,gBAAA;EACA,2BAAA;EzCi/ID;AyCr/ID;;EAQI,gBAAA;EzCi/IH;AyCz/ID;EAWI,qBAAA;EACA,iBAAA;EACA,kBAAA;EzCi/IH;AyC9/ID;EAiBI,2BAAA;EzCg/IH;AyC7+IC;;EAEE,oBAAA;EzC++IH;AyCrgJD;EA0BI,iBAAA;EzC8+IH;AyC79ID;EAAA;IAbI,iBAAA;IzC8+ID;EyC5+IC;;IAEE,oBAAA;IACA,qBAAA;IzC8+IH;EyCt+IH;;IAHM,iBAAA;IzC6+IH;EACF;A0CrhJD;EACE,gBAAA;EACA,cAAA;EACA,qBAAA;EACA,yBAAA;EACA,2BAAA;EACA,2BAAA;EACA,oBAAA;ErCiLA,6CAAA;EACK,wCAAA;EACG,qC
 AAA;ELu2IT;A0CjiJD;;EAaI,mBAAA;EACA,oBAAA;E1CwhJH;A0CphJC;;;EAGE,uBAAA;E1CshJH;A0C3iJD;EA0BI,cAAA;EACA,gBAAA;E1CohJH;A2C7iJD;EACE,eAAA;EACA,qBAAA;EACA,+BAAA;EACA,oBAAA;E3C+iJD;A2CnjJD;EAQI,eAAA;EAEA,gBAAA;E3C6iJH;A2CvjJD;EAcI,mBAAA;E3C4iJH;A2C1jJD;;EAoBI,kBAAA;E3C0iJH;A2C9jJD;EAuBI,iBAAA;E3C0iJH;A2CliJD;;EAEE,qBAAA;E3CoiJD;A2CtiJD;;EAMI,oBAAA;EACA,WAAA;EACA,cAAA;EACA,gBAAA;E3CoiJH;A2C5hJD;ECrDE,2BAAA;EACA,uBAAA;EACA,gBAAA;E5ColJD;A2CjiJD;EChDI,2BAAA;E5ColJH;A2CpiJD;EC7CI,gBAAA;E5ColJH;A2CpiJD;ECxDE,2BAAA;EACA,uBAAA;EACA,gBAAA;E5C+lJD;A2CziJD;ECnDI,2BAAA;E5C+lJH;A2C5iJD;EChDI,gBAAA;E5C+lJH;A2C5iJD;EC3DE,2BAAA;EACA,uBAAA;EACA,gBAAA;E5C0mJD;A2CjjJD;ECtDI,2BAAA;E5C0mJH;A2CpjJD;ECnDI,gBAAA;E5C0mJH;A2CpjJD;EC9DE,2BAAA;EACA,uBAAA;EACA,gBAAA;E5CqnJD;A2CzjJD;ECzDI,2BAAA;E5CqnJH;A2C5jJD;ECtDI,gBAAA;E5CqnJH;A6CvnJD;EACE;IAAQ,6BAAA;I7C0nJP;E6CznJD;IAAQ,0BAAA;I7C4nJP;EACF;A6CznJD;EACE;IAAQ,6BAAA;I7C4nJP;E6C3nJD;IAAQ,0BAAA;I7C8nJP;EACF;A6CjoJD;EACE;IAAQ,6BAAA;I7C4nJP;E6C3nJD;IAAQ,0BAAA;I7C8nJP;EA
 CF;A6CvnJD;EACE,kBAAA;EACA,cAAA;EACA,qBAAA;EACA,2BAAA;EACA,oBAAA;ExCsCA,wDAAA;EACQ,gDAAA;ELolJT;A6CtnJD;EACE,aAAA;EACA,WAAA;EACA,cAAA;EACA,iBAAA;EACA,mBAAA;EACA,gBAAA;EACA,oBAAA;EACA,2BAAA;ExCyBA,wDAAA;EACQ,gDAAA;EAyHR,qCAAA;EACK,gCAAA;EACG,6BAAA;ELw+IT;A6CnnJD;;ECCI,+MAAA;EACA,0MAAA;EACA,uMAAA;EDAF,oCAAA;UAAA,4BAAA;E7CunJD;A6ChnJD;;ExC5CE,4DAAA;EACK,uDAAA;EACG,oDAAA;ELgqJT;A6C7mJD;EErEE,2BAAA;E/CqrJD;A+ClrJC;EDgDE,+MAAA;EACA,0MAAA;EACA,uMAAA;E9CqoJH;A6CjnJD;EEzEE,2BAAA;E/C6rJD;A+C1rJC;EDgDE,+MAAA;EACA,0MAAA;EACA,uMAAA;E9C6oJH;A6CrnJD;EE7EE,2BAAA;E/CqsJD;A+ClsJC;EDgDE,+MAAA;EACA,0MAAA;EACA,uMAAA;E9CqpJH;A6CznJD;EEjFE,2BAAA;E/C6sJD;A+C1sJC;EDgDE,+MAAA;EACA,0MAAA;EACA,uMAAA;E9C6pJH;AgDrtJD;EAEE,kBAAA;EhDstJD;AgDptJC;EACE,eAAA;EhDstJH;AgDltJD;;EAEE,oBAAA;EhDotJD;AgDjtJD;;EAEE,qBAAA;EhDmtJD;AgDhtJD;;;EAGE,qBAAA;EACA,qBAAA;EhDktJD;AgD/sJD;EACE,wBAAA;EhDitJD;AgD9sJD;EACE,wBAAA;EhDgtJD;AgD5sJD;EACE,eAAA;EACA,oBAAA;EhD8sJD;AgDxsJD;EACE,iBAAA;EACA,kBAAA;EhD0sJD;AiD9uJD;EAEE,qBAAA;EACA,iBAAA;
 EjD+uJD;AiDvuJD;EACE,oBAAA;EACA,gBAAA;EACA,oBAAA;EAEA,qBAAA;EACA,2BAAA;EACA,2BAAA;EjDwuJD;AiDruJC;ErB3BA,8BAAA;EACC,6BAAA;E5BmwJF;AiDtuJC;EACE,kBAAA;ErBvBF,iCAAA;EACC,gCAAA;E5BgwJF;AiD/tJD;EACE,gBAAA;EjDiuJD;AiDluJD;EAII,gBAAA;EjDiuJH;AiD7tJC;;EAEE,uBAAA;EACA,gBAAA;EACA,2BAAA;EjD+tJH;AiDztJC;;;EAGE,2BAAA;EACA,gBAAA;EACA,qBAAA;EjD2tJH;AiDhuJC;;;EASI,gBAAA;EjD4tJL;AiDruJC;;;EAYI,gBAAA;EjD8tJL;AiDztJC;;;EAGE,YAAA;EACA,gBAAA;EACA,2BAAA;EACA,uBAAA;EjD2tJH;AiDjuJC;;;;;;;;;EAYI,gBAAA;EjDguJL;AiD5uJC;;;EAeI,gBAAA;EjDkuJL;AkD9zJC;EACE,gBAAA;EACA,2BAAA;ElDg0JH;AkD9zJG;EACE,gBAAA;ElDg0JL;AkDj0JG;EAII,gBAAA;ElDg0JP;AkD7zJK;;EAEE,gBAAA;EACA,2BAAA;ElD+zJP;AkD7zJK;;;EAGE,aAAA;EACA,2BAAA;EACA,uBAAA;ElD+zJP;AkDp1JC;EACE,gBAAA;EACA,2BAAA;ElDs1JH;AkDp1JG;EACE,gBAAA;ElDs1JL;AkDv1JG;EAII,gBAAA;ElDs1JP;AkDn1JK;;EAEE,gBAAA;EACA,2BAAA;ElDq1JP;AkDn1JK;;;EAGE,aAAA;EACA,2BAAA;EACA,uBAAA;ElDq1JP;AkD12JC;EACE,gBAAA;EACA,2BAAA;ElD42JH;AkD12JG;EACE,gBAAA;ElD42JL;AkD72JG;EAII,gBAAA;ElD42JP;AkDz2JK;;EAEE,gBAAA;EACA
 ,2BAAA;ElD22JP;AkDz2JK;;;EAGE,aAAA;EACA,2BAAA;EACA,uBAAA;ElD22JP;AkDh4JC;EACE,gBAAA;EACA,2BAAA;ElDk4JH;AkDh4JG;EACE,gBAAA;ElDk4JL;AkDn4JG;EAII,gBAAA;ElDk4JP;AkD/3JK;;EAEE,gBAAA;EACA,2BAAA;ElDi4JP;AkD/3JK;;;EAGE,aAAA;EACA,2BAAA;EACA,uBAAA;ElDi4JP;AiDryJD;EACE,eAAA;EACA,oBAAA;EjDuyJD;AiDryJD;EACE,kBAAA;EACA,kBAAA;EjDuyJD;AmD35JD;EACE,qBAAA;EACA,2BAAA;EACA,+BAAA;EACA,oBAAA;E9C0DA,mDAAA;EACQ,2CAAA;ELo2JT;AmD15JD;EACE,eAAA;EnD45JD;AmDv5JD;EACE,oBAAA;EACA,sCAAA;EvBpBA,8BAAA;EACC,6BAAA;E5B86JF;AmD75JD;EAMI,gBAAA;EnD05JH;AmDr5JD;EACE,eAAA;EACA,kBAAA;EACA,iBAAA;EACA,gBAAA;EnDu5JD;AmD35JD;EAOI,gBAAA;EnDu5JH;AmDl5JD;EACE,oBAAA;EACA,2BAAA;EACA,+BAAA;EvBpCA,iCAAA;EACC,gCAAA;E5By7JF;AmD54JD;;EAGI,kBAAA;EnD64JH;AmDh5JD;;EAMM,qBAAA;EACA,kBAAA;EnD84JL;AmD14JG;;EAEI,eAAA;EvBnEN,8BAAA;EACC,6BAAA;E5Bg9JF;AmDz4JG;;EAEI,kBAAA;EvBlEN,iCAAA;EACC,gCAAA;E5B88JF;AmDt4JD;EAEI,qBAAA;EnDu4JH;AmDp4JD;EACE,qBAAA;EnDs4JD;AmD93JD;;;EAII,kBAAA;EnD+3JH;AmDn4JD;;;EAOM,oBAAA;EACA,qBAAA;EnDi4JL;AmDz4JD;;EvB/FE,8BAAA;EACC
 ,6BAAA;E5B4+JF;AmD94JD;;;;EAmBQ,6BAAA;EACA,8BAAA;EnDi4JP;AmDr5JD;;;;;;;;EAwBU,6BAAA;EnDu4JT;AmD/5JD;;;;;;;;EA4BU,8BAAA;EnD64JT;AmDz6JD;;EvBvFE,iCAAA;EACC,gCAAA;E5BogKF;AmD96JD;;;;EAyCQ,gCAAA;EACA,iCAAA;EnD24JP;AmDr7JD;;;;;;;;EA8CU,gCAAA;EnDi5JT;AmD/7JD;;;;;;;;EAkDU,iCAAA;EnDu5JT;AmDz8JD;;;;EA2DI,+BAAA;EnDo5JH;AmD/8JD;;EA+DI,eAAA;EnDo5JH;AmDn9JD;;EAmEI,WAAA;EnDo5JH;AmDv9JD;;;;;;;;;;;;EA0EU,gBAAA;EnD25JT;AmDr+JD;;;;;;;;;;;;EA8EU,iBAAA;EnDq6JT;AmDn/JD;;;;;;;;EAuFU,kBAAA;EnDs6JT;AmD7/JD;;;;;;;;EAgGU,kBAAA;EnDu6JT;AmDvgKD;EAsGI,WAAA;EACA,kBAAA;EnDo6JH;AmD15JD;EACE,qBAAA;EnD45JD;AmD75JD;EAKI,kBAAA;EACA,oBAAA;EnD25JH;AmDj6JD;EASM,iBAAA;EnD25JL;AmDp6JD;EAcI,kBAAA;EnDy5JH;AmDv6JD;;EAkBM,+BAAA;EnDy5JL;AmD36JD;EAuBI,eAAA;EnDu5JH;AmD96JD;EAyBM,kCAAA;EnDw5JL;AmDj5JD;EChPE,uBAAA;EpDooKD;AoDloKC;EACE,gBAAA;EACA,2BAAA;EACA,uBAAA;EpDooKH;AoDvoKC;EAMI,2BAAA;EpDooKL;AoD1oKC;EASI,gBAAA;EACA,2BAAA;EpDooKL;AoDjoKC;EAEI,8BAAA;EpDkoKL;AmDh6JD;ECnPE,uBAAA;EpDspKD;AoDppKC;EACE,gBAAA;EACA,2BAAA;EACA,uBAAA;EpD
 spKH;AoDzpKC;EAMI,2BAAA;EpDspKL;AoD5pKC;EASI,gBAAA;EACA,2BAAA;EpDspKL;AoDnpKC;EAEI,8BAAA;EpDopKL;AmD/6JD;ECtPE,uBAAA;EpDwqKD;AoDtqKC;EACE,gBAAA;EACA,2BAAA;EACA,uBAAA;EpDwqKH;AoD3qKC;EAMI,2BAAA;EpDwqKL;AoD9qKC;EASI,gBAAA;EACA,2BAAA;EpDwqKL;AoDrqKC;EAEI,8BAAA;EpDsqKL;AmD97JD;ECzPE,uBAAA;EpD0rKD;AoDxrKC;EACE,gBAAA;EACA,2BAAA;EACA,uBAAA;EpD0rKH;AoD7rKC;EAMI,2BAAA;EpD0rKL;AoDhsKC;EASI,gBAAA;EACA,2BAAA;EpD0rKL;AoDvrKC;EAEI,8BAAA;EpDwrKL;AmD78JD;EC5PE,uBAAA;EpD4sKD;AoD1sKC;EACE,gBAAA;EACA,2BAAA;EACA,uBAAA;EpD4sKH;AoD/sKC;EAMI,2BAAA;EpD4sKL;AoDltKC;EASI,gBAAA;EACA,2BAAA;EpD4sKL;AoDzsKC;EAEI,8BAAA;EpD0sKL;AmD59JD;EC/PE,uBAAA;EpD8tKD;AoD5tKC;EACE,gBAAA;EACA,2BAAA;EACA,uBAAA;EpD8tKH;AoDjuKC;EAMI,2BAAA;EpD8tKL;AoDpuKC;EASI,gBAAA;EACA,2BAAA;EpD8tKL;AoD3tKC;EAEI,8BAAA;EpD4tKL;AqD5uKD;EACE,oBAAA;EACA,gBAAA;EACA,WAAA;EACA,YAAA;EACA,kBAAA;ErD8uKD;AqDnvKD;;;;;EAYI,oBAAA;EACA,QAAA;EACA,SAAA;EACA,WAAA;EACA,cAAA;EACA,aAAA;EACA,WAAA;ErD8uKH;AqD1uKC;EACE,wBAAA;ErD4uKH;AqDxuKC;EACE,qBAAA;ErD0uKH;AsDpwKD;EA
[minified source-map data for docs/assets/css/bootstrap.css.map omitted -- machine-generated diff content with no readable structure]

<TRUNCATED>

[12/16] storm git commit: STORM-1617: Release Specific Documentation 0.9.x

Posted by bo...@apache.org.
STORM-1617: Release Specific Documentation 0.9.x

Conflicts:
	.gitignore
	docs/README.md
	docs/_config.yml
	docs/_includes/footer.html
	docs/_includes/head.html
	docs/_includes/header.html
	docs/_layouts/about.html
	docs/_layouts/default.html
	docs/_layouts/documentation.html
	docs/_layouts/page.html
	docs/_layouts/post.html
	docs/assets/css/bootstrap.css
	docs/assets/css/bootstrap.css.map
	docs/assets/js/bootstrap.min.js
	docs/images/logos/alibaba.jpg
	docs/images/logos/groupon.jpg
	docs/images/logos/parc.png
	docs/images/logos/webmd.jpg
	docs/images/topology.png


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/cf0cdbb0
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/cf0cdbb0
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/cf0cdbb0

Branch: refs/heads/0.10.x-branch
Commit: cf0cdbb03e77249f538cf672065eb4369352780a
Parents: 3a61f87
Author: Robert (Bobby) Evans <ev...@yahoo-inc.com>
Authored: Sat Mar 19 12:15:21 2016 -0500
Committer: Robert (Bobby) Evans <ev...@yahoo-inc.com>
Committed: Sat Mar 19 12:30:14 2016 -0500

----------------------------------------------------------------------
 .gitignore                                      |    1 +
 STORM-UI-REST-API.md                            |  672 ------------
 docs/Acking-framework-implementation.md         |   36 +
 docs/Clojure-DSL.md                             |  264 +++++
 docs/Command-line-client.md                     |  100 ++
 docs/Common-patterns.md                         |   86 ++
 docs/Concepts.md                                |  115 ++
 docs/Configuration.md                           |   29 +
 docs/Contributing-to-Storm.md                   |   31 +
 docs/Creating-a-new-Storm-project.md            |   25 +
 docs/DSLs-and-multilang-adapters.md             |    9 +
 ...Defining-a-non-jvm-language-dsl-for-storm.md |   36 +
 docs/Distributed-RPC.md                         |  197 ++++
 docs/Documentation.md                           |   50 +
 docs/FAQ.md                                     |  121 +++
 docs/Fault-tolerance.md                         |   28 +
 docs/Guaranteeing-message-processing.md         |  179 +++
 docs/Hooks.md                                   |    7 +
 docs/Implementation-docs.md                     |   18 +
 docs/Installing-native-dependencies.md          |   38 +
 docs/Kestrel-and-Storm.md                       |  198 ++++
 docs/Lifecycle-of-a-topology.md                 |   80 ++
 docs/Local-mode.md                              |   27 +
 docs/Maven.md                                   |   56 +
 docs/Message-passing-implementation.md          |   28 +
 docs/Metrics.md                                 |   34 +
 docs/Multilang-protocol.md                      |  221 ++++
 docs/Powered-By.md                              | 1028 ++++++++++++++++++
 docs/Project-ideas.md                           |    6 +
 docs/README.md                                  |   32 +-
 docs/Rationale.md                               |   31 +
 ...unning-topologies-on-a-production-cluster.md |   75 ++
 docs/SECURITY.md                                |   79 ++
 docs/STORM-UI-REST-API.md                       |  678 ++++++++++++
 docs/Serialization-(prior-to-0.6.0).md          |   50 +
 docs/Serialization.md                           |   60 +
 docs/Serializers.md                             |    4 +
 docs/Setting-up-a-Storm-cluster.md              |   83 ++
 docs/Setting-up-a-Storm-project-in-Eclipse.md   |    1 +
 docs/Setting-up-development-environment.md      |   39 +
 docs/Spout-implementations.md                   |    8 +
 ...guage-protocol-(versions-0.7.0-and-below).md |  122 +++
 docs/Structure-of-the-codebase.md               |  140 +++
 docs/Support-for-non-java-languages.md          |    7 +
 docs/Transactional-topologies.md                |  359 ++++++
 docs/Trident-API-Overview.md                    |  311 ++++++
 docs/Trident-spouts.md                          |   42 +
 docs/Trident-state.md                           |  330 ++++++
 docs/Trident-tutorial.md                        |  253 +++++
 docs/Troubleshooting.md                         |  144 +++
 docs/Tutorial.md                                |  310 ++++++
 ...nding-the-parallelism-of-a-Storm-topology.md |  121 +++
 docs/Using-non-JVM-languages-with-Storm.md      |   52 +
 docs/_config.yml                                |    9 +-
 docs/_includes/footer.html                      |   71 +-
 docs/_includes/head.html                        |   21 +-
 docs/_includes/header.html                      |   83 +-
 docs/_layouts/about.html                        |   31 +-
 docs/_layouts/default.html                      |   23 +-
 docs/_layouts/documentation.html                |   11 +-
 docs/_layouts/page.html                         |    9 -
 docs/_layouts/post.html                         |   70 +-
 docs/_plugins/releases.rb                       |   84 ++
 docs/assets/css/bootstrap.css                   |  688 ++++++++++--
 docs/assets/css/bootstrap.css.map               |    2 +-
 docs/assets/css/font-awesome.min.css            |    4 +
 docs/assets/css/main.scss                       |   48 +
 docs/assets/css/owl.carousel.css                |   71 ++
 docs/assets/css/owl.theme.css                   |   79 ++
 docs/assets/css/style.css                       |  503 +++++++++
 docs/assets/js/bootstrap.min.js                 |   10 +-
 docs/assets/js/jquery.min.js                    |    6 +
 docs/assets/js/owl.carousel.min.js              |   47 +
 docs/assets/js/storm.js                         |   67 ++
 docs/css/style.css                              |  553 ++++++++++
 docs/favicon.ico                                |  Bin 0 -> 1150 bytes
 docs/images/ack_tree.png                        |  Bin 0 -> 31463 bytes
 docs/images/batched-stream.png                  |  Bin 0 -> 66336 bytes
 docs/images/drpc-workflow.png                   |  Bin 0 -> 66199 bytes
 docs/images/eclipse-project-properties.png      |  Bin 0 -> 80810 bytes
 docs/images/example-of-a-running-topology.png   |  Bin 0 -> 81430 bytes
 docs/images/footer-bg.png                       |  Bin 0 -> 138 bytes
 docs/images/grouping.png                        |  Bin 0 -> 39701 bytes
 docs/images/header-bg.png                       |  Bin 0 -> 470 bytes
 docs/images/ld-library-path-eclipse-linux.png   |  Bin 0 -> 114597 bytes
 docs/images/loading.gif                         |  Bin 0 -> 12150 bytes
 docs/images/logo.png                            |  Bin 0 -> 26889 bytes
 docs/images/logos/aeris.jpg                     |  Bin 0 -> 7420 bytes
 docs/images/logos/alibaba.jpg                   |  Bin 43703 -> 10317 bytes
 docs/images/logos/bai.jpg                       |  Bin 0 -> 10026 bytes
 docs/images/logos/cerner.jpg                    |  Bin 0 -> 7244 bytes
 docs/images/logos/flipboard.jpg                 |  Bin 0 -> 8318 bytes
 docs/images/logos/fullcontact.jpg               |  Bin 0 -> 6172 bytes
 docs/images/logos/groupon.jpg                   |  Bin 41413 -> 9849 bytes
 docs/images/logos/health-market-science.jpg     |  Bin 0 -> 6509 bytes
 docs/images/logos/images.png                    |  Bin 0 -> 7339 bytes
 docs/images/logos/infochimp.jpg                 |  Bin 0 -> 5290 bytes
 docs/images/logos/klout.jpg                     |  Bin 0 -> 7251 bytes
 docs/images/logos/loggly.jpg                    |  Bin 0 -> 9258 bytes
 docs/images/logos/ooyala.jpg                    |  Bin 0 -> 5675 bytes
 docs/images/logos/parc.png                      |  Bin 7256 -> 13720 bytes
 docs/images/logos/premise.jpg                   |  Bin 0 -> 5391 bytes
 docs/images/logos/qiy.jpg                       |  Bin 0 -> 7441 bytes
 docs/images/logos/quicklizard.jpg               |  Bin 0 -> 7382 bytes
 docs/images/logos/rocketfuel.jpg                |  Bin 0 -> 10007 bytes
 docs/images/logos/rubicon.jpg                   |  Bin 0 -> 7120 bytes
 docs/images/logos/spider.jpg                    |  Bin 0 -> 6265 bytes
 docs/images/logos/spotify.jpg                   |  Bin 0 -> 6445 bytes
 docs/images/logos/taobao.jpg                    |  Bin 0 -> 16814 bytes
 docs/images/logos/the-weather-channel.jpg       |  Bin 0 -> 13295 bytes
 docs/images/logos/twitter.jpg                   |  Bin 0 -> 7139 bytes
 docs/images/logos/verisign.jpg                  |  Bin 0 -> 5982 bytes
 docs/images/logos/webmd.jpg                     |  Bin 6193 -> 8226 bytes
 docs/images/logos/wego.jpg                      |  Bin 0 -> 6836 bytes
 docs/images/logos/yahoo-japan.jpg               |  Bin 0 -> 10350 bytes
 docs/images/logos/yahoo.png                     |  Bin 0 -> 13067 bytes
 docs/images/logos/yelp.jpg                      |  Bin 0 -> 7220 bytes
 ...onships-worker-processes-executors-tasks.png |  Bin 0 -> 54804 bytes
 docs/images/spout-vs-state.png                  |  Bin 0 -> 24804 bytes
 docs/images/storm-cluster.png                   |  Bin 0 -> 34604 bytes
 docs/images/storm-flow.png                      |  Bin 0 -> 59688 bytes
 docs/images/topology-tasks.png                  |  Bin 0 -> 45960 bytes
 docs/images/transactional-batches.png           |  Bin 0 -> 23293 bytes
 docs/images/transactional-commit-flow.png       |  Bin 0 -> 17725 bytes
 docs/images/transactional-design-2.png          |  Bin 0 -> 13537 bytes
 docs/images/transactional-spout-structure.png   |  Bin 0 -> 25067 bytes
 docs/images/trident-to-storm1.png               |  Bin 0 -> 67173 bytes
 docs/images/trident-to-storm2.png               |  Bin 0 -> 68943 bytes
 docs/images/tuple-dag.png                       |  Bin 0 -> 18849 bytes
 docs/images/tuple_tree.png                      |  Bin 0 -> 58186 bytes
 docs/index.md                                   |   69 ++
 131 files changed, 8703 insertions(+), 907 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 02816a1..f7a14a8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,3 +35,4 @@ _site
 dependency-reduced-pom.xml
 derby.log
 metastore_db
+/docs/javadocs

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/STORM-UI-REST-API.md
----------------------------------------------------------------------
diff --git a/STORM-UI-REST-API.md b/STORM-UI-REST-API.md
deleted file mode 100644
index 2836105..0000000
--- a/STORM-UI-REST-API.md
+++ /dev/null
@@ -1,672 +0,0 @@
-# Storm UI REST API
-
-The Storm UI daemon provides a REST API for interacting with a Storm cluster: you can retrieve metrics data and
-configuration information, and perform management operations such as starting or stopping topologies.
-
-
-# Data format
-
-The REST API returns JSON responses and supports JSONP.
-Clients can pass a callback query parameter to have the JSON response wrapped in the given callback function.
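-
-For example, to have the cluster summary wrapped in a callback function named `parseResponse`
-(the wrapped output shown is illustrative):
-
-    $ curl 'http://<ui-host>:8080/api/v1/cluster/summary?callback=parseResponse'
-    parseResponse({"stormVersion":"0.9.2-incubating-SNAPSHOT", ...})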
-
-
-# Using the UI REST API
-
-_Note: It is recommended to ignore undocumented elements in the JSON response, because future versions of Storm_
-_may no longer support those elements._
-
-
-## REST API Base URL
-
-The REST API is part of the UI daemon of Storm (started by `storm ui`) and thus runs on the same host and port as the
-Storm UI (the UI daemon is often run on the same host as the Nimbus daemon).  The port is configured by `ui.port`,
-which is set to `8080` by default (see [defaults.yaml](conf/defaults.yaml)).
-
-The API base URL would thus be:
-
-    http://<ui-host>:<ui-port>/api/v1/...
-
-You can use a tool such as `curl` to talk to the REST API:
-
-    # Request the cluster configuration.
-    # Note: We assume ui.port is configured to the default value of 8080.
-    $ curl http://<ui-host>:8080/api/v1/cluster/configuration
-
-## Impersonating a user in a secure environment
-
-In a secure environment an authenticated user can impersonate another user. To impersonate a user, the caller must
-pass the `doAsUser` parameter or header with its value set to the user the request should be performed as. Please see
-SECURITY.md to learn more about how to set up impersonation ACLs and authorization. The REST API uses the same configs
-and ACLs as Nimbus.
-
-Examples:
-
-```no-highlight
- 1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1425844354?doAsUser=testUSer1
- 2. curl 'http://localhost:8080/api/v1/topology/wordcount-1-1425844354/activate' -X POST -H 'doAsUser:testUSer1'
-```
-
-## GET Operations
-
-### /api/v1/cluster/configuration (GET)
-
-Returns the cluster configuration.
-
-Sample response (does not include all the data fields):
-
-```json
-  {
-    "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
-    "topology.tick.tuple.freq.secs": null,
-    "topology.builtin.metrics.bucket.size.secs": 60,
-    "topology.fall.back.on.java.serialization": true,
-    "topology.max.error.report.per.interval": 5,
-    "zmq.linger.millis": 5000,
-    "topology.skip.missing.kryo.registrations": false,
-    "storm.messaging.netty.client_worker_threads": 1,
-    "ui.childopts": "-Xmx768m",
-    "storm.zookeeper.session.timeout": 20000,
-    "nimbus.reassign": true,
-    "topology.trident.batch.emit.interval.millis": 500,
-    "storm.messaging.netty.flush.check.interval.ms": 10,
-    "nimbus.monitor.freq.secs": 10,
-    "logviewer.childopts": "-Xmx128m",
-    "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
-    "topology.executor.send.buffer.size": 1024,
-    }
-```
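-
-Individual values can be extracted from the response with any JSON tool; for example with `jq`
-(a third-party tool, assumed to be installed):
-
-    $ curl -s http://<ui-host>:8080/api/v1/cluster/configuration | jq '.["ui.childopts"]'
-    "-Xmx768m"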
-
-### /api/v1/cluster/summary (GET)
-
-Returns cluster summary information such as Nimbus uptime and the number of supervisors.
-
-Response fields:
-
-|Field  |Value|Description
-|---	|---	|---
-|stormVersion|String| Storm version|
-|nimbusUptime|String| Shows how long Nimbus has been running|
-|supervisors|Integer| Number of supervisors running|
-|topologies| Integer| Number of topologies running| 
-|slotsTotal| Integer|Total number of available worker slots|
-|slotsUsed| Integer| Number of worker slots used|
-|slotsFree| Integer |Number of worker slots available|
-|executorsTotal| Integer |Total number of executors|
-|tasksTotal| Integer |Total tasks|
-
-Sample response:
-
-```json
-   {
-    "stormVersion": "0.9.2-incubating-SNAPSHOT",
-    "nimbusUptime": "3m 53s",
-    "supervisors": 1,
-    "slotsTotal": 4,
-    "slotsUsed": 3,
-    "slotsFree": 1,
-    "executorsTotal": 28,
-    "tasksTotal": 28
-    }
-```
-
-### /api/v1/supervisor/summary (GET)
-
-Returns summary information for all supervisors.
-
-Response fields:
-
-|Field  |Value|Description|
-|---	|---	|---
-|id| String | Supervisor's id|
-|host| String| Supervisor's host name|
-|uptime| String| Shows how long the supervisor has been running|
-|slotsTotal| Integer| Total number of available worker slots for this supervisor|
-|slotsUsed| Integer| Number of worker slots used on this supervisor|
-
-Sample response:
-
-```json
-{
-    "supervisors": [
-        {
-            "id": "0b879808-2a26-442b-8f7d-23101e0c3696",
-            "host": "10.11.1.7",
-            "uptime": "5m 58s",
-            "slotsTotal": 4,
-            "slotsUsed": 3
-        }
-    ]
-}
-```
-
-### /api/v1/topology/summary (GET)
-
-Returns summary information for all topologies.
-
-Response fields:
-
-|Field  |Value | Description|
-|---	|---	|---
-|id| String| Topology Id|
-|name| String| Topology Name|
-|status| String| Topology Status|
-|uptime| String| Shows how long the topology has been running|
-|tasksTotal| Integer |Total number of tasks for this topology|
-|workersTotal| Integer |Number of workers used for this topology|
-|executorsTotal| Integer |Number of executors used for this topology|
-
-Sample response:
-
-```json
-{
-    "topologies": [
-        {
-            "id": "WordCount3-1-1402960825",
-            "name": "WordCount3",
-            "status": "ACTIVE",
-            "uptime": "6m 5s",
-            "tasksTotal": 28,
-            "workersTotal": 3,
-            "executorsTotal": 28
-        }
-    ]
-}
-```
-
-### /api/v1/topology/:id (GET)
-
-Returns topology information and statistics.  Substitute id with topology id.
-
-Request parameters:
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-|window    |String. Default value :all-time| Window duration for metrics in seconds|
-|sys       |String. Values 1 or 0. Default value 0| Controls whether sys stats are included in the response|
-
-
-Response fields:
-
-|Field  |Value |Description|
-|---	|---	|---
-|id| String| Topology Id|
-|name| String |Topology Name|
-|uptime| String |How long the topology has been running|
-|status| String |Current status of the topology, e.g. "ACTIVE"|
-|tasksTotal| Integer |Total number of tasks for this topology|
-|workersTotal| Integer |Number of workers used for this topology|
-|executorsTotal| Integer |Number of executors used for this topology|
-|msgTimeout| Integer | Number of seconds a tuple has before the spout considers it failed |
-|windowHint| String | window param value in "hh mm ss" format. Default value is "All Time"|
-|topologyStats| Array | Array of all the topology related stats per time window|
-|topologyStats.windowPretty| String |Duration passed in HH:MM:SS format|
-|topologyStats.window| String |User requested time window for metrics|
-|topologyStats.emitted| Long |Number of messages emitted in given window|
-|topologyStats.transferred| Long |Number of messages transferred in given window|
-|topologyStats.completeLatency| String (double value returned in String format) |Total latency for processing the message|
-|topologyStats.acked| Long |Number of messages acked in given window|
-|topologyStats.failed| Long |Number of messages failed in given window|
-|spouts| Array | Array of all the spout components in the topology|
-|spouts.spoutId| String |Spout id|
-|spouts.executors| Integer |Number of executors for the spout|
-|spouts.emitted| Long |Number of messages emitted in given window |
-|spouts.completeLatency| String (double value returned in String format) |Total latency for processing the message|
-|spouts.transferred| Long |Total number of messages transferred in given window|
-|spouts.tasks| Integer |Total number of tasks for the spout|
-|spouts.lastError| String |The last error that happened in the spout|
-|spouts.errorLapsedSecs| Integer | Number of seconds elapsed since the last error happened in the spout|
-|spouts.errorWorkerLogLink| String | Link to the worker log that reported the exception |
-|spouts.acked| Long |Number of messages acked|
-|spouts.failed| Long |Number of messages failed|
-|bolts| Array | Array of bolt components in the topology|
-|bolts.boltId| String |Bolt id|
-|bolts.capacity| String (double value returned in String format) |This value indicates number of messages executed * average execute latency / time window|
-|bolts.processLatency| String (double value returned in String format)  |Average time for the bolt to ack a message after it was received|
-|bolts.executeLatency| String (double value returned in String format) |Average time to run the execute method of the bolt|
-|bolts.executors| Integer |Number of executor tasks in the bolt component|
-|bolts.tasks| Integer |Number of instances of bolt|
-|bolts.acked| Long |Number of tuples acked by the bolt|
-|bolts.failed| Long |Number of tuples failed by the bolt|
-|bolts.lastError| String |The last error that occurred in the bolt|
-|bolts.errorLapsedSecs| Integer |Number of seconds elapsed since the last error happened in the bolt|
-|bolts.errorWorkerLogLink| String | Link to the worker log that reported the exception |
-|bolts.emitted| Long |Number of tuples emitted|
-
-Examples:
-
-```no-highlight
- 1. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825
- 2. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825?sys=1
- 3. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825?window=600
-```
-
-Sample response:
-
-```json
- {
-    "name": "WordCount3",
-    "id": "WordCount3-1-1402960825",
-    "workersTotal": 3,
-    "window": "600",
-    "status": "ACTIVE",
-    "tasksTotal": 28,
-    "executorsTotal": 28,
-    "uptime": "29m 19s",
-    "msgTimeout": 30,
-    "windowHint": "10m 0s",
-    "topologyStats": [
-        {
-            "windowPretty": "10m 0s",
-            "window": "600",
-            "emitted": 397960,
-            "transferred": 213380,
-            "completeLatency": "0.000",
-            "acked": 213460,
-            "failed": 0
-        },
-        {
-            "windowPretty": "3h 0m 0s",
-            "window": "10800",
-            "emitted": 1190260,
-            "transferred": 638260,
-            "completeLatency": "0.000",
-            "acked": 638280,
-            "failed": 0
-        },
-        {
-            "windowPretty": "1d 0h 0m 0s",
-            "window": "86400",
-            "emitted": 1190260,
-            "transferred": 638260,
-            "completeLatency": "0.000",
-            "acked": 638280,
-            "failed": 0
-        },
-        {
-            "windowPretty": "All time",
-            "window": ":all-time",
-            "emitted": 1190260,
-            "transferred": 638260,
-            "completeLatency": "0.000",
-            "acked": 638280,
-            "failed": 0
-        }
-    ],
-    "spouts": [
-        {
-            "executors": 5,
-            "emitted": 28880,
-            "completeLatency": "0.000",
-            "transferred": 28880,
-            "acked": 0,
-            "spoutId": "spout",
-            "tasks": 5,
-            "lastError": "",
-            "errorLapsedSecs": null,
-            "failed": 0
-        }
-    ],
-        "bolts": [
-        {
-            "executors": 12,
-            "emitted": 184580,
-            "transferred": 0,
-            "acked": 184640,
-            "executeLatency": "0.048",
-            "tasks": 12,
-            "executed": 184620,
-            "processLatency": "0.043",
-            "boltId": "count",
-            "lastError": "",
-            "errorLapsedSecs": null,
-            "capacity": "0.003",
-            "failed": 0
-        },
-        {
-            "executors": 8,
-            "emitted": 184500,
-            "transferred": 184500,
-            "acked": 28820,
-            "executeLatency": "0.024",
-            "tasks": 8,
-            "executed": 28780,
-            "processLatency": "2.112",
-            "boltId": "split",
-            "lastError": "",
-            "errorLapsedSecs": null,
-            "capacity": "0.000",
-            "failed": 0
-        }
-    ],
-    "configuration": {
-        "storm.id": "WordCount3-1-1402960825",
-        "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
-        "topology.tick.tuple.freq.secs": null,
-        "topology.builtin.metrics.bucket.size.secs": 60,
-        "topology.fall.back.on.java.serialization": true,
-        "topology.max.error.report.per.interval": 5,
-        "zmq.linger.millis": 5000,
-        "topology.skip.missing.kryo.registrations": false,
-        "storm.messaging.netty.client_worker_threads": 1,
-        "ui.childopts": "-Xmx768m",
-        "storm.zookeeper.session.timeout": 20000,
-        "nimbus.reassign": true,
-        "topology.trident.batch.emit.interval.millis": 500,
-        "storm.messaging.netty.flush.check.interval.ms": 10,
-        "nimbus.monitor.freq.secs": 10,
-        "logviewer.childopts": "-Xmx128m",
-        "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
-        "topology.executor.send.buffer.size": 1024,
-        "storm.local.dir": "storm-local",
-        "storm.messaging.netty.buffer_size": 5242880,
-        "supervisor.worker.start.timeout.secs": 120,
-        "topology.enable.message.timeouts": true,
-        "nimbus.cleanup.inbox.freq.secs": 600,
-        "nimbus.inbox.jar.expiration.secs": 3600,
-        "drpc.worker.threads": 64,
-        "topology.worker.shared.thread.pool.size": 4,
-        "nimbus.host": "hw10843.local",
-        "storm.messaging.netty.min_wait_ms": 100,
-        "storm.zookeeper.port": 2181,
-        "transactional.zookeeper.port": null,
-        "topology.executor.receive.buffer.size": 1024,
-        "transactional.zookeeper.servers": null,
-        "storm.zookeeper.root": "/storm",
-        "storm.zookeeper.retry.intervalceiling.millis": 30000,
-        "supervisor.enable": true,
-        "storm.messaging.netty.server_worker_threads": 1
-    }
-}
-```
-
-
-### /api/v1/topology/:id/component/:component (GET)
-
-Returns detailed metrics and executor information for the given component.
-
-Request parameters:
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-|component |String (required)| Component Id |
-|window    |String. Default value :all-time| window duration for metrics in seconds|
-|sys       |String. Values 1 or 0. Default value 0| Controls whether sys stats are included in the response|
-
-Response fields:
-
-|Field  |Value |Description|
-|---	|---	|---
-|id   | String | Component id|
-|name | String | Topology name|
-|componentType | String | component type: SPOUT or BOLT|
-|windowHint| String | window param value in "hh mm ss" format. Default value is "All Time"|
-|executors| Integer |Number of executor tasks in the component|
-|componentErrors| Array of Errors | List of component errors|
-|componentErrors.time| Long | Timestamp when the exception occurred |
-|componentErrors.errorHost| String | host name for the error|
-|componentErrors.errorPort| String | port for the error|
-|componentErrors.error| String |The error that happened in the component|
-|componentErrors.errorLapsedSecs| Integer | Number of seconds elapsed since the error happened in a component |
-|componentErrors.errorWorkerLogLink| String | Link to the worker log that reported the exception |
-|topologyId| String | Topology id|
-|tasks| Integer |Number of instances of component|
-|window    |String. Default value "All Time" | window duration for metrics in seconds|
-|spoutSummary or boltStats| Array |Array of component stats. **Please note this element tag can be spoutSummary or boltStats depending on the componentType**|
-|spoutSummary.windowPretty| String |Duration passed in HH:MM:SS format|
-|spoutSummary.window| String | window duration for metrics in seconds|
-|spoutSummary.emitted| Long |Number of messages emitted in given window |
-|spoutSummary.completeLatency| String (double value returned in String format) |Total latency for processing the message|
-|spoutSummary.transferred| Long |Total number of messages transferred in given window|
-|spoutSummary.acked| Long |Number of messages acked|
-|spoutSummary.failed| Long |Number of messages failed|
-|boltStats.windowPretty| String |Duration passed in HH:MM:SS format|
-|boltStats.window| String | window duration for metrics in seconds|
-|boltStats.transferred| Long |Total number of messages transferred in given window|
-|boltStats.processLatency| String (double value returned in String format)  |Average time for the bolt to ack a message after it was received|
-|boltStats.acked| Long |Number of messages acked|
-|boltStats.failed| Long |Number of messages failed|
-
-Examples:
-
-```no-highlight
-1. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/component/spout
-2. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/component/spout?sys=1
-3. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/component/spout?window=600
-```
-
-Sample response:
-
-```json
-{
-    "name": "WordCount3",
-    "id": "spout",
-    "componentType": "spout",
-    "windowHint": "10m 0s",
-    "executors": 5,
-    "componentErrors":[{"time": 1406006074000,
-                        "errorHost": "10.11.1.70",
-                        "errorPort": 6701,
-                        "errorWorkerLogLink": "http://10.11.1.7:8000/log?file=worker-6701.log",
-                        "errorLapsedSecs": 16,
-                        "error": "java.lang.RuntimeException: java.lang.StringIndexOutOfBoundsException: Some Error\n\tat backtype.storm.utils.DisruptorQueue.consumeBatchToCursor(DisruptorQueue.java:128)\n\tat backtype.storm.utils.DisruptorQueue.consumeBatchWhenAvailable(DisruptorQueue.java:99)\n\tat backtype.storm.disruptor$consume_batch_when_available.invoke(disruptor.clj:80)\n\tat backtype...more.."
-    }],
-    "topologyId": "WordCount3-1-1402960825",
-    "tasks": 5,
-    "window": "600",
-    "spoutSummary": [
-        {
-            "windowPretty": "10m 0s",
-            "window": "600",
-            "emitted": 28500,
-            "transferred": 28460,
-            "completeLatency": "0.000",
-            "acked": 0,
-            "failed": 0
-        },
-        {
-            "windowPretty": "3h 0m 0s",
-            "window": "10800",
-            "emitted": 127640,
-            "transferred": 127440,
-            "completeLatency": "0.000",
-            "acked": 0,
-            "failed": 0
-        },
-        {
-            "windowPretty": "1d 0h 0m 0s",
-            "window": "86400",
-            "emitted": 127640,
-            "transferred": 127440,
-            "completeLatency": "0.000",
-            "acked": 0,
-            "failed": 0
-        },
-        {
-            "windowPretty": "All time",
-            "window": ":all-time",
-            "emitted": 127640,
-            "transferred": 127440,
-            "completeLatency": "0.000",
-            "acked": 0,
-            "failed": 0
-        }
-    ],
-    "outputStats": [
-        {
-            "stream": "__metrics",
-            "emitted": 40,
-            "transferred": 0,
-            "completeLatency": "0",
-            "acked": 0,
-            "failed": 0
-        },
-        {
-            "stream": "default",
-            "emitted": 28460,
-            "transferred": 28460,
-            "completeLatency": "0",
-            "acked": 0,
-            "failed": 0
-        }
-    ],
-    "executorStats": [
-        {
-            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6701.log",
-            "emitted": 5720,
-            "port": 6701,
-            "completeLatency": "0.000",
-            "transferred": 5720,
-            "host": "10.11.1.7",
-            "acked": 0,
-            "uptime": "43m 4s",
-            "id": "[24-24]",
-            "failed": 0
-        },
-        {
-            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6703.log",
-            "emitted": 5700,
-            "port": 6703,
-            "completeLatency": "0.000",
-            "transferred": 5700,
-            "host": "10.11.1.7",
-            "acked": 0,
-            "uptime": "42m 57s",
-            "id": "[25-25]",
-            "failed": 0
-        },
-        {
-            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6702.log",
-            "emitted": 5700,
-            "port": 6702,
-            "completeLatency": "0.000",
-            "transferred": 5680,
-            "host": "10.11.1.7",
-            "acked": 0,
-            "uptime": "42m 57s",
-            "id": "[26-26]",
-            "failed": 0
-        },
-        {
-            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6701.log",
-            "emitted": 5700,
-            "port": 6701,
-            "completeLatency": "0.000",
-            "transferred": 5680,
-            "host": "10.11.1.7",
-            "acked": 0,
-            "uptime": "43m 4s",
-            "id": "[27-27]",
-            "failed": 0
-        },
-        {
-            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6703.log",
-            "emitted": 5680,
-            "port": 6703,
-            "completeLatency": "0.000",
-            "transferred": 5680,
-            "host": "10.11.1.7",
-            "acked": 0,
-            "uptime": "42m 57s",
-            "id": "[28-28]",
-            "failed": 0
-        }
-    ]
-}
-```
-
-## POST Operations
-
-### /api/v1/topology/:id/activate (POST)
-
-Activates a topology.
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
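-
-Example (assuming a topology id of wordcount-1-1420308665 and the UI daemon at localhost:8080, as in the curl example near the top of this page):
-
-```no-highlight
-curl -X POST http://localhost:8080/api/v1/topology/wordcount-1-1420308665/activate
-```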
-
-Sample Response:
-
-```json
-{"topologyOperation":"activate","topologyId":"wordcount-1-1420308665","status":"success"}
-```
-
-
-### /api/v1/topology/:id/deactivate (POST)
-
-Deactivates a topology.
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-
-Sample Response:
-
-```json
-{"topologyOperation":"deactivate","topologyId":"wordcount-1-1420308665","status":"success"}
-```
-
-
-### /api/v1/topology/:id/rebalance/:wait-time (POST)
-
-Rebalances a topology.
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-|wait-time |String (required)| Wait time before rebalance happens |
-|rebalanceOptions| Json (optional) | topology rebalance options |
-
-
-Sample rebalanceOptions json:
-
-```json
-{"rebalanceOptions" : {"numWorkers" : 2, "executors" : {"spout" :4, "count" : 10}}, "callback" : "foo"}
-```
-
-Examples:
-
-```no-highlight
-curl  -i -b ~/cookiejar.txt -c ~/cookiejar.txt -X POST  
--H "Content-Type: application/json" 
--d  '{"rebalanceOptions": {"numWorkers": 2, "executors": { "spout" : "5", "split": 7, "count": 5 }}, "callback":"foo"}' 
-http://localhost:8080/api/v1/topology/wordcount-1-1420308665/rebalance/0
-```
-
-Sample Response:
-
-```json
-{"topologyOperation":"rebalance","topologyId":"wordcount-1-1420308665","status":"success"}
-```
-
-
-
-### /api/v1/topology/:id/kill/:wait-time (POST)
-
-Kills a topology.
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-|wait-time |String (required)| Wait time before the topology is killed |
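-
-Example (assuming a topology id of wordcount-1-1420308665 and a 30 second wait):
-
-```no-highlight
-curl -X POST http://localhost:8080/api/v1/topology/wordcount-1-1420308665/kill/30
-```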
-
-Caution: Small wait times (0-5 seconds) may increase the probability of triggering the bug reported in
-[STORM-112](https://issues.apache.org/jira/browse/STORM-112), which may result in broken Supervisor
-daemons.
-
-Sample Response:
-
-```json
-{"topologyOperation":"kill","topologyId":"wordcount-1-1420308665","status":"success"}
-```
-
-## API errors
-
-The API returns a 500 HTTP status code in case of any error.
-
-Sample response:
-
-```json
-{
-  "error": "Internal Server Error",
-  "errorMessage": "java.lang.NullPointerException\n\tat clojure.core$name.invoke(core.clj:1505)\n\tat backtype.storm.ui.core$component_page.invoke(core.clj:752)\n\tat backtype.storm.ui.core$fn__7766.invoke(core.clj:782)\n\tat compojure.core$make_route$fn__5755.invoke(core.clj:93)\n\tat compojure.core$if_route$fn__5743.invoke(core.clj:39)\n\tat compojure.core$if_method$fn__5736.invoke(core.clj:24)\n\tat compojure.core$routing$fn__5761.invoke(core.clj:106)\n\tat clojure.core$some.invoke(core.clj:2443)\n\tat compojure.core$routing.doInvoke(core.clj:106)\n\tat clojure.lang.RestFn.applyTo(RestFn.java:139)\n\tat clojure.core$apply.invoke(core.clj:619)\n\tat compojure.core$routes$fn__5765.invoke(core.clj:111)\n\tat ring.middleware.reload$wrap_reload$fn__6880.invoke(reload.clj:14)\n\tat backtype.storm.ui.core$catch_errors$fn__7800.invoke(core.clj:836)\n\tat ring.middleware.keyword_params$wrap_keyword_params$fn__6319.invoke(keyword_params.clj:27)\n\tat ring.middleware.nested_params$wrap_nest
 ed_params$fn__6358.invoke(nested_params.clj:65)\n\tat ring.middleware.params$wrap_params$fn__6291.invoke(params.clj:55)\n\tat ring.middleware.multipart_params$wrap_multipart_params$fn__6386.invoke(multipart_params.clj:103)\n\tat ring.middleware.flash$wrap_flash$fn__6675.invoke(flash.clj:14)\n\tat ring.middleware.session$wrap_session$fn__6664.invoke(session.clj:43)\n\tat ring.middleware.cookies$wrap_cookies$fn__6595.invoke(cookies.clj:160)\n\tat ring.adapter.jetty$proxy_handler$fn__6112.invoke(jetty.clj:16)\n\tat ring.adapter.jetty.proxy$org.mortbay.jetty.handler.AbstractHandler$0.handle(Unknown Source)\n\tat org.mortbay.jetty.handler.HandlerWrapper.handle(HandlerWrapper.java:152)\n\tat org.mortbay.jetty.Server.handle(Server.java:326)\n\tat org.mortbay.jetty.HttpConnection.handleRequest(HttpConnection.java:542)\n\tat org.mortbay.jetty.HttpConnection$RequestHandler.headerComplete(HttpConnection.java:928)\n\tat org.mortbay.jetty.HttpParser.parseNext(HttpParser.java:549)\n\tat org.mortb
 ay.jetty.HttpParser.parseAvailable(HttpParser.java:212)\n\tat org.mortbay.jetty.HttpConnection.handle(HttpConnection.java:404)\n\tat org.mortbay.jetty.bio.SocketConnector$Connection.run(SocketConnector.java:228)\n\tat org.mortbay.thread.QueuedThreadPool$PoolThread.run(QueuedThreadPool.java:582)\n"
-}
-```

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Acking-framework-implementation.md
----------------------------------------------------------------------
diff --git a/docs/Acking-framework-implementation.md b/docs/Acking-framework-implementation.md
new file mode 100644
index 0000000..5ca5d93
--- /dev/null
+++ b/docs/Acking-framework-implementation.md
@@ -0,0 +1,36 @@
+---
+layout: documentation
+---
+[Storm's acker](https://github.com/apache/incubator-storm/blob/46c3ba7/storm-core/src/clj/backtype/storm/daemon/acker.clj#L28) tracks completion of each tupletree with a checksum hash: each time a tuple is sent, its value is XORed into the checksum, and each time a tuple is acked its value is XORed in again. If all tuples have been successfully acked, the checksum will be zero (the odds that the checksum will be zero otherwise are vanishingly small).
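+
+As a rough illustration of why this works (plain Java, not the acker's actual code), XORing each edge id into the checksum once when the tuple is sent and once when it is acked cancels every id out:
+
+```java
+public class XorChecksumDemo {
+    public static void main(String[] args) {
+        java.util.Random rand = new java.util.Random();
+        long checksum = 0;
+        long[] edgeIds = new long[5];
+        for (int i = 0; i < edgeIds.length; i++) {
+            edgeIds[i] = rand.nextLong();
+            checksum ^= edgeIds[i];   // tuple sent: XOR its edge id in
+        }
+        for (long id : edgeIds) {
+            checksum ^= id;           // tuple acked: XOR the same id in again
+        }
+        System.out.println(checksum); // always 0 once every tuple is acked
+    }
+}
+```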
+
+You can read a bit more about the [reliability mechanism](Guaranteeing-message-processing.html#what-is-storms-reliability-api) elsewhere on the wiki -- this explains the internal details.
+
+### acker `execute()`
+
+The acker is actually a regular bolt, with its [execute method](https://github.com/apache/incubator-storm/blob/46c3ba7/storm-core/src/clj/backtype/storm/daemon/acker.clj#L36) defined within `mk-acker-bolt`. When a new tupletree is born, the spout sends the XORed edge-ids of each tuple recipient, which the acker records in its `pending` ledger. Every time an executor acks a tuple, the acker receives a partial checksum that is the XOR of the tuple's own edge-id (clearing it from the ledger) and the edge-id of each downstream tuple the executor emitted (thus entering them into the ledger).
+
+This is accomplished as follows.
+
+On a tick tuple, just advance pending tupletree checksums towards death and return. Otherwise, update or create the record for this tupletree:
+
+* on init: initialize with the given checksum value, and record the spout's id for later.
+* on ack:  xor the partial checksum into the existing checksum value
+* on fail: just mark it as failed
+
+Next, [put the record](https://github.com/apache/incubator-storm/blob/46c3ba7/storm-core/src/clj/backtype/storm/daemon/acker.clj#L50) into the RotatingMap (thus resetting its countdown to expiry) and take action:
+
+* if the total checksum is zero, the tupletree is complete: remove it from the pending collection and notify the spout of success
+* if the tupletree has failed, it is also complete: remove it from the pending collection and notify the spout of failure
+
+Finally, pass on an ack of our own.
+
+### Pending tuples and the `RotatingMap`
+
+The acker stores pending tuples in a [`RotatingMap`](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/utils/RotatingMap.java#L19), a simple device used in several places within Storm to efficiently time-expire a process.
+
+The RotatingMap behaves as a HashMap, and offers the same O(1) access guarantees.
+
+Internally, it holds several HashMaps ('buckets') of its own, each holding a cohort of records that will expire at the same time.  Let's call the longest-lived bucket death row, and the most recent the nursery. Whenever a value is `.put()` to the RotatingMap, it is relocated to the nursery -- and removed from any other bucket it might have been in (effectively resetting its death clock).
+
+Whenever its owner calls `.rotate()`, the RotatingMap advances each cohort one step further towards expiration. (Typically, Storm objects call rotate on every receipt of a system tick stream tuple.) If there are any key-value pairs in the former death row bucket, the RotatingMap invokes a callback (given in the constructor) for each key-value pair, letting its owner take appropriate action (e.g., failing a tuple).
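+
+A minimal sketch of that bucket-rotation idea (a hypothetical simplified class, not Storm's actual implementation, which also handles synchronization and size tracking):
+
+```java
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.Map;
+
+public class SimpleRotatingMap<K, V> {
+    public interface ExpiredCallback<K, V> { void expire(K key, V val); }
+
+    private final LinkedList<HashMap<K, V>> buckets = new LinkedList<HashMap<K, V>>();
+    private final ExpiredCallback<K, V> callback;
+
+    public SimpleRotatingMap(int numBuckets, ExpiredCallback<K, V> callback) {
+        for (int i = 0; i < numBuckets; i++) buckets.add(new HashMap<K, V>());
+        this.callback = callback;
+    }
+
+    // put always lands in the nursery (newest bucket) and removes any
+    // older copy of the key, effectively resetting its death clock
+    public void put(K key, V value) {
+        for (HashMap<K, V> bucket : buckets) bucket.remove(key);
+        buckets.getFirst().put(key, value);
+    }
+
+    // rotate retires the death-row (oldest) bucket, firing the callback
+    // for every entry still in it, and opens a fresh nursery
+    public void rotate() {
+        HashMap<K, V> deathRow = buckets.removeLast();
+        for (Map.Entry<K, V> e : deathRow.entrySet()) {
+            callback.expire(e.getKey(), e.getValue());
+        }
+        buckets.addFirst(new HashMap<K, V>());
+    }
+}
+```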
+

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Clojure-DSL.md
----------------------------------------------------------------------
diff --git a/docs/Clojure-DSL.md b/docs/Clojure-DSL.md
new file mode 100644
index 0000000..b3109fa
--- /dev/null
+++ b/docs/Clojure-DSL.md
@@ -0,0 +1,264 @@
+---
+layout: documentation
+---
+Storm comes with a Clojure DSL for defining spouts, bolts, and topologies. The Clojure DSL has access to everything the Java API exposes, so if you're a Clojure user you can code Storm topologies without touching Java at all. The Clojure DSL is defined in the source in the [backtype.storm.clojure](https://github.com/apache/incubator-storm/blob/0.5.3/src/clj/backtype/storm/clojure.clj) namespace.
+
+This page outlines all the pieces of the Clojure DSL, including:
+
+1. Defining topologies
+2. `defbolt`
+3. `defspout`
+4. Running topologies in local mode or on a cluster
+5. Testing topologies
+
+### Defining topologies
+
+To define a topology, use the `topology` function. `topology` takes in two arguments: a map of "spout specs" and a map of "bolt specs". Each spout and bolt spec wires the code for the component into the topology by specifying things like inputs and parallelism.
+
+Let's take a look at an example topology definition [from the storm-starter project](https://github.com/nathanmarz/storm-starter/blob/master/src/clj/storm/starter/clj/word_count.clj):
+
+```clojure
+(topology
+ {"1" (spout-spec sentence-spout)
+  "2" (spout-spec (sentence-spout-parameterized
+                   ["the cat jumped over the door"
+                    "greetings from a faraway land"])
+                   :p 2)}
+ {"3" (bolt-spec {"1" :shuffle "2" :shuffle}
+                 split-sentence
+                 :p 5)
+  "4" (bolt-spec {"3" ["word"]}
+                 word-count
+                 :p 6)})
+```
+
+The maps of spout and bolt specs are maps from the component id to the corresponding spec. The component ids must be unique across the maps. Just like defining topologies in Java, component ids are used when declaring inputs for bolts in the topology.
+
+#### spout-spec
+
+`spout-spec` takes as arguments the spout implementation (an object that implements [IRichSpout](javadocs/backtype/storm/topology/IRichSpout.html)) and optional keyword arguments. The only option that exists currently is the `:p` option, which specifies the parallelism for the spout. If you omit `:p`, the spout will execute as a single task.
+
+#### bolt-spec
+
+`bolt-spec` takes as arguments the input declaration for the bolt, the bolt implementation (an object that implements [IRichBolt](javadocs/backtype/storm/topology/IRichBolt.html)), and optional keyword arguments.
+
+The input declaration is a map from stream ids to stream groupings. A stream id can have one of two forms:
+
+1. `[==component id== ==stream id==]`: Subscribes to a specific stream on a component
+2. `==component id==`: Subscribes to the default stream on a component
+
+A stream grouping can be one of the following:
+
+1. `:shuffle`: subscribes with a shuffle grouping
+2. Vector of field names, like `["id" "name"]`: subscribes with a fields grouping on the specified fields
+3. `:global`: subscribes with a global grouping
+4. `:all`: subscribes with an all grouping
+5. `:direct`: subscribes with a direct grouping
+
+See [Concepts](Concepts.html) for more info on stream groupings. Here's an example input declaration showcasing the various ways to declare inputs:
+
+```clojure
+{["2" "1"] :shuffle
+ "3" ["field1" "field2"]
+ ["4" "2"] :global}
+```
+
+This input declaration subscribes to three streams total. It subscribes to stream "1" on component "2" with a shuffle grouping, subscribes to the default stream on component "3" with a fields grouping on the fields "field1" and "field2", and subscribes to stream "2" on component "4" with a global grouping.
+
+Like `spout-spec`, the only current supported keyword argument for `bolt-spec` is `:p` which specifies the parallelism for the bolt.
+
+#### shell-bolt-spec
+
+`shell-bolt-spec` is used for defining bolts that are implemented in a non-JVM language. It takes as arguments the input declaration, the command line program to run, the name of the file implementing the bolt, an output specification, and then the same keyword arguments that `bolt-spec` accepts.
+
+Here's an example `shell-bolt-spec`:
+
+```clojure
+(shell-bolt-spec {"1" :shuffle "2" ["id"]}
+                 "python"
+                 "mybolt.py"
+                 ["outfield1" "outfield2"]
+                 :p 25)
+```
+
+The syntax of output declarations is described in more detail in the `defbolt` section below. See [Using non JVM languages with Storm](Using-non-JVM-languages-with-Storm.html) for more details on how multilang works within Storm.
+
+### defbolt
+
+`defbolt` is used for defining bolts in Clojure. Bolts have the constraint that they must be serializable, and this is why you can't just reify `IRichBolt` to implement a bolt (closures aren't serializable). `defbolt` works around this restriction and provides a nicer syntax for defining bolts than just implementing a Java interface.
+
+At its fullest expressiveness, `defbolt` supports parameterized bolts and maintaining state in a closure around the bolt implementation. It also provides shortcuts for defining bolts that don't need this extra functionality. The signature for `defbolt` looks like the following:
+
+(defbolt _name_ _output-declaration_ *_option-map_ & _impl_)
+
+Omitting the option map is equivalent to having an option map of `{:prepare false}`.
+
+#### Simple bolts
+
+Let's start with the simplest form of `defbolt`. Here's an example bolt that splits a tuple containing a sentence into a tuple for each word:
+
+```clojure
+(defbolt split-sentence ["word"] [tuple collector]
+  (let [words (.split (.getString tuple 0) " ")]
+    (doseq [w words]
+      (emit-bolt! collector [w] :anchor tuple))
+    (ack! collector tuple)
+    ))
+```
+
+Since the option map is omitted, this is a non-prepared bolt. The DSL simply expects an implementation for the `execute` method of `IRichBolt`. The implementation takes two parameters, the tuple and the `OutputCollector`, and is followed by the body of the `execute` function. The DSL automatically type-hints the parameters for you so you don't need to worry about reflection if you use Java interop.
+
+This implementation binds `split-sentence` to an actual `IRichBolt` object that you can use in topologies, like so:
+
+```clojure
+(bolt-spec {"1" :shuffle}
+           split-sentence
+           :p 5)
+```
+
+
+#### Parameterized bolts
+
+Many times you want to parameterize your bolts with other arguments. For example, let's say you wanted to have a bolt that appends a suffix to every input string it receives, and you want that suffix to be set at runtime. You do this with `defbolt` by including a `:params` option in the option map, like so:
+
+```clojure
+(defbolt suffix-appender ["word"] {:params [suffix]}
+  [tuple collector]
+  (emit-bolt! collector [(str (.getString tuple 0) suffix)] :anchor tuple)
+  )
+```
+
+Unlike the previous example, `suffix-appender` will be bound to a function that returns an `IRichBolt` rather than be an `IRichBolt` object directly. This is caused by specifying `:params` in its option map. So to use `suffix-appender` in a topology, you would do something like:
+
+```clojure
+(bolt-spec {"1" :shuffle}
+           (suffix-appender "-suffix")
+           :p 10)
+```
+
+#### Prepared bolts
+
+To do more complex bolts, such as ones that do joins and streaming aggregations, the bolt needs to store state. You can do this by creating a prepared bolt which is specified by including `{:prepare true}` in the option map. Consider, for example, this bolt that implements word counting:
+
+```clojure
+(defbolt word-count ["word" "count"] {:prepare true}
+  [conf context collector]
+  (let [counts (atom {})]
+    (bolt
+     (execute [tuple]
+       (let [word (.getString tuple 0)]
+         (swap! counts (partial merge-with +) {word 1})
+         (emit-bolt! collector [word (@counts word)] :anchor tuple)
+         (ack! collector tuple)
+         )))))
+```
+
+The implementation for a prepared bolt is a function that takes as input the topology config, `TopologyContext`, and `OutputCollector`, and returns an implementation of the `IBolt` interface. This design allows you to have a closure around the implementation of `execute` and `cleanup`. 
+
+In this example, the word counts are stored in the closure in a map called `counts`. The `bolt` macro is used to create the `IBolt` implementation. The `bolt` macro is a more concise way to implement the interface than reifying, and it automatically type-hints all of the method parameters. This bolt implements the execute method which updates the count in the map and emits the new word count.
+
+Note that the `execute` method in prepared bolts only takes as input the tuple since the `OutputCollector` is already in the closure of the function (for simple bolts the collector is a second parameter to the `execute` function).
+
+Prepared bolts can be parameterized just like simple bolts.
+
+#### Output declarations
+
+The Clojure DSL has a concise syntax for declaring the outputs of a bolt. The most general way to declare the outputs is as a map from stream id to stream spec. For example:
+
+```clojure
+{"1" ["field1" "field2"]
+ "2" (direct-stream ["f1" "f2" "f3"])
+ "3" ["f1"]}
+```
+
+The stream id is a string, while the stream spec is either a vector of fields or a vector of fields wrapped by `direct-stream`. `direct-stream` marks the stream as a direct stream (See [Concepts](Concepts.html) and [Direct groupings]() for more details on direct streams).
+
+If the bolt only has one output stream, you can define the default stream of the bolt by using a vector instead of a map for the output declaration. For example:
+
+```clojure
+["word" "count"]
+```
+This declares the output of the bolt as the fields ["word" "count"] on the default stream id.
+
+#### Emitting, acking, and failing
+
+Rather than use the Java methods on `OutputCollector` directly, the DSL provides a nicer set of functions for using `OutputCollector`: `emit-bolt!`, `emit-direct-bolt!`, `ack!`, and `fail!`.
+
+1. `emit-bolt!`: takes as parameters the `OutputCollector`, the values to emit (a Clojure sequence), and keyword arguments for `:anchor` and `:stream`. `:anchor` can be a single tuple or a list of tuples, and `:stream` is the id of the stream to emit to. Omitting the keyword arguments emits an unanchored tuple to the default stream.
+2. `emit-direct-bolt!`: takes as parameters the `OutputCollector`, the task id to send the tuple to, the values to emit, and keyword arguments for `:anchor` and `:stream`. This function can only emit to streams declared as direct streams.
+3. `ack!`: takes as parameters the `OutputCollector` and the tuple to ack.
+4. `fail!`: takes as parameters the `OutputCollector` and the tuple to fail.
+
+See [Guaranteeing message processing](Guaranteeing-message-processing.html) for more info on acking and anchoring.
+
+### defspout
+
+`defspout` is used for defining spouts in Clojure. Like bolts, spouts must be serializable so you can't just reify `IRichSpout` to do spout implementations in Clojure. `defspout` works around this restriction and provides a nicer syntax for defining spouts than just implementing a Java interface.
+
+The signature for `defspout` looks like the following:
+
+(defspout _name_ _output-declaration_ *_option-map_ & _impl_)
+
+If you leave out the option map, it defaults to `{:prepare true}`. The output declaration for `defspout` has the same syntax as `defbolt`.
+
+Here's an example `defspout` implementation from [storm-starter](https://github.com/nathanmarz/storm-starter/blob/master/src/clj/storm/starter/clj/word_count.clj):
+
+```clojure
+(defspout sentence-spout ["sentence"]
+  [conf context collector]
+  (let [sentences ["a little brown dog"
+                   "the man petted the dog"
+                   "four score and seven years ago"
+                   "an apple a day keeps the doctor away"]]
+    (spout
+     (nextTuple []
+       (Thread/sleep 100)
+       (emit-spout! collector [(rand-nth sentences)])         
+       )
+     (ack [id]
+        ;; You only need to define this method for reliable spouts
+        ;; (such as one that reads off of a queue like Kestrel)
+        ;; This is an unreliable spout, so it does nothing here
+        ))))
+```
+
+The implementation takes in as input the topology config, `TopologyContext`, and `SpoutOutputCollector`. The implementation returns an `ISpout` object. Here, the `nextTuple` function emits a random sentence from `sentences`. 
+
+This spout isn't reliable, so the `ack` and `fail` methods will never be called. A reliable spout will add a message id when emitting tuples, and then `ack` or `fail` will be called when the tuple is completed or failed respectively. See [Guaranteeing message processing](Guaranteeing-message-processing.html) for more info on how reliability works within Storm.
+
+`emit-spout!` takes in as parameters the `SpoutOutputCollector` and the new tuple to be emitted, and accepts as keyword arguments `:stream` and `:id`. `:stream` specifies the stream to emit to, and `:id` specifies a message id for the tuple (used in the `ack` and `fail` callbacks). Omitting these arguments emits an unreliable tuple (one without a message id) to the default output stream.
+
+There is also an `emit-direct-spout!` function that emits a tuple to a direct stream; it takes the task id to send the tuple to as an additional second parameter.
+
+Spouts can be parameterized just like bolts, in which case the symbol is bound to a function returning `IRichSpout` instead of the `IRichSpout` itself. You can also declare an unprepared spout which only defines the `nextTuple` method. Here is an example of an unprepared spout that emits random sentences parameterized at runtime:
+
+```clojure
+(defspout sentence-spout-parameterized ["word"] {:params [sentences] :prepare false}
+  [collector]
+  (Thread/sleep 500)
+  (emit-spout! collector [(rand-nth sentences)]))
+```
+
+The following example illustrates how to use this spout in a `spout-spec`:
+
+```clojure
+(spout-spec (sentence-spout-parameterized
+                   ["the cat jumped over the door"
+                    "greetings from a faraway land"])
+            :p 2)
+```
+
+### Running topologies in local mode or on a cluster
+
+That's all there is to the Clojure DSL. To submit topologies in remote mode or local mode, use the `StormSubmitter` or `LocalCluster` classes just as you would from Java.
+
+To create topology configs, it's easiest to use the `backtype.storm.config` namespace which defines constants for all of the possible configs. The constants are the same as the static constants in the `Config` class, except with dashes instead of underscores. For example, here's a topology config that sets the number of workers to 15 and configures the topology in debug mode:
+
+```clojure
+{TOPOLOGY-DEBUG true
+ TOPOLOGY-WORKERS 15}
+```
+
+### Testing topologies
+
+[This blog post](http://www.pixelmachine.org/2011/12/17/Testing-Storm-Topologies.html) and its [follow-up](http://www.pixelmachine.org/2011/12/21/Testing-Storm-Topologies-Part-2.html) give a good overview of Storm's powerful built-in facilities for testing topologies in Clojure.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Command-line-client.md
----------------------------------------------------------------------
diff --git a/docs/Command-line-client.md b/docs/Command-line-client.md
new file mode 100644
index 0000000..0e645d7
--- /dev/null
+++ b/docs/Command-line-client.md
@@ -0,0 +1,100 @@
+---
+layout: documentation
+---
+This page describes all the commands that are possible with the "storm" command line client. To learn how to set up your "storm" client to talk to a remote cluster, follow the instructions in [Setting up development environment](Setting-up-a-development-environment.html).
+
+These commands are:
+
+1. jar
+1. kill
+1. activate
+1. deactivate
+1. rebalance
+1. repl
+1. classpath
+1. localconfvalue
+1. remoteconfvalue
+1. nimbus
+1. supervisor
+1. ui
+1. drpc
+
+### jar
+
+Syntax: `storm jar topology-jar-path class ...`
+
+Runs the main method of `class` with the specified arguments. The storm jars and configs in `~/.storm` are put on the classpath. The process is configured so that [StormSubmitter](javadocs/backtype/storm/StormSubmitter.html) will upload the jar at `topology-jar-path` when the topology is submitted.
+
+### kill
+
+Syntax: `storm kill topology-name [-w wait-time-secs]`
+
+Kills the topology with the name `topology-name`. Storm will first deactivate the topology's spouts for the duration of the topology's message timeout to allow all messages currently being processed to finish processing. Storm will then shut down the workers and clean up their state. You can override the length of time Storm waits between deactivation and shutdown with the -w flag.
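+
+For example, to kill a (hypothetical) topology named `wordcount` with a 30 second wait between deactivation and shutdown:
+
+```no-highlight
+storm kill wordcount -w 30
+```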
+
+### activate
+
+Syntax: `storm activate topology-name`
+
+Activates the specified topology's spouts.
+
+### deactivate
+
+Syntax: `storm deactivate topology-name`
+
+Deactivates the specified topology's spouts.
+
+### rebalance
+
+Syntax: `storm rebalance topology-name [-w wait-time-secs]`
+
+Sometimes you may wish to spread out where the workers for a topology are running. For example, let's say you have a 10 node cluster running 4 workers per node, and then let's say you add another 10 nodes to the cluster. You may wish to have Storm spread out the workers for the running topology so that each node runs 2 workers. One way to do this is to kill the topology and resubmit it, but Storm provides a "rebalance" command that offers an easier way.
+
+Rebalance will first deactivate the topology for the duration of the message timeout (overridable with the -w flag) and then redistribute the workers evenly around the cluster. The topology will then return to its previous state of activation (so a deactivated topology will still be deactivated and an activated topology will go back to being activated).
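+
+For example, to rebalance a (hypothetical) topology named `wordcount`, overriding the wait time to 300 seconds:
+
+```no-highlight
+storm rebalance wordcount -w 300
+```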
+
+### repl
+
+Syntax: `storm repl`
+
+Opens up a Clojure REPL with the storm jars and configuration on the classpath. Useful for debugging.
+
+### classpath
+
+Syntax: `storm classpath`
+
+Prints the classpath used by the storm client when running commands.
+
+### localconfvalue
+
+Syntax: `storm localconfvalue conf-name`
+
+Prints out the value for `conf-name` in the local Storm configs. The local Storm configs are the ones in `~/.storm/storm.yaml` merged in with the configs in `defaults.yaml`.
+
+### remoteconfvalue
+
+Syntax: `storm remoteconfvalue conf-name`
+
+Prints out the value for `conf-name` in the cluster's Storm configs. The cluster's Storm configs are the ones in `$STORM-PATH/conf/storm.yaml` merged in with the configs in `defaults.yaml`. This command must be run on a cluster machine.
+
+### nimbus
+
+Syntax: `storm nimbus`
+
+Launches the nimbus daemon. This command should be run under supervision with a tool like [daemontools](http://cr.yp.to/daemontools.html) or [monit](http://mmonit.com/monit/). See [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html) for more information.
+
+### supervisor
+
+Syntax: `storm supervisor`
+
+Launches the supervisor daemon. This command should be run under supervision with a tool like [daemontools](http://cr.yp.to/daemontools.html) or [monit](http://mmonit.com/monit/). See [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html) for more information.
+
+### ui
+
+Syntax: `storm ui`
+
+Launches the UI daemon. The UI provides a web interface for a Storm cluster and shows detailed stats about running topologies. This command should be run under supervision with a tool like [daemontools](http://cr.yp.to/daemontools.html) or [monit](http://mmonit.com/monit/). See [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html) for more information.
+
+### drpc
+
+Syntax: `storm drpc`
+
+Launches a DRPC daemon. This command should be run under supervision with a tool like [daemontools](http://cr.yp.to/daemontools.html) or [monit](http://mmonit.com/monit/). See [Distributed RPC](Distributed-RPC.html) for more information.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Common-patterns.md
----------------------------------------------------------------------
diff --git a/docs/Common-patterns.md b/docs/Common-patterns.md
new file mode 100644
index 0000000..3f8c979
--- /dev/null
+++ b/docs/Common-patterns.md
@@ -0,0 +1,86 @@
+---
+layout: documentation
+---
+
+This page lists a variety of common patterns in Storm topologies.
+
+1. Streaming joins
+2. Batching
+3. BasicBolt
+4. In-memory caching + fields grouping combo
+5. Streaming top N
+6. TimeCacheMap for efficiently keeping a cache of things that have been recently updated
+7. CoordinatedBolt and KeyedFairBolt for Distributed RPC
+
+### Joins
+
+A streaming join combines two or more data streams together based on some common field. Whereas a normal database join has finite input and clear semantics for a join, a streaming join has infinite input and unclear semantics for what a join should be.
+
+The join type you need will vary per application. Some applications join all tuples for two streams over a finite window of time, whereas other applications expect exactly one tuple for each side of the join for each join field. Other applications may do the join completely differently. The common pattern among all these join types is partitioning multiple input streams in the same way. This is easily accomplished in Storm by using a fields grouping on the same fields for many input streams to the joiner bolt. For example:
+
+```java
+builder.setBolt("join", new MyJoiner(), parallelism)
+  .fieldsGrouping("1", new Fields("joinfield1", "joinfield2"))
+  .fieldsGrouping("2", new Fields("joinfield1", "joinfield2"))
+  .fieldsGrouping("3", new Fields("joinfield1", "joinfield2"));
+```
+
+The different streams don't have to have the same field names, of course.
+
+
+### Batching
+
+Oftentimes for efficiency reasons or otherwise, you want to process a group of tuples in batch rather than individually. For example, you may want to batch updates to a database or do a streaming aggregation of some sort.
+
+If you want reliability in your data processing, the right way to do this is to hold on to tuples in an instance variable while the bolt waits to do the batching. Once you do the batch operation, you then ack all the tuples you were holding onto.
+
+If the bolt emits tuples, then you may want to use multi-anchoring to ensure reliability. It all depends on the specific application. See [Guaranteeing message processing](Guaranteeing-message-processing.html) for more details on how reliability works.
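+
+Here is a rough sketch of the batching pattern described above (hypothetical class and batch size; `flushBatch` stands in for the real bulk operation, e.g. a single bulk database write):
+
+```java
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import backtype.storm.task.OutputCollector;
+import backtype.storm.task.TopologyContext;
+import backtype.storm.topology.OutputFieldsDeclarer;
+import backtype.storm.topology.base.BaseRichBolt;
+import backtype.storm.tuple.Tuple;
+
+public class BatchingBolt extends BaseRichBolt {
+    private OutputCollector collector;
+    private List<Tuple> buffer;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+        this.collector = collector;
+        this.buffer = new ArrayList<Tuple>();
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+        // hold on to the tuple in an instance variable instead of acking it
+        buffer.add(tuple);
+        if (buffer.size() >= 100) {
+            flushBatch(buffer);
+            // ack only after the batch operation succeeded, so a crash
+            // before this point causes the spout to replay these tuples
+            for (Tuple t : buffer) {
+                collector.ack(t);
+            }
+            buffer.clear();
+        }
+    }
+
+    private void flushBatch(List<Tuple> batch) {
+        // hypothetical: perform the batch operation in one shot
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        // this sketch emits no tuples
+    }
+}
+```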
+
+### BasicBolt
+Many bolts follow a similar pattern of reading an input tuple, emitting zero or more tuples based on that input tuple, and then acking that input tuple immediately at the end of the execute method. Bolts that match this pattern are things like functions and filters. This is such a common pattern that Storm exposes an interface called [IBasicBolt](javadocs/backtype/storm/topology/IBasicBolt.html) that automates this pattern for you. See [Guaranteeing message processing](Guaranteeing-message-processing.html) for more information.
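+
+For instance, a filter bolt written against this pattern might look like the following sketch (hypothetical class; emitted tuples are automatically anchored to the input tuple, and the input is acked when `execute` returns):
+
+```java
+import backtype.storm.topology.BasicOutputCollector;
+import backtype.storm.topology.OutputFieldsDeclarer;
+import backtype.storm.topology.base.BaseBasicBolt;
+import backtype.storm.tuple.Fields;
+import backtype.storm.tuple.Tuple;
+import backtype.storm.tuple.Values;
+
+public class PositiveFilterBolt extends BaseBasicBolt {
+    @Override
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+        long value = tuple.getLong(0);
+        if (value > 0) {
+            collector.emit(new Values(value)); // anchored automatically
+        }
+        // no explicit ack needed: the BasicBolt machinery acks the input
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("value"));
+    }
+}
+```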
+
+### In-memory caching + fields grouping combo
+
+It's common to keep caches in-memory in Storm bolts. Caching becomes particularly powerful when you combine it with a fields grouping. For example, suppose you have a bolt that expands short URLs (like bit.ly, t.co, etc.) into long URLs. You can increase performance by keeping an LRU cache of short URL to long URL expansions to avoid doing the same HTTP requests over and over. Suppose component "urls" emits short URLs, and component "expand" expands short URLs into long URLs and keeps a cache internally. Consider the difference between the two following snippets of code:
+
+```java
+builder.setBolt("expand", new ExpandUrl(), parallelism)
+  .shuffleGrouping("urls");
+```
+
+```java
+builder.setBolt("expand", new ExpandUrl(), parallelism)
+  .fieldsGrouping("urls", new Fields("url"));
+```
+
+The second approach will have vastly more effective caches, since the same URL will always go to the same task. This avoids having duplication across any of the caches in the tasks and makes it much more likely that a short URL will hit the cache.
+
+### Streaming top N
+
+A common continuous computation done on Storm is a "streaming top N" of some sort. Suppose you have a bolt that emits tuples of the form ["value", "count"] and you want a bolt that emits the top N tuples based on count. The simplest way to do this is to have a bolt that does a global grouping on the stream and maintains a list in memory of the top N items.
+
+This approach obviously doesn't scale to large streams since the entire stream has to go through one task. A better way to do the computation is to do many top N's in parallel across partitions of the stream, and then merge those top N's together to get the global top N. The pattern looks like this:
+
+```java
+builder.setBolt("rank", new RankObjects(), parallellism)
+  .fieldsGrouping("objects", new Fields("value"));
+builder.setBolt("merge", new MergeObjects())
+  .globalGrouping("rank");
+```
+
+This pattern works because of the fields grouping done by the first bolt which gives the partitioning you need for this to be semantically correct. You can see an example of this pattern in storm-starter [here](https://github.com/nathanmarz/storm-starter/blob/master/src/jvm/storm/starter/RollingTopWords.java).
+
+
+### TimeCacheMap for efficiently keeping a cache of things that have been recently updated
+
+You sometimes want to keep a cache in memory of items that have been recently "active" and have items that have been inactive for some time be automatically expired. [TimeCacheMap](javadocs/backtype/storm/utils/TimeCacheMap.html) is an efficient data structure for doing this and provides hooks so you can insert callbacks whenever an item is expired.
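+
+A sketch of how it might be wired up (illustrative only; check the linked javadoc for the exact constructor and callback signatures):
+
+```java
+import backtype.storm.utils.TimeCacheMap;
+
+public class RecentlyActiveCache {
+    public static void main(String[] args) {
+        // keep entries for roughly 60 seconds of inactivity; the callback
+        // fires for each entry as it is expired out of the cache
+        TimeCacheMap<String, Long> cache = new TimeCacheMap<String, Long>(60,
+            new TimeCacheMap.ExpiredCallback<String, Long>() {
+                public void expire(String key, Long val) {
+                    System.out.println("expired: " + key);
+                }
+            });
+        cache.put("user-42", System.currentTimeMillis());
+    }
+}
+```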
+
+### CoordinatedBolt and KeyedFairBolt for Distributed RPC
+
+When building distributed RPC applications on top of Storm, there are two common patterns that are usually needed. These are encapsulated by [CoordinatedBolt](javadocs/backtype/storm/task/CoordinatedBolt.html) and [KeyedFairBolt](javadocs/backtype/storm/task/KeyedFairBolt.html) which are part of the "standard library" that ships with the Storm codebase.
+
+`CoordinatedBolt` wraps the bolt containing your logic and figures out when your bolt has received all the tuples for any given request. It makes heavy use of direct streams to do this.
+
+`KeyedFairBolt` also wraps the bolt containing your logic and makes sure your topology processes multiple DRPC invocations at the same time, instead of doing them serially one at a time.
+
+See [Distributed RPC](Distributed-RPC.html) for more details.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Concepts.md
----------------------------------------------------------------------
diff --git a/docs/Concepts.md b/docs/Concepts.md
new file mode 100644
index 0000000..33779f2
--- /dev/null
+++ b/docs/Concepts.md
@@ -0,0 +1,115 @@
+---
+layout: documentation
+---
+
+This page lists the main concepts of Storm and links to resources where you can find more information. The concepts discussed are:
+
+1. Topologies
+2. Streams
+3. Spouts
+4. Bolts
+5. Stream groupings
+6. Reliability
+7. Tasks
+8. Workers
+
+### Topologies
+
+The logic for a realtime application is packaged into a Storm topology. A Storm topology is analogous to a MapReduce job. One key difference is that a MapReduce job eventually finishes, whereas a topology runs forever (or until you kill it, of course). A topology is a graph of spouts and bolts that are connected with stream groupings. These concepts are described below.
+
+**Resources:**
+
+* [TopologyBuilder](javadocs/backtype/storm/topology/TopologyBuilder.html): use this class to construct topologies in Java
+* [Running topologies on a production cluster](Running-topologies-on-a-production-cluster.html)
+* [Local mode](Local-mode.html): Read this to learn how to develop and test topologies in local mode.
+
+### Streams
+
+The stream is the core abstraction in Storm. A stream is an unbounded sequence of tuples that is processed and created in parallel in a distributed fashion. Streams are defined with a schema that names the fields in the stream's tuples. By default, tuples can contain integers, longs, shorts, bytes, strings, doubles, floats, booleans, and byte arrays. You can also define your own serializers so that custom types can be used natively within tuples.
+
+Every stream is given an id when declared. Since single-stream spouts and bolts are so common, [OutputFieldsDeclarer](javadocs/backtype/storm/topology/OutputFieldsDeclarer.html) has convenience methods for declaring a single stream without specifying an id. In this case, the stream is given the default id of "default".
+
+
+**Resources:**
+
+* [Tuple](javadocs/backtype/storm/tuple/Tuple.html): streams are composed of tuples
+* [OutputFieldsDeclarer](javadocs/backtype/storm/topology/OutputFieldsDeclarer.html): used to declare streams and their schemas
+* [Serialization](Serialization.html): Information about Storm's dynamic typing of tuples and declaring custom serializations
+* [ISerialization](javadocs/backtype/storm/serialization/ISerialization.html): custom serializers must implement this interface
+* [CONFIG.TOPOLOGY_SERIALIZATIONS](javadocs/backtype/storm/Config.html#TOPOLOGY_SERIALIZATIONS): custom serializers can be registered using this configuration
+
+### Spouts
+
+A spout is a source of streams in a topology. Generally spouts will read tuples from an external source and emit them into the topology (e.g. a Kestrel queue or the Twitter API). Spouts can either be __reliable__ or __unreliable__. A reliable spout is capable of replaying a tuple if it failed to be processed by Storm, whereas an unreliable spout forgets about the tuple as soon as it is emitted.
+
+Spouts can emit more than one stream. To do so, declare multiple streams using the `declareStream` method of [OutputFieldsDeclarer](javadocs/backtype/storm/topology/OutputFieldsDeclarer.html) and specify the stream to emit to when using the `emit` method on [SpoutOutputCollector](javadocs/backtype/storm/spout/SpoutOutputCollector.html). 
+
+The main method on spouts is `nextTuple`. `nextTuple` either emits a new tuple into the topology or simply returns if there are no new tuples to emit. It is imperative that `nextTuple` does not block for any spout implementation, because Storm calls all the spout methods on the same thread.
+
+The other main methods on spouts are `ack` and `fail`. These are called when Storm detects that a tuple emitted from the spout either successfully completed through the topology or failed to be completed. `ack` and `fail` are only called for reliable spouts. See [the Javadoc](javadocs/backtype/storm/spout/ISpout.html) for more information.
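+
+As a small illustration (hypothetical class, not from the codebase), an unreliable spout only needs a fast, non-blocking `nextTuple`:
+
+```java
+import java.util.Map;
+import java.util.Random;
+
+import backtype.storm.spout.SpoutOutputCollector;
+import backtype.storm.task.TopologyContext;
+import backtype.storm.topology.OutputFieldsDeclarer;
+import backtype.storm.topology.base.BaseRichSpout;
+import backtype.storm.tuple.Fields;
+import backtype.storm.tuple.Values;
+
+public class RandomNumberSpout extends BaseRichSpout {
+    private SpoutOutputCollector collector;
+    private Random random;
+
+    @Override
+    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
+        this.collector = collector;
+        this.random = new Random();
+    }
+
+    @Override
+    public void nextTuple() {
+        // no message id is given, so this tuple is untracked (unreliable)
+        collector.emit(new Values(random.nextInt(100)));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("number"));
+    }
+}
+```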
+
+**Resources:**
+
+* [IRichSpout](javadocs/backtype/storm/topology/IRichSpout.html): this is the interface that spouts must implement. 
+* [Guaranteeing message processing](Guaranteeing-message-processing.html)
+
+### Bolts
+
+All processing in topologies is done in bolts. Bolts can do anything from filtering, functions, aggregations, joins, talking to databases, and more. 
+
+Bolts can do simple stream transformations. Doing complex stream transformations often requires multiple steps and thus multiple bolts. For example, transforming a stream of tweets into a stream of trending images requires at least two steps: a bolt to do a rolling count of retweets for each image, and one or more bolts to stream out the top X images (you can do this particular stream transformation in a more scalable way with three bolts than with two). 
+
+Bolts can emit more than one stream. To do so, declare multiple streams using the `declareStream` method of [OutputFieldsDeclarer](javadocs/backtype/storm/topology/OutputFieldsDeclarer.html) and specify the stream to emit to when using the `emit` method on [OutputCollector](javadocs/backtype/storm/task/OutputCollector.html).
+
+When you declare a bolt's input streams, you always subscribe to specific streams of another component. If you want to subscribe to all the streams of another component, you have to subscribe to each one individually. [InputDeclarer](javadocs/backtype/storm/topology/InputDeclarer.html) has syntactic sugar for subscribing to streams declared on the default stream id. Saying `declarer.shuffleGrouping("1")` subscribes to the default stream on component "1" and is equivalent to `declarer.shuffleGrouping("1", DEFAULT_STREAM_ID)`. 
+
+The main method in bolts is the `execute` method, which takes a new tuple as input. Bolts emit new tuples using the [OutputCollector](javadocs/backtype/storm/task/OutputCollector.html) object. Bolts must call the `ack` method on the `OutputCollector` for every tuple they process so that Storm knows when tuples are completed (and can eventually determine that it's safe to ack the original spout tuples). For the common case of processing an input tuple, emitting 0 or more tuples based on that tuple, and then acking the input tuple, Storm provides an [IBasicBolt](javadocs/backtype/storm/topology/IBasicBolt.html) interface which does the acking automatically.
+
+It's perfectly fine to launch new threads in bolts that do processing asynchronously. [OutputCollector](javadocs/backtype/storm/task/OutputCollector.html) is thread-safe and can be called at any time.
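+
+For the common read-emit-ack pattern, a bolt extending the `BaseBasicBolt` convenience class stays very small. In this sketch (an `ExclamationBolt` in the spirit of the storm-starter examples), every emitted tuple is anchored to the input, and the input is acked automatically when `execute` returns:
+
+```java
+import backtype.storm.topology.BasicOutputCollector;
+import backtype.storm.topology.OutputFieldsDeclarer;
+import backtype.storm.topology.base.BaseBasicBolt;
+import backtype.storm.tuple.Fields;
+import backtype.storm.tuple.Tuple;
+import backtype.storm.tuple.Values;
+
+public class ExclamationBolt extends BaseBasicBolt {
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+        // Anchoring and acking are handled by the IBasicBolt machinery.
+        collector.emit(new Values(tuple.getString(0) + "!!!"));
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("word"));
+    }
+}
+```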
+
+**Resources:**
+
+* [IRichBolt](javadocs/backtype/storm/topology/IRichBolt.html): this is the general interface for bolts.
+* [IBasicBolt](javadocs/backtype/storm/topology/IBasicBolt.html): this is a convenience interface for defining bolts that do filtering or simple functions.
+* [OutputCollector](javadocs/backtype/storm/task/OutputCollector.html): bolts emit tuples to their output streams using an instance of this class
+* [Guaranteeing message processing](Guaranteeing-message-processing.html)
+
+### Stream groupings
+
+Part of defining a topology is specifying for each bolt which streams it should receive as input. A stream grouping defines how that stream should be partitioned among the bolt's tasks.
+
+There are seven built-in stream groupings in Storm, and you can implement a custom stream grouping by implementing the [CustomStreamGrouping](javadocs/backtype/storm/grouping/CustomStreamGrouping.html) interface (a short wiring sketch follows the list):
+
+1. **Shuffle grouping**: Tuples are randomly distributed across the bolt's tasks in a way such that each task is guaranteed to get a roughly equal number of tuples.
+2. **Fields grouping**: The stream is partitioned by the fields specified in the grouping. For example, if the stream is grouped by the "user-id" field, tuples with the same "user-id" will always go to the same task, but tuples with different "user-id"'s may go to different tasks.
+3. **All grouping**: The stream is replicated across all the bolt's tasks. Use this grouping with care.
+4. **Global grouping**: The entire stream goes to a single one of the bolt's tasks. Specifically, it goes to the task with the lowest id.
+5. **None grouping**: This grouping specifies that you don't care how the stream is grouped. Currently, none groupings are equivalent to shuffle groupings. Eventually though, Storm will push down bolts with none groupings to execute in the same thread as the bolt or spout they subscribe from (when possible).
+6. **Direct grouping**: This is a special kind of grouping. A stream grouped this way means that the __producer__ of the tuple decides which task of the consumer will receive this tuple. Direct groupings can only be declared on streams that have been declared as direct streams. Tuples emitted to a direct stream must be emitted using one of the [emitDirect](javadocs/backtype/storm/task/OutputCollector.html#emitDirect(int,%20int,%20java.util.List)) methods. A bolt can get the task ids of its consumers by either using the provided [TopologyContext](javadocs/backtype/storm/task/TopologyContext.html) or by keeping track of the output of the `emit` method in [OutputCollector](javadocs/backtype/storm/task/OutputCollector.html) (which returns the task ids that the tuple was sent to).
+7. **Local or shuffle grouping**: If the target bolt has one or more tasks in the same worker process, tuples will be shuffled to just those in-process tasks. Otherwise, this acts like a normal shuffle grouping.
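+
+Putting a few of these together, here is a hedged wiring sketch (the `SentenceSpout` is the one sketched above; `SplitSentenceBolt` and `WordCountBolt` are illustrative stand-ins):
+
+```java
+import backtype.storm.topology.TopologyBuilder;
+import backtype.storm.tuple.Fields;
+
+TopologyBuilder builder = new TopologyBuilder();
+builder.setSpout("sentences", new SentenceSpout(), 2);
+// Shuffle grouping: sentences are spread randomly over the 8 splitter tasks.
+builder.setBolt("split", new SplitSentenceBolt(), 8)
+       .shuffleGrouping("sentences");
+// Fields grouping: the same word always goes to the same counter task.
+builder.setBolt("count", new WordCountBolt(), 12)
+       .fieldsGrouping("split", new Fields("word"));
+```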
+
+**Resources:**
+
+* [TopologyBuilder](javadocs/backtype/storm/topology/TopologyBuilder.html): use this class to define topologies
+* [InputDeclarer](javadocs/backtype/storm/topology/InputDeclarer.html): this object is returned whenever `setBolt` is called on `TopologyBuilder` and is used for declaring a bolt's input streams and how those streams should be grouped
+* [CoordinatedBolt](javadocs/backtype/storm/task/CoordinatedBolt.html): this bolt is useful for distributed RPC topologies and makes heavy use of direct streams and direct groupings
+
+### Reliability
+
+Storm guarantees that every spout tuple will be fully processed by the topology. It does this by tracking the tree of tuples triggered by every spout tuple and determining when that tree of tuples has been successfully completed. Every topology has a "message timeout" associated with it. If Storm fails to detect that a spout tuple has been completed within that timeout, then it fails the tuple and replays it later. 
+
+To take advantage of Storm's reliability capabilities, you must tell Storm when new edges in a tuple tree are being created and tell Storm whenever you've finished processing an individual tuple. These are done using the [OutputCollector](javadocs/backtype/storm/task/OutputCollector.html) object that bolts use to emit tuples. Anchoring is done in the `emit` method, and you declare that you're finished with a tuple using the `ack` method.
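+
+A minimal sketch of both steps (assuming an `IRichBolt` whose `prepare` method saved the `collector`):
+
+```java
+import backtype.storm.tuple.Tuple;
+import backtype.storm.tuple.Values;
+
+public void execute(Tuple input) {
+    // Passing the input tuple as the first emit argument "anchors" the new
+    // tuple, adding an edge to the tree rooted at the original spout tuple.
+    collector.emit(input, new Values(input.getString(0) + "!!!"));
+    // Acking tells Storm this bolt is finished with the input tuple.
+    collector.ack(input);
+}
+```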
+
+This is all explained in much more detail in [Guaranteeing message processing](Guaranteeing-message-processing.html). 
+
+### Tasks
+
+Each spout or bolt executes as one or more tasks across the cluster. Each task corresponds to one thread of execution, and stream groupings define how to send tuples from one set of tasks to another set of tasks. You set the parallelism for each spout or bolt in the `setSpout` and `setBolt` methods of [TopologyBuilder](javadocs/backtype/storm/topology/TopologyBuilder.html). 
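+
+For example, in the wiring sketch above the parallelism hints are 2, 8, and 12. The declarer returned by `setSpout`/`setBolt` also exposes `setNumTasks`, which fixes the task count independently of the initial parallelism; this snippet reuses the hypothetical components from above:
+
+```java
+builder.setBolt("count", new WordCountBolt(), 12) // initial parallelism of 12
+       .setNumTasks(24)                           // but 24 tasks in total
+       .fieldsGrouping("split", new Fields("word"));
+```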
+
+### Workers
+
+Topologies execute across one or more worker processes. Each worker process is a physical JVM and executes a subset of all the tasks for the topology. For example, if the combined parallelism of the topology is 300 and 50 workers are allocated, then each worker will execute 6 tasks (as threads within the worker). Storm tries to spread the tasks evenly across all the workers.
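+
+For instance, the 50-worker example above corresponds to one line of topology configuration (the topology name is illustrative):
+
+```java
+import backtype.storm.Config;
+import backtype.storm.StormSubmitter;
+
+Config conf = new Config();
+conf.setNumWorkers(50); // 300 total tasks / 50 workers ≈ 6 tasks per worker
+StormSubmitter.submitTopology("word-count", conf, builder.createTopology());
+```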
+
+**Resources:**
+
+* [Config.TOPOLOGY_WORKERS](javadocs/backtype/storm/Config.html#TOPOLOGY_WORKERS): this config sets the number of workers to allocate for executing the topology

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Configuration.md
----------------------------------------------------------------------
diff --git a/docs/Configuration.md b/docs/Configuration.md
new file mode 100644
index 0000000..8e8ca77
--- /dev/null
+++ b/docs/Configuration.md
@@ -0,0 +1,29 @@
+---
+layout: documentation
+---
+Storm has a variety of configurations for tweaking the behavior of nimbus, supervisors, and running topologies. Some configurations are system configurations and cannot be modified on a topology-by-topology basis, whereas other configurations can be modified per topology. 
+
+Every configuration has a default value defined in [defaults.yaml](https://github.com/apache/incubator-storm/blob/master/conf/defaults.yaml) in the Storm codebase. You can override these configurations by defining a storm.yaml in the classpath of Nimbus and the supervisors. Finally, you can define a topology-specific configuration that you submit along with your topology when using [StormSubmitter](javadocs/backtype/storm/StormSubmitter.html). However, the topology-specific configuration can only override configs prefixed with "TOPOLOGY".
+
+Storm 0.7.0 and onwards lets you override configuration on a per-bolt/per-spout basis. The only configurations that can be overridden this way are:
+
+1. "topology.debug"
+2. "topology.max.spout.pending"
+3. "topology.max.task.parallelism"
+4. "topology.kryo.register": This works a little bit differently than the other ones, since the serializations will be available to all components in the topology. More details on [Serialization](Serialization.html). 
+
+The Java API lets you specify component specific configurations in two ways:
+
+1. *Internally:* Override `getComponentConfiguration` in any spout or bolt and return the component-specific configuration map.
+2. *Externally:* `setSpout` and `setBolt` in `TopologyBuilder` return an object with methods `addConfiguration` and `addConfigurations` that can be used to override the configurations for the component.
+
+The preference order for configuration values is defaults.yaml < storm.yaml < topology specific configuration < internal component specific configuration < external component specific configuration. 
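+
+A short sketch of both styles (the `SplitSentenceBolt` and `builder` are illustrative stand-ins, and the keys shown are from the overridable list above):
+
+```java
+import java.util.Map;
+import backtype.storm.Config;
+
+// Internally: override getComponentConfiguration in the spout or bolt.
+@Override
+public Map<String, Object> getComponentConfiguration() {
+    Config conf = new Config();    // Config is a Map with typed setters
+    conf.setMaxTaskParallelism(1); // sets "topology.max.task.parallelism"
+    return conf;
+}
+
+// Externally: via the declarer returned by setBolt/setSpout.
+builder.setBolt("split", new SplitSentenceBolt(), 8)
+       .addConfiguration("topology.debug", true);
+```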
+
+
+**Resources:**
+
+* [Config](javadocs/backtype/storm/Config.html): a listing of all configurations as well as a helper class for creating topology specific configurations
+* [defaults.yaml](https://github.com/apache/incubator-storm/blob/master/conf/defaults.yaml): the default values for all configurations
+* [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html): explains how to create and configure a Storm cluster
+* [Running topologies on a production cluster](Running-topologies-on-a-production-cluster.html): lists useful configurations when running topologies on a cluster
+* [Local mode](Local-mode.html): lists useful configurations when using local mode

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Contributing-to-Storm.md
----------------------------------------------------------------------
diff --git a/docs/Contributing-to-Storm.md b/docs/Contributing-to-Storm.md
new file mode 100644
index 0000000..dff23fb
--- /dev/null
+++ b/docs/Contributing-to-Storm.md
@@ -0,0 +1,31 @@
+---
+layout: documentation
+---
+
+### Getting started with contributing
+
+Some of the issues on the [issue tracker](https://issues.apache.org/jira/browse/STORM) are marked with the "Newbie" label. If you're interested in contributing to Storm but don't know where to begin, these are good issues to start with. They are a great way to get your feet wet with the codebase because they require learning about only an isolated portion of it and involve a relatively small amount of work.
+
+### Learning the codebase
+
+The [Implementation docs](Implementation-docs.html) section of the wiki gives detailed walkthroughs of the codebase. Reading through these docs is highly recommended to understand the codebase.
+
+### Contribution process
+
+Contributions to the Storm codebase should be sent as GitHub pull requests. If there are any problems with a pull request, we can iterate on it using GitHub's commenting features.
+
+For small patches, feel free to submit pull requests directly for them. For larger contributions, please use the following process. The idea behind this process is to prevent any wasted work and catch design issues early on:
+
+1. Open an issue on the [issue tracker](https://issues.apache.org/jira/browse/STORM) if one doesn't exist already
+2. Comment on the issue with your plan for implementing the issue. Explain what pieces of the codebase you're going to touch and how everything is going to fit together.
+3. Storm committers will iterate with you on the design to make sure you're on the right track
+4. Implement your issue, submit a pull request, and iterate from there.
+
+### Modules built on top of Storm
+
+Modules built on top of Storm (like spouts, bolts, etc.) that aren't appropriate for Storm core can be maintained as your own project or as part of [@stormprocessor](https://github.com/stormprocessor). To make your project part of @stormprocessor, put it on your own GitHub account and send an email to the mailing list proposing its inclusion. The community can then discuss whether it's useful enough to be included; if so, you'll be added to the @stormprocessor organization and can maintain your project there. The advantage of hosting your module in @stormprocessor is that it will be easier for potential users to find your project.
+
+### Contributing documentation
+
+Documentation contributions are very welcome! The best way to send contributions is as emails through the mailing list.
+

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Creating-a-new-Storm-project.md
----------------------------------------------------------------------
diff --git a/docs/Creating-a-new-Storm-project.md b/docs/Creating-a-new-Storm-project.md
new file mode 100644
index 0000000..feb49b8
--- /dev/null
+++ b/docs/Creating-a-new-Storm-project.md
@@ -0,0 +1,25 @@
+---
+layout: documentation
+---
+This page outlines how to set up a Storm project for development. The steps are:
+
+1. Add Storm jars to classpath
+2. If using multilang, add multilang dir to classpath
+
+Follow along to see how to set up the [storm-starter](http://github.com/nathanmarz/storm-starter) project in Eclipse.
+
+### Add Storm jars to classpath
+
+You'll need the Storm jars on your classpath to develop Storm topologies. Using [Maven](Maven.html) is highly recommended. [Here's an example](https://github.com/nathanmarz/storm-starter/blob/master/m2-pom.xml) of how to set up your pom.xml for a Storm project. If you don't want to use Maven, you can include the jars from the Storm release on your classpath. 
+
+[storm-starter](http://github.com/nathanmarz/storm-starter) uses [Leiningen](http://github.com/technomancy/leiningen) for build and dependency resolution. You can install Leiningen by downloading [this script](https://raw.github.com/technomancy/leiningen/stable/bin/lein), placing it on your path, and making it executable. To retrieve the dependencies for Storm, simply run `lein deps` in the project root.
+
+To set up the classpath in Eclipse, create a new Java project, include `src/jvm/` as a source path, and make sure all the jars in `lib/` and `lib/dev/` are in the `Referenced Libraries` section of the project.
+
+### If using multilang, add multilang dir to classpath
+
+If you implement spouts or bolts in languages other than Java, then those implementations should be under the `multilang/resources/` directory of the project. For Storm to find these files in local mode, the `resources/` dir needs to be on the classpath. You can do this in Eclipse by adding `multilang/` as a source folder. You may also need to add `multilang/resources` as a source directory.
+
+For more information on writing topologies in other languages, see [Using non-JVM languages with Storm](Using-non-JVM-languages-with-Storm.html).
+
+To test that everything is working in Eclipse, you should now be able to `Run` the `WordCountTopology.java` file. You will see messages being emitted at the console for 10 seconds.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/DSLs-and-multilang-adapters.md
----------------------------------------------------------------------
diff --git a/docs/DSLs-and-multilang-adapters.md b/docs/DSLs-and-multilang-adapters.md
new file mode 100644
index 0000000..31bd453
--- /dev/null
+++ b/docs/DSLs-and-multilang-adapters.md
@@ -0,0 +1,9 @@
+---
+layout: documentation
+---
+* [Scala DSL](https://github.com/velvia/ScalaStorm)
+* [JRuby DSL](https://github.com/colinsurprenant/redstorm)
+* [Clojure DSL](Clojure-DSL.html)
+* [Storm/Esper integration](https://github.com/tomdz/storm-esper): Streaming SQL on top of Storm
+* [io-storm](https://github.com/gphat/io-storm): Perl multilang adapter
+* [storm-php](https://github.com/lazyshot/storm-php): PHP multilang adapter


[02/16] storm git commit: STORM-1617: Release Specific Documentation 0.9.x

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/assets/js/jquery.min.js
----------------------------------------------------------------------
diff --git a/docs/assets/js/jquery.min.js b/docs/assets/js/jquery.min.js
new file mode 100644
index 0000000..f364443
--- /dev/null
+++ b/docs/assets/js/jquery.min.js
@@ -0,0 +1,6 @@
+/*! jQuery v1.11.3 | (c) 2005, 2015 jQuery Foundation, Inc. | jquery.org/license */
+!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k={},l="1.11.3",m=function(a,b){return new m.fn.init(a,b)},n=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,o=/^-ms-/,p=/-([\da-z])/gi,q=function(a,b){return b.toUpperCase()};m.fn=m.prototype={jquery:l,constructor:m,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=m.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return m.each(this,a,b)},map:function(a){return this.pushStack(m.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,argumen
 ts))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},m.extend=m.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||m.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(m.isPlainObject(c)||(b=m.isArray(c)))?(b?(b=!1,f=a&&m.isArray(a)?a:[]):f=a&&m.isPlainObject(a)?a:{},g[d]=m.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},m.extend({expando:"jQuery"+(l+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===m.type(a)},isArray:Array.isArray||function(a){return"array"===m.type(a)},isWindow:function(a){return null!=a&&a=
 =a.window},isNumeric:function(a){return!m.isArray(a)&&a-parseFloat(a)+1>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var b;if(!a||"object"!==m.type(a)||a.nodeType||m.isWindow(a))return!1;try{if(a.constructor&&!j.call(a,"constructor")&&!j.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}if(k.ownLast)for(b in a)return j.call(a,b);for(b in a);return void 0===b||j.call(a,b)},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(b){b&&m.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(o,"ms-").replace(p,q)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=r(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e])
 ,d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(n,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(r(Object(a))?m.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(g)return g.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=r(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(f=a[b],b=a,a=f),m.isFunction(a)?(c=d.call(arguments,2),e=function(){return a.apply(b||this,c.concat(d.call(
 arguments)))},e.guid=a.guid=a.guid||m.guid++,e):void 0},now:function(){return+new Date},support:k}),m.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function r(a){var b="length"in a&&a.length,c=m.type(a);return"function"===c||m.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var s=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=ha(),z=ha(),A=ha(),B=function(a,b){return a===b&&(l=!0),0},C=1<<31,D={}.hasOwnProperty,E=[],F=E.pop,G=E.push,H=E.push,I=E.slice,J=function(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return c;return-1},K="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",L="[\\x20\\t\\r\\n\\f]",M="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",N=M.replace("w","w#"),O="\\["+L+"*("+M+")(?:"+L+"*([*^$|!~]?=)"+L+"*(?:'((?:\\\\.|[^\\\\'])*)'|
 \"((?:\\\\.|[^\\\\\"])*)\"|("+N+"))|)"+L+"*\\]",P=":("+M+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+O+")*)|.*)\\)|)",Q=new RegExp(L+"+","g"),R=new RegExp("^"+L+"+|((?:^|[^\\\\])(?:\\\\.)*)"+L+"+$","g"),S=new RegExp("^"+L+"*,"+L+"*"),T=new RegExp("^"+L+"*([>+~]|"+L+")"+L+"*"),U=new RegExp("="+L+"*([^\\]'\"]*?)"+L+"*\\]","g"),V=new RegExp(P),W=new RegExp("^"+N+"$"),X={ID:new RegExp("^#("+M+")"),CLASS:new RegExp("^\\.("+M+")"),TAG:new RegExp("^("+M.replace("w","w*")+")"),ATTR:new RegExp("^"+O),PSEUDO:new RegExp("^"+P),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+L+"*(even|odd|(([+-]|)(\\d*)n|)"+L+"*(?:([+-]|)"+L+"*(\\d+)|))"+L+"*\\)|)","i"),bool:new RegExp("^(?:"+K+")$","i"),needsContext:new RegExp("^"+L+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+L+"*((?:-\\d)?\\d*)"+L+"*\\)|)(?=[^-]|$)","i")},Y=/^(?:input|select|textarea|button)$/i,Z=/^h\d$/i,$=/^[^{]+\{\s*\[native \w/,_=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$
 /,aa=/[+~]/,ba=/'|\\/g,ca=new RegExp("\\\\([\\da-f]{1,6}"+L+"?|("+L+")|.)","ig"),da=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},ea=function(){m()};try{H.apply(E=I.call(v.childNodes),v.childNodes),E[v.childNodes.length].nodeType}catch(fa){H={apply:E.length?function(a,b){G.apply(a,I.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function ga(a,b,d,e){var f,h,j,k,l,o,r,s,w,x;if((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,d=d||[],k=b.nodeType,"string"!=typeof a||!a||1!==k&&9!==k&&11!==k)return d;if(!e&&p){if(11!==k&&(f=_.exec(a)))if(j=f[1]){if(9===k){if(h=b.getElementById(j),!h||!h.parentNode)return d;if(h.id===j)return d.push(h),d}else if(b.ownerDocument&&(h=b.ownerDocument.getElementById(j))&&t(b,h)&&h.id===j)return d.push(h),d}else{if(f[2])return H.apply(d,b.getElementsByTagName(a)),d;if((j=f[3])&&c.getElementsByClassName)return H.apply(d,b.getElementsByClassName(j)),
 d}if(c.qsa&&(!q||!q.test(a))){if(s=r=u,w=b,x=1!==k&&a,1===k&&"object"!==b.nodeName.toLowerCase()){o=g(a),(r=b.getAttribute("id"))?s=r.replace(ba,"\\$&"):b.setAttribute("id",s),s="[id='"+s+"'] ",l=o.length;while(l--)o[l]=s+ra(o[l]);w=aa.test(a)&&pa(b.parentNode)||b,x=o.join(",")}if(x)try{return H.apply(d,w.querySelectorAll(x)),d}catch(y){}finally{r||b.removeAttribute("id")}}}return i(a.replace(R,"$1"),b,d,e)}function ha(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ia(a){return a[u]=!0,a}function ja(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function ka(a,b){var c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function la(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||C)-(~a.sourceIndex||C);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function ma(a){return function(b){var c=b.node
 Name.toLowerCase();return"input"===c&&b.type===a}}function na(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function oa(a){return ia(function(b){return b=+b,ia(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function pa(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=ga.support={},f=ga.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=ga.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=g.documentElement,e=g.defaultView,e&&e!==e.top&&(e.addEventListener?e.addEventListener("unload",ea,!1):e.attachEvent&&e.attachEvent("onunload",ea)),p=!f(g),c.attributes=ja(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ja(function(a){return a.appendChild(g.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=$
 .test(g.getElementsByClassName),c.getById=ja(function(a){return o.appendChild(a).id=u,!g.getElementsByName||!g.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(ca,da);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(ca,da);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return"undefined"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=$.test(g.querySelectorAll))&&(ja(fu
 nction(a){o.appendChild(a).innerHTML="<a id='"+u+"'></a><select id='"+u+"-\f]' msallowcapture=''><option selected=''></option></select>",a.querySelectorAll("[msallowcapture^='']").length&&q.push("[*^$]="+L+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+L+"*(?:value|"+K+")"),a.querySelectorAll("[id~="+u+"-]").length||q.push("~="),a.querySelectorAll(":checked").length||q.push(":checked"),a.querySelectorAll("a#"+u+"+*").length||q.push(".#.+[+~]")}),ja(function(a){var b=g.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+L+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=$.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ja(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",P)}),
 q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=$.test(o.compareDocumentPosition),t=b||$.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===g||a.ownerDocument===v&&t(v,a)?-1:b===g||b.ownerDocument===v&&t(v,b)?1:k?J(k,a)-J(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,h=[a],i=[b];if(!e||!f)return a===g?-1:b===g?1:e?-1:f?1:k?J(k,a)-J(k,b):0;if(e===f)return la(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)i.unshift(c);
 while(h[d]===i[d])d++;return d?la(h[d],i[d]):h[d]===v?-1:i[d]===v?1:0},g):n},ga.matches=function(a,b){return ga(a,null,null,b)},ga.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(U,"='$1']"),!(!c.matchesSelector||!p||r&&r.test(b)||q&&q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return ga(b,n,null,[a]).length>0},ga.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},ga.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&D.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},ga.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},ga.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1
 )}return k=null,a},e=ga.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=ga.selectors={cacheLength:50,createPseudo:ia,match:X,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(ca,da),a[3]=(a[3]||a[4]||a[5]||"").replace(ca,da),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||ga.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&ga.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return X.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&V.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.leng
 th-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(ca,da).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+L+")"+a+"("+L+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=ga.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e.replace(Q," ")+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=
 b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){k=q[u]||(q[u]={}),j=k[a]||[],n=j[0]===w&&j[1],m=j[0]===w&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[w,n,m];break}}else if(s&&(j=(b[u]||(b[u]={}))[a])&&j[0]===w)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(s&&((l[u]||(l[u]={}))[a]=[w,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||ga.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ia(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=J(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ia(function(a){var b=[],c=
 [],d=h(a.replace(R,"$1"));return d[u]?ia(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ia(function(a){return function(b){return ga(a,b).length>0}}),contains:ia(function(a){return a=a.replace(ca,da),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ia(function(a){return W.test(a||"")||ga.error("unsupported lang: "+a),a=a.replace(ca,da).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},check
 ed:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Z.test(a.nodeName)},input:function(a){return Y.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:oa(function(){return[0]}),last:oa(function(a,b){return[b-1]}),eq:oa(function(a,b,c){return[0>c?c+b:c]}),even:oa(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:oa(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:oa(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:oa(function(a,b,c){fo
 r(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=ma(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=na(b);function qa(){}qa.prototype=d.filters=d.pseudos,d.setFilters=new qa,g=ga.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){(!c||(e=S.exec(h)))&&(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=T.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(R," ")}),h=h.slice(c.length));for(g in d.filter)!(e=X[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?ga.error(a):z(a,i).slice(0)};function ra(a){for(var b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}function sa(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[w,f];if(g){while(b
 =b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[u]||(b[u]={}),(h=i[d])&&h[0]===w&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function ta(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function ua(a,b,c){for(var d=0,e=b.length;e>d;d++)ga(a,b[d],c);return c}function va(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function wa(a,b,c,d,e,f){return d&&!d[u]&&(d=wa(d)),e&&!e[u]&&(e=wa(e,f)),ia(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||ua(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:va(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=va(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?J(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=va(r===g?r.s
 plice(o,r.length):r),e?e(null,g,r,i):H.apply(g,r)})}function xa(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=sa(function(a){return a===b},h,!0),l=sa(function(a){return J(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];f>i;i++)if(c=d.relative[a[i].type])m=[sa(ta(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return wa(i>1&&ta(m),i>1&&ra(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(R,"$1"),c,e>i&&xa(a.slice(i,e)),f>e&&xa(a=a.slice(e)),f>e&&ra(a))}m.push(c)}return ta(m)}function ya(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var l,m,o,p=0,q="0",r=f&&[],s=[],t=j,u=f||e&&d.find.TAG("*",k),v=w+=null==t?1:Math.random()||.1,x=u.length;for(k&&(j=g!==n&&g);q!==x&&null!=(l=u[q]);q++){if(e&&l){m=0;while(o=a[m++])if(o(l,g,h)){i.push(l);break}k&&(w=v)}c&&((l=!o&&l)&&p--,f&&r.push(l))}if(p+=q,c&&q!=
 =p){m=0;while(o=b[m++])o(r,s,g,h);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=F.call(i));s=va(s)}H.apply(i,s),k&&!f&&s.length>0&&p+b.length>1&&ga.uniqueSort(i)}return k&&(w=v,j=t),r};return c?ia(f):f}return h=ga.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=xa(b[c]),f[u]?d.push(f):e.push(f);f=A(a,ya(e,d)),f.selector=a}return f},i=ga.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(ca,da),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=X.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(ca,da),aa.test(j[0].type)&&pa(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&ra(j),!a)return H.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,
 aa.test(a)&&pa(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ja(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),ja(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||ka("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ja(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||ka("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ja(function(a){return null==a.getAttribute("disabled")})||ka(K,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),ga}(a);m.find=s,m.expr=s.selectors,m.expr[":"]=m.expr.pseudos,m.unique=s.uniqueSort,m.text=s.getText,m.isXMLDoc=s.isXML,m.contains=s.contains;var t=m.expr.match.needsContext,u
 =/^<(\w+)\s*\/?>(?:<\/\1>|)$/,v=/^.[^:#\[\.,]*$/;function w(a,b,c){if(m.isFunction(b))return m.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return m.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(v.test(b))return m.filter(b,a,c);b=m.filter(b,a)}return m.grep(a,function(a){return m.inArray(a,b)>=0!==c})}m.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?m.find.matchesSelector(d,a)?[d]:[]:m.find.matches(a,m.grep(b,function(a){return 1===a.nodeType}))},m.fn.extend({find:function(a){var b,c=[],d=this,e=d.length;if("string"!=typeof a)return this.pushStack(m(a).filter(function(){for(b=0;e>b;b++)if(m.contains(d[b],this))return!0}));for(b=0;e>b;b++)m.find(a,d[b],c);return c=this.pushStack(e>1?m.unique(c):c),c.selector=this.selector?this.selector+" "+a:a,c},filter:function(a){return this.pushStack(w(this,a||[],!1))},not:function(a){return this.pushStack(w(this,a||[],!0))},is:function(a){return!!w(this,"string"==type
 of a&&t.test(a)?m(a):a||[],!1).length}});var x,y=a.document,z=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,A=m.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof a){if(c="<"===a.charAt(0)&&">"===a.charAt(a.length-1)&&a.length>=3?[null,a,null]:z.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||x).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof m?b[0]:b,m.merge(this,m.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:y,!0)),u.test(c[1])&&m.isPlainObject(b))for(c in b)m.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}if(d=y.getElementById(c[2]),d&&d.parentNode){if(d.id!==c[2])return x.find(a);this.length=1,this[0]=d}return this.context=y,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):m.isFunction(a)?"undefined"!=typeof x.ready?x.ready(a):a(m):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),m.makeArray(a,this))};A.prototype=m.fn,x=m(y);var B=/^(?:parents|prev(?:Until|All))/,C={children:!0,conte
 nts:!0,next:!0,prev:!0};m.extend({dir:function(a,b,c){var d=[],e=a[b];while(e&&9!==e.nodeType&&(void 0===c||1!==e.nodeType||!m(e).is(c)))1===e.nodeType&&d.push(e),e=e[b];return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),m.fn.extend({has:function(a){var b,c=m(a,this),d=c.length;return this.filter(function(){for(b=0;d>b;b++)if(m.contains(this,c[b]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=t.test(a)||"string"!=typeof a?m(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&m.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?m.unique(f):f)},index:function(a){return a?"string"==typeof a?m.inArray(this[0],m(a)):m.inArray(a.jquery?a[0]:a,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(m.unique(m.merge(this.get(),m(a,b))))},addBack:function(a){return this.a
 dd(null==a?this.prevObject:this.prevObject.filter(a))}});function D(a,b){do a=a[b];while(a&&1!==a.nodeType);return a}m.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return m.dir(a,"parentNode")},parentsUntil:function(a,b,c){return m.dir(a,"parentNode",c)},next:function(a){return D(a,"nextSibling")},prev:function(a){return D(a,"previousSibling")},nextAll:function(a){return m.dir(a,"nextSibling")},prevAll:function(a){return m.dir(a,"previousSibling")},nextUntil:function(a,b,c){return m.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return m.dir(a,"previousSibling",c)},siblings:function(a){return m.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return m.sibling(a.firstChild)},contents:function(a){return m.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:m.merge([],a.childNodes)}},function(a,b){m.fn[a]=function(c,d){var e=m.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=m.f
 ilter(d,e)),this.length>1&&(C[a]||(e=m.unique(e)),B.test(a)&&(e=e.reverse())),this.pushStack(e)}});var E=/\S+/g,F={};function G(a){var b=F[a]={};return m.each(a.match(E)||[],function(a,c){b[c]=!0}),b}m.Callbacks=function(a){a="string"==typeof a?F[a]||G(a):m.extend({},a);var b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(c=a.memory&&l,d=!0,f=g||0,g=0,e=h.length,b=!0;h&&e>f;f++)if(h[f].apply(l[0],l[1])===!1&&a.stopOnFalse){c=!1;break}b=!1,h&&(i?i.length&&j(i.shift()):c?h=[]:k.disable())},k={add:function(){if(h){var d=h.length;!function f(b){m.each(b,function(b,c){var d=m.type(c);"function"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&"string"!==d&&f(c)})}(arguments),b?e=h.length:c&&(g=d,j(c))}return this},remove:function(){return h&&m.each(arguments,function(a,c){var d;while((d=m.inArray(c,h,d))>-1)h.splice(d,1),b&&(e>=d&&e--,f>=d&&f--)}),this},has:function(a){return a?m.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],e=0,this},disable:function(){return h=i=c=void 0
 ,this},disabled:function(){return!h},lock:function(){return i=void 0,c||k.disable(),this},locked:function(){return!i},fireWith:function(a,c){return!h||d&&!i||(c=c||[],c=[a,c.slice?c.slice():c],b?i.push(c):j(c)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!d}};return k},m.extend({Deferred:function(a){var b=[["resolve","done",m.Callbacks("once memory"),"resolved"],["reject","fail",m.Callbacks("once memory"),"rejected"],["notify","progress",m.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return m.Deferred(function(c){m.each(b,function(b,f){var g=m.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&m.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?m.extend
 (a,d):d}},e={};return d.pipe=d.then,m.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&m.isFunction(a.promise)?e:0,g=1===f?a:m.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&m.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var H;m.fn.ready=function(a){return m.ready.promise().done(a),this},m.extend({isReady:!1,readyWait:1,holdReady:function(a){a?m.readyWait++:m.ready(!0)},ready:function(a){if(a===!0?!--m.readyWait:!m.isReady){if(!y.body)return setTimeout(m.rea
 dy);m.isReady=!0,a!==!0&&--m.readyWait>0||(H.resolveWith(y,[m]),m.fn.triggerHandler&&(m(y).triggerHandler("ready"),m(y).off("ready")))}}});function I(){y.addEventListener?(y.removeEventListener("DOMContentLoaded",J,!1),a.removeEventListener("load",J,!1)):(y.detachEvent("onreadystatechange",J),a.detachEvent("onload",J))}function J(){(y.addEventListener||"load"===event.type||"complete"===y.readyState)&&(I(),m.ready())}m.ready.promise=function(b){if(!H)if(H=m.Deferred(),"complete"===y.readyState)setTimeout(m.ready);else if(y.addEventListener)y.addEventListener("DOMContentLoaded",J,!1),a.addEventListener("load",J,!1);else{y.attachEvent("onreadystatechange",J),a.attachEvent("onload",J);var c=!1;try{c=null==a.frameElement&&y.documentElement}catch(d){}c&&c.doScroll&&!function e(){if(!m.isReady){try{c.doScroll("left")}catch(a){return setTimeout(e,50)}I(),m.ready()}}()}return H.promise(b)};var K="undefined",L;for(L in m(k))break;k.ownLast="0"!==L,k.inlineBlockNeedsLayout=!1,m(function(){var 
 a,b,c,d;c=y.getElementsByTagName("body")[0],c&&c.style&&(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),typeof b.style.zoom!==K&&(b.style.cssText="display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1",k.inlineBlockNeedsLayout=a=3===b.offsetWidth,a&&(c.style.zoom=1)),c.removeChild(d))}),function(){var a=y.createElement("div");if(null==k.deleteExpando){k.deleteExpando=!0;try{delete a.test}catch(b){k.deleteExpando=!1}}a=null}(),m.acceptData=function(a){var b=m.noData[(a.nodeName+" ").toLowerCase()],c=+a.nodeType||1;return 1!==c&&9!==c?!1:!b||b!==!0&&a.getAttribute("classid")===b};var M=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,N=/([A-Z])/g;function O(a,b,c){if(void 0===c&&1===a.nodeType){var d="data-"+b.replace(N,"-$1").toLowerCase();if(c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:M.test(c)?m.parseJSON(c):c}catch(e
 ){}m.data(a,b,c)}else c=void 0}return c}function P(a){var b;for(b in a)if(("data"!==b||!m.isEmptyObject(a[b]))&&"toJSON"!==b)return!1;
+
+return!0}function Q(a,b,d,e){if(m.acceptData(a)){var f,g,h=m.expando,i=a.nodeType,j=i?m.cache:a,k=i?a[h]:a[h]&&h;if(k&&j[k]&&(e||j[k].data)||void 0!==d||"string"!=typeof b)return k||(k=i?a[h]=c.pop()||m.guid++:h),j[k]||(j[k]=i?{}:{toJSON:m.noop}),("object"==typeof b||"function"==typeof b)&&(e?j[k]=m.extend(j[k],b):j[k].data=m.extend(j[k].data,b)),g=j[k],e||(g.data||(g.data={}),g=g.data),void 0!==d&&(g[m.camelCase(b)]=d),"string"==typeof b?(f=g[b],null==f&&(f=g[m.camelCase(b)])):f=g,f}}function R(a,b,c){if(m.acceptData(a)){var d,e,f=a.nodeType,g=f?m.cache:a,h=f?a[m.expando]:m.expando;if(g[h]){if(b&&(d=c?g[h]:g[h].data)){m.isArray(b)?b=b.concat(m.map(b,m.camelCase)):b in d?b=[b]:(b=m.camelCase(b),b=b in d?[b]:b.split(" ")),e=b.length;while(e--)delete d[b[e]];if(c?!P(d):!m.isEmptyObject(d))return}(c||(delete g[h].data,P(g[h])))&&(f?m.cleanData([a],!0):k.deleteExpando||g!=g.window?delete g[h]:g[h]=null)}}}m.extend({cache:{},noData:{"applet ":!0,"embed ":!0,"object ":"clsid:D27CDB6E-AE6D
 -11cf-96B8-444553540000"},hasData:function(a){return a=a.nodeType?m.cache[a[m.expando]]:a[m.expando],!!a&&!P(a)},data:function(a,b,c){return Q(a,b,c)},removeData:function(a,b){return R(a,b)},_data:function(a,b,c){return Q(a,b,c,!0)},_removeData:function(a,b){return R(a,b,!0)}}),m.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=m.data(f),1===f.nodeType&&!m._data(f,"parsedAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=m.camelCase(d.slice(5)),O(f,d,e[d])));m._data(f,"parsedAttrs",!0)}return e}return"object"==typeof a?this.each(function(){m.data(this,a)}):arguments.length>1?this.each(function(){m.data(this,a,b)}):f?O(f,a,m.data(f,a)):void 0},removeData:function(a){return this.each(function(){m.removeData(this,a)})}}),m.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=m._data(a,b),c&&(!d||m.isArray(c)?d=m._data(a,b,m.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx
 ";var c=m.queue(a,b),d=c.length,e=c.shift(),f=m._queueHooks(a,b),g=function(){m.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return m._data(a,c)||m._data(a,c,{empty:m.Callbacks("once memory").add(function(){m._removeData(a,b+"queue"),m._removeData(a,c)})})}}),m.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?m.queue(this[0],a):void 0===b?this:this.each(function(){var c=m.queue(this,a,b);m._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&m.dequeue(this,a)})},dequeue:function(a){return this.each(function(){m.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=m.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};"string"!=typeof a&&(b=a,a=void 0),a=a||"fx";while(g--)c=m._data(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.em
 pty.add(h));return h(),e.promise(b)}});var S=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,T=["Top","Right","Bottom","Left"],U=function(a,b){return a=b||a,"none"===m.css(a,"display")||!m.contains(a.ownerDocument,a)},V=m.access=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===m.type(c)){e=!0;for(h in c)m.access(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,m.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(m(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},W=/^(?:checkbox|radio)$/i;!function(){var a=y.createElement("input"),b=y.createElement("div"),c=y.createDocumentFragment();if(b.innerHTML="  <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",k.leadingWhitespace=3===b.firstChild.nodeType,k.tbody=!b.getElementsByTagName("tbody").length,k.htmlSerialize=!!b.getElementsByTagName("link").length,k.html5Clone="<:nav></:nav>"!==y.createElement("nav").cloneNode(!0).outerH
 TML,a.type="checkbox",a.checked=!0,c.appendChild(a),k.appendChecked=a.checked,b.innerHTML="<textarea>x</textarea>",k.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue,c.appendChild(b),b.innerHTML="<input type='radio' checked='checked' name='t'/>",k.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,k.noCloneEvent=!0,b.attachEvent&&(b.attachEvent("onclick",function(){k.noCloneEvent=!1}),b.cloneNode(!0).click()),null==k.deleteExpando){k.deleteExpando=!0;try{delete b.test}catch(d){k.deleteExpando=!1}}}(),function(){var b,c,d=y.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(k[b+"Bubbles"]=c in a)||(d.setAttribute(c,"t"),k[b+"Bubbles"]=d.attributes[c].expando===!1);d=null}();var X=/^(?:input|select|textarea)$/i,Y=/^key/,Z=/^(?:mouse|pointer|contextmenu)|click/,$=/^(?:focusinfocus|focusoutblur)$/,_=/^([^.]*)(?:\.(.+)|)$/;function aa(){return!0}function ba(){return!1}function ca(){try{return y.activeElement}catch(a){}}m.event={global:{},add:function(a
 ,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=m.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return typeof m===K||a&&m.event.triggered===a.type?void 0:m.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(E)||[""],h=b.length;while(h--)f=_.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=m.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=m.event.special[o]||{},l=m.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&m.expr.match.needsContext.test(e),namespace:p.join(".")},i),(n=g[o])||(n=g[o]=[],n.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?n.splice(n.delegateCount++,0,l):n.push(l),m.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q
 ,r=m.hasData(a)&&m._data(a);if(r&&(k=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=_.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=m.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,n=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=n.length;while(f--)g=n[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(n.splice(f,1),g.selector&&n.delegateCount--,l.remove&&l.remove.call(a,g));i&&!n.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||m.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)m.event.remove(a,o+b[j],c,d,!0);m.isEmptyObject(k)&&(delete r.handle,m._removeData(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,n,o=[d||y],p=j.call(b,"type")?b.type:b,q=j.call(b,"namespace")?b.namespace.split("."):[];if(h=l=d=d||y,3!==d.nodeType&&8!==d.nodeType&&!$.test(p+m.event.triggered)&&(p.indexOf(".")>=0&&(q=p.split("."),p=q.shift(),q.sort()),g=p.in
 dexOf(":")<0&&"on"+p,b=b[m.expando]?b:new m.Event(p,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=q.join("."),b.namespace_re=b.namespace?new RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=d),c=null==c?[b]:m.makeArray(c,[b]),k=m.event.special[p]||{},e||!k.trigger||k.trigger.apply(d,c)!==!1)){if(!e&&!k.noBubble&&!m.isWindow(d)){for(i=k.delegateType||p,$.test(i+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),l=h;l===(d.ownerDocument||y)&&o.push(l.defaultView||l.parentWindow||a)}n=0;while((h=o[n++])&&!b.isPropagationStopped())b.type=n>1?i:k.bindType||p,f=(m._data(h,"events")||{})[b.type]&&m._data(h,"handle"),f&&f.apply(h,c),f=g&&h[g],f&&f.apply&&m.acceptData(h)&&(b.result=f.apply(h,c),b.result===!1&&b.preventDefault());if(b.type=p,!e&&!b.isDefaultPrevented()&&(!k._default||k._default.apply(o.pop(),c)===!1)&&m.acceptData(d)&&g&&d[p]&&!m.isWindow(d)){l=d[g],l&&(d[g]=null),m.event.triggered=p;try{d[p]()}catch(r){}m.event.triggered=void 0
 ,l&&(d[g]=l)}return b.result}},dispatch:function(a){a=m.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(m._data(this,"events")||{})[a.type]||[],k=m.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=m.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,g=0;while((e=f.handlers[g++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(e.namespace))&&(a.handleObj=e,a.data=e.data,c=((m.event.special[e.origType]||{}).handle||e.handler).apply(f.elem,i),void 0!==c&&(a.result=c)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(e=[],f=0;h>f;f++)d=b[f],c=d.selector+" ",void 0===e[c]&&(e[c]=d
 .needsContext?m(c,this).index(i)>=0:m.find(c,this,null,[i]).length),e[c]&&e.push(d);e.length&&g.push({elem:i,handlers:e})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},fix:function(a){if(a[m.expando])return a;var b,c,d,e=a.type,f=a,g=this.fixHooks[e];g||(this.fixHooks[e]=g=Z.test(e)?this.mouseHooks:Y.test(e)?this.keyHooks:{}),d=g.props?this.props.concat(g.props):this.props,a=new m.Event(f),b=d.length;while(b--)c=d[b],a[c]=f[c];return a.target||(a.target=f.srcElement||y),3===a.target.nodeType&&(a.target=a.target.parentNode),a.metaKey=!!a.metaKey,g.filter?g.filter(a,f):a},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY
  toElement".split(" "),filter:function(a,b){var c,d,e,f=b.button,g=b.fromElement;return null==a.pageX&&null!=b.clientX&&(d=a.target.ownerDocument||y,e=d.documentElement,c=d.body,a.pageX=b.clientX+(e&&e.scrollLeft||c&&c.scrollLeft||0)-(e&&e.clientLeft||c&&c.clientLeft||0),a.pageY=b.clientY+(e&&e.scrollTop||c&&c.scrollTop||0)-(e&&e.clientTop||c&&c.clientTop||0)),!a.relatedTarget&&g&&(a.relatedTarget=g===a.target?b.toElement:g),a.which||void 0===f||(a.which=1&f?1:2&f?3:4&f?2:0),a}},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==ca()&&this.focus)try{return this.focus(),!1}catch(a){}},delegateType:"focusin"},blur:{trigger:function(){return this===ca()&&this.blur?(this.blur(),!1):void 0},delegateType:"focusout"},click:{trigger:function(){return m.nodeName(this,"input")&&"checkbox"===this.type&&this.click?(this.click(),!1):void 0},_default:function(a){return m.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&a.originalEvent&&(a.originalEve
 nt.returnValue=a.result)}}},simulate:function(a,b,c,d){var e=m.extend(new m.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?m.event.trigger(e,null,b):m.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},m.removeEvent=y.removeEventListener?function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)}:function(a,b,c){var d="on"+b;a.detachEvent&&(typeof a[d]===K&&(a[d]=null),a.detachEvent(d,c))},m.Event=function(a,b){return this instanceof m.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?aa:ba):this.type=a,b&&m.extend(this,b),this.timeStamp=a&&a.timeStamp||m.now(),void(this[m.expando]=!0)):new m.Event(a,b)},m.Event.prototype={isDefaultPrevented:ba,isPropagationStopped:ba,isImmediatePropagationStopped:ba,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=aa,a&&(a.preventDefault?a.preventDefault():a.returnValue=!1)},stopPropagation:fu
 nction(){var a=this.originalEvent;this.isPropagationStopped=aa,a&&(a.stopPropagation&&a.stopPropagation(),a.cancelBubble=!0)},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=aa,a&&a.stopImmediatePropagation&&a.stopImmediatePropagation(),this.stopPropagation()}},m.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(a,b){m.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c,d=this,e=a.relatedTarget,f=a.handleObj;return(!e||e!==d&&!m.contains(d,e))&&(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),k.submitBubbles||(m.event.special.submit={setup:function(){return m.nodeName(this,"form")?!1:void m.event.add(this,"click._submit keypress._submit",function(a){var b=a.target,c=m.nodeName(b,"input")||m.nodeName(b,"button")?b.form:void 0;c&&!m._data(c,"submitBubbles")&&(m.event.add(c,"submit._submit",function(a){a._submit_bubble=!0}),m._data(c,"subm
 itBubbles",!0))})},postDispatch:function(a){a._submit_bubble&&(delete a._submit_bubble,this.parentNode&&!a.isTrigger&&m.event.simulate("submit",this.parentNode,a,!0))},teardown:function(){return m.nodeName(this,"form")?!1:void m.event.remove(this,"._submit")}}),k.changeBubbles||(m.event.special.change={setup:function(){return X.test(this.nodeName)?(("checkbox"===this.type||"radio"===this.type)&&(m.event.add(this,"propertychange._change",function(a){"checked"===a.originalEvent.propertyName&&(this._just_changed=!0)}),m.event.add(this,"click._change",function(a){this._just_changed&&!a.isTrigger&&(this._just_changed=!1),m.event.simulate("change",this,a,!0)})),!1):void m.event.add(this,"beforeactivate._change",function(a){var b=a.target;X.test(b.nodeName)&&!m._data(b,"changeBubbles")&&(m.event.add(b,"change._change",function(a){!this.parentNode||a.isSimulated||a.isTrigger||m.event.simulate("change",this.parentNode,a,!0)}),m._data(b,"changeBubbles",!0))})},handle:function(a){var b=a.targe
 t;return this!==b||a.isSimulated||a.isTrigger||"radio"!==b.type&&"checkbox"!==b.type?a.handleObj.handler.apply(this,arguments):void 0},teardown:function(){return m.event.remove(this,"._change"),!X.test(this.nodeName)}}),k.focusinBubbles||m.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){m.event.simulate(b,a.target,m.event.fix(a),!0)};m.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=m._data(d,b);e||d.addEventListener(a,c,!0),m._data(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=m._data(d,b)-1;e?m._data(d,b,e):(d.removeEventListener(a,c,!0),m._removeData(d,b))}}}),m.fn.extend({on:function(a,b,c,d,e){var f,g;if("object"==typeof a){"string"!=typeof b&&(c=c||b,b=void 0);for(f in a)this.on(f,b,c,a[f],e);return this}if(null==c&&null==d?(d=b,c=b=void 0):null==d&&("string"==typeof b?(d=c,c=void 0):(d=c,c=b,b=void 0)),d===!1)d=ba;else if(!d)return this;return 1===e&&(g=d,d=function(a){return m().off(a),g.apply(this,arguments)},
 d.guid=g.guid||(g.guid=m.guid++)),this.each(function(){m.event.add(this,a,d,c,b)})},one:function(a,b,c,d){return this.on(a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,m(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return(b===!1||"function"==typeof b)&&(c=b,b=void 0),c===!1&&(c=ba),this.each(function(){m.event.remove(this,a,c,b)})},trigger:function(a,b){return this.each(function(){m.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?m.event.trigger(a,b,c,!0):void 0}});function da(a){var b=ea.split("|"),c=a.createDocumentFragment();if(c.createElement)while(b.length)c.createElement(b.pop());return c}var ea="abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",fa=/ jQuery\d+="(?:null|\d+)"/g
 ,ga=new RegExp("<(?:"+ea+")[\\s/>]","i"),ha=/^\s+/,ia=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,ja=/<([\w:]+)/,ka=/<tbody/i,la=/<|&#?\w+;/,ma=/<(?:script|style|link)/i,na=/checked\s*(?:[^=]|=\s*.checked.)/i,oa=/^$|\/(?:java|ecma)script/i,pa=/^true\/(.*)/,qa=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,ra={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],area:[1,"<map>","</map>"],param:[1,"<object>","</object>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:k.htmlSerialize?[0,"",""]:[1,"X<div>","</div>"]},sa=da(y),ta=sa.appendChild(y.createElement("div"));ra.optgroup=ra.option,ra.tbody=ra.tfoot=ra.colgroup=ra.caption=ra.thead,ra.th=ra.td;function ua(a,b){var c,d,e=0,f=typeof a.getElementsByTagName!==K?a.getElementsByTagName(b||"*"):typeof a.querySelectorAll!==K
 ?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||m.nodeName(d,b)?f.push(d):m.merge(f,ua(d,b));return void 0===b||b&&m.nodeName(a,b)?m.merge([a],f):f}function va(a){W.test(a.type)&&(a.defaultChecked=a.checked)}function wa(a,b){return m.nodeName(a,"table")&&m.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function xa(a){return a.type=(null!==m.find.attr(a,"type"))+"/"+a.type,a}function ya(a){var b=pa.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function za(a,b){for(var c,d=0;null!=(c=a[d]);d++)m._data(c,"globalEval",!b||m._data(b[d],"globalEval"))}function Aa(a,b){if(1===b.nodeType&&m.hasData(a)){var c,d,e,f=m._data(a),g=m._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)m.event.add(b,c,h[c][d])}g.data&&(g.data=m.extend({},g.data))}}function Ba(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeN
 ame.toLowerCase(),!k.noCloneEvent&&b[m.expando]){e=m._data(b);for(d in e.events)m.removeEvent(b,d,e.handle);b.removeAttribute(m.expando)}"script"===c&&b.text!==a.text?(xa(b).text=a.text,ya(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),k.html5Clone&&a.innerHTML&&!m.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&W.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}}m.extend({clone:function(a,b,c){var d,e,f,g,h,i=m.contains(a.ownerDocument,a);if(k.html5Clone||m.isXMLDoc(a)||!ga.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(ta.innerHTML=a.outerHTML,ta.removeChild(f=ta.firstChild)),!(k.noCloneEvent&&k.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||m.isXMLDoc(a)))for(d=ua(f),h=ua(a),g=0;null!=(e=h[g]);++g)d[g]&&Ba(e,d[g]);if(b)if(c)for(h=h||ua(a),d=d||ua(f),g=0;null!=(e=h[g]);g++)Aa(e,d[g]);else Aa(a,f);ret
 urn d=ua(f,"script"),d.length>0&&za(d,!i&&ua(a,"script")),d=h=e=null,f},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,l,n=a.length,o=da(b),p=[],q=0;n>q;q++)if(f=a[q],f||0===f)if("object"===m.type(f))m.merge(p,f.nodeType?[f]:f);else if(la.test(f)){h=h||o.appendChild(b.createElement("div")),i=(ja.exec(f)||["",""])[1].toLowerCase(),l=ra[i]||ra._default,h.innerHTML=l[1]+f.replace(ia,"<$1></$2>")+l[2],e=l[0];while(e--)h=h.lastChild;if(!k.leadingWhitespace&&ha.test(f)&&p.push(b.createTextNode(ha.exec(f)[0])),!k.tbody){f="table"!==i||ka.test(f)?"<table>"!==l[1]||ka.test(f)?0:h:h.firstChild,e=f&&f.childNodes.length;while(e--)m.nodeName(j=f.childNodes[e],"tbody")&&!j.childNodes.length&&f.removeChild(j)}m.merge(p,h.childNodes),h.textContent="";while(h.firstChild)h.removeChild(h.firstChild);h=o.lastChild}else p.push(b.createTextNode(f));h&&o.removeChild(h),k.appendChecked||m.grep(ua(p,"input"),va),q=0;while(f=p[q++])if((!d||-1===m.inArray(f,d))&&(g=m.contains(f.ownerDocument,f),h=ua(o.ap
 pendChild(f),"script"),g&&za(h),c)){e=0;while(f=h[e++])oa.test(f.type||"")&&c.push(f)}return h=null,o},cleanData:function(a,b){for(var d,e,f,g,h=0,i=m.expando,j=m.cache,l=k.deleteExpando,n=m.event.special;null!=(d=a[h]);h++)if((b||m.acceptData(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in g.events)n[e]?m.event.remove(d,e):m.removeEvent(d,e,g.handle);j[f]&&(delete j[f],l?delete d[i]:typeof d.removeAttribute!==K?d.removeAttribute(i):d[i]=null,c.push(f))}}}),m.fn.extend({text:function(a){return V(this,function(a){return void 0===a?m.text(this):this.empty().append((this[0]&&this[0].ownerDocument||y).createTextNode(a))},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wa(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wa(this,a);b.insertBefore(a,b.firstChild)}})},before:functi
 on(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var c,d=a?m.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||m.cleanData(ua(c)),c.parentNode&&(b&&m.contains(c.ownerDocument,c)&&za(ua(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&m.cleanData(ua(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&m.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return m.clone(this,a,b)})},html:function(a){return V(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(fa,""):void 0;if(!("string"!=typeof a||ma.test(a)||!k.htmlSerialize&&ga.test(a)||!k.lead
 ingWhitespace&&ha.test(a)||ra[(ja.exec(a)||["",""])[1].toLowerCase()])){a=a.replace(ia,"<$1></$2>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(m.cleanData(ua(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,m.cleanData(ua(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,l=this.length,n=this,o=l-1,p=a[0],q=m.isFunction(p);if(q||l>1&&"string"==typeof p&&!k.checkClone&&na.test(p))return this.each(function(c){var d=n.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(l&&(i=m.buildFragment(a,this[0].ownerDocument,!1,this),c=i.firstChild,1===i.childNodes.length&&(i=c),c)){for(g=m.map(ua(i,"script"),xa),f=g.length;l>j;j++)d=i,j!==o&&(d=m.clone(d,!0,!0),f&&m.merge(g,ua(d,"script"))),b.call(this[j],d,j
 );if(f)for(h=g[g.length-1].ownerDocument,m.map(g,ya),j=0;f>j;j++)d=g[j],oa.test(d.type||"")&&!m._data(d,"globalEval")&&m.contains(h,d)&&(d.src?m._evalUrl&&m._evalUrl(d.src):m.globalEval((d.text||d.textContent||d.innerHTML||"").replace(qa,"")));i=c=null}return this}}),m.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){m.fn[a]=function(a){for(var c,d=0,e=[],g=m(a),h=g.length-1;h>=d;d++)c=d===h?this:this.clone(!0),m(g[d])[b](c),f.apply(e,c.get());return this.pushStack(e)}});var Ca,Da={};function Ea(b,c){var d,e=m(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:m.css(e[0],"display");return e.detach(),f}function Fa(a){var b=y,c=Da[a];return c||(c=Ea(a,b),"none"!==c&&c||(Ca=(Ca||m("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=(Ca[0].contentWindow||Ca[0].contentDocument).document,b.write(),b.close(),c=Ea(a,b),Ca.deta
 ch()),Da[a]=c),c}!function(){var a;k.shrinkWrapBlocks=function(){if(null!=a)return a;a=!1;var b,c,d;return c=y.getElementsByTagName("body")[0],c&&c.style?(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),typeof b.style.zoom!==K&&(b.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:1px;width:1px;zoom:1",b.appendChild(y.createElement("div")).style.width="5px",a=3!==b.offsetWidth),c.removeChild(d),a):void 0}}();var Ga=/^margin/,Ha=new RegExp("^("+S+")(?!px)[a-z%]+$","i"),Ia,Ja,Ka=/^(top|right|bottom|left)$/;a.getComputedStyle?(Ia=function(b){return b.ownerDocument.defaultView.opener?b.ownerDocument.defaultView.getComputedStyle(b,null):a.getComputedStyle(b,null)},Ja=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ia(a),g=c?c.getPropertyValue(b)||c[b]:void 0,c&&(""!==g||m.contains(a.owner
 Document,a)||(g=m.style(a,b)),Ha.test(g)&&Ga.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f)),void 0===g?g:g+""}):y.documentElement.currentStyle&&(Ia=function(a){return a.currentStyle},Ja=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ia(a),g=c?c[b]:void 0,null==g&&h&&h[b]&&(g=h[b]),Ha.test(g)&&!Ka.test(b)&&(d=h.left,e=a.runtimeStyle,f=e&&e.left,f&&(e.left=a.currentStyle.left),h.left="fontSize"===b?"1em":g,g=h.pixelLeft+"px",h.left=d,f&&(e.left=f)),void 0===g?g:g+""||"auto"});function La(a,b){return{get:function(){var c=a();if(null!=c)return c?void delete this.get:(this.get=b).apply(this,arguments)}}}!function(){var b,c,d,e,f,g,h;if(b=y.createElement("div"),b.innerHTML="  <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",d=b.getElementsByTagName("a")[0],c=d&&d.style){c.cssText="float:left;opacity:.5",k.opacity="0.5"===c.opacity,k.cssFloat=!!c.cssFloat,b.style.backgroundClip="content-box",
 b.cloneNode(!0).style.backgroundClip="",k.clearCloneStyle="content-box"===b.style.backgroundClip,k.boxSizing=""===c.boxSizing||""===c.MozBoxSizing||""===c.WebkitBoxSizing,m.extend(k,{reliableHiddenOffsets:function(){return null==g&&i(),g},boxSizingReliable:function(){return null==f&&i(),f},pixelPosition:function(){return null==e&&i(),e},reliableMarginRight:function(){return null==h&&i(),h}});function i(){var b,c,d,i;c=y.getElementsByTagName("body")[0],c&&c.style&&(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),b.style.cssText="-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;display:block;margin-top:1%;top:1%;border:1px;padding:1px;width:4px;position:absolute",e=f=!1,h=!0,a.getComputedStyle&&(e="1%"!==(a.getComputedStyle(b,null)||{}).top,f="4px"===(a.getComputedStyle(b,null)||{width:"4px"}).width,i=b.appendChild(y.createElement("div")),i.s
 tyle.cssText=b.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0",i.style.marginRight=i.style.width="0",b.style.width="1px",h=!parseFloat((a.getComputedStyle(i,null)||{}).marginRight),b.removeChild(i)),b.innerHTML="<table><tr><td></td><td>t</td></tr></table>",i=b.getElementsByTagName("td"),i[0].style.cssText="margin:0;border:0;padding:0;display:none",g=0===i[0].offsetHeight,g&&(i[0].style.display="",i[1].style.display="none",g=0===i[0].offsetHeight),c.removeChild(d))}}}(),m.swap=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e};var Ma=/alpha\([^)]*\)/i,Na=/opacity\s*=\s*([^)]*)/,Oa=/^(none|table(?!-c[ea]).+)/,Pa=new RegExp("^("+S+")(.*)$","i"),Qa=new RegExp("^([+-])=("+S+")","i"),Ra={position:"absolute",visibility:"hidden",display:"block"},Sa={letterSpacing:"0",fontWeight:"400"},Ta=["Webkit","O","Moz","ms"];function 
 Ua(a,b){if(b in a)return b;var c=b.charAt(0).toUpperCase()+b.slice(1),d=b,e=Ta.length;while(e--)if(b=Ta[e]+c,b in a)return b;return d}function Va(a,b){for(var c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=m._data(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&U(d)&&(f[g]=m._data(d,"olddisplay",Fa(d.nodeName)))):(e=U(d),(c&&"none"!==c||!e)&&m._data(d,"olddisplay",e?c:m.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}function Wa(a,b,c){var d=Pa.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||"px"):b}function Xa(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=m.css(a,c+T[f],!0,e)),d?("content"===c&&(g-=m.css(a,"padding"+T[f],!0,e)),"margin"!==c&&(g-=m.css(a,"border"+T[f]+"Width",!0,e))):(g+=m.css(a,"padding"+T[f],!0,e),"padding"!==c&&(g+=m.css(a,"border"+T[f]+"Width",!0,e)));re
 turn g}function Ya(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f=Ia(a),g=k.boxSizing&&"border-box"===m.css(a,"boxSizing",!1,f);if(0>=e||null==e){if(e=Ja(a,b,f),(0>e||null==e)&&(e=a.style[b]),Ha.test(e))return e;d=g&&(k.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+Xa(a,b,c||(g?"border":"content"),d,f)+"px"}m.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=Ja(a,"opacity");return""===c?"1":c}}}},cssNumber:{columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":k.cssFloat?"cssFloat":"styleFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=m.camelCase(b),i=a.style;if(b=m.cssProps[h]||(m.cssProps[h]=Ua(i,h)),g=m.cssHooks[b]||m.cssHooks[h],void 0===c)return g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b];if(f=typeof c,"string"===f&&(e=Qa.exec(c))&&(c=(e[1]+1)*e[2]+parseFloat(m.css(a,b)),f="number"),nul
 l!=c&&c===c&&("number"!==f||m.cssNumber[h]||(c+="px"),k.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),!(g&&"set"in g&&void 0===(c=g.set(a,c,d)))))try{i[b]=c}catch(j){}}},css:function(a,b,c,d){var e,f,g,h=m.camelCase(b);return b=m.cssProps[h]||(m.cssProps[h]=Ua(a.style,h)),g=m.cssHooks[b]||m.cssHooks[h],g&&"get"in g&&(f=g.get(a,!0,c)),void 0===f&&(f=Ja(a,b,d)),"normal"===f&&b in Sa&&(f=Sa[b]),""===c||c?(e=parseFloat(f),c===!0||m.isNumeric(e)?e||0:f):f}}),m.each(["height","width"],function(a,b){m.cssHooks[b]={get:function(a,c,d){return c?Oa.test(m.css(a,"display"))&&0===a.offsetWidth?m.swap(a,Ra,function(){return Ya(a,b,d)}):Ya(a,b,d):void 0},set:function(a,c,d){var e=d&&Ia(a);return Wa(a,c,d?Xa(a,b,d,k.boxSizing&&"border-box"===m.css(a,"boxSizing",!1,e),e):0)}}}),k.opacity||(m.cssHooks.opacity={get:function(a,b){return Na.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?.01*parseFloat(RegExp.$1)+"":b?"1":""},set:function(a,b){var c=a.style,d
 =a.currentStyle,e=m.isNumeric(b)?"alpha(opacity="+100*b+")":"",f=d&&d.filter||c.filter||"";c.zoom=1,(b>=1||""===b)&&""===m.trim(f.replace(Ma,""))&&c.removeAttribute&&(c.removeAttribute("filter"),""===b||d&&!d.filter)||(c.filter=Ma.test(f)?f.replace(Ma,e):f+" "+e)}}),m.cssHooks.marginRight=La(k.reliableMarginRight,function(a,b){return b?m.swap(a,{display:"inline-block"},Ja,[a,"marginRight"]):void 0}),m.each({margin:"",padding:"",border:"Width"},function(a,b){m.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+T[d]+b]=f[d]||f[d-2]||f[0];return e}},Ga.test(a)||(m.cssHooks[a+b].set=Wa)}),m.fn.extend({css:function(a,b){return V(this,function(a,b,c){var d,e,f={},g=0;if(m.isArray(b)){for(d=Ia(a),e=b.length;e>g;g++)f[b[g]]=m.css(a,b[g],!1,d);return f}return void 0!==c?m.style(a,b,c):m.css(a,b)},a,b,arguments.length>1)},show:function(){return Va(this,!0)},hide:function(){return Va(this)},toggle:function(a){return"boolean"==typeof a?a?this.sh
 ow():this.hide():this.each(function(){U(this)?m(this).show():m(this).hide()})}});function Za(a,b,c,d,e){
+return new Za.prototype.init(a,b,c,d,e)}m.Tween=Za,Za.prototype={constructor:Za,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||"swing",this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(m.cssNumber[c]?"":"px")},cur:function(){var a=Za.propHooks[this.prop];return a&&a.get?a.get(this):Za.propHooks._default.get(this)},run:function(a){var b,c=Za.propHooks[this.prop];return this.options.duration?this.pos=b=m.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):this.pos=b=a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Za.propHooks._default.set(this),this}},Za.prototype.init.prototype=Za.prototype,Za.propHooks={_default:{get:function(a){var b;return null==a.elem[a.prop]||a.elem.style&&null!=a.elem.style[a.prop]?(b=m.css(a.elem,a.prop,""),b&&"auto"!==b?b:0):a.elem[a.prop]},set:function(a){m.fx.step[a.prop]?m.fx.step[a.prop](a):a.elem.style&&(null!
 =a.elem.style[m.cssProps[a.prop]]||m.cssHooks[a.prop])?m.style(a.elem,a.prop,a.now+a.unit):a.elem[a.prop]=a.now}}},Za.propHooks.scrollTop=Za.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},m.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2}},m.fx=Za.prototype.init,m.fx.step={};var $a,_a,ab=/^(?:toggle|show|hide)$/,bb=new RegExp("^(?:([+-])=|)("+S+")([a-z%]*)$","i"),cb=/queueHooks$/,db=[ib],eb={"*":[function(a,b){var c=this.createTween(a,b),d=c.cur(),e=bb.exec(b),f=e&&e[3]||(m.cssNumber[a]?"":"px"),g=(m.cssNumber[a]||"px"!==f&&+d)&&bb.exec(m.css(c.elem,a)),h=1,i=20;if(g&&g[3]!==f){f=f||g[3],e=e||[],g=+d||1;do h=h||".5",g/=h,m.style(c.elem,a,g+f);while(h!==(h=c.cur()/d)&&1!==h&&--i)}return e&&(g=c.start=+g||+d||0,c.unit=f,c.end=e[1]?g+(e[1]+1)*e[2]:+e[2]),c}]};function fb(){return setTimeout(function(){$a=void 0}),$a=m.now()}function gb(a,b){var c,d={height:a},e=0;for(b=b?1:0;4>e;e+=2-b)c=T[e],d["m
 argin"+c]=d["padding"+c]=a;return b&&(d.opacity=d.width=a),d}function hb(a,b,c){for(var d,e=(eb[b]||[]).concat(eb["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function ib(a,b,c){var d,e,f,g,h,i,j,l,n=this,o={},p=a.style,q=a.nodeType&&U(a),r=m._data(a,"fxshow");c.queue||(h=m._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,n.always(function(){n.always(function(){h.unqueued--,m.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[p.overflow,p.overflowX,p.overflowY],j=m.css(a,"display"),l="none"===j?m._data(a,"olddisplay")||Fa(a.nodeName):j,"inline"===l&&"none"===m.css(a,"float")&&(k.inlineBlockNeedsLayout&&"inline"!==Fa(a.nodeName)?p.zoom=1:p.display="inline-block")),c.overflow&&(p.overflow="hidden",k.shrinkWrapBlocks()||n.always(function(){p.overflow=c.overflow[0],p.overflowX=c.overflow[1],p.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],ab.exec(e)){
 if(delete b[d],f=f||"toggle"===e,e===(q?"hide":"show")){if("show"!==e||!r||void 0===r[d])continue;q=!0}o[d]=r&&r[d]||m.style(a,d)}else j=void 0;if(m.isEmptyObject(o))"inline"===("none"===j?Fa(a.nodeName):j)&&(p.display=j);else{r?"hidden"in r&&(q=r.hidden):r=m._data(a,"fxshow",{}),f&&(r.hidden=!q),q?m(a).show():n.done(function(){m(a).hide()}),n.done(function(){var b;m._removeData(a,"fxshow");for(b in o)m.style(a,b,o[b])});for(d in o)g=hb(q?r[d]:0,d,n),d in r||(r[d]=g.start,q&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function jb(a,b){var c,d,e,f,g;for(c in a)if(d=m.camelCase(c),e=b[d],f=a[c],m.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=m.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function kb(a,b,c){var d,e,f=0,g=db.length,h=m.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=$a||fb(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.twee
 ns.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:m.extend({},b),opts:m.extend(!0,{specialEasing:{}},c),originalProperties:b,originalOptions:c,startTime:$a||fb(),duration:c.duration,tweens:[],createTween:function(b,c){var d=m.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?h.resolveWith(a,[j,b]):h.rejectWith(a,[j,b]),this}}),k=j.props;for(jb(k,j.opts.specialEasing);g>f;f++)if(d=db[f].call(j,a,k,j.opts))return d;return m.map(k,hb,j),m.isFunction(j.opts.start)&&j.opts.start.call(a,j),m.fx.timer(m.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}m.Animation=m.extend(kb,{tweener:function(a,b){m.isFunction(a)?(b=a,a=["*"]):a=a.split(" ");for(var c,d=0,e=a.length;e>d;d
 ++)c=a[d],eb[c]=eb[c]||[],eb[c].unshift(b)},prefilter:function(a,b){b?db.unshift(a):db.push(a)}}),m.speed=function(a,b,c){var d=a&&"object"==typeof a?m.extend({},a):{complete:c||!c&&b||m.isFunction(a)&&a,duration:a,easing:c&&b||b&&!m.isFunction(b)&&b};return d.duration=m.fx.off?0:"number"==typeof d.duration?d.duration:d.duration in m.fx.speeds?m.fx.speeds[d.duration]:m.fx.speeds._default,(null==d.queue||d.queue===!0)&&(d.queue="fx"),d.old=d.complete,d.complete=function(){m.isFunction(d.old)&&d.old.call(this),d.queue&&m.dequeue(this,d.queue)},d},m.fn.extend({fadeTo:function(a,b,c,d){return this.filter(U).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=m.isEmptyObject(a),f=m.speed(b,c,d),g=function(){var b=kb(this,m.extend({},a),f);(e||m._data(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b
 =a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=m.timers,g=m._data(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&cb.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));(b||!c)&&m.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=m._data(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=m.timers,g=d?d.length:0;for(c.finish=!0,m.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),m.each(["toggle","show","hide"],function(a,b){var c=m.fn[b];m.fn[b]=function(a,d,e){return null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(gb(b,!0),a,d,e)}}),m.each({slideDown:gb("show"),slideUp:gb("hide"),slideToggle:gb("toggle"),fadeIn:{opacity:"show"
 },fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){m.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),m.timers=[],m.fx.tick=function(){var a,b=m.timers,c=0;for($a=m.now();c<b.length;c++)a=b[c],a()||b[c]!==a||b.splice(c--,1);b.length||m.fx.stop(),$a=void 0},m.fx.timer=function(a){m.timers.push(a),a()?m.fx.start():m.timers.pop()},m.fx.interval=13,m.fx.start=function(){_a||(_a=setInterval(m.fx.tick,m.fx.interval))},m.fx.stop=function(){clearInterval(_a),_a=null},m.fx.speeds={slow:600,fast:200,_default:400},m.fn.delay=function(a,b){return a=m.fx?m.fx.speeds[a]||a:a,b=b||"fx",this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},function(){var a,b,c,d,e;b=y.createElement("div"),b.setAttribute("className","t"),b.innerHTML="  <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",d=b.getElementsByTagName("a")[0],c=y.createElement("select"),e=c.appendChild(y.createElement("option")),a=b.getElementsByTagName("input")[0],d.
 style.cssText="top:1px",k.getSetAttribute="t"!==b.className,k.style=/top/.test(d.getAttribute("style")),k.hrefNormalized="/a"===d.getAttribute("href"),k.checkOn=!!a.value,k.optSelected=e.selected,k.enctype=!!y.createElement("form").enctype,c.disabled=!0,k.optDisabled=!e.disabled,a=y.createElement("input"),a.setAttribute("value",""),k.input=""===a.getAttribute("value"),a.value="t",a.setAttribute("type","radio"),k.radioValue="t"===a.value}();var lb=/\r/g;m.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=m.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,m(this).val()):a,null==e?e="":"number"==typeof e?e+="":m.isArray(e)&&(e=m.map(e,function(a){return null==a?"":a+""})),b=m.valHooks[this.type]||m.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=m.valHooks[e.type]||m.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"str
 ing"==typeof c?c.replace(lb,""):null==c?"":c)}}}),m.extend({valHooks:{option:{get:function(a){var b=m.find.attr(a,"value");return null!=b?b:m.trim(m.text(a))}},select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],!(!c.selected&&i!==e||(k.optDisabled?c.disabled:null!==c.getAttribute("disabled"))||c.parentNode.disabled&&m.nodeName(c.parentNode,"optgroup"))){if(b=m(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=m.makeArray(b),g=e.length;while(g--)if(d=e[g],m.inArray(m.valHooks.option.get(d),f)>=0)try{d.selected=c=!0}catch(h){d.scrollHeight}else d.selected=!1;return c||(a.selectedIndex=-1),e}}}}),m.each(["radio","checkbox"],function(){m.valHooks[this]={set:function(a,b){return m.isArray(b)?a.checked=m.inArray(m(a).val(),b)>=0:void 0}},k.checkOn||(m.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})});var mb,nb,ob=m.expr.
 attrHandle,pb=/^(?:checked|selected)$/i,qb=k.getSetAttribute,rb=k.input;m.fn.extend({attr:function(a,b){return V(this,m.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){m.removeAttr(this,a)})}}),m.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(a&&3!==f&&8!==f&&2!==f)return typeof a.getAttribute===K?m.prop(a,b,c):(1===f&&m.isXMLDoc(a)||(b=b.toLowerCase(),d=m.attrHooks[b]||(m.expr.match.bool.test(b)?nb:mb)),void 0===c?d&&"get"in d&&null!==(e=d.get(a,b))?e:(e=m.find.attr(a,b),null==e?void 0:e):null!==c?d&&"set"in d&&void 0!==(e=d.set(a,c,b))?e:(a.setAttribute(b,c+""),c):void m.removeAttr(a,b))},removeAttr:function(a,b){var c,d,e=0,f=b&&b.match(E);if(f&&1===a.nodeType)while(c=f[e++])d=m.propFix[c]||c,m.expr.match.bool.test(c)?rb&&qb||!pb.test(c)?a[d]=!1:a[m.camelCase("default-"+c)]=a[d]=!1:m.attr(a,c,""),a.removeAttribute(qb?c:d)},attrHooks:{type:{set:function(a,b){if(!k.radioValue&&"radio"===b&&m.nodeName(a,"input")){var c=a.value;return a.setAttri
 bute("type",b),c&&(a.value=c),b}}}}}),nb={set:function(a,b,c){return b===!1?m.removeAttr(a,c):rb&&qb||!pb.test(c)?a.setAttribute(!qb&&m.propFix[c]||c,c):a[m.camelCase("default-"+c)]=a[c]=!0,c}},m.each(m.expr.match.bool.source.match(/\w+/g),function(a,b){var c=ob[b]||m.find.attr;ob[b]=rb&&qb||!pb.test(b)?function(a,b,d){var e,f;return d||(f=ob[b],ob[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,ob[b]=f),e}:function(a,b,c){return c?void 0:a[m.camelCase("default-"+b)]?b.toLowerCase():null}}),rb&&qb||(m.attrHooks.value={set:function(a,b,c){return m.nodeName(a,"input")?void(a.defaultValue=b):mb&&mb.set(a,b,c)}}),qb||(mb={set:function(a,b,c){var d=a.getAttributeNode(c);return d||a.setAttributeNode(d=a.ownerDocument.createAttribute(c)),d.value=b+="","value"===c||b===a.getAttribute(c)?b:void 0}},ob.id=ob.name=ob.coords=function(a,b,c){var d;return c?void 0:(d=a.getAttributeNode(b))&&""!==d.value?d.value:null},m.valHooks.button={get:function(a,b){var c=a.getAttributeNode(b);return c&&c.specifie
 d?c.value:void 0},set:mb.set},m.attrHooks.contenteditable={set:function(a,b,c){mb.set(a,""===b?!1:b,c)}},m.each(["width","height"],function(a,b){m.attrHooks[b]={set:function(a,c){return""===c?(a.setAttribute(b,"auto"),c):void 0}}})),k.style||(m.attrHooks.style={get:function(a){return a.style.cssText||void 0},set:function(a,b){return a.style.cssText=b+""}});var sb=/^(?:input|select|textarea|button|object)$/i,tb=/^(?:a|area)$/i;m.fn.extend({prop:function(a,b){return V(this,m.prop,a,b,arguments.length>1)},removeProp:function(a){return a=m.propFix[a]||a,this.each(function(){try{this[a]=void 0,delete this[a]}catch(b){}})}}),m.extend({propFix:{"for":"htmlFor","class":"className"},prop:function(a,b,c){var d,e,f,g=a.nodeType;if(a&&3!==g&&8!==g&&2!==g)return f=1!==g||!m.isXMLDoc(a),f&&(b=m.propFix[b]||b,e=m.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){var b=m.find.attr(a,"tabindex");
 return b?parseInt(b,10):sb.test(a.nodeName)||tb.test(a.nodeName)&&a.href?0:-1}}}}),k.hrefNormalized||m.each(["href","src"],function(a,b){m.propHooks[b]={get:function(a){return a.getAttribute(b,4)}}}),k.optSelected||(m.propHooks.selected={get:function(a){var b=a.parentNode;return b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex),null}}),m.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){m.propFix[this.toLowerCase()]=this}),k.enctype||(m.propFix.enctype="encoding");var ub=/[\t\r\n\f]/g;m.fn.extend({addClass:function(a){var b,c,d,e,f,g,h=0,i=this.length,j="string"==typeof a&&a;if(m.isFunction(a))return this.each(function(b){m(this).addClass(a.call(this,b,this.className))});if(j)for(b=(a||"").match(E)||[];i>h;h++)if(c=this[h],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(ub," "):" ")){f=0;while(e=b[f++])d.indexOf(" "+e+" ")<0&&(d+=e+" ");g=m.trim(d),c.className!==g&&(c.
 className=g)}return this},removeClass:function(a){var b,c,d,e,f,g,h=0,i=this.length,j=0===arguments.length||"string"==typeof a&&a;if(m.isFunction(a))return this.each(function(b){m(this).removeClass(a.call(this,b,this.className))});if(j)for(b=(a||"").match(E)||[];i>h;h++)if(c=this[h],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(ub," "):"")){f=0;while(e=b[f++])while(d.indexOf(" "+e+" ")>=0)d=d.replace(" "+e+" "," ");g=a?m.trim(d):"",c.className!==g&&(c.className=g)}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):this.each(m.isFunction(a)?function(c){m(this).toggleClass(a.call(this,c,this.className,b),b)}:function(){if("string"===c){var b,d=0,e=m(this),f=a.match(E)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else(c===K||"boolean"===c)&&(this.className&&m._data(this,"__className__",this.className),this.className=this.className||a===!1?"":m._data(this,"__className__")|
 |"")})},hasClass:function(a){for(var b=" "+a+" ",c=0,d=this.length;d>c;c++)if(1===this[c].nodeType&&(" "+this[c].className+" ").replace(ub," ").indexOf(b)>=0)return!0;return!1}}),m.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){m.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),m.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)}});var vb=m.now(),wb=/\?/,xb=/(,)|(\[|{)|(}|])|"(?:[^"\\\r\n]|\\["\\\/bfnrt]|\\u[\da-fA-F]{4})*"\s*:?|true|false|null|-?(?!0\d)\d+(?:\.\d+|)(?:[eE][+-]?\d+|)/g;m.parseJSON=funct
 ion(b){if(a.JSON&&a.JSON.parse)return a.JSON.parse(b+"");var c,d=null,e=m.trim(b+"");return e&&!m.trim(e.replace(xb,function(a,b,e,f){return c&&b&&(d=0),0===d?a:(c=e||b,d+=!f-!e,"")}))?Function("return "+e)():m.error("Invalid JSON: "+b)},m.parseXML=function(b){var c,d;if(!b||"string"!=typeof b)return null;try{a.DOMParser?(d=new DOMParser,c=d.parseFromString(b,"text/xml")):(c=new ActiveXObject("Microsoft.XMLDOM"),c.async="false",c.loadXML(b))}catch(e){c=void 0}return c&&c.documentElement&&!c.getElementsByTagName("parsererror").length||m.error("Invalid XML: "+b),c};var yb,zb,Ab=/#.*$/,Bb=/([?&])_=[^&]*/,Cb=/^(.*?):[ \t]*([^\r\n]*)\r?$/gm,Db=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Eb=/^(?:GET|HEAD)$/,Fb=/^\/\//,Gb=/^([\w.+-]+:)(?:\/\/(?:[^\/?#]*@|)([^\/?#:]*)(?::(\d+)|)|)/,Hb={},Ib={},Jb="*/".concat("*");try{zb=location.href}catch(Kb){zb=y.createElement("a"),zb.href="",zb=zb.href}yb=Gb.exec(zb.toLowerCase())||[];function Lb(a){return function(b,c){"string"!=typeof b
 &&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(E)||[];if(m.isFunction(c))while(d=f[e++])"+"===d.charAt(0)?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function Mb(a,b,c,d){var e={},f=a===Ib;function g(h){var i;return e[h]=!0,m.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function Nb(a,b){var c,d,e=m.ajaxSettings.flatOptions||{};for(d in b)void 0!==b[d]&&((e[d]?a:c||(c={}))[d]=b[d]);return c&&m.extend(!0,a,c),a}function Ob(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 0===e&&(e=a.mimeType||b.getResponseHeader("Content-Type"));if(e)for(g in h)if(h[g]&&h[g].test(e)){i.unshift(g);break}if(i[0]in c)f=i[0];else{for(g in c){if(!i[0]||a.converters[g+" "+i[0]]){f=g;break}d||(d=g)}f=f||d}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function Pb(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g i
 n a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}m.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:zb,type:"GET",isLocal:Db.test(yb[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Jb,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converte
 rs:{"* text":String,"text html":!0,"text json":m.parseJSON,"text xml":m.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?Nb(Nb(a,m.ajaxSettings),b):Nb(m.ajaxSettings,a)},ajaxPrefilter:Lb(Hb),ajaxTransport:Lb(Ib),ajax:function(a,b){"object"==typeof a&&(b=a,a=void 0),b=b||{};var c,d,e,f,g,h,i,j,k=m.ajaxSetup({},b),l=k.context||k,n=k.context&&(l.nodeType||l.jquery)?m(l):m.event,o=m.Deferred(),p=m.Callbacks("once memory"),q=k.statusCode||{},r={},s={},t=0,u="canceled",v={readyState:0,getResponseHeader:function(a){var b;if(2===t){if(!j){j={};while(b=Cb.exec(f))j[b[1].toLowerCase()]=b[2]}b=j[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===t?f:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return t||(a=s[c]=s[c]||a,r[a]=b),this},overrideMimeType:function(a){return t||(k.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>t)for(b in a)q[b]=[q[b],a[b]];else v.always(a[v.status]);return this},abort:function(a){var 
 b=a||u;return i&&i.abort(b),x(0,b),this}};if(o.promise(v).complete=p.add,v.success=v.done,v.error=v.fail,k.url=((a||k.url||zb)+"").replace(Ab,"").replace(Fb,yb[1]+"//"),k.type=b.method||b.type||k.method||k.type,k.dataTypes=m.trim(k.dataType||"*").toLowerCase().match(E)||[""],null==k.crossDomain&&(c=Gb.exec(k.url.toLowerCase()),k.crossDomain=!(!c||c[1]===yb[1]&&c[2]===yb[2]&&(c[3]||("http:"===c[1]?"80":"443"))===(yb[3]||("http:"===yb[1]?"80":"443")))),k.data&&k.processData&&"string"!=typeof k.data&&(k.data=m.param(k.data,k.traditional)),Mb(Hb,k,b,v),2===t)return v;h=m.event&&k.global,h&&0===m.active++&&m.event.trigger("ajaxStart"),k.type=k.type.toUpperCase(),k.hasContent=!Eb.test(k.type),e=k.url,k.hasContent||(k.data&&(e=k.url+=(wb.test(e)?"&":"?")+k.data,delete k.data),k.cache===!1&&(k.url=Bb.test(e)?e.replace(Bb,"$1_="+vb++):e+(wb.test(e)?"&":"?")+"_="+vb++)),k.ifModified&&(m.lastModified[e]&&v.setRequestHeader("If-Modified-Since",m.lastModified[e]),m.etag[e]&&v.setRequestHeader("I
 f-None-Match",m.etag[e])),(k.data&&k.hasContent&&k.contentType!==!1||b.contentType)&&v.setRequestHeader("Content-Type",k.contentType),v.setRequestHeader("Accept",k.dataTypes[0]&&k.accepts[k.dataTypes[0]]?k.accepts[k.dataTypes[0]]+("*"!==k.dataTypes[0]?", "+Jb+"; q=0.01":""):k.accepts["*"]);for(d in k.headers)v.setRequestHeader(d,k.headers[d]);if(k.beforeSend&&(k.beforeSend.call(l,v,k)===!1||2===t))return v.abort();u="abort";for(d in{success:1,error:1,complete:1})v[d](k[d]);if(i=Mb(Ib,k,b,v)){v.readyState=1,h&&n.trigger("ajaxSend",[v,k]),k.async&&k.timeout>0&&(g=setTimeout(function(){v.abort("timeout")},k.timeout));try{t=1,i.send(r,x)}catch(w){if(!(2>t))throw w;x(-1,w)}}else x(-1,"No Transport");function x(a,b,c,d){var j,r,s,u,w,x=b;2!==t&&(t=2,g&&clearTimeout(g),i=void 0,f=d||"",v.readyState=a>0?4:0,j=a>=200&&300>a||304===a,c&&(u=Ob(k,v,c)),u=Pb(k,u,v,j),j?(k.ifModified&&(w=v.getResponseHeader("Last-Modified"),w&&(m.lastModified[e]=w),w=v.getResponseHeader("etag"),w&&(m.etag[e]=w)),
 204===a||"HEAD"===k.type?x="nocontent":304===a?x="notmodified":(x=u.state,r=u.data,s=u.error,j=!s)):(s=x,(a||!x)&&(x="error",0>a&&(a=0))),v.status=a,v.statusText=(b||x)+"",j?o.resolveWith(l,[r,x,v]):o.rejectWith(l,[v,x,s]),v.statusCode(q),q=void 0,h&&n.trigger(j?"ajaxSuccess":"ajaxError",[v,k,j?r:s]),p.fireWith(l,[v,x]),h&&(n.trigger("ajaxComplete",[v,k]),--m.active||m.event.trigger("ajaxStop")))}return v},getJSON:function(a,b,c){return m.get(a,b,c,"json")},getScript:function(a,b){return m.get(a,void 0,b,"script")}}),m.each(["get","post"],function(a,b){m[b]=function(a,c,d,e){return m.isFunction(c)&&(e=e||d,d=c,c=void 0),m.ajax({url:a,type:b,dataType:e,data:c,success:d})}}),m._evalUrl=function(a){return m.ajax({url:a,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0})},m.fn.extend({wrapAll:function(a){if(m.isFunction(a))return this.each(function(b){m(this).wrapAll(a.call(this,b))});if(this[0]){var b=m(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefo
 re(this[0]),b.map(function(){var a=this;while(a.firstChild&&1===a.firstChild.nodeType)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){return this.each(m.isFunction(a)?function(b){m(this).wrapInner(a.call(this,b))}:function(){var b=m(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=m.isFunction(a);return this.each(function(c){m(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){m.nodeName(this,"body")||m(this).replaceWith(this.childNodes)}).end()}}),m.expr.filters.hidden=function(a){return a.offsetWidth<=0&&a.offsetHeight<=0||!k.reliableHiddenOffsets()&&"none"===(a.style&&a.style.display||m.css(a,"display"))},m.expr.filters.visible=function(a){return!m.expr.filters.hidden(a)};var Qb=/%20/g,Rb=/\[\]$/,Sb=/\r?\n/g,Tb=/^(?:submit|button|image|reset|file)$/i,Ub=/^(?:input|select|textarea|keygen)/i;function Vb(a,b,c,d){var e;if(m.isArray(b))m.each(b,function(b,e){c||Rb.test(a)?d(a,e):Vb(a+"[
 "+("object"==typeof e?b:"")+"]",e,c,d)});else if(c||"object"!==m.type(b))d(a,b);else for(e in b)Vb(a+"["+e+"]",b[e],c,d)}m.param=function(a,b){var c,d=[],e=function(a,b){b=m.isFunction(b)?b():null==b?"":b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};if(void 0===b&&(b=m.ajaxSettings&&m.ajaxSettings.traditional),m.isArray(a)||a.jquery&&!m.isPlainObject(a))m.each(a,function(){e(this.name,this.value)});else for(c in a)Vb(c,a[c],b,e);return d.join("&").replace(Qb,"+")},m.fn.extend({serialize:function(){return m.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=m.prop(this,"elements");return a?m.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!m(this).is(":disabled")&&Ub.test(this.nodeName)&&!Tb.test(a)&&(this.checked||!W.test(a))}).map(function(a,b){var c=m(this).val();return null==c?null:m.isArray(c)?m.map(c,function(a){return{name:b.name,value:a.replace(Sb,"\r\n")}}):{name:b.name,value:c.replace(Sb,"\r\n")}}
 ).get()}}),m.ajaxSettings.xhr=void 0!==a.ActiveXObject?function(){return!this.isLoca

<TRUNCATED>

[13/16] storm git commit: STORM-1617: 0.10.x release docs

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/storm-hdfs.md
----------------------------------------------------------------------
diff --git a/docs/storm-hdfs.md b/docs/storm-hdfs.md
new file mode 100644
index 0000000..b5bf64d
--- /dev/null
+++ b/docs/storm-hdfs.md
@@ -0,0 +1,368 @@
+---
+title: Storm HDFS Integration
+layout: documentation
+documentation: true
+---
+
+Storm components for interacting with HDFS file systems.
+
+
+## Usage
+The following example writes pipe ("|")-delimited files to the HDFS path `hdfs://localhost:54310/foo`. After every
+1,000 tuples it will sync the filesystem, making that data visible to other HDFS clients. It will rotate files when
+they reach 5 megabytes in size.
+
+```java
+// use "|" instead of "," for field delimiter
+RecordFormat format = new DelimitedRecordFormat()
+        .withFieldDelimiter("|");
+
+// sync the filesystem after every 1k tuples
+SyncPolicy syncPolicy = new CountSyncPolicy(1000);
+
+// rotate files when they reach 5MB
+FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
+
+FileNameFormat fileNameFormat = new DefaultFileNameFormat()
+        .withPath("/foo/");
+
+HdfsBolt bolt = new HdfsBolt()
+        .withFsUrl("hdfs://localhost:54310")
+        .withFileNameFormat(fileNameFormat)
+        .withRecordFormat(format)
+        .withRotationPolicy(rotationPolicy)
+        .withSyncPolicy(syncPolicy);
+```
+
+### Packaging a Topology
+When packaging your topology, it's important that you use the
+[maven-shade-plugin](https://maven.apache.org/plugins/maven-shade-plugin/) as opposed to the
+[maven-assembly-plugin](https://maven.apache.org/plugins/maven-assembly-plugin/).
+
+The shade plugin provides facilities for merging JAR manifest entries, which the Hadoop client leverages for URL scheme
+resolution.
+
+If you experience errors such as the following:
+
+```
+java.lang.RuntimeException: Error preparing HdfsBolt: No FileSystem for scheme: hdfs
+```
+
+it's an indication that your topology jar file isn't packaged properly.
+
+If you are using maven to create your topology jar, you should use the following `maven-shade-plugin` configuration to
+create your topology jar:
+
+```xml
+<plugin>
+    <groupId>org.apache.maven.plugins</groupId>
+    <artifactId>maven-shade-plugin</artifactId>
+    <version>1.4</version>
+    <configuration>
+        <createDependencyReducedPom>true</createDependencyReducedPom>
+    </configuration>
+    <executions>
+        <execution>
+            <phase>package</phase>
+            <goals>
+                <goal>shade</goal>
+            </goals>
+            <configuration>
+                <transformers>
+                    <transformer
+                            implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
+                    <transformer
+                            implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                        <mainClass></mainClass>
+                    </transformer>
+                </transformers>
+            </configuration>
+        </execution>
+    </executions>
+</plugin>
+
+```
+
+### Specifying a Hadoop Version
+By default, storm-hdfs uses the following Hadoop dependencies:
+
+```xml
+<dependency>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-client</artifactId>
+    <version>2.2.0</version>
+    <exclusions>
+        <exclusion>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+    </exclusions>
+</dependency>
+<dependency>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-hdfs</artifactId>
+    <version>2.2.0</version>
+    <exclusions>
+        <exclusion>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+    </exclusions>
+</dependency>
+```
+
+If you are using a different version of Hadoop, you should exclude the Hadoop libraries from the storm-hdfs dependency
+and add the dependencies for your preferred version in your pom.
+
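+For example, a sketch of what that exclusion might look like in your pom (the `${storm.version}` and
+`${hadoop.version}` properties are illustrative):
+
+```xml
+<dependency>
+    <groupId>org.apache.storm</groupId>
+    <artifactId>storm-hdfs</artifactId>
+    <version>${storm.version}</version>
+    <exclusions>
+        <exclusion>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-client</artifactId>
+        </exclusion>
+        <exclusion>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-hdfs</artifactId>
+        </exclusion>
+    </exclusions>
+</dependency>
+<!-- then declare hadoop-client and hadoop-hdfs at your preferred version -->
+<dependency>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-client</artifactId>
+    <version>${hadoop.version}</version>
+</dependency>
+```
+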
+Hadoop client version incompatibilities can manifest as errors like:
+
+```
+com.google.protobuf.InvalidProtocolBufferException: Protocol message contained an invalid tag (zero)
+```
+
+## Customization
+
+### Record Formats
+Record format can be controlled by providing an implementation of the `org.apache.storm.hdfs.format.RecordFormat`
+interface:
+
+```java
+public interface RecordFormat extends Serializable {
+    byte[] format(Tuple tuple);
+}
+```
+
+The provided `org.apache.storm.hdfs.format.DelimitedRecordFormat` is capable of producing formats such as CSV and
+tab-delimited files.
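+
+For instance, a minimal sketch of a CSV configuration (the tuple field names `id` and `value` are hypothetical):
+
+```java
+// comma-delimited output over two assumed tuple fields
+RecordFormat csvFormat = new DelimitedRecordFormat()
+        .withFieldDelimiter(",")
+        .withFields(new Fields("id", "value"));
+```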
+
+
+### File Naming
+File naming can be controlled by providing an implementation of the `org.apache.storm.hdfs.format.FileNameFormat`
+interface:
+
+```java
+public interface FileNameFormat extends Serializable {
+    void prepare(Map conf, TopologyContext topologyContext);
+    String getName(long rotation, long timeStamp);
+    String getPath();
+}
+```
+
+The provided `org.apache.storm.hdfs.format.DefaultFileNameFormat` will create file names with the following format:
+
+     {prefix}{componentId}-{taskId}-{rotationNum}-{timestamp}{extension}
+
+For example:
+
+     MyBolt-5-7-1390579837830.txt
+
+By default, the prefix is empty and the extension is ".txt".
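+
+As a sketch, a format producing names like `web-MyBolt-5-7-1390579837830.csv` could be configured as follows (the
+prefix and extension values are illustrative):
+
+```java
+// the componentId, taskId, rotation number, and timestamp parts are filled in automatically
+FileNameFormat fileNameFormat = new DefaultFileNameFormat()
+        .withPath("/foo/")
+        .withPrefix("web-")
+        .withExtension(".csv");
+```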
+
+
+
+### Sync Policies
+Sync policies allow you to control when buffered data is flushed to the underlying filesystem (thus making it available
+to clients reading the data) by implementing the `org.apache.storm.hdfs.sync.SyncPolicy` interface:
+
+```java
+public interface SyncPolicy extends Serializable {
+    boolean mark(Tuple tuple, long offset);
+    void reset();
+}
+```
+The `HdfsBolt` will call the `mark()` method for every tuple it processes. Returning `true` will trigger the `HdfsBolt`
+to perform a sync/flush, after which it will call the `reset()` method.
+
+The `org.apache.storm.hdfs.sync.CountSyncPolicy` class simply triggers a sync after the specified number of tuples have
+been processed.
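+
+As a sketch of what a custom policy might look like, here is a hypothetical time-based variant (not part of
+storm-hdfs):
+
+```java
+// syncs whenever at least intervalMs milliseconds have passed since the last sync
+public class TimedSyncPolicy implements SyncPolicy {
+    private final long intervalMs;
+    private long lastSync = System.currentTimeMillis();
+
+    public TimedSyncPolicy(long intervalMs) {
+        this.intervalMs = intervalMs;
+    }
+
+    @Override
+    public boolean mark(Tuple tuple, long offset) {
+        // returning true triggers a sync; the HdfsBolt calls reset() afterwards
+        return System.currentTimeMillis() - lastSync >= intervalMs;
+    }
+
+    @Override
+    public void reset() {
+        lastSync = System.currentTimeMillis();
+    }
+}
+```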
+
+### File Rotation Policies
+Similar to sync policies, file rotation policies allow you to control when data files are rotated by providing an
+implementation of the `org.apache.storm.hdfs.rotation.FileRotationPolicy` interface:
+
+```java
+public interface FileRotationPolicy extends Serializable {
+    boolean mark(Tuple tuple, long offset);
+    void reset();
+}
+```
+
+The `org.apache.storm.hdfs.rotation.FileSizeRotationPolicy` implementation allows you to trigger file rotation when
+data files reach a specific file size:
+
+```java
+FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
+```
+
+### File Rotation Actions
+Both the HDFS bolt and the Trident State implementation allow you to register any number of `RotationAction`s.
+A `RotationAction` provides a hook for performing some action right after a file is rotated, for example moving the
+file to a different location or renaming it.
+
+
+```java
+public interface RotationAction extends Serializable {
+    void execute(FileSystem fileSystem, Path filePath) throws IOException;
+}
+```
+
+Storm-HDFS includes a simple action that will move a file after rotation:
+
+```java
+public class MoveFileAction implements RotationAction {
+    private static final Logger LOG = LoggerFactory.getLogger(MoveFileAction.class);
+
+    private String destination;
+
+    public MoveFileAction withDestination(String destDir){
+        destination = destDir;
+        return this;
+    }
+
+    @Override
+    public void execute(FileSystem fileSystem, Path filePath) throws IOException {
+        Path destPath = new Path(destination, filePath.getName());
+        LOG.info("Moving file {} to {}", filePath, destPath);
+        boolean success = fileSystem.rename(filePath, destPath);
+        if (!success) {
+            LOG.warn("Failed to move file {} to {}", filePath, destPath);
+        }
+    }
+}
+```
+
+If you are using Trident and sequence files, you can do something like this:
+
+```java
+        HdfsState.Options seqOpts = new HdfsState.SequenceFileOptions()
+                .withFileNameFormat(fileNameFormat)
+                .withSequenceFormat(new DefaultSequenceFormat("key", "data"))
+                .withRotationPolicy(rotationPolicy)
+                .withFsUrl("hdfs://localhost:54310")
+                .addRotationAction(new MoveFileAction().withDestination("/dest2/"));
+```
+
+
+## Support for HDFS Sequence Files
+
+The `org.apache.storm.hdfs.bolt.SequenceFileBolt` class allows you to write Storm data to HDFS sequence files:
+
+```java
+        // sync the filesystem after every 1k tuples
+        SyncPolicy syncPolicy = new CountSyncPolicy(1000);
+
+        // rotate files when they reach 5MB
+        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
+
+        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
+                .withExtension(".seq")
+                .withPath("/data/");
+
+        // create sequence format instance.
+        DefaultSequenceFormat format = new DefaultSequenceFormat("timestamp", "sentence");
+
+        SequenceFileBolt bolt = new SequenceFileBolt()
+                .withFsUrl("hdfs://localhost:54310")
+                .withFileNameFormat(fileNameFormat)
+                .withSequenceFormat(format)
+                .withRotationPolicy(rotationPolicy)
+                .withSyncPolicy(syncPolicy)
+                .withCompressionType(SequenceFile.CompressionType.RECORD)
+                .withCompressionCodec("deflate");
+```
+
+The `SequenceFileBolt` requires that you provide a `org.apache.storm.hdfs.bolt.format.SequenceFormat` that maps tuples to
+key/value pairs:
+
+```java
+public interface SequenceFormat extends Serializable {
+    Class keyClass();
+    Class valueClass();
+
+    Writable key(Tuple tuple);
+    Writable value(Tuple tuple);
+}
+```
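+
+For illustration, a minimal custom implementation might look like the following (a sketch only; the class name and
+tuple field names are hypothetical, and `DefaultSequenceFormat` shown above already covers the common key/value case):
+
+```java
+public class TimestampedTextSequenceFormat implements SequenceFormat {
+    @Override
+    public Class keyClass() { return LongWritable.class; }
+
+    @Override
+    public Class valueClass() { return Text.class; }
+
+    @Override
+    public Writable key(Tuple tuple) {
+        // key: the tuple's "timestamp" field as a LongWritable
+        return new LongWritable(tuple.getLongByField("timestamp"));
+    }
+
+    @Override
+    public Writable value(Tuple tuple) {
+        // value: the tuple's "sentence" field as Text
+        return new Text(tuple.getStringByField("sentence"));
+    }
+}
+```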
+
+## Trident API
+storm-hdfs also includes a Trident `state` implementation for writing data to HDFS, with an API that closely mirrors
+that of the bolts.
+
+```java
+        Fields hdfsFields = new Fields("field1", "field2");
+
+        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
+                .withPath("/trident")
+                .withPrefix("trident")
+                .withExtension(".txt");
+
+        RecordFormat recordFormat = new DelimitedRecordFormat()
+                .withFields(hdfsFields);
+
+        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
+
+        HdfsState.Options options = new HdfsState.HdfsFileOptions()
+                .withFileNameFormat(fileNameFormat)
+                .withRecordFormat(recordFormat)
+                .withRotationPolicy(rotationPolicy)
+                .withFsUrl("hdfs://localhost:54310");
+
+        StateFactory factory = new HdfsStateFactory().withOptions(options);
+
+        TridentState state = stream
+                .partitionPersist(factory, hdfsFields, new HdfsUpdater(), new Fields());
+```
+
+To use the sequence file `State` implementation, use the `HdfsState.SequenceFileOptions`:
+
+```java
+        HdfsState.Options seqOpts = new HdfsState.SequenceFileOptions()
+                .withFileNameFormat(fileNameFormat)
+                .withSequenceFormat(new DefaultSequenceFormat("key", "data"))
+                .withRotationPolicy(rotationPolicy)
+                .withFsUrl("hdfs://localhost:54310")
+                .addRotationAction(new MoveFileAction().withDestination("/dest2/"));
+```
+
+## Working with Secure HDFS
+If your topology is going to interact with secure HDFS, your bolts/states need to authenticate with the NameNode. There
+are currently two options to support this:
+
+### Using HDFS delegation tokens
+Your administrator can configure nimbus to automatically get delegation tokens on behalf of the topology submitter user.
+Nimbus needs to be started with the following configuration:
+
+    nimbus.autocredential.plugins.classes : ["org.apache.storm.hdfs.common.security.AutoHDFS"]
+    nimbus.credential.renewers.classes : ["org.apache.storm.hdfs.common.security.AutoHDFS"]
+    hdfs.keytab.file: "/path/to/keytab/on/nimbus" (the keytab of an hdfs super user that can impersonate other users)
+    hdfs.kerberos.principal: "superuser@EXAMPLE.com"
+    nimbus.credential.renewers.freq.secs : 82800 (23 hours; hdfs tokens need to be renewed every 24 hours, so this value should be less than 24 hours)
+    topology.hdfs.uri: "hdfs://host:port" (optional; defaults to the value of the "fs.defaultFS" property in hadoop's core-site.xml)
+
+Your topology configuration should have:
+
+    topology.auto-credentials : ["org.apache.storm.hdfs.common.security.AutoHDFS"]
+
+If nimbus does not already have the above configuration, you need to add it and then restart nimbus. Ensure the hadoop
+configuration files (core-site.xml and hdfs-site.xml) and the storm-hdfs jar with all its dependencies are present in
+nimbus's classpath. Nimbus will use the keytab and principal specified in the config to authenticate with the Namenode.
+From then on, for every topology submission, nimbus will impersonate the topology submitter user and acquire delegation
+tokens on behalf of that user. If the topology was started with topology.auto-credentials set to AutoHDFS, nimbus will
+push the delegation tokens to all the workers for your topology, and the hdfs bolt/state will authenticate with the
+namenode using these tokens.
+
+As nimbus is impersonating the topology submitter user, you need to ensure that the user specified in hdfs.kerberos.principal
+has permissions to acquire tokens on behalf of other users. To achieve this, follow the configuration directions
+listed here:
+http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/Superusers.html
+
+You can read about setting up secure HDFS here: http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/SecureMode.html.
+
+### Using keytabs on all worker hosts
+If you have distributed the keytab files for the hdfs user on all potential worker hosts, you can use this method. You should specify an
+hdfs config key using the method HdfsBolt/State.withConfigKey("somekey"), and the value map of this key should have the following 2 properties:
+
+    hdfs.keytab.file: "/path/to/keytab/"
+    hdfs.kerberos.principal: "user@EXAMPLE.com"
+
+On worker hosts the bolt/trident-state code will use the keytab file and principal provided in the config to authenticate with the
+Namenode. This method is a little dangerous as you need to ensure all workers have the keytab file at the same location, and you need
+to remember this as you bring up new hosts in the cluster.
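+
+A sketch of what this looks like in code (the config key name and paths here are assumptions; the other builder methods
+are the same ones shown earlier in this document):
+
+```java
+// point the bolt at a key in the topology config...
+HdfsBolt bolt = new HdfsBolt()
+        .withFsUrl("hdfs://localhost:8020")
+        .withFileNameFormat(fileNameFormat)
+        .withRecordFormat(recordFormat)
+        .withRotationPolicy(rotationPolicy)
+        .withSyncPolicy(syncPolicy)
+        .withConfigKey("hdfs.config");
+
+// ...and supply the keytab/principal map under that same key
+Map<String, Object> hdfsConf = new HashMap<String, Object>();
+hdfsConf.put("hdfs.keytab.file", "/path/to/keytab/");
+hdfsConf.put("hdfs.kerberos.principal", "user@EXAMPLE.com");
+
+Config conf = new Config();
+conf.put("hdfs.config", hdfsConf);
+```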

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/storm-hive.md
----------------------------------------------------------------------
diff --git a/docs/storm-hive.md b/docs/storm-hive.md
new file mode 100644
index 0000000..e2dd657
--- /dev/null
+++ b/docs/storm-hive.md
@@ -0,0 +1,111 @@
+---
+title: Storm Hive Integration
+layout: documentation
+documentation: true
+---
+
+  Hive offers a streaming API that allows data to be written continuously into Hive. The incoming data
+  can be continuously committed in small batches of records into an existing Hive partition or table. Once the data
+  is committed it is immediately visible to all Hive queries. More info on the Hive Streaming API:
+  https://cwiki.apache.org/confluence/display/Hive/Streaming+Data+Ingest
+  
+  With the help of the Hive Streaming API, HiveBolt and HiveState allow users to stream data from Storm into Hive directly.
+  To use the Hive streaming API, users need to create a bucketed table with the ORC format. Example below:
+  
+  ```sql
+  create table test_table ( id INT, name STRING, phone STRING, street STRING) partitioned by (city STRING, state STRING) clustered by (id) into 5 buckets stored as orc tblproperties ("orc.compress"="NONE");
+  ```
+  
+
+## HiveBolt (org.apache.storm.hive.bolt.HiveBolt)
+
+HiveBolt streams tuples directly into Hive. Tuples are written using Hive transactions.
+The partitions to which HiveBolt streams can be pre-created, or optionally HiveBolt can
+create them if they are missing. Fields from tuples are mapped to table columns.
+Users should make sure that tuple field names match the table column names.
+
+```java
+DelimitedRecordHiveMapper mapper = new DelimitedRecordHiveMapper()
+            .withColumnFields(new Fields(colNames));
+HiveOptions hiveOptions = new HiveOptions(metaStoreURI,dbName,tblName,mapper);
+HiveBolt hiveBolt = new HiveBolt(hiveOptions);
+```
+
+### RecordHiveMapper
+   This class maps Tuple field names to Hive table column names.
+   There are two implementations available:
+ 
+   
+   + DelimitedRecordHiveMapper (org.apache.storm.hive.bolt.mapper.DelimitedRecordHiveMapper)
+   + JsonRecordHiveMapper (org.apache.storm.hive.bolt.mapper.JsonRecordHiveMapper)
+   
+   ```java
+   DelimitedRecordHiveMapper mapper = new DelimitedRecordHiveMapper()
+            .withColumnFields(new Fields(colNames))
+            .withPartitionFields(new Fields(partNames));
+   // or
+   DelimitedRecordHiveMapper mapper = new DelimitedRecordHiveMapper()
+            .withColumnFields(new Fields(colNames))
+            .withTimeAsPartitionField("YYYY/MM/DD");
+   ```
+
+|Arg | Description | Type|
+|--- |--- |---|
+|withColumnFields| field names in a tuple to be mapped to table column names | Fields (required) |
+|withPartitionFields| field names in a tuple to be mapped to hive table partitions | Fields |
+|withTimeAsPartitionField| users can select the system time as a partition in the hive table | String (date format) |
+
+### HiveOptions (org.apache.storm.hive.common.HiveOptions)
+  
+HiveBolt takes in HiveOptions as a constructor arg.
+
+  ```java
+  HiveOptions hiveOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper)
+                                .withTxnsPerBatch(10)
+                                .withBatchSize(1000)
+                                .withIdleTimeout(10);
+  ```
+
+
+HiveOptions params
+
+|Arg  |Description | Type
+|---	|--- |---
+|metaStoreURI | hive meta store URI (can be found in hive-site.xml) | String (required) |
+|dbName | database name | String (required) |
+|tblName | table name | String (required) |
+|mapper| Mapper class to map Tuple field names to Table column names | DelimitedRecordHiveMapper or JsonRecordHiveMapper (required) |
+|withTxnsPerBatch | Hive grants a *batch of transactions* instead of single transactions to streaming clients like HiveBolt. This setting configures the number of desired transactions per transaction batch. Data from all transactions in a single batch ends up in a single file. HiveBolt will write a maximum of batchSize events in each transaction in the batch. This setting in conjunction with batchSize provides control over the size of each file. Note that eventually Hive will transparently compact these files into larger files.| Integer . default 100 |
+|withMaxOpenConnections| Allow only this number of open connections. If this number is exceeded, the least recently used connection is closed.| Integer . default 100|
+|withBatchSize| Max number of events written to Hive in a single Hive transaction| Integer. default 15000|
+|withCallTimeout| (In milliseconds) Timeout for Hive & HDFS I/O operations, such as openTxn, write, commit, abort. | Integer. default 10000|
+|withHeartBeatInterval| (In seconds) Interval between consecutive heartbeats sent to Hive to keep unused transactions from expiring. Set this value to 0 to disable heartbeats.| Integer. default 240 |
+|withAutoCreatePartitions| HiveBolt will automatically create the necessary Hive partitions to stream to. |Boolean. default true |
+|withKerberosPrinicipal| Kerberos user principal for accessing secure Hive | String|
+|withKerberosKeytab| Kerberos keytab for accessing secure Hive | String |
+|withTickTupleInterval| (In seconds) If > 0 then the Hive Bolt will periodically flush transaction batches. Enabling this is recommended to avoid tuple timeouts while waiting for a batch to fill up.| Integer. default 0|
+
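+For example, a sketch that layers some of the optional knobs from the table above onto the required args (the values
+are illustrative only):
+
+```java
+HiveOptions hiveOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper)
+        .withTxnsPerBatch(10)
+        .withBatchSize(1000)
+        .withMaxOpenConnections(50)
+        .withTickTupleInterval(15)
+        .withAutoCreatePartitions(true);
+```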
+
+ 
+## HiveState (org.apache.storm.hive.trident.HiveState)
+
+The Hive Trident state follows a similar pattern to HiveBolt; it also takes HiveOptions as a constructor arg.
+
+```java
+   DelimitedRecordHiveMapper mapper = new DelimitedRecordHiveMapper()
+            .withColumnFields(new Fields(colNames))
+            .withTimeAsPartitionField("YYYY/MM/DD");
+
+   HiveOptions hiveOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper)
+                                .withTxnsPerBatch(10)
+                                .withBatchSize(1000)
+                                .withIdleTimeout(10);
+
+   StateFactory factory = new HiveStateFactory().withOptions(hiveOptions);
+   TridentState state = stream.partitionPersist(factory, hiveFields, new HiveUpdater(), new Fields());
+```

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/storm-jdbc.md
----------------------------------------------------------------------
diff --git a/docs/storm-jdbc.md b/docs/storm-jdbc.md
new file mode 100644
index 0000000..15aa2a3
--- /dev/null
+++ b/docs/storm-jdbc.md
@@ -0,0 +1,285 @@
+---
+title: Storm JDBC Integration
+layout: documentation
+documentation: true
+---
+
+Storm/Trident integration for JDBC. This package includes the core bolts and trident states that allow a storm topology
+to either insert storm tuples into a database table or to execute select queries against a database and enrich tuples
+in a storm topology.
+
+**Note**: Throughout the examples below, we make use of `com.google.common.collect.Lists` and `com.google.common.collect.Maps`.
+
+## Inserting into a database
+The bolt and trident state included in this package for inserting data into a database table are tied to a single table.
+
+### ConnectionProvider
+`org.apache.storm.jdbc.common.ConnectionProvider` is an interface that should be implemented by different connection pooling mechanisms:
+
+```java
+public interface ConnectionProvider extends Serializable {
+    /**
+     * method must be idempotent.
+     */
+    void prepare();
+
+    /**
+     *
+     * @return a DB connection over which the queries can be executed.
+     */
+    Connection getConnection();
+
+    /**
+     * called once when the system is shutting down, should be idempotent.
+     */
+    void cleanup();
+}
+```
+
+Out of the box we support `org.apache.storm.jdbc.common.HikariCPConnectionProvider`, which is an implementation that uses HikariCP.
+
+### JdbcMapper
+The main API for inserting data in a table using JDBC is the `org.apache.storm.jdbc.mapper.JdbcMapper` interface:
+
+```java
+public interface JdbcMapper  extends Serializable {
+    List<Column> getColumns(ITuple tuple);
+}
+```
+
+The `getColumns()` method defines how a storm tuple maps to a list of columns representing a row in a database. 
+**The order of the returned list is important. The place holders in the supplied queries are resolved in the same order as returned list.**
+For example, if the user supplied insert query is `insert into user(user_id, user_name, create_date) values (?,?, now())`, the 1st item
+of the list returned by the `getColumns` method will map to the 1st place holder, the 2nd to the 2nd, and so on. We do not parse
+the supplied queries to try to resolve place holders by column names. Not making any assumptions about the query syntax allows this connector
+to be used by some non-standard sql frameworks like Phoenix, which only supports upsert into.
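+
+For illustration, a hand-written mapper for the insert query above might look like this (a minimal sketch; the tuple
+field names are assumptions):
+
+```java
+public class UserJdbcMapper implements JdbcMapper {
+    @Override
+    public List<Column> getColumns(ITuple tuple) {
+        List<Column> columns = new ArrayList<Column>();
+        // order matters: these map to the 1st and 2nd '?' place holders
+        columns.add(new Column("user_id", tuple.getIntegerByField("user_id"), java.sql.Types.INTEGER));
+        columns.add(new Column("user_name", tuple.getStringByField("user_name"), java.sql.Types.VARCHAR));
+        return columns;
+    }
+}
+```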
+
+### JdbcInsertBolt
+To use the `JdbcInsertBolt`, you construct an instance of it by specifying a `ConnectionProvider` implementation
+and a `JdbcMapper` implementation that converts storm tuples to DB rows. In addition, you must either supply
+a table name using the `withTableName` method or an insert query using `withInsertQuery`.
+If you specify an insert query you should ensure that your `JdbcMapper` implementation will return a list of columns in the same order as in your insert query.
+You can optionally specify a query timeout seconds param that specifies the max seconds an insert query can take.
+The default is set to the value of topology.message.timeout.secs, and a value of -1 indicates not to set any query timeout.
+You should set the query timeout value to be <= topology.message.timeout.secs.
+
+```java
+Map hikariConfigMap = Maps.newHashMap();
+hikariConfigMap.put("dataSourceClassName","com.mysql.jdbc.jdbc2.optional.MysqlDataSource");
+hikariConfigMap.put("dataSource.url", "jdbc:mysql://localhost/test");
+hikariConfigMap.put("dataSource.user","root");
+hikariConfigMap.put("dataSource.password","password");
+ConnectionProvider connectionProvider = new HikariCPConnectionProvider(hikariConfigMap);
+
+String tableName = "user_details";
+JdbcMapper simpleJdbcMapper = new SimpleJdbcMapper(tableName, connectionProvider);
+
+JdbcInsertBolt userPersistanceBolt = new JdbcInsertBolt(connectionProvider, simpleJdbcMapper)
+                                    .withTableName("user")
+                                    .withQueryTimeoutSecs(30);
+// or
+JdbcInsertBolt userPersistanceBolt = new JdbcInsertBolt(connectionProvider, simpleJdbcMapper)
+                                    .withInsertQuery("insert into user values (?,?)")
+                                    .withQueryTimeoutSecs(30);
+```
+
+### SimpleJdbcMapper
+`storm-jdbc` includes a general purpose `JdbcMapper` implementation called `SimpleJdbcMapper` that can map a Storm
+tuple to a database row. `SimpleJdbcMapper` assumes that the storm tuple has fields with the same names as the column names in
+the database table that you intend to write to.
+
+To use `SimpleJdbcMapper`, you simply tell it the tableName that you want to write to and provide a connectionProvider instance.
+
+The following code creates a `SimpleJdbcMapper` instance that:
+
+1. Will allow the mapper to transform a storm tuple to a list of columns mapping to a row in the table test.user_details.
+2. Will use the provided HikariCP configuration to establish a connection pool with the specified database configuration and
+automatically figure out the column names and corresponding data types of the table that you intend to write to.
+Please see https://github.com/brettwooldridge/HikariCP#configuration-knobs-baby to learn more about HikariCP configuration properties.
+
+```java
+Map hikariConfigMap = Maps.newHashMap();
+hikariConfigMap.put("dataSourceClassName","com.mysql.jdbc.jdbc2.optional.MysqlDataSource");
+hikariConfigMap.put("dataSource.url", "jdbc:mysql://localhost/test");
+hikariConfigMap.put("dataSource.user","root");
+hikariConfigMap.put("dataSource.password","password");
+ConnectionProvider connectionProvider = new HikariCPConnectionProvider(hikariConfigMap);
+String tableName = "user_details";
+JdbcMapper simpleJdbcMapper = new SimpleJdbcMapper(tableName, connectionProvider);
+```
+The mapper initialized in the example above assumes a storm tuple has a value for all the columns of the table you intend to insert data into, and its `getColumns`
+method will return the columns in the order in which the Jdbc connection instance's `connection.getMetaData().getColumns();` method returns them.
+
+**If you specified your own insert query to `JdbcInsertBolt` you must initialize `SimpleJdbcMapper` with an explicit column schema such that the schema has columns in the same order as in your insert query.**
+For example, if your insert query is `Insert into user (user_id, user_name) values (?,?)` then your `SimpleJdbcMapper` should be initialized with the following statements:
+```java
+List<Column> columnSchema = Lists.newArrayList(
+    new Column("user_id", java.sql.Types.INTEGER),
+    new Column("user_name", java.sql.Types.VARCHAR));
+JdbcMapper simpleJdbcMapper = new SimpleJdbcMapper(columnSchema);
+```
+
+If your storm tuple only has fields for a subset of columns, i.e. if some of the columns in your table have default values and you want to only insert values for columns with no default values, you can enforce the behavior by initializing the
+`SimpleJdbcMapper` with an explicit column schema. For example, suppose you have a user_details table `create table if not exists user_details (user_id integer, user_name varchar(100), dept_name varchar(100), create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP);`
+in which the create_time column has a default value. To ensure only the columns with no default values are inserted,
+you can initialize the `jdbcMapper` as below:
+
+```java
+List<Column> columnSchema = Lists.newArrayList(
+    new Column("user_id", java.sql.Types.INTEGER),
+    new Column("user_name", java.sql.Types.VARCHAR),
+    new Column("dept_name", java.sql.Types.VARCHAR));
+JdbcMapper simpleJdbcMapper = new SimpleJdbcMapper(columnSchema);
+```
+### JdbcTridentState
+We also support a trident persistent state that can be used with trident topologies. To create a jdbc persistent trident
+state you need to initialize it with the table name or an insert query, a JdbcMapper instance and a connection provider instance.
+See the example below:
+
+```java
+JdbcState.Options options = new JdbcState.Options()
+        .withConnectionProvider(connectionProvider)
+        .withMapper(jdbcMapper)
+        .withTableName("user_details")
+        .withQueryTimeoutSecs(30);
+JdbcStateFactory jdbcStateFactory = new JdbcStateFactory(options);
+```
+Similar to `JdbcInsertBolt`, you can specify a custom insert query using `withInsertQuery` instead of specifying a table name.
+
+## Lookup from Database
+We support `select` queries from databases to allow enrichment of storm tuples in a topology. The main API for 
+executing select queries against a database using JDBC is the `org.apache.storm.jdbc.mapper.JdbcLookupMapper` interface:
+
+```java
+public interface JdbcLookupMapper extends JdbcMapper {
+    void declareOutputFields(OutputFieldsDeclarer declarer);
+    // getColumns(ITuple) is inherited from JdbcMapper
+    List<Values> toTuple(ITuple input, List<Column> columns);
+}
+```
+
+The `declareOutputFields` method is used to indicate what fields will be emitted as part of the output tuple when processing a storm
+tuple.
+
+The `getColumns` method specifies the place holder columns in a select query and their SQL type and the value to use.
+For example in the user_details table mentioned above if you were executing a query `select user_name from user_details where
+user_id = ? and create_time > ?` the `getColumns` method would take a storm input tuple and return a List containing two items.
+The first instance of `Column` type's `getValue()` method will be used as the value of `user_id` to lookup for and the
+second instance of `Column` type's `getValue()` method will be used as the value of `create_time`.
+**Note: the order in the returned list determines the place holder's value. In other words the first item in the list maps 
+to first `?` in select query, the second item to second `?` in query and so on.** 
+
+The `toTuple` method takes in the input tuple and a list of columns representing a DB row as a result of the select query
+and returns a list of values to be emitted. 
+**Please note that it returns a list of `Values` and not just a single instance of `Values`.** 
+This allows a single DB row to be mapped to multiple output storm tuples.
+
+### SimpleJdbcLookupMapper
+`storm-jdbc` includes a general purpose `JdbcLookupMapper` implementation called `SimpleJdbcLookupMapper`.
+
+To use `SimpleJdbcLookupMapper`, you have to initialize it with the fields that will be outputted by your bolt and the list of
+columns that are used in your select query as place holders. The following example shows initialization of a `SimpleJdbcLookupMapper`
+that declares `user_id,user_name,create_date` as output fields and `user_id` as the place holder column in the select query.
+`SimpleJdbcLookupMapper` assumes the field name in your tuple is equal to the place holder column name, i.e. in our example
+`SimpleJdbcLookupMapper` will look for a field `user_id` in the input tuple and use its value as the place holder's value in the
+select query. For constructing output tuples, it looks for the fields specified in `outputFields` in the input tuple first,
+and if a field is not found in the input tuple then it looks at the select query's output row for a column with the same name.
+So in the example below, if the input tuple had fields `user_id, create_date` and the select query was
+`select user_name from user_details where user_id = ?`, then for each input tuple `SimpleJdbcLookupMapper.getColumns(tuple)`
+will return the value of `tuple.getValueByField("user_id")`, which will be used as the value of the `?` in the select query.
+For each output row from the DB, `SimpleJdbcLookupMapper.toTuple()` will take the `user_id, create_date` from the input tuple as
+is, add only `user_name` from the resulting row, and return these 3 fields as a single output tuple.
+
+```java
+Fields outputFields = new Fields("user_id", "user_name", "create_date");
+List<Column> queryParamColumns = Lists.newArrayList(new Column("user_id", Types.INTEGER));
+this.jdbcLookupMapper = new SimpleJdbcLookupMapper(outputFields, queryParamColumns);
+```
+
+### JdbcLookupBolt
+To use the `JdbcLookupBolt`, construct an instance of it using a `ConnectionProvider` instance, a `JdbcLookupMapper` instance and the select query to execute.
+You can optionally specify a query timeout seconds param that specifies the max seconds the select query can take.
+The default is set to the value of topology.message.timeout.secs. You should set this value to be <= topology.message.timeout.secs.
+
+```java
+String selectSql = "select user_name from user_details where user_id = ?";
+SimpleJdbcLookupMapper lookupMapper = new SimpleJdbcLookupMapper(outputFields, queryParamColumns);
+JdbcLookupBolt userNameLookupBolt = new JdbcLookupBolt(connectionProvider, selectSql, lookupMapper)
+        .withQueryTimeoutSecs(30);
+```
+
+### JdbcTridentState for lookup
+We also support a trident query state that can be used with trident topologies. 
+
+```java
+JdbcState.Options options = new JdbcState.Options()
+        .withConnectionProvider(connectionProvider)
+        .withJdbcLookupMapper(new SimpleJdbcLookupMapper(new Fields("user_name"), Lists.newArrayList(new Column("user_id", Types.INTEGER))))
+        .withSelectQuery("select user_name from user_details where user_id = ?")
+        .withQueryTimeoutSecs(30);
+```
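+
+A sketch of wiring this state into a Trident stream for lookups (the field names are carried over from the examples
+above, and the `JdbcQuery` function from storm-jdbc is assumed):
+
+```java
+JdbcStateFactory jdbcStateFactory = new JdbcStateFactory(options);
+
+TridentState state = topology.newStaticState(jdbcStateFactory);
+stream = stream.stateQuery(state, new Fields("user_id"),
+        new JdbcQuery(), new Fields("user_name"));
+```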
+
+## Example
+A runnable example can be found in the `src/test/java/topology` directory.
+
+### Setup
+* Ensure you have included the JDBC implementation dependency for your chosen database as part of your build configuration.
+* The test topologies execute the following queries, so your intended DB must support these queries for the test topologies
+to work.
+```SQL
+create table if not exists user (user_id integer, user_name varchar(100), dept_name varchar(100), create_date date);
+create table if not exists department (dept_id integer, dept_name varchar(100));
+create table if not exists user_department (user_id integer, dept_id integer);
+insert into department values (1, 'R&D');
+insert into department values (2, 'Finance');
+insert into department values (3, 'HR');
+insert into department values (4, 'Sales');
+insert into user_department values (1, 1);
+insert into user_department values (2, 2);
+insert into user_department values (3, 3);
+insert into user_department values (4, 4);
+select dept_name from department, user_department where department.dept_id = user_department.dept_id and user_department.user_id = ?;
+```
+### Execution
+Run the `org.apache.storm.jdbc.topology.UserPersistanceTopology` class using the storm jar command. The class expects 5 args:
+
+    storm jar org.apache.storm.jdbc.topology.UserPersistanceTopology <dataSourceClassName> <dataSource.url> <user> <password> [topology name]
+
+To make it work with MySQL, you can add the following to the pom.xml:
+
+```xml
+<dependency>
+    <groupId>mysql</groupId>
+    <artifactId>mysql-connector-java</artifactId>
+    <version>5.1.31</version>
+</dependency>
+```
+
+You can generate a single jar with dependencies using the Maven assembly plugin. To use the plugin, add the following to your pom.xml and execute
+`mvn clean compile assembly:single`:
+
+```xml
+<plugin>
+    <artifactId>maven-assembly-plugin</artifactId>
+    <configuration>
+        <archive>
+            <manifest>
+                <mainClass>fully.qualified.MainClass</mainClass>
+            </manifest>
+        </archive>
+        <descriptorRefs>
+            <descriptorRef>jar-with-dependencies</descriptorRef>
+        </descriptorRefs>
+    </configuration>
+</plugin>
+```
+
+MySQL example:
+```
+storm jar ~/repo/incubator-storm/external/storm-jdbc/target/storm-jdbc-0.10.0-SNAPSHOT-jar-with-dependencies.jar org.apache.storm.jdbc.topology.UserPersistanceTopology  com.mysql.jdbc.jdbc2.optional.MysqlDataSource jdbc:mysql://localhost/test root password UserPersistenceTopology
+```
+
+You can execute a select query against the user table which should show newly inserted rows:
+
+```
+select * from user;
+```
+
+For Trident, see `org.apache.storm.jdbc.topology.UserPersistanceTridentTopology`.

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/storm-kafka.md
----------------------------------------------------------------------
diff --git a/docs/storm-kafka.md b/docs/storm-kafka.md
new file mode 100644
index 0000000..46a2b89
--- /dev/null
+++ b/docs/storm-kafka.md
@@ -0,0 +1,287 @@
+---
+title: Storm Kafka Integration
+layout: documentation
+documentation: true
+---
+
+Provides core Storm and Trident spout implementations for consuming data from Apache Kafka 0.8.x.
+
+## Spouts
+We support both Trident and core Storm spouts. For both spout implementations, we use a BrokerHosts interface that
+tracks the Kafka broker host to partition mapping and a KafkaConfig that controls some Kafka-related parameters.
+ 
+### BrokerHosts
+In order to initialize your Kafka spout/emitter you need to construct an instance of the marker interface BrokerHosts. 
+Currently, we support the following two implementations:
+
+#### ZkHosts
+ZkHosts is what you should use if you want to dynamically track the Kafka broker to partition mapping. This class uses
+Kafka's ZooKeeper entries to track the brokerHost -> partition mapping. You can instantiate an object by calling:
+```java
+    public ZkHosts(String brokerZkStr, String brokerZkPath) 
+    public ZkHosts(String brokerZkStr)
+```
+Where brokerZkStr is just ip:port (e.g. localhost:2181) and brokerZkPath is the root directory under which all the topics and
+partition information is stored. By default this is /brokers, which is what the default Kafka implementation uses.
+
+By default, the broker-partition mapping is refreshed every 60 seconds from ZooKeeper. If you want to change it, you
+should set host.refreshFreqSecs to your chosen value.
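+
+For example, a short sketch (this assumes ZkHosts exposes the refresh frequency as a public field):
+
+```java
+ZkHosts hosts = new ZkHosts("localhost:2181");
+hosts.refreshFreqSecs = 30; // refresh the broker-partition mapping every 30 seconds
+```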
+
+#### StaticHosts
+This is an alternative implementation where the broker -> partition information is static. In order to construct an instance
+of this class, you need to first construct an instance of GlobalPartitionInformation.
+
+```java
+    Broker brokerForPartition0 = new Broker("localhost");//localhost:9092
+    Broker brokerForPartition1 = new Broker("localhost", 9092);//localhost:9092 but we specified the port explicitly
+    Broker brokerForPartition2 = new Broker("localhost:9092");//localhost:9092 specified as one string.
+    GlobalPartitionInformation partitionInfo = new GlobalPartitionInformation();
+    partitionInfo.addPartition(0, brokerForPartition0);//mapping from partition 0 to brokerForPartition0
+    partitionInfo.addPartition(1, brokerForPartition1);//mapping from partition 1 to brokerForPartition1
+    partitionInfo.addPartition(2, brokerForPartition2);//mapping from partition 2 to brokerForPartition2
+    StaticHosts hosts = new StaticHosts(partitionInfo);
+```
+
+### KafkaConfig
+The second thing needed for constructing a kafkaSpout is an instance of KafkaConfig:
+```java
+    public KafkaConfig(BrokerHosts hosts, String topic)
+    public KafkaConfig(BrokerHosts hosts, String topic, String clientId)
+```
+
+The BrokerHosts can be any implementation of the BrokerHosts interface as described above. The topic is the name of the Kafka topic.
+The optional clientId is used as part of the ZooKeeper path where the spout's current consumption offset is stored.
+
+There are 2 extensions of KafkaConfig currently in use.
+
+SpoutConfig is an extension of KafkaConfig that supports additional fields with ZooKeeper connection info and for controlling
+behavior specific to KafkaSpout. The zkRoot will be used as the root to store your consumer's offsets. The id should uniquely
+identify your spout.
+```java
+public SpoutConfig(BrokerHosts hosts, String topic, String zkRoot, String id);
+public SpoutConfig(BrokerHosts hosts, String topic, String id);
+```
+In addition to these parameters, SpoutConfig contains the following fields that control how KafkaSpout behaves:
+```java
+    // setting for how often to save the current Kafka offset to ZooKeeper
+    public long stateUpdateIntervalMs = 2000;
+
+    // Exponential back-off retry settings.  These are used when retrying messages after a bolt
+    // calls OutputCollector.fail().
+    // Note: be sure to set backtype.storm.Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS appropriately to prevent
+    // resubmitting the message while still retrying.
+    public long retryInitialDelayMs = 0;
+    public double retryDelayMultiplier = 1.0;
+    public long retryDelayMaxMs = 60 * 1000;
+
+    // if set to true, spout will set Kafka topic as the emitted Stream ID
+    public boolean topicAsStreamId = false;
+```
+Core KafkaSpout only accepts an instance of SpoutConfig.
+
+TridentKafkaConfig is another extension of KafkaConfig.
+TridentKafkaEmitter only accepts TridentKafkaConfig.
+
+The KafkaConfig class also has a bunch of public variables that control your application's behavior. Here are their defaults:
+```java
+    public int fetchSizeBytes = 1024 * 1024;
+    public int socketTimeoutMs = 10000;
+    public int fetchMaxWait = 10000;
+    public int bufferSizeBytes = 1024 * 1024;
+    public MultiScheme scheme = new RawMultiScheme();
+    public boolean ignoreZkOffsets = false;
+    public long startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
+    public long maxOffsetBehind = Long.MAX_VALUE;
+    public boolean useStartOffsetTimeIfOffsetOutOfRange = true;
+    public int metricsTimeBucketSizeInSecs = 60;
+```
+
+Most of them are self-explanatory except MultiScheme.
+### MultiScheme
+MultiScheme is an interface that dictates how the byte[] consumed from Kafka gets transformed into a storm tuple. It
+also controls the naming of your output field.
+
+```java
+  public Iterable<List<Object>> deserialize(byte[] ser);
+  public Fields getOutputFields();
+```
+
+The default `RawMultiScheme` just takes the `byte[]` and returns a tuple with the `byte[]` as-is. The name of the
+output field is "bytes". There are alternative implementations like `SchemeAsMultiScheme` and
+`KeyValueSchemeAsMultiScheme` which can convert the `byte[]` to `String`.
+
+
+### Examples
+
+#### Core Spout
+
+```java
+BrokerHosts hosts = new ZkHosts(zkConnString);
+SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/" + topicName, UUID.randomUUID().toString());
+spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
+KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
+```
+
+#### Trident Spout
+```java
+TridentTopology topology = new TridentTopology();
+BrokerHosts zk = new ZkHosts("localhost");
+TridentKafkaConfig spoutConf = new TridentKafkaConfig(zk, "test-topic");
+spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme());
+OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpout(spoutConf);
+```
+
+
+### How KafkaSpout stores offsets of a Kafka topic and recovers in case of failures
+
+As shown in the above KafkaConfig properties, you can control from where in the Kafka topic the spout begins to read by
+setting `KafkaConfig.startOffsetTime` as follows:
+
+1. `kafka.api.OffsetRequest.EarliestTime()`:  read from the beginning of the topic (i.e. from the oldest messages onwards)
+2. `kafka.api.OffsetRequest.LatestTime()`: read from the end of the topic (i.e. any new messages that are being written to the topic)
+3. A Unix timestamp in milliseconds since the epoch (e.g. via `System.currentTimeMillis()`):
+   see [How do I accurately get offsets of messages for a certain timestamp using OffsetRequest?](https://cwiki.apache.org/confluence/display/KAFKA/FAQ#FAQ-HowdoIaccuratelygetoffsetsofmessagesforacertaintimestampusingOffsetRequest?) in the Kafka FAQ
+
+As the topology runs the Kafka spout keeps track of the offsets it has read and emitted by storing state information
+under the ZooKeeper path `SpoutConfig.zkRoot+ "/" + SpoutConfig.id`.  In the case of failures it recovers from the last
+written offset in ZooKeeper.
+
+> **Important:**  When re-deploying a topology make sure that the settings for `SpoutConfig.zkRoot` and `SpoutConfig.id`
+> were not modified, otherwise the spout will not be able to read its previous consumer state information (i.e. the
+> offsets) from ZooKeeper -- which may lead to unexpected behavior and/or to data loss, depending on your use case.
+
+This means that when a topology has run once the setting `KafkaConfig.startOffsetTime` will not have an effect for
+subsequent runs of the topology because now the topology will rely on the consumer state information (offsets) in
+ZooKeeper to determine from where it should begin (more precisely: resume) reading.
+If you want to force the spout to ignore any consumer state information stored in ZooKeeper, then you should
+set the parameter `KafkaConfig.ignoreZkOffsets` to `true`.  If `true`, the spout will always begin reading from the
+offset defined by `KafkaConfig.startOffsetTime` as described above.
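+
+For example, a short sketch that forces a fresh start from the earliest offset:
+
+```java
+spoutConfig.ignoreZkOffsets = true;  // ignore any consumer state stored in ZooKeeper
+spoutConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
+```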
+
+
+## Using storm-kafka with different versions of Scala
+
+Storm-kafka's Kafka dependency is defined as `provided` scope in maven, meaning it will not be pulled in
+as a transitive dependency. This allows you to use a version of Kafka built against a specific Scala version.
+
+When building a project with storm-kafka, you must explicitly add the Kafka dependency. For example, to
+use Kafka 0.8.1.1 built against Scala 2.10, you would use the following dependency in your `pom.xml`:
+
+```xml
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka_2.10</artifactId>
+            <version>0.8.1.1</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.apache.zookeeper</groupId>
+                    <artifactId>zookeeper</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>log4j</groupId>
+                    <artifactId>log4j</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+```
+
+Note that the ZooKeeper and log4j dependencies are excluded to prevent version conflicts with Storm's dependencies.
+
+## Writing to Kafka as part of your topology
+You can create an instance of storm.kafka.bolt.KafkaBolt and attach it as a component to your topology, or if you
+are using trident you can use storm.kafka.trident.TridentKafkaState, storm.kafka.trident.TridentKafkaStateFactory and
+storm.kafka.trident.TridentKafkaUpdater.
+
+You need to provide implementations of the following 2 interfaces:
+
+### TupleToKafkaMapper and TridentTupleToKafkaMapper
+These interfaces have 2 methods defined:
+
+```java
+    K getKeyFromTuple(Tuple/TridentTuple tuple);
+    V getMessageFromTuple(Tuple/TridentTuple tuple);
+```
+
+As the name suggests, these methods are called to map a tuple to a Kafka key and a Kafka message. If you just want one field
+as the key and one field as the value, then you can use the provided FieldNameBasedTupleToKafkaMapper.java
+implementation. In the KafkaBolt, the implementation always looks for fields named "key" and "message" if you
+use the default constructor to construct FieldNameBasedTupleToKafkaMapper, for backward compatibility
+reasons. Alternatively you could also specify a different key and message field by using the non-default constructor.
+In the TridentKafkaState you must specify the field names for key and message, as there is no default constructor.
+These should be specified while constructing an instance of FieldNameBasedTupleToKafkaMapper.
+
+### KafkaTopicSelector and trident KafkaTopicSelector
+This interface has only one method:
+```java
+public interface KafkaTopicSelector {
+    String getTopics(Tuple/TridentTuple tuple);
+}
+```
+The implementation of this interface should return the topic to which the tuple's key/message mapping needs to be published.
+You can return null and the message will be ignored. If you have one static topic name then you can use
+DefaultTopicSelector.java and set the name of the topic in the constructor.
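+
+For illustration, a hypothetical selector that routes each tuple to a topic named in the tuple itself (the "topic"
+field name is an assumption):
+
+```java
+public class FieldBasedTopicSelector implements KafkaTopicSelector {
+    @Override
+    public String getTopics(Tuple tuple) {
+        // returning null causes the message to be ignored
+        return tuple.getStringByField("topic");
+    }
+}
+```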
+
+### Specifying Kafka producer properties
+You can provide all the producer properties (see the "Important configuration properties for the producer" section of
+http://kafka.apache.org/documentation.html#producerconfigs) in your Storm topology config by setting a properties
+map with the key kafka.broker.properties.
+
+### Putting it all together
+
+For the bolt:
+```java
+        TopologyBuilder builder = new TopologyBuilder();
+    
+        Fields fields = new Fields("key", "message");
+        FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
+                    new Values("storm", "1"),
+                    new Values("trident", "1"),
+                    new Values("needs", "1"),
+                    new Values("javadoc", "1")
+        );
+        spout.setCycle(true);
+        builder.setSpout("spout", spout, 5);
+        KafkaBolt bolt = new KafkaBolt()
+                .withTopicSelector(new DefaultTopicSelector("test"))
+                .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper());
+        builder.setBolt("forwardToKafka", bolt, 8).shuffleGrouping("spout");
+        
+        Config conf = new Config();
+        //set producer properties.
+        Properties props = new Properties();
+        props.put("metadata.broker.list", "localhost:9092");
+        props.put("request.required.acks", "1");
+        props.put("serializer.class", "kafka.serializer.StringEncoder");
+        conf.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, props);
+        
+        StormSubmitter.submitTopology("kafkaboltTest", conf, builder.createTopology());
+```
+
+For Trident:
+
+```java
+        Fields fields = new Fields("word", "count");
+        FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
+                new Values("storm", "1"),
+                new Values("trident", "1"),
+                new Values("needs", "1"),
+                new Values("javadoc", "1")
+        );
+        spout.setCycle(true);
+
+        TridentTopology topology = new TridentTopology();
+        Stream stream = topology.newStream("spout1", spout);
+
+        TridentKafkaStateFactory stateFactory = new TridentKafkaStateFactory()
+                .withKafkaTopicSelector(new DefaultTopicSelector("test"))
+                .withTridentTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper("word", "count"));
+        stream.partitionPersist(stateFactory, fields, new TridentKafkaUpdater(), new Fields());
+
+        Config conf = new Config();
+        //set producer properties.
+        Properties props = new Properties();
+        props.put("metadata.broker.list", "localhost:9092");
+        props.put("request.required.acks", "1");
+        props.put("serializer.class", "kafka.serializer.StringEncoder");
+        conf.put(TridentKafkaState.KAFKA_BROKER_PROPERTIES, props);
+        StormSubmitter.submitTopology("kafkaTridentTest", conf, topology.build());
+```

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/storm-redis.md
----------------------------------------------------------------------
diff --git a/docs/storm-redis.md b/docs/storm-redis.md
new file mode 100644
index 0000000..adbac68
--- /dev/null
+++ b/docs/storm-redis.md
@@ -0,0 +1,258 @@
+---
+title: Storm Redis Integration
+layout: documentation
+documentation: true
+---
+
+Storm/Trident integration for [Redis](http://redis.io/).
+
+Storm-redis uses Jedis as its Redis client.
+
+## Usage
+
+### How do I use it?
+
+Use it as a Maven dependency:
+
+```xml
+<dependency>
+    <groupId>org.apache.storm</groupId>
+    <artifactId>storm-redis</artifactId>
+    <version>${storm.version}</version>
+    <type>jar</type>
+</dependency>
+```
+
+### For normal Bolt
+
+Storm-redis provides basic Bolt implementations, ```RedisLookupBolt``` and ```RedisStoreBolt```.
+
+As the names suggest, ```RedisLookupBolt``` retrieves a value from Redis using a key, and ```RedisStoreBolt``` stores a key/value pair in Redis. One tuple is matched to one key/value pair, and you can define the match pattern via a ```TupleMapper```.
+
+You can also choose the data type to use from ```RedisDataTypeDescription```. Please refer to ```RedisDataTypeDescription.RedisDataType``` to see which data types are supported. For some data types (hash and sorted set), an additional key is required, and the key converted from the tuple becomes the element.
+
+These interfaces are combined with ```RedisLookupMapper``` and ```RedisStoreMapper```, which fit ```RedisLookupBolt``` and ```RedisStoreBolt``` respectively.
+
+#### RedisLookupBolt example
+
+```java
+
+class WordCountRedisLookupMapper implements RedisLookupMapper {
+    private RedisDataTypeDescription description;
+    private final String hashKey = "wordCount";
+
+    public WordCountRedisLookupMapper() {
+        description = new RedisDataTypeDescription(
+                RedisDataTypeDescription.RedisDataType.HASH, hashKey);
+    }
+
+    @Override
+    public List<Values> toTuple(ITuple input, Object value) {
+        String member = getKeyFromTuple(input);
+        List<Values> values = Lists.newArrayList();
+        values.add(new Values(member, value));
+        return values;
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("wordName", "count"));
+    }
+
+    @Override
+    public RedisDataTypeDescription getDataTypeDescription() {
+        return description;
+    }
+
+    @Override
+    public String getKeyFromTuple(ITuple tuple) {
+        return tuple.getStringByField("word");
+    }
+
+    @Override
+    public String getValueFromTuple(ITuple tuple) {
+        return null;
+    }
+}
+
+```
+
+```java
+
+JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
+        .setHost(host).setPort(port).build();
+RedisLookupMapper lookupMapper = new WordCountRedisLookupMapper();
+RedisLookupBolt lookupBolt = new RedisLookupBolt(poolConfig, lookupMapper);
+```
+
+#### RedisStoreBolt example
+
+```java
+
+class WordCountStoreMapper implements RedisStoreMapper {
+    private RedisDataTypeDescription description;
+    private final String hashKey = "wordCount";
+
+    public WordCountStoreMapper() {
+        description = new RedisDataTypeDescription(
+            RedisDataTypeDescription.RedisDataType.HASH, hashKey);
+    }
+
+    @Override
+    public RedisDataTypeDescription getDataTypeDescription() {
+        return description;
+    }
+
+    @Override
+    public String getKeyFromTuple(ITuple tuple) {
+        return tuple.getStringByField("word");
+    }
+
+    @Override
+    public String getValueFromTuple(ITuple tuple) {
+        return tuple.getStringByField("count");
+    }
+}
+```
+
+```java
+
+JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
+                .setHost(host).setPort(port).build();
+RedisStoreMapper storeMapper = new WordCountStoreMapper();
+RedisStoreBolt storeBolt = new RedisStoreBolt(poolConfig, storeMapper);
+```
+
+### For non-simple Bolt
+
+If your scenario doesn't fit ```RedisStoreBolt``` and ```RedisLookupBolt```, storm-redis also provides ```AbstractRedisBolt```, which you can extend to apply your own business logic.
+
+```java
+
+    public static class LookupWordTotalCountBolt extends AbstractRedisBolt {
+        private static final Logger LOG = LoggerFactory.getLogger(LookupWordTotalCountBolt.class);
+        private static final Random RANDOM = new Random();
+
+        public LookupWordTotalCountBolt(JedisPoolConfig config) {
+            super(config);
+        }
+
+        public LookupWordTotalCountBolt(JedisClusterConfig config) {
+            super(config);
+        }
+
+        @Override
+        public void execute(Tuple input) {
+            JedisCommands jedisCommands = null;
+            try {
+                jedisCommands = getInstance();
+                String wordName = input.getStringByField("word");
+                String countStr = jedisCommands.get(wordName);
+                if (countStr != null) {
+                    int count = Integer.parseInt(countStr);
+                    this.collector.emit(new Values(wordName, count));
+
+                    // print lookup result with low probability
+                    if(RANDOM.nextInt(1000) > 995) {
+                        LOG.info("Lookup result - word : " + wordName + " / count : " + count);
+                    }
+                } else {
+                    // skip
+                    LOG.warn("Word not found in Redis - word : " + wordName);
+                }
+            } finally {
+                if (jedisCommands != null) {
+                    returnInstance(jedisCommands);
+                }
+                this.collector.ack(input);
+            }
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            // wordName, count
+            declarer.declare(new Fields("wordName", "count"));
+        }
+    }
+
+```
+
+### Trident State usage
+
+1. RedisState and RedisMapState, which provide the Jedis interface, for a single Redis server.
+
+2. RedisClusterState and RedisClusterMapState, which provide the JedisCluster interface, for a Redis cluster.
+
+RedisState
+```java
+        JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
+                                        .setHost(redisHost).setPort(redisPort)
+                                        .build();
+        RedisStoreMapper storeMapper = new WordCountStoreMapper();
+        RedisLookupMapper lookupMapper = new WordCountLookupMapper();
+        RedisState.Factory factory = new RedisState.Factory(poolConfig);
+
+        TridentTopology topology = new TridentTopology();
+        Stream stream = topology.newStream("spout1", spout);
+
+        stream.partitionPersist(factory,
+                                fields,
+                                new RedisStateUpdater(storeMapper).withExpire(86400000),
+                                new Fields());
+
+        TridentState state = topology.newStaticState(factory);
+        stream = stream.stateQuery(state, new Fields("word"),
+                                new RedisStateQuerier(lookupMapper),
+                                new Fields("columnName","columnValue"));
+```
+
+RedisClusterState
+```java
+        Set<InetSocketAddress> nodes = new HashSet<InetSocketAddress>();
+        for (String hostPort : redisHostPort.split(",")) {
+            String[] host_port = hostPort.split(":");
+            nodes.add(new InetSocketAddress(host_port[0], Integer.valueOf(host_port[1])));
+        }
+        JedisClusterConfig clusterConfig = new JedisClusterConfig.Builder().setNodes(nodes)
+                                        .build();
+        RedisStoreMapper storeMapper = new WordCountStoreMapper();
+        RedisLookupMapper lookupMapper = new WordCountLookupMapper();
+        RedisClusterState.Factory factory = new RedisClusterState.Factory(clusterConfig);
+
+        TridentTopology topology = new TridentTopology();
+        Stream stream = topology.newStream("spout1", spout);
+
+        stream.partitionPersist(factory,
+                                fields,
+                                new RedisClusterStateUpdater(storeMapper).withExpire(86400000),
+                                new Fields());
+
+        TridentState state = topology.newStaticState(factory);
+        stream = stream.stateQuery(state, new Fields("word"),
+                                new RedisClusterStateQuerier(lookupMapper),
+                                new Fields("columnName","columnValue"));
+```
+
+## License
+
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+
+## Committer Sponsors
+
+ * Robert Evans ([@revans2](https://github.com/revans2))
+ * Jungtaek Lim ([@HeartSaVioR](https://github.com/HeartSaVioR))


[03/16] storm git commit: STORM-1617: Release Specific Documentation 0.9.x

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/assets/js/bootstrap.min.js
----------------------------------------------------------------------
diff --git a/docs/assets/js/bootstrap.min.js b/docs/assets/js/bootstrap.min.js
index d839865..133aeec 100644
--- a/docs/assets/js/bootstrap.min.js
+++ b/docs/assets/js/bootstrap.min.js
@@ -1,7 +1,7 @@
 /*!
- * Bootstrap v3.3.1 (http://getbootstrap.com)
- * Copyright 2011-2014 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * Bootstrap v3.3.5 (http://getbootstrap.com)
+ * Copyright 2011-2015 Twitter, Inc.
+ * Licensed under the MIT license
  */
-if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires jQuery");+function(a){var b=a.fn.jquery.split(" ")[0].split(".");if(b[0]<2&&b[1]<9||1==b[0]&&9==b[1]&&b[2]<1)throw new Error("Bootstrap's JavaScript requires jQuery version 1.9.1 or higher")}(jQuery),+function(a){"use strict";function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one("bsTransitionEnd",function(){c=!0});var e=function(){c||a(d).trigger(a.support.transition.end)};return setTimeout(e,b),this},a(function(){a.support.transition=b(),a.support.transition&&(a.event.special.bsTransitionEnd={bindType:a.support.transition.end,delegateType:a.support.transition.end,handle:function(b){return a(b.target).is(this)?b.handleObj.handler.apply
 (this,arguments):void 0}})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var c=a(this),e=c.data("bs.alert");e||c.data("bs.alert",e=new d(this)),"string"==typeof b&&e[b].call(c)})}var c='[data-dismiss="alert"]',d=function(b){a(b).on("click",c,this.close)};d.VERSION="3.3.1",d.TRANSITION_DURATION=150,d.prototype.close=function(b){function c(){g.detach().trigger("closed.bs.alert").remove()}var e=a(this),f=e.attr("data-target");f||(f=e.attr("href"),f=f&&f.replace(/.*(?=#[^\s]*$)/,""));var g=a(f);b&&b.preventDefault(),g.length||(g=e.closest(".alert")),g.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(g.removeClass("in"),a.support.transition&&g.hasClass("fade")?g.one("bsTransitionEnd",c).emulateTransitionEnd(d.TRANSITION_DURATION):c())};var e=a.fn.alert;a.fn.alert=b,a.fn.alert.Constructor=d,a.fn.alert.noConflict=function(){return a.fn.alert=e,this},a(document).on("click.bs.alert.data-api",c,d.prototype.close)}(jQuery),+function(a){"use strict"
 ;function b(b){return this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof b&&b;e||d.data("bs.button",e=new c(this,f)),"toggle"==b?e.toggle():b&&e.setState(b)})}var c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.isLoading=!1};c.VERSION="3.3.1",c.DEFAULTS={loadingText:"loading..."},c.prototype.setState=function(b){var c="disabled",d=this.$element,e=d.is("input")?"val":"html",f=d.data();b+="Text",null==f.resetText&&d.data("resetText",d[e]()),setTimeout(a.proxy(function(){d[e](null==f[b]?this.options[b]:f[b]),"loadingText"==b?(this.isLoading=!0,d.addClass(c).attr(c,c)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c))},this),0)},c.prototype.toggle=function(){var a=!0,b=this.$element.closest('[data-toggle="buttons"]');if(b.length){var c=this.$element.find("input");"radio"==c.prop("type")&&(c.prop("checked")&&this.$element.hasClass("active")?a=!1:b.find(".active").removeClass("active")),a&&c.prop("checked",!this.$ele
 ment.hasClass("active")).trigger("change")}else this.$element.attr("aria-pressed",!this.$element.hasClass("active"));a&&this.$element.toggleClass("active")};var d=a.fn.button;a.fn.button=b,a.fn.button.Constructor=c,a.fn.button.noConflict=function(){return a.fn.button=d,this},a(document).on("click.bs.button.data-api",'[data-toggle^="button"]',function(c){var d=a(c.target);d.hasClass("btn")||(d=d.closest(".btn")),b.call(d,"toggle"),c.preventDefault()}).on("focus.bs.button.data-api blur.bs.button.data-api",'[data-toggle^="button"]',function(b){a(b.target).closest(".btn").toggleClass("focus",/^focus(in)?$/.test(b.type))})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.carousel"),f=a.extend({},c.DEFAULTS,d.data(),"object"==typeof b&&b),g="string"==typeof b?b:f.slide;e||d.data("bs.carousel",e=new c(this,f)),"number"==typeof b?e.to(b):g?e[g]():f.interval&&e.pause().cycle()})}var c=function(b,c){this.$element=a(b),this.$indicators=thi
 s.$element.find(".carousel-indicators"),this.options=c,this.paused=this.sliding=this.interval=this.$active=this.$items=null,this.options.keyboard&&this.$element.on("keydown.bs.carousel",a.proxy(this.keydown,this)),"hover"==this.options.pause&&!("ontouchstart"in document.documentElement)&&this.$element.on("mouseenter.bs.carousel",a.proxy(this.pause,this)).on("mouseleave.bs.carousel",a.proxy(this.cycle,this))};c.VERSION="3.3.1",c.TRANSITION_DURATION=600,c.DEFAULTS={interval:5e3,pause:"hover",wrap:!0,keyboard:!0},c.prototype.keydown=function(a){if(!/input|textarea/i.test(a.target.tagName)){switch(a.which){case 37:this.prev();break;case 39:this.next();break;default:return}a.preventDefault()}},c.prototype.cycle=function(b){return b||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(a.proxy(this.next,this),this.options.interval)),this},c.prototype.getItemIndex=function(a){return this.$items=a.parent().children(".it
 em"),this.$items.index(a||this.$active)},c.prototype.getItemForDirection=function(a,b){var c="prev"==a?-1:1,d=this.getItemIndex(b),e=(d+c)%this.$items.length;return this.$items.eq(e)},c.prototype.to=function(a){var b=this,c=this.getItemIndex(this.$active=this.$element.find(".item.active"));return a>this.$items.length-1||0>a?void 0:this.sliding?this.$element.one("slid.bs.carousel",function(){b.to(a)}):c==a?this.pause().cycle():this.slide(a>c?"next":"prev",this.$items.eq(a))},c.prototype.pause=function(b){return b||(this.paused=!0),this.$element.find(".next, .prev").length&&a.support.transition&&(this.$element.trigger(a.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},c.prototype.next=function(){return this.sliding?void 0:this.slide("next")},c.prototype.prev=function(){return this.sliding?void 0:this.slide("prev")},c.prototype.slide=function(b,d){var e=this.$element.find(".item.active"),f=d||this.getItemForDirection(b,e),g=this.interval,h="next"
 ==b?"left":"right",i="next"==b?"first":"last",j=this;if(!f.length){if(!this.options.wrap)return;f=this.$element.find(".item")[i]()}if(f.hasClass("active"))return this.sliding=!1;var k=f[0],l=a.Event("slide.bs.carousel",{relatedTarget:k,direction:h});if(this.$element.trigger(l),!l.isDefaultPrevented()){if(this.sliding=!0,g&&this.pause(),this.$indicators.length){this.$indicators.find(".active").removeClass("active");var m=a(this.$indicators.children()[this.getItemIndex(f)]);m&&m.addClass("active")}var n=a.Event("slid.bs.carousel",{relatedTarget:k,direction:h});return a.support.transition&&this.$element.hasClass("slide")?(f.addClass(b),f[0].offsetWidth,e.addClass(h),f.addClass(h),e.one("bsTransitionEnd",function(){f.removeClass([b,h].join(" ")).addClass("active"),e.removeClass(["active",h].join(" ")),j.sliding=!1,setTimeout(function(){j.$element.trigger(n)},0)}).emulateTransitionEnd(c.TRANSITION_DURATION)):(e.removeClass("active"),f.addClass("active"),this.sliding=!1,this.$element.trig
 ger(n)),g&&this.cycle(),this}};var d=a.fn.carousel;a.fn.carousel=b,a.fn.carousel.Constructor=c,a.fn.carousel.noConflict=function(){return a.fn.carousel=d,this};var e=function(c){var d,e=a(this),f=a(e.attr("data-target")||(d=e.attr("href"))&&d.replace(/.*(?=#[^\s]+$)/,""));if(f.hasClass("carousel")){var g=a.extend({},f.data(),e.data()),h=e.attr("data-slide-to");h&&(g.interval=!1),b.call(f,g),h&&f.data("bs.carousel").to(h),c.preventDefault()}};a(document).on("click.bs.carousel.data-api","[data-slide]",e).on("click.bs.carousel.data-api","[data-slide-to]",e),a(window).on("load",function(){a('[data-ride="carousel"]').each(function(){var c=a(this);b.call(c,c.data())})})}(jQuery),+function(a){"use strict";function b(b){var c,d=b.attr("data-target")||(c=b.attr("href"))&&c.replace(/.*(?=#[^\s]+$)/,"");return a(d)}function c(b){return this.each(function(){var c=a(this),e=c.data("bs.collapse"),f=a.extend({},d.DEFAULTS,c.data(),"object"==typeof b&&b);!e&&f.toggle&&"show"==b&&(f.toggle=!1),e||c.
 data("bs.collapse",e=new d(this,f)),"string"==typeof b&&e[b]()})}var d=function(b,c){this.$element=a(b),this.options=a.extend({},d.DEFAULTS,c),this.$trigger=a(this.options.trigger).filter('[href="#'+b.id+'"], [data-target="#'+b.id+'"]'),this.transitioning=null,this.options.parent?this.$parent=this.getParent():this.addAriaAndCollapsedClass(this.$element,this.$trigger),this.options.toggle&&this.toggle()};d.VERSION="3.3.1",d.TRANSITION_DURATION=350,d.DEFAULTS={toggle:!0,trigger:'[data-toggle="collapse"]'},d.prototype.dimension=function(){var a=this.$element.hasClass("width");return a?"width":"height"},d.prototype.show=function(){if(!this.transitioning&&!this.$element.hasClass("in")){var b,e=this.$parent&&this.$parent.find("> .panel").children(".in, .collapsing");if(!(e&&e.length&&(b=e.data("bs.collapse"),b&&b.transitioning))){var f=a.Event("show.bs.collapse");if(this.$element.trigger(f),!f.isDefaultPrevented()){e&&e.length&&(c.call(e,"hide"),b||e.data("bs.collapse",null));var g=this.di
 mension();this.$element.removeClass("collapse").addClass("collapsing")[g](0).attr("aria-expanded",!0),this.$trigger.removeClass("collapsed").attr("aria-expanded",!0),this.transitioning=1;var h=function(){this.$element.removeClass("collapsing").addClass("collapse in")[g](""),this.transitioning=0,this.$element.trigger("shown.bs.collapse")};if(!a.support.transition)return h.call(this);var i=a.camelCase(["scroll",g].join("-"));this.$element.one("bsTransitionEnd",a.proxy(h,this)).emulateTransitionEnd(d.TRANSITION_DURATION)[g](this.$element[0][i])}}}},d.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass("in")){var b=a.Event("hide.bs.collapse");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.dimension();this.$element[c](this.$element[c]())[0].offsetHeight,this.$element.addClass("collapsing").removeClass("collapse in").attr("aria-expanded",!1),this.$trigger.addClass("collapsed").attr("aria-expanded",!1),this.transitioning=1;var e=function(){this.transit
 ioning=0,this.$element.removeClass("collapsing").addClass("collapse").trigger("hidden.bs.collapse")};return a.support.transition?void this.$element[c](0).one("bsTransitionEnd",a.proxy(e,this)).emulateTransitionEnd(d.TRANSITION_DURATION):e.call(this)}}},d.prototype.toggle=function(){this[this.$element.hasClass("in")?"hide":"show"]()},d.prototype.getParent=function(){return a(this.options.parent).find('[data-toggle="collapse"][data-parent="'+this.options.parent+'"]').each(a.proxy(function(c,d){var e=a(d);this.addAriaAndCollapsedClass(b(e),e)},this)).end()},d.prototype.addAriaAndCollapsedClass=function(a,b){var c=a.hasClass("in");a.attr("aria-expanded",c),b.toggleClass("collapsed",!c).attr("aria-expanded",c)};var e=a.fn.collapse;a.fn.collapse=c,a.fn.collapse.Constructor=d,a.fn.collapse.noConflict=function(){return a.fn.collapse=e,this},a(document).on("click.bs.collapse.data-api",'[data-toggle="collapse"]',function(d){var e=a(this);e.attr("data-target")||d.preventDefault();var f=b(e),g=
 f.data("bs.collapse"),h=g?"toggle":a.extend({},e.data(),{trigger:this});c.call(f,h)})}(jQuery),+function(a){"use strict";function b(b){b&&3===b.which||(a(e).remove(),a(f).each(function(){var d=a(this),e=c(d),f={relatedTarget:this};e.hasClass("open")&&(e.trigger(b=a.Event("hide.bs.dropdown",f)),b.isDefaultPrevented()||(d.attr("aria-expanded","false"),e.removeClass("open").trigger("hidden.bs.dropdown",f)))}))}function c(b){var c=b.attr("data-target");c||(c=b.attr("href"),c=c&&/#[A-Za-z]/.test(c)&&c.replace(/.*(?=#[^\s]*$)/,""));var d=c&&a(c);return d&&d.length?d:b.parent()}function d(b){return this.each(function(){var c=a(this),d=c.data("bs.dropdown");d||c.data("bs.dropdown",d=new g(this)),"string"==typeof b&&d[b].call(c)})}var e=".dropdown-backdrop",f='[data-toggle="dropdown"]',g=function(b){a(b).on("click.bs.dropdown",this.toggle)};g.VERSION="3.3.1",g.prototype.toggle=function(d){var e=a(this);if(!e.is(".disabled, :disabled")){var f=c(e),g=f.hasClass("open");if(b(),!g){"ontouchstart
 "in document.documentElement&&!f.closest(".navbar-nav").length&&a('<div class="dropdown-backdrop"/>').insertAfter(a(this)).on("click",b);var h={relatedTarget:this};if(f.trigger(d=a.Event("show.bs.dropdown",h)),d.isDefaultPrevented())return;e.trigger("focus").attr("aria-expanded","true"),f.toggleClass("open").trigger("shown.bs.dropdown",h)}return!1}},g.prototype.keydown=function(b){if(/(38|40|27|32)/.test(b.which)&&!/input|textarea/i.test(b.target.tagName)){var d=a(this);if(b.preventDefault(),b.stopPropagation(),!d.is(".disabled, :disabled")){var e=c(d),g=e.hasClass("open");if(!g&&27!=b.which||g&&27==b.which)return 27==b.which&&e.find(f).trigger("focus"),d.trigger("click");var h=" li:not(.divider):visible a",i=e.find('[role="menu"]'+h+', [role="listbox"]'+h);if(i.length){var j=i.index(b.target);38==b.which&&j>0&&j--,40==b.which&&j<i.length-1&&j++,~j||(j=0),i.eq(j).trigger("focus")}}}};var h=a.fn.dropdown;a.fn.dropdown=d,a.fn.dropdown.Constructor=g,a.fn.dropdown.noConflict=function(){
 return a.fn.dropdown=h,this},a(document).on("click.bs.dropdown.data-api",b).on("click.bs.dropdown.data-api",".dropdown form",function(a){a.stopPropagation()}).on("click.bs.dropdown.data-api",f,g.prototype.toggle).on("keydown.bs.dropdown.data-api",f,g.prototype.keydown).on("keydown.bs.dropdown.data-api",'[role="menu"]',g.prototype.keydown).on("keydown.bs.dropdown.data-api",'[role="listbox"]',g.prototype.keydown)}(jQuery),+function(a){"use strict";function b(b,d){return this.each(function(){var e=a(this),f=e.data("bs.modal"),g=a.extend({},c.DEFAULTS,e.data(),"object"==typeof b&&b);f||e.data("bs.modal",f=new c(this,g)),"string"==typeof b?f[b](d):g.show&&f.show(d)})}var c=function(b,c){this.options=c,this.$body=a(document.body),this.$element=a(b),this.$backdrop=this.isShown=null,this.scrollbarWidth=0,this.options.remote&&this.$element.find(".modal-content").load(this.options.remote,a.proxy(function(){this.$element.trigger("loaded.bs.modal")},this))};c.VERSION="3.3.1",c.TRANSITION_DURATI
 ON=300,c.BACKDROP_TRANSITION_DURATION=150,c.DEFAULTS={backdrop:!0,keyboard:!0,show:!0},c.prototype.toggle=function(a){return this.isShown?this.hide():this.show(a)},c.prototype.show=function(b){var d=this,e=a.Event("show.bs.modal",{relatedTarget:b});this.$element.trigger(e),this.isShown||e.isDefaultPrevented()||(this.isShown=!0,this.checkScrollbar(),this.setScrollbar(),this.$body.addClass("modal-open"),this.escape(),this.resize(),this.$element.on("click.dismiss.bs.modal",'[data-dismiss="modal"]',a.proxy(this.hide,this)),this.backdrop(function(){var e=a.support.transition&&d.$element.hasClass("fade");d.$element.parent().length||d.$element.appendTo(d.$body),d.$element.show().scrollTop(0),d.options.backdrop&&d.adjustBackdrop(),d.adjustDialog(),e&&d.$element[0].offsetWidth,d.$element.addClass("in").attr("aria-hidden",!1),d.enforceFocus();var f=a.Event("shown.bs.modal",{relatedTarget:b});e?d.$element.find(".modal-dialog").one("bsTransitionEnd",function(){d.$element.trigger("focus").trigge
 r(f)}).emulateTransitionEnd(c.TRANSITION_DURATION):d.$element.trigger("focus").trigger(f)}))},c.prototype.hide=function(b){b&&b.preventDefault(),b=a.Event("hide.bs.modal"),this.$element.trigger(b),this.isShown&&!b.isDefaultPrevented()&&(this.isShown=!1,this.escape(),this.resize(),a(document).off("focusin.bs.modal"),this.$element.removeClass("in").attr("aria-hidden",!0).off("click.dismiss.bs.modal"),a.support.transition&&this.$element.hasClass("fade")?this.$element.one("bsTransitionEnd",a.proxy(this.hideModal,this)).emulateTransitionEnd(c.TRANSITION_DURATION):this.hideModal())},c.prototype.enforceFocus=function(){a(document).off("focusin.bs.modal").on("focusin.bs.modal",a.proxy(function(a){this.$element[0]===a.target||this.$element.has(a.target).length||this.$element.trigger("focus")},this))},c.prototype.escape=function(){this.isShown&&this.options.keyboard?this.$element.on("keydown.dismiss.bs.modal",a.proxy(function(a){27==a.which&&this.hide()},this)):this.isShown||this.$element.off
 ("keydown.dismiss.bs.modal")},c.prototype.resize=function(){this.isShown?a(window).on("resize.bs.modal",a.proxy(this.handleUpdate,this)):a(window).off("resize.bs.modal")},c.prototype.hideModal=function(){var a=this;this.$element.hide(),this.backdrop(function(){a.$body.removeClass("modal-open"),a.resetAdjustments(),a.resetScrollbar(),a.$element.trigger("hidden.bs.modal")})},c.prototype.removeBackdrop=function(){this.$backdrop&&this.$backdrop.remove(),this.$backdrop=null},c.prototype.backdrop=function(b){var d=this,e=this.$element.hasClass("fade")?"fade":"";if(this.isShown&&this.options.backdrop){var f=a.support.transition&&e;if(this.$backdrop=a('<div class="modal-backdrop '+e+'" />').prependTo(this.$element).on("click.dismiss.bs.modal",a.proxy(function(a){a.target===a.currentTarget&&("static"==this.options.backdrop?this.$element[0].focus.call(this.$element[0]):this.hide.call(this))},this)),f&&this.$backdrop[0].offsetWidth,this.$backdrop.addClass("in"),!b)return;f?this.$backdrop.one("
 bsTransitionEnd",b).emulateTransitionEnd(c.BACKDROP_TRANSITION_DURATION):b()}else if(!this.isShown&&this.$backdrop){this.$backdrop.removeClass("in");var g=function(){d.removeBackdrop(),b&&b()};a.support.transition&&this.$element.hasClass("fade")?this.$backdrop.one("bsTransitionEnd",g).emulateTransitionEnd(c.BACKDROP_TRANSITION_DURATION):g()}else b&&b()},c.prototype.handleUpdate=function(){this.options.backdrop&&this.adjustBackdrop(),this.adjustDialog()},c.prototype.adjustBackdrop=function(){this.$backdrop.css("height",0).css("height",this.$element[0].scrollHeight)},c.prototype.adjustDialog=function(){var a=this.$element[0].scrollHeight>document.documentElement.clientHeight;this.$element.css({paddingLeft:!this.bodyIsOverflowing&&a?this.scrollbarWidth:"",paddingRight:this.bodyIsOverflowing&&!a?this.scrollbarWidth:""})},c.prototype.resetAdjustments=function(){this.$element.css({paddingLeft:"",paddingRight:""})},c.prototype.checkScrollbar=function(){this.bodyIsOverflowing=document.body.
 scrollHeight>document.documentElement.clientHeight,this.scrollbarWidth=this.measureScrollbar()},c.prototype.setScrollbar=function(){var a=parseInt(this.$body.css("padding-right")||0,10);this.bodyIsOverflowing&&this.$body.css("padding-right",a+this.scrollbarWidth)},c.prototype.resetScrollbar=function(){this.$body.css("padding-right","")},c.prototype.measureScrollbar=function(){var a=document.createElement("div");a.className="modal-scrollbar-measure",this.$body.append(a);var b=a.offsetWidth-a.clientWidth;return this.$body[0].removeChild(a),b};var d=a.fn.modal;a.fn.modal=b,a.fn.modal.Constructor=c,a.fn.modal.noConflict=function(){return a.fn.modal=d,this},a(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',function(c){var d=a(this),e=d.attr("href"),f=a(d.attr("data-target")||e&&e.replace(/.*(?=#[^\s]+$)/,"")),g=f.data("bs.modal")?"toggle":a.extend({remote:!/#/.test(e)&&e},f.data(),d.data());d.is("a")&&c.preventDefault(),f.one("show.bs.modal",function(a){a.isDefaultPrevente
 d()||f.one("hidden.bs.modal",function(){d.is(":visible")&&d.trigger("focus")})}),b.call(f,g,this)})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tooltip"),f="object"==typeof b&&b,g=f&&f.selector;(e||"destroy"!=b)&&(g?(e||d.data("bs.tooltip",e={}),e[g]||(e[g]=new c(this,f))):e||d.data("bs.tooltip",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.type=this.options=this.enabled=this.timeout=this.hoverState=this.$element=null,this.init("tooltip",a,b)};c.VERSION="3.3.1",c.TRANSITION_DURATION=150,c.DEFAULTS={animation:!0,placement:"top",selector:!1,template:'<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,container:!1,viewport:{selector:"body",padding:0}},c.prototype.init=function(b,c,d){this.enabled=!0,this.type=b,this.$element=a(c),this.options=this.getOptions(d),this.$viewport=this.options.viewport
 &&a(this.options.viewport.selector||this.options.viewport);for(var e=this.options.trigger.split(" "),f=e.length;f--;){var g=e[f];if("click"==g)this.$element.on("click."+this.type,this.options.selector,a.proxy(this.toggle,this));else if("manual"!=g){var h="hover"==g?"mouseenter":"focusin",i="hover"==g?"mouseleave":"focusout";this.$element.on(h+"."+this.type,this.options.selector,a.proxy(this.enter,this)),this.$element.on(i+"."+this.type,this.options.selector,a.proxy(this.leave,this))}}this.options.selector?this._options=a.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.getOptions=function(b){return b=a.extend({},this.getDefaults(),this.$element.data(),b),b.delay&&"number"==typeof b.delay&&(b.delay={show:b.delay,hide:b.delay}),b},c.prototype.getDelegateOptions=function(){var b={},c=this.getDefaults();return this._options&&a.each(this._options,function(a,d){c[a]!=d&&(b[a]=d)}),b},c.prototype.enter
 =function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);return c&&c.$tip&&c.$tip.is(":visible")?void(c.hoverState="in"):(c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),clearTimeout(c.timeout),c.hoverState="in",c.options.delay&&c.options.delay.show?void(c.timeout=setTimeout(function(){"in"==c.hoverState&&c.show()},c.options.delay.show)):c.show())},c.prototype.leave=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);return c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),clearTimeout(c.timeout),c.hoverState="out",c.options.delay&&c.options.delay.hide?void(c.timeout=setTimeout(function(){"out"==c.hoverState&&c.hide()},c.options.delay.hide)):c.hide()},c.prototype.show=function(){var b=a.Event("show.bs."+this.type);if(this.hasContent()&&this.enabled){this.$element.trigger(b);var d=a.
 contains(this.$element[0].ownerDocument.documentElement,this.$element[0]);if(b.isDefaultPrevented()||!d)return;var e=this,f=this.tip(),g=this.getUID(this.type);this.setContent(),f.attr("id",g),this.$element.attr("aria-describedby",g),this.options.animation&&f.addClass("fade");var h="function"==typeof this.options.placement?this.options.placement.call(this,f[0],this.$element[0]):this.options.placement,i=/\s?auto?\s?/i,j=i.test(h);j&&(h=h.replace(i,"")||"top"),f.detach().css({top:0,left:0,display:"block"}).addClass(h).data("bs."+this.type,this),this.options.container?f.appendTo(this.options.container):f.insertAfter(this.$element);var k=this.getPosition(),l=f[0].offsetWidth,m=f[0].offsetHeight;if(j){var n=h,o=this.options.container?a(this.options.container):this.$element.parent(),p=this.getPosition(o);h="bottom"==h&&k.bottom+m>p.bottom?"top":"top"==h&&k.top-m<p.top?"bottom":"right"==h&&k.right+l>p.width?"left":"left"==h&&k.left-l<p.left?"right":h,f.removeClass(n).addClass(h)}var q=this
 .getCalculatedOffset(h,k,l,m);this.applyPlacement(q,h);var r=function(){var a=e.hoverState;e.$element.trigger("shown.bs."+e.type),e.hoverState=null,"out"==a&&e.leave(e)};a.support.transition&&this.$tip.hasClass("fade")?f.one("bsTransitionEnd",r).emulateTransitionEnd(c.TRANSITION_DURATION):r()}},c.prototype.applyPlacement=function(b,c){var d=this.tip(),e=d[0].offsetWidth,f=d[0].offsetHeight,g=parseInt(d.css("margin-top"),10),h=parseInt(d.css("margin-left"),10);isNaN(g)&&(g=0),isNaN(h)&&(h=0),b.top=b.top+g,b.left=b.left+h,a.offset.setOffset(d[0],a.extend({using:function(a){d.css({top:Math.round(a.top),left:Math.round(a.left)})}},b),0),d.addClass("in");var i=d[0].offsetWidth,j=d[0].offsetHeight;"top"==c&&j!=f&&(b.top=b.top+f-j);var k=this.getViewportAdjustedDelta(c,b,i,j);k.left?b.left+=k.left:b.top+=k.top;var l=/top|bottom/.test(c),m=l?2*k.left-e+i:2*k.top-f+j,n=l?"offsetWidth":"offsetHeight";d.offset(b),this.replaceArrow(m,d[0][n],l)},c.prototype.replaceArrow=function(a,b,c){this.arr
 ow().css(c?"left":"top",50*(1-a/b)+"%").css(c?"top":"left","")},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle();a.find(".tooltip-inner")[this.options.html?"html":"text"](b),a.removeClass("fade in top bottom left right")},c.prototype.hide=function(b){function d(){"in"!=e.hoverState&&f.detach(),e.$element.removeAttr("aria-describedby").trigger("hidden.bs."+e.type),b&&b()}var e=this,f=this.tip(),g=a.Event("hide.bs."+this.type);return this.$element.trigger(g),g.isDefaultPrevented()?void 0:(f.removeClass("in"),a.support.transition&&this.$tip.hasClass("fade")?f.one("bsTransitionEnd",d).emulateTransitionEnd(c.TRANSITION_DURATION):d(),this.hoverState=null,this)},c.prototype.fixTitle=function(){var a=this.$element;(a.attr("title")||"string"!=typeof a.attr("data-original-title"))&&a.attr("data-original-title",a.attr("title")||"").attr("title","")},c.prototype.hasContent=function(){return this.getTitle()},c.prototype.getPosition=function(b){b=b||this.$element;var c=b[0],d=
 "BODY"==c.tagName,e=c.getBoundingClientRect();null==e.width&&(e=a.extend({},e,{width:e.right-e.left,height:e.bottom-e.top}));var f=d?{top:0,left:0}:b.offset(),g={scroll:d?document.documentElement.scrollTop||document.body.scrollTop:b.scrollTop()},h=d?{width:a(window).width(),height:a(window).height()}:null;return a.extend({},e,g,h,f)},c.prototype.getCalculatedOffset=function(a,b,c,d){return"bottom"==a?{top:b.top+b.height,left:b.left+b.width/2-c/2}:"top"==a?{top:b.top-d,left:b.left+b.width/2-c/2}:"left"==a?{top:b.top+b.height/2-d/2,left:b.left-c}:{top:b.top+b.height/2-d/2,left:b.left+b.width}},c.prototype.getViewportAdjustedDelta=function(a,b,c,d){var e={top:0,left:0};if(!this.$viewport)return e;var f=this.options.viewport&&this.options.viewport.padding||0,g=this.getPosition(this.$viewport);if(/right|left/.test(a)){var h=b.top-f-g.scroll,i=b.top+f-g.scroll+d;h<g.top?e.top=g.top-h:i>g.top+g.height&&(e.top=g.top+g.height-i)}else{var j=b.left-f,k=b.left+f+c;j<g.left?e.left=g.left-j:k>g.w
 idth&&(e.left=g.left+g.width-k)}return e},c.prototype.getTitle=function(){var a,b=this.$element,c=this.options;return a=b.attr("data-original-title")||("function"==typeof c.title?c.title.call(b[0]):c.title)},c.prototype.getUID=function(a){do a+=~~(1e6*Math.random());while(document.getElementById(a));return a},c.prototype.tip=function(){return this.$tip=this.$tip||a(this.options.template)},c.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},c.prototype.enable=function(){this.enabled=!0},c.prototype.disable=function(){this.enabled=!1},c.prototype.toggleEnabled=function(){this.enabled=!this.enabled},c.prototype.toggle=function(b){var c=this;b&&(c=a(b.currentTarget).data("bs."+this.type),c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c))),c.tip().hasClass("in")?c.leave(c):c.enter(c)},c.prototype.destroy=function(){var a=this;clearTimeout(this.timeout),this.hide(function(){a.$elemen
 t.off("."+a.type).removeData("bs."+a.type)})};var d=a.fn.tooltip;a.fn.tooltip=b,a.fn.tooltip.Constructor=c,a.fn.tooltip.noConflict=function(){return a.fn.tooltip=d,this}}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.popover"),f="object"==typeof b&&b,g=f&&f.selector;(e||"destroy"!=b)&&(g?(e||d.data("bs.popover",e={}),e[g]||(e[g]=new c(this,f))):e||d.data("bs.popover",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.init("popover",a,b)};if(!a.fn.tooltip)throw new Error("Popover requires tooltip.js");c.VERSION="3.3.1",c.DEFAULTS=a.extend({},a.fn.tooltip.Constructor.DEFAULTS,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'}),c.prototype=a.extend({},a.fn.tooltip.Constructor.prototype),c.prototype.constructor=c,c.prototype.getDefaults=function(){return c.DEFAULTS},c.pro
 totype.setContent=function(){var a=this.tip(),b=this.getTitle(),c=this.getContent();a.find(".popover-title")[this.options.html?"html":"text"](b),a.find(".popover-content").children().detach().end()[this.options.html?"string"==typeof c?"html":"append":"text"](c),a.removeClass("fade top bottom left right in"),a.find(".popover-title").html()||a.find(".popover-title").hide()},c.prototype.hasContent=function(){return this.getTitle()||this.getContent()},c.prototype.getContent=function(){var a=this.$element,b=this.options;return a.attr("data-content")||("function"==typeof b.content?b.content.call(a[0]):b.content)},c.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".arrow")},c.prototype.tip=function(){return this.$tip||(this.$tip=a(this.options.template)),this.$tip};var d=a.fn.popover;a.fn.popover=b,a.fn.popover.Constructor=c,a.fn.popover.noConflict=function(){return a.fn.popover=d,this}}(jQuery),+function(a){"use strict";function b(c,d){var e=a.proxy(this.process
 ,this);this.$body=a("body"),this.$scrollElement=a(a(c).is("body")?window:c),this.options=a.extend({},b.DEFAULTS,d),this.selector=(this.options.target||"")+" .nav li > a",this.offsets=[],this.targets=[],this.activeTarget=null,this.scrollHeight=0,this.$scrollElement.on("scroll.bs.scrollspy",e),this.refresh(),this.process()}function c(c){return this.each(function(){var d=a(this),e=d.data("bs.scrollspy"),f="object"==typeof c&&c;e||d.data("bs.scrollspy",e=new b(this,f)),"string"==typeof c&&e[c]()})}b.VERSION="3.3.1",b.DEFAULTS={offset:10},b.prototype.getScrollHeight=function(){return this.$scrollElement[0].scrollHeight||Math.max(this.$body[0].scrollHeight,document.documentElement.scrollHeight)},b.prototype.refresh=function(){var b="offset",c=0;a.isWindow(this.$scrollElement[0])||(b="position",c=this.$scrollElement.scrollTop()),this.offsets=[],this.targets=[],this.scrollHeight=this.getScrollHeight();var d=this;this.$body.find(this.selector).map(function(){var d=a(this),e=d.data("target")|
 |d.attr("href"),f=/^#./.test(e)&&a(e);return f&&f.length&&f.is(":visible")&&[[f[b]().top+c,e]]||null}).sort(function(a,b){return a[0]-b[0]}).each(function(){d.offsets.push(this[0]),d.targets.push(this[1])})},b.prototype.process=function(){var a,b=this.$scrollElement.scrollTop()+this.options.offset,c=this.getScrollHeight(),d=this.options.offset+c-this.$scrollElement.height(),e=this.offsets,f=this.targets,g=this.activeTarget;if(this.scrollHeight!=c&&this.refresh(),b>=d)return g!=(a=f[f.length-1])&&this.activate(a);if(g&&b<e[0])return this.activeTarget=null,this.clear();for(a=e.length;a--;)g!=f[a]&&b>=e[a]&&(!e[a+1]||b<=e[a+1])&&this.activate(f[a])},b.prototype.activate=function(b){this.activeTarget=b,this.clear();var c=this.selector+'[data-target="'+b+'"],'+this.selector+'[href="'+b+'"]',d=a(c).parents("li").addClass("active");d.parent(".dropdown-menu").length&&(d=d.closest("li.dropdown").addClass("active")),d.trigger("activate.bs.scrollspy")},b.prototype.clear=function(){a(this.selec
 tor).parentsUntil(this.options.target,".active").removeClass("active")};var d=a.fn.scrollspy;a.fn.scrollspy=c,a.fn.scrollspy.Constructor=b,a.fn.scrollspy.noConflict=function(){return a.fn.scrollspy=d,this},a(window).on("load.bs.scrollspy.data-api",function(){a('[data-spy="scroll"]').each(function(){var b=a(this);c.call(b,b.data())})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tab");e||d.data("bs.tab",e=new c(this)),"string"==typeof b&&e[b]()})}var c=function(b){this.element=a(b)};c.VERSION="3.3.1",c.TRANSITION_DURATION=150,c.prototype.show=function(){var b=this.element,c=b.closest("ul:not(.dropdown-menu)"),d=b.data("target");if(d||(d=b.attr("href"),d=d&&d.replace(/.*(?=#[^\s]*$)/,"")),!b.parent("li").hasClass("active")){var e=c.find(".active:last a"),f=a.Event("hide.bs.tab",{relatedTarget:b[0]}),g=a.Event("show.bs.tab",{relatedTarget:e[0]});if(e.trigger(f),b.trigger(g),!g.isDefaultPrevented()&&!f.isDefaultPrevented()){var
  h=a(d);this.activate(b.closest("li"),c),this.activate(h,h.parent(),function(){e.trigger({type:"hidden.bs.tab",relatedTarget:b[0]}),b.trigger({type:"shown.bs.tab",relatedTarget:e[0]})
-})}}},c.prototype.activate=function(b,d,e){function f(){g.removeClass("active").find("> .dropdown-menu > .active").removeClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!1),b.addClass("active").find('[data-toggle="tab"]').attr("aria-expanded",!0),h?(b[0].offsetWidth,b.addClass("in")):b.removeClass("fade"),b.parent(".dropdown-menu")&&b.closest("li.dropdown").addClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!0),e&&e()}var g=d.find("> .active"),h=e&&a.support.transition&&(g.length&&g.hasClass("fade")||!!d.find("> .fade").length);g.length&&h?g.one("bsTransitionEnd",f).emulateTransitionEnd(c.TRANSITION_DURATION):f(),g.removeClass("in")};var d=a.fn.tab;a.fn.tab=b,a.fn.tab.Constructor=c,a.fn.tab.noConflict=function(){return a.fn.tab=d,this};var e=function(c){c.preventDefault(),b.call(a(this),"show")};a(document).on("click.bs.tab.data-api",'[data-toggle="tab"]',e).on("click.bs.tab.data-api",'[data-toggle="pill"]',e)}(jQuery),+function(a){"use 
 strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.affix"),f="object"==typeof b&&b;e||d.data("bs.affix",e=new c(this,f)),"string"==typeof b&&e[b]()})}var c=function(b,d){this.options=a.extend({},c.DEFAULTS,d),this.$target=a(this.options.target).on("scroll.bs.affix.data-api",a.proxy(this.checkPosition,this)).on("click.bs.affix.data-api",a.proxy(this.checkPositionWithEventLoop,this)),this.$element=a(b),this.affixed=this.unpin=this.pinnedOffset=null,this.checkPosition()};c.VERSION="3.3.1",c.RESET="affix affix-top affix-bottom",c.DEFAULTS={offset:0,target:window},c.prototype.getState=function(a,b,c,d){var e=this.$target.scrollTop(),f=this.$element.offset(),g=this.$target.height();if(null!=c&&"top"==this.affixed)return c>e?"top":!1;if("bottom"==this.affixed)return null!=c?e+this.unpin<=f.top?!1:"bottom":a-d>=e+g?!1:"bottom";var h=null==this.affixed,i=h?e:f.top,j=h?g:b;return null!=c&&c>=i?"top":null!=d&&i+j>=a-d?"bottom":!1},c.prototype.getPinnedOffset=function(
 ){if(this.pinnedOffset)return this.pinnedOffset;this.$element.removeClass(c.RESET).addClass("affix");var a=this.$target.scrollTop(),b=this.$element.offset();return this.pinnedOffset=b.top-a},c.prototype.checkPositionWithEventLoop=function(){setTimeout(a.proxy(this.checkPosition,this),1)},c.prototype.checkPosition=function(){if(this.$element.is(":visible")){var b=this.$element.height(),d=this.options.offset,e=d.top,f=d.bottom,g=a("body").height();"object"!=typeof d&&(f=e=d),"function"==typeof e&&(e=d.top(this.$element)),"function"==typeof f&&(f=d.bottom(this.$element));var h=this.getState(g,b,e,f);if(this.affixed!=h){null!=this.unpin&&this.$element.css("top","");var i="affix"+(h?"-"+h:""),j=a.Event(i+".bs.affix");if(this.$element.trigger(j),j.isDefaultPrevented())return;this.affixed=h,this.unpin="bottom"==h?this.getPinnedOffset():null,this.$element.removeClass(c.RESET).addClass(i).trigger(i.replace("affix","affixed")+".bs.affix")}"bottom"==h&&this.$element.offset({top:g-b-f})}};var d
 =a.fn.affix;a.fn.affix=b,a.fn.affix.Constructor=c,a.fn.affix.noConflict=function(){return a.fn.affix=d,this},a(window).on("load",function(){a('[data-spy="affix"]').each(function(){var c=a(this),d=c.data();d.offset=d.offset||{},null!=d.offsetBottom&&(d.offset.bottom=d.offsetBottom),null!=d.offsetTop&&(d.offset.top=d.offsetTop),b.call(c,d)})})}(jQuery);
\ No newline at end of file
+if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires jQuery");+function(a){"use strict";var b=a.fn.jquery.split(" ")[0].split(".");if(b[0]<2&&b[1]<9||1==b[0]&&9==b[1]&&b[2]<1)throw new Error("Bootstrap's JavaScript requires jQuery version 1.9.1 or higher")}(jQuery),+function(a){"use strict";function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one("bsTransitionEnd",function(){c=!0});var e=function(){c||a(d).trigger(a.support.transition.end)};return setTimeout(e,b),this},a(function(){a.support.transition=b(),a.support.transition&&(a.event.special.bsTransitionEnd={bindType:a.support.transition.end,delegateType:a.support.transition.end,handle:function(b){return a(b.target).is(this)?b.handleObj.
 handler.apply(this,arguments):void 0}})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var c=a(this),e=c.data("bs.alert");e||c.data("bs.alert",e=new d(this)),"string"==typeof b&&e[b].call(c)})}var c='[data-dismiss="alert"]',d=function(b){a(b).on("click",c,this.close)};d.VERSION="3.3.5",d.TRANSITION_DURATION=150,d.prototype.close=function(b){function c(){g.detach().trigger("closed.bs.alert").remove()}var e=a(this),f=e.attr("data-target");f||(f=e.attr("href"),f=f&&f.replace(/.*(?=#[^\s]*$)/,""));var g=a(f);b&&b.preventDefault(),g.length||(g=e.closest(".alert")),g.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(g.removeClass("in"),a.support.transition&&g.hasClass("fade")?g.one("bsTransitionEnd",c).emulateTransitionEnd(d.TRANSITION_DURATION):c())};var e=a.fn.alert;a.fn.alert=b,a.fn.alert.Constructor=d,a.fn.alert.noConflict=function(){return a.fn.alert=e,this},a(document).on("click.bs.alert.data-api",c,d.prototype.close)}(jQuery),+function(a)
 {"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof b&&b;e||d.data("bs.button",e=new c(this,f)),"toggle"==b?e.toggle():b&&e.setState(b)})}var c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.isLoading=!1};c.VERSION="3.3.5",c.DEFAULTS={loadingText:"loading..."},c.prototype.setState=function(b){var c="disabled",d=this.$element,e=d.is("input")?"val":"html",f=d.data();b+="Text",null==f.resetText&&d.data("resetText",d[e]()),setTimeout(a.proxy(function(){d[e](null==f[b]?this.options[b]:f[b]),"loadingText"==b?(this.isLoading=!0,d.addClass(c).attr(c,c)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c))},this),0)},c.prototype.toggle=function(){var a=!0,b=this.$element.closest('[data-toggle="buttons"]');if(b.length){var c=this.$element.find("input");"radio"==c.prop("type")?(c.prop("checked")&&(a=!1),b.find(".active").removeClass("active"),this.$element.addClass("active")):"checkbox"==c.pr
 op("type")&&(c.prop("checked")!==this.$element.hasClass("active")&&(a=!1),this.$element.toggleClass("active")),c.prop("checked",this.$element.hasClass("active")),a&&c.trigger("change")}else this.$element.attr("aria-pressed",!this.$element.hasClass("active")),this.$element.toggleClass("active")};var d=a.fn.button;a.fn.button=b,a.fn.button.Constructor=c,a.fn.button.noConflict=function(){return a.fn.button=d,this},a(document).on("click.bs.button.data-api",'[data-toggle^="button"]',function(c){var d=a(c.target);d.hasClass("btn")||(d=d.closest(".btn")),b.call(d,"toggle"),a(c.target).is('input[type="radio"]')||a(c.target).is('input[type="checkbox"]')||c.preventDefault()}).on("focus.bs.button.data-api blur.bs.button.data-api",'[data-toggle^="button"]',function(b){a(b.target).closest(".btn").toggleClass("focus",/^focus(in)?$/.test(b.type))})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.carousel"),f=a.extend({},c.DEFAULTS,d.data(),"o
 bject"==typeof b&&b),g="string"==typeof b?b:f.slide;e||d.data("bs.carousel",e=new c(this,f)),"number"==typeof b?e.to(b):g?e[g]():f.interval&&e.pause().cycle()})}var c=function(b,c){this.$element=a(b),this.$indicators=this.$element.find(".carousel-indicators"),this.options=c,this.paused=null,this.sliding=null,this.interval=null,this.$active=null,this.$items=null,this.options.keyboard&&this.$element.on("keydown.bs.carousel",a.proxy(this.keydown,this)),"hover"==this.options.pause&&!("ontouchstart"in document.documentElement)&&this.$element.on("mouseenter.bs.carousel",a.proxy(this.pause,this)).on("mouseleave.bs.carousel",a.proxy(this.cycle,this))};c.VERSION="3.3.5",c.TRANSITION_DURATION=600,c.DEFAULTS={interval:5e3,pause:"hover",wrap:!0,keyboard:!0},c.prototype.keydown=function(a){if(!/input|textarea/i.test(a.target.tagName)){switch(a.which){case 37:this.prev();break;case 39:this.next();break;default:return}a.preventDefault()}},c.prototype.cycle=function(b){return b||(this.paused=!1),th
 is.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(a.proxy(this.next,this),this.options.interval)),this},c.prototype.getItemIndex=function(a){return this.$items=a.parent().children(".item"),this.$items.index(a||this.$active)},c.prototype.getItemForDirection=function(a,b){var c=this.getItemIndex(b),d="prev"==a&&0===c||"next"==a&&c==this.$items.length-1;if(d&&!this.options.wrap)return b;var e="prev"==a?-1:1,f=(c+e)%this.$items.length;return this.$items.eq(f)},c.prototype.to=function(a){var b=this,c=this.getItemIndex(this.$active=this.$element.find(".item.active"));return a>this.$items.length-1||0>a?void 0:this.sliding?this.$element.one("slid.bs.carousel",function(){b.to(a)}):c==a?this.pause().cycle():this.slide(a>c?"next":"prev",this.$items.eq(a))},c.prototype.pause=function(b){return b||(this.paused=!0),this.$element.find(".next, .prev").length&&a.support.transition&&(this.$element.trigger(a.support.transition.end),this.cycle(!0)
 ),this.interval=clearInterval(this.interval),this},c.prototype.next=function(){return this.sliding?void 0:this.slide("next")},c.prototype.prev=function(){return this.sliding?void 0:this.slide("prev")},c.prototype.slide=function(b,d){var e=this.$element.find(".item.active"),f=d||this.getItemForDirection(b,e),g=this.interval,h="next"==b?"left":"right",i=this;if(f.hasClass("active"))return this.sliding=!1;var j=f[0],k=a.Event("slide.bs.carousel",{relatedTarget:j,direction:h});if(this.$element.trigger(k),!k.isDefaultPrevented()){if(this.sliding=!0,g&&this.pause(),this.$indicators.length){this.$indicators.find(".active").removeClass("active");var l=a(this.$indicators.children()[this.getItemIndex(f)]);l&&l.addClass("active")}var m=a.Event("slid.bs.carousel",{relatedTarget:j,direction:h});return a.support.transition&&this.$element.hasClass("slide")?(f.addClass(b),f[0].offsetWidth,e.addClass(h),f.addClass(h),e.one("bsTransitionEnd",function(){f.removeClass([b,h].join(" ")).addClass("active"
 ),e.removeClass(["active",h].join(" ")),i.sliding=!1,setTimeout(function(){i.$element.trigger(m)},0)}).emulateTransitionEnd(c.TRANSITION_DURATION)):(e.removeClass("active"),f.addClass("active"),this.sliding=!1,this.$element.trigger(m)),g&&this.cycle(),this}};var d=a.fn.carousel;a.fn.carousel=b,a.fn.carousel.Constructor=c,a.fn.carousel.noConflict=function(){return a.fn.carousel=d,this};var e=function(c){var d,e=a(this),f=a(e.attr("data-target")||(d=e.attr("href"))&&d.replace(/.*(?=#[^\s]+$)/,""));if(f.hasClass("carousel")){var g=a.extend({},f.data(),e.data()),h=e.attr("data-slide-to");h&&(g.interval=!1),b.call(f,g),h&&f.data("bs.carousel").to(h),c.preventDefault()}};a(document).on("click.bs.carousel.data-api","[data-slide]",e).on("click.bs.carousel.data-api","[data-slide-to]",e),a(window).on("load",function(){a('[data-ride="carousel"]').each(function(){var c=a(this);b.call(c,c.data())})})}(jQuery),+function(a){"use strict";function b(b){var c,d=b.attr("data-target")||(c=b.attr("href"
 ))&&c.replace(/.*(?=#[^\s]+$)/,"");return a(d)}function c(b){return this.each(function(){var c=a(this),e=c.data("bs.collapse"),f=a.extend({},d.DEFAULTS,c.data(),"object"==typeof b&&b);!e&&f.toggle&&/show|hide/.test(b)&&(f.toggle=!1),e||c.data("bs.collapse",e=new d(this,f)),"string"==typeof b&&e[b]()})}var d=function(b,c){this.$element=a(b),this.options=a.extend({},d.DEFAULTS,c),this.$trigger=a('[data-toggle="collapse"][href="#'+b.id+'"],[data-toggle="collapse"][data-target="#'+b.id+'"]'),this.transitioning=null,this.options.parent?this.$parent=this.getParent():this.addAriaAndCollapsedClass(this.$element,this.$trigger),this.options.toggle&&this.toggle()};d.VERSION="3.3.5",d.TRANSITION_DURATION=350,d.DEFAULTS={toggle:!0},d.prototype.dimension=function(){var a=this.$element.hasClass("width");return a?"width":"height"},d.prototype.show=function(){if(!this.transitioning&&!this.$element.hasClass("in")){var b,e=this.$parent&&this.$parent.children(".panel").children(".in, .collapsing");if(!
 (e&&e.length&&(b=e.data("bs.collapse"),b&&b.transitioning))){var f=a.Event("show.bs.collapse");if(this.$element.trigger(f),!f.isDefaultPrevented()){e&&e.length&&(c.call(e,"hide"),b||e.data("bs.collapse",null));var g=this.dimension();this.$element.removeClass("collapse").addClass("collapsing")[g](0).attr("aria-expanded",!0),this.$trigger.removeClass("collapsed").attr("aria-expanded",!0),this.transitioning=1;var h=function(){this.$element.removeClass("collapsing").addClass("collapse in")[g](""),this.transitioning=0,this.$element.trigger("shown.bs.collapse")};if(!a.support.transition)return h.call(this);var i=a.camelCase(["scroll",g].join("-"));this.$element.one("bsTransitionEnd",a.proxy(h,this)).emulateTransitionEnd(d.TRANSITION_DURATION)[g](this.$element[0][i])}}}},d.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass("in")){var b=a.Event("hide.bs.collapse");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.dimension();this.$element[c](this.$element
 [c]())[0].offsetHeight,this.$element.addClass("collapsing").removeClass("collapse in").attr("aria-expanded",!1),this.$trigger.addClass("collapsed").attr("aria-expanded",!1),this.transitioning=1;var e=function(){this.transitioning=0,this.$element.removeClass("collapsing").addClass("collapse").trigger("hidden.bs.collapse")};return a.support.transition?void this.$element[c](0).one("bsTransitionEnd",a.proxy(e,this)).emulateTransitionEnd(d.TRANSITION_DURATION):e.call(this)}}},d.prototype.toggle=function(){this[this.$element.hasClass("in")?"hide":"show"]()},d.prototype.getParent=function(){return a(this.options.parent).find('[data-toggle="collapse"][data-parent="'+this.options.parent+'"]').each(a.proxy(function(c,d){var e=a(d);this.addAriaAndCollapsedClass(b(e),e)},this)).end()},d.prototype.addAriaAndCollapsedClass=function(a,b){var c=a.hasClass("in");a.attr("aria-expanded",c),b.toggleClass("collapsed",!c).attr("aria-expanded",c)};var e=a.fn.collapse;a.fn.collapse=c,a.fn.collapse.Construc
 tor=d,a.fn.collapse.noConflict=function(){return a.fn.collapse=e,this},a(document).on("click.bs.collapse.data-api",'[data-toggle="collapse"]',function(d){var e=a(this);e.attr("data-target")||d.preventDefault();var f=b(e),g=f.data("bs.collapse"),h=g?"toggle":e.data();c.call(f,h)})}(jQuery),+function(a){"use strict";function b(b){var c=b.attr("data-target");c||(c=b.attr("href"),c=c&&/#[A-Za-z]/.test(c)&&c.replace(/.*(?=#[^\s]*$)/,""));var d=c&&a(c);return d&&d.length?d:b.parent()}function c(c){c&&3===c.which||(a(e).remove(),a(f).each(function(){var d=a(this),e=b(d),f={relatedTarget:this};e.hasClass("open")&&(c&&"click"==c.type&&/input|textarea/i.test(c.target.tagName)&&a.contains(e[0],c.target)||(e.trigger(c=a.Event("hide.bs.dropdown",f)),c.isDefaultPrevented()||(d.attr("aria-expanded","false"),e.removeClass("open").trigger("hidden.bs.dropdown",f))))}))}function d(b){return this.each(function(){var c=a(this),d=c.data("bs.dropdown");d||c.data("bs.dropdown",d=new g(this)),"string"==type
 of b&&d[b].call(c)})}var e=".dropdown-backdrop",f='[data-toggle="dropdown"]',g=function(b){a(b).on("click.bs.dropdown",this.toggle)};g.VERSION="3.3.5",g.prototype.toggle=function(d){var e=a(this);if(!e.is(".disabled, :disabled")){var f=b(e),g=f.hasClass("open");if(c(),!g){"ontouchstart"in document.documentElement&&!f.closest(".navbar-nav").length&&a(document.createElement("div")).addClass("dropdown-backdrop").insertAfter(a(this)).on("click",c);var h={relatedTarget:this};if(f.trigger(d=a.Event("show.bs.dropdown",h)),d.isDefaultPrevented())return;e.trigger("focus").attr("aria-expanded","true"),f.toggleClass("open").trigger("shown.bs.dropdown",h)}return!1}},g.prototype.keydown=function(c){if(/(38|40|27|32)/.test(c.which)&&!/input|textarea/i.test(c.target.tagName)){var d=a(this);if(c.preventDefault(),c.stopPropagation(),!d.is(".disabled, :disabled")){var e=b(d),g=e.hasClass("open");if(!g&&27!=c.which||g&&27==c.which)return 27==c.which&&e.find(f).trigger("focus"),d.trigger("click");var h
 =" li:not(.disabled):visible a",i=e.find(".dropdown-menu"+h);if(i.length){var j=i.index(c.target);38==c.which&&j>0&&j--,40==c.which&&j<i.length-1&&j++,~j||(j=0),i.eq(j).trigger("focus")}}}};var h=a.fn.dropdown;a.fn.dropdown=d,a.fn.dropdown.Constructor=g,a.fn.dropdown.noConflict=function(){return a.fn.dropdown=h,this},a(document).on("click.bs.dropdown.data-api",c).on("click.bs.dropdown.data-api",".dropdown form",function(a){a.stopPropagation()}).on("click.bs.dropdown.data-api",f,g.prototype.toggle).on("keydown.bs.dropdown.data-api",f,g.prototype.keydown).on("keydown.bs.dropdown.data-api",".dropdown-menu",g.prototype.keydown)}(jQuery),+function(a){"use strict";function b(b,d){return this.each(function(){var e=a(this),f=e.data("bs.modal"),g=a.extend({},c.DEFAULTS,e.data(),"object"==typeof b&&b);f||e.data("bs.modal",f=new c(this,g)),"string"==typeof b?f[b](d):g.show&&f.show(d)})}var c=function(b,c){this.options=c,this.$body=a(document.body),this.$element=a(b),this.$dialog=this.$element.
 find(".modal-dialog"),this.$backdrop=null,this.isShown=null,this.originalBodyPad=null,this.scrollbarWidth=0,this.ignoreBackdropClick=!1,this.options.remote&&this.$element.find(".modal-content").load(this.options.remote,a.proxy(function(){this.$element.trigger("loaded.bs.modal")},this))};c.VERSION="3.3.5",c.TRANSITION_DURATION=300,c.BACKDROP_TRANSITION_DURATION=150,c.DEFAULTS={backdrop:!0,keyboard:!0,show:!0},c.prototype.toggle=function(a){return this.isShown?this.hide():this.show(a)},c.prototype.show=function(b){var d=this,e=a.Event("show.bs.modal",{relatedTarget:b});this.$element.trigger(e),this.isShown||e.isDefaultPrevented()||(this.isShown=!0,this.checkScrollbar(),this.setScrollbar(),this.$body.addClass("modal-open"),this.escape(),this.resize(),this.$element.on("click.dismiss.bs.modal",'[data-dismiss="modal"]',a.proxy(this.hide,this)),this.$dialog.on("mousedown.dismiss.bs.modal",function(){d.$element.one("mouseup.dismiss.bs.modal",function(b){a(b.target).is(d.$element)&&(d.ignore
 BackdropClick=!0)})}),this.backdrop(function(){var e=a.support.transition&&d.$element.hasClass("fade");d.$element.parent().length||d.$element.appendTo(d.$body),d.$element.show().scrollTop(0),d.adjustDialog(),e&&d.$element[0].offsetWidth,d.$element.addClass("in"),d.enforceFocus();var f=a.Event("shown.bs.modal",{relatedTarget:b});e?d.$dialog.one("bsTransitionEnd",function(){d.$element.trigger("focus").trigger(f)}).emulateTransitionEnd(c.TRANSITION_DURATION):d.$element.trigger("focus").trigger(f)}))},c.prototype.hide=function(b){b&&b.preventDefault(),b=a.Event("hide.bs.modal"),this.$element.trigger(b),this.isShown&&!b.isDefaultPrevented()&&(this.isShown=!1,this.escape(),this.resize(),a(document).off("focusin.bs.modal"),this.$element.removeClass("in").off("click.dismiss.bs.modal").off("mouseup.dismiss.bs.modal"),this.$dialog.off("mousedown.dismiss.bs.modal"),a.support.transition&&this.$element.hasClass("fade")?this.$element.one("bsTransitionEnd",a.proxy(this.hideModal,this)).emulateTran
 sitionEnd(c.TRANSITION_DURATION):this.hideModal())},c.prototype.enforceFocus=function(){a(document).off("focusin.bs.modal").on("focusin.bs.modal",a.proxy(function(a){this.$element[0]===a.target||this.$element.has(a.target).length||this.$element.trigger("focus")},this))},c.prototype.escape=function(){this.isShown&&this.options.keyboard?this.$element.on("keydown.dismiss.bs.modal",a.proxy(function(a){27==a.which&&this.hide()},this)):this.isShown||this.$element.off("keydown.dismiss.bs.modal")},c.prototype.resize=function(){this.isShown?a(window).on("resize.bs.modal",a.proxy(this.handleUpdate,this)):a(window).off("resize.bs.modal")},c.prototype.hideModal=function(){var a=this;this.$element.hide(),this.backdrop(function(){a.$body.removeClass("modal-open"),a.resetAdjustments(),a.resetScrollbar(),a.$element.trigger("hidden.bs.modal")})},c.prototype.removeBackdrop=function(){this.$backdrop&&this.$backdrop.remove(),this.$backdrop=null},c.prototype.backdrop=function(b){var d=this,e=this.$eleme
 nt.hasClass("fade")?"fade":"";if(this.isShown&&this.options.backdrop){var f=a.support.transition&&e;if(this.$backdrop=a(document.createElement("div")).addClass("modal-backdrop "+e).appendTo(this.$body),this.$element.on("click.dismiss.bs.modal",a.proxy(function(a){return this.ignoreBackdropClick?void(this.ignoreBackdropClick=!1):void(a.target===a.currentTarget&&("static"==this.options.backdrop?this.$element[0].focus():this.hide()))},this)),f&&this.$backdrop[0].offsetWidth,this.$backdrop.addClass("in"),!b)return;f?this.$backdrop.one("bsTransitionEnd",b).emulateTransitionEnd(c.BACKDROP_TRANSITION_DURATION):b()}else if(!this.isShown&&this.$backdrop){this.$backdrop.removeClass("in");var g=function(){d.removeBackdrop(),b&&b()};a.support.transition&&this.$element.hasClass("fade")?this.$backdrop.one("bsTransitionEnd",g).emulateTransitionEnd(c.BACKDROP_TRANSITION_DURATION):g()}else b&&b()},c.prototype.handleUpdate=function(){this.adjustDialog()},c.prototype.adjustDialog=function(){var a=this
 .$element[0].scrollHeight>document.documentElement.clientHeight;this.$element.css({paddingLeft:!this.bodyIsOverflowing&&a?this.scrollbarWidth:"",paddingRight:this.bodyIsOverflowing&&!a?this.scrollbarWidth:""})},c.prototype.resetAdjustments=function(){this.$element.css({paddingLeft:"",paddingRight:""})},c.prototype.checkScrollbar=function(){var a=window.innerWidth;if(!a){var b=document.documentElement.getBoundingClientRect();a=b.right-Math.abs(b.left)}this.bodyIsOverflowing=document.body.clientWidth<a,this.scrollbarWidth=this.measureScrollbar()},c.prototype.setScrollbar=function(){var a=parseInt(this.$body.css("padding-right")||0,10);this.originalBodyPad=document.body.style.paddingRight||"",this.bodyIsOverflowing&&this.$body.css("padding-right",a+this.scrollbarWidth)},c.prototype.resetScrollbar=function(){this.$body.css("padding-right",this.originalBodyPad)},c.prototype.measureScrollbar=function(){var a=document.createElement("div");a.className="modal-scrollbar-measure",this.$body.ap
 pend(a);var b=a.offsetWidth-a.clientWidth;return this.$body[0].removeChild(a),b};var d=a.fn.modal;a.fn.modal=b,a.fn.modal.Constructor=c,a.fn.modal.noConflict=function(){return a.fn.modal=d,this},a(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',function(c){var d=a(this),e=d.attr("href"),f=a(d.attr("data-target")||e&&e.replace(/.*(?=#[^\s]+$)/,"")),g=f.data("bs.modal")?"toggle":a.extend({remote:!/#/.test(e)&&e},f.data(),d.data());d.is("a")&&c.preventDefault(),f.one("show.bs.modal",function(a){a.isDefaultPrevented()||f.one("hidden.bs.modal",function(){d.is(":visible")&&d.trigger("focus")})}),b.call(f,g,this)})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tooltip"),f="object"==typeof b&&b;(e||!/destroy|hide/.test(b))&&(e||d.data("bs.tooltip",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.type=null,this.options=null,this.enabled=null,this.timeout=null,this.hoverState=null,this.$element=nul
 l,this.inState=null,this.init("tooltip",a,b)};c.VERSION="3.3.5",c.TRANSITION_DURATION=150,c.DEFAULTS={animation:!0,placement:"top",selector:!1,template:'<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,container:!1,viewport:{selector:"body",padding:0}},c.prototype.init=function(b,c,d){if(this.enabled=!0,this.type=b,this.$element=a(c),this.options=this.getOptions(d),this.$viewport=this.options.viewport&&a(a.isFunction(this.options.viewport)?this.options.viewport.call(this,this.$element):this.options.viewport.selector||this.options.viewport),this.inState={click:!1,hover:!1,focus:!1},this.$element[0]instanceof document.constructor&&!this.options.selector)throw new Error("`selector` option must be specified when initializing "+this.type+" on the window.document object!");for(var e=this.options.trigger.split(" "),f=e.length;f--;){var g=e[f];if("click"==g)this.$element.on("click."+t
 his.type,this.options.selector,a.proxy(this.toggle,this));else if("manual"!=g){var h="hover"==g?"mouseenter":"focusin",i="hover"==g?"mouseleave":"focusout";this.$element.on(h+"."+this.type,this.options.selector,a.proxy(this.enter,this)),this.$element.on(i+"."+this.type,this.options.selector,a.proxy(this.leave,this))}}this.options.selector?this._options=a.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.getOptions=function(b){return b=a.extend({},this.getDefaults(),this.$element.data(),b),b.delay&&"number"==typeof b.delay&&(b.delay={show:b.delay,hide:b.delay}),b},c.prototype.getDelegateOptions=function(){var b={},c=this.getDefaults();return this._options&&a.each(this._options,function(a,d){c[a]!=d&&(b[a]=d)}),b},c.prototype.enter=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);return c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a
 (b.currentTarget).data("bs."+this.type,c)),b instanceof a.Event&&(c.inState["focusin"==b.type?"focus":"hover"]=!0),c.tip().hasClass("in")||"in"==c.hoverState?void(c.hoverState="in"):(clearTimeout(c.timeout),c.hoverState="in",c.options.delay&&c.options.delay.show?void(c.timeout=setTimeout(function(){"in"==c.hoverState&&c.show()},c.options.delay.show)):c.show())},c.prototype.isInStateTrue=function(){for(var a in this.inState)if(this.inState[a])return!0;return!1},c.prototype.leave=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);return c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),b instanceof a.Event&&(c.inState["focusout"==b.type?"focus":"hover"]=!1),c.isInStateTrue()?void 0:(clearTimeout(c.timeout),c.hoverState="out",c.options.delay&&c.options.delay.hide?void(c.timeout=setTimeout(function(){"out"==c.hoverState&&c.hide()},c.options.delay.hide)):c.hide())},c.prototype.show=fun
 ction(){var b=a.Event("show.bs."+this.type);if(this.hasContent()&&this.enabled){this.$element.trigger(b);var d=a.contains(this.$element[0].ownerDocument.documentElement,this.$element[0]);if(b.isDefaultPrevented()||!d)return;var e=this,f=this.tip(),g=this.getUID(this.type);this.setContent(),f.attr("id",g),this.$element.attr("aria-describedby",g),this.options.animation&&f.addClass("fade");var h="function"==typeof this.options.placement?this.options.placement.call(this,f[0],this.$element[0]):this.options.placement,i=/\s?auto?\s?/i,j=i.test(h);j&&(h=h.replace(i,"")||"top"),f.detach().css({top:0,left:0,display:"block"}).addClass(h).data("bs."+this.type,this),this.options.container?f.appendTo(this.options.container):f.insertAfter(this.$element),this.$element.trigger("inserted.bs."+this.type);var k=this.getPosition(),l=f[0].offsetWidth,m=f[0].offsetHeight;if(j){var n=h,o=this.getPosition(this.$viewport);h="bottom"==h&&k.bottom+m>o.bottom?"top":"top"==h&&k.top-m<o.top?"bottom":"right"==h&&k
 .right+l>o.width?"left":"left"==h&&k.left-l<o.left?"right":h,f.removeClass(n).addClass(h)}var p=this.getCalculatedOffset(h,k,l,m);this.applyPlacement(p,h);var q=function(){var a=e.hoverState;e.$element.trigger("shown.bs."+e.type),e.hoverState=null,"out"==a&&e.leave(e)};a.support.transition&&this.$tip.hasClass("fade")?f.one("bsTransitionEnd",q).emulateTransitionEnd(c.TRANSITION_DURATION):q()}},c.prototype.applyPlacement=function(b,c){var d=this.tip(),e=d[0].offsetWidth,f=d[0].offsetHeight,g=parseInt(d.css("margin-top"),10),h=parseInt(d.css("margin-left"),10);isNaN(g)&&(g=0),isNaN(h)&&(h=0),b.top+=g,b.left+=h,a.offset.setOffset(d[0],a.extend({using:function(a){d.css({top:Math.round(a.top),left:Math.round(a.left)})}},b),0),d.addClass("in");var i=d[0].offsetWidth,j=d[0].offsetHeight;"top"==c&&j!=f&&(b.top=b.top+f-j);var k=this.getViewportAdjustedDelta(c,b,i,j);k.left?b.left+=k.left:b.top+=k.top;var l=/top|bottom/.test(c),m=l?2*k.left-e+i:2*k.top-f+j,n=l?"offsetWidth":"offsetHeight";d.of
 fset(b),this.replaceArrow(m,d[0][n],l)},c.prototype.replaceArrow=function(a,b,c){this.arrow().css(c?"left":"top",50*(1-a/b)+"%").css(c?"top":"left","")},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle();a.find(".tooltip-inner")[this.options.html?"html":"text"](b),a.removeClass("fade in top bottom left right")},c.prototype.hide=function(b){function d(){"in"!=e.hoverState&&f.detach(),e.$element.removeAttr("aria-describedby").trigger("hidden.bs."+e.type),b&&b()}var e=this,f=a(this.$tip),g=a.Event("hide.bs."+this.type);return this.$element.trigger(g),g.isDefaultPrevented()?void 0:(f.removeClass("in"),a.support.transition&&f.hasClass("fade")?f.one("bsTransitionEnd",d).emulateTransitionEnd(c.TRANSITION_DURATION):d(),this.hoverState=null,this)},c.prototype.fixTitle=function(){var a=this.$element;(a.attr("title")||"string"!=typeof a.attr("data-original-title"))&&a.attr("data-original-title",a.attr("title")||"").attr("title","")},c.prototype.hasContent=function(){return th
 is.getTitle()},c.prototype.getPosition=function(b){b=b||this.$element;var c=b[0],d="BODY"==c.tagName,e=c.getBoundingClientRect();null==e.width&&(e=a.extend({},e,{width:e.right-e.left,height:e.bottom-e.top}));var f=d?{top:0,left:0}:b.offset(),g={scroll:d?document.documentElement.scrollTop||document.body.scrollTop:b.scrollTop()},h=d?{width:a(window).width(),height:a(window).height()}:null;return a.extend({},e,g,h,f)},c.prototype.getCalculatedOffset=function(a,b,c,d){return"bottom"==a?{top:b.top+b.height,left:b.left+b.width/2-c/2}:"top"==a?{top:b.top-d,left:b.left+b.width/2-c/2}:"left"==a?{top:b.top+b.height/2-d/2,left:b.left-c}:{top:b.top+b.height/2-d/2,left:b.left+b.width}},c.prototype.getViewportAdjustedDelta=function(a,b,c,d){var e={top:0,left:0};if(!this.$viewport)return e;var f=this.options.viewport&&this.options.viewport.padding||0,g=this.getPosition(this.$viewport);if(/right|left/.test(a)){var h=b.top-f-g.scroll,i=b.top+f-g.scroll+d;h<g.top?e.top=g.top-h:i>g.top+g.height&&(e.to
 p=g.top+g.height-i)}else{var j=b.left-f,k=b.left+f+c;j<g.left?e.left=g.left-j:k>g.right&&(e.left=g.left+g.width-k)}return e},c.prototype.getTitle=function(){var a,b=this.$element,c=this.options;return a=b.attr("data-original-title")||("function"==typeof c.title?c.title.call(b[0]):c.title)},c.prototype.getUID=function(a){do a+=~~(1e6*Math.random());while(document.getElementById(a));return a},c.prototype.tip=function(){if(!this.$tip&&(this.$tip=a(this.options.template),1!=this.$tip.length))throw new Error(this.type+" `template` option must consist of exactly 1 top-level element!");return this.$tip},c.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},c.prototype.enable=function(){this.enabled=!0},c.prototype.disable=function(){this.enabled=!1},c.prototype.toggleEnabled=function(){this.enabled=!this.enabled},c.prototype.toggle=function(b){var c=this;b&&(c=a(b.currentTarget).data("bs."+this.type),c||(c=new this.constructor(b.currentTarget,this.
 getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c))),b?(c.inState.click=!c.inState.click,c.isInStateTrue()?c.enter(c):c.leave(c)):c.tip().hasClass("in")?c.leave(c):c.enter(c)},c.prototype.destroy=function(){var a=this;clearTimeout(this.timeout),this.hide(function(){a.$element.off("."+a.type).removeData("bs."+a.type),a.$tip&&a.$tip.detach(),a.$tip=null,a.$arrow=null,a.$viewport=null})};var d=a.fn.tooltip;a.fn.tooltip=b,a.fn.tooltip.Constructor=c,a.fn.tooltip.noConflict=function(){return a.fn.tooltip=d,this}}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.popover"),f="object"==typeof b&&b;(e||!/destroy|hide/.test(b))&&(e||d.data("bs.popover",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.init("popover",a,b)};if(!a.fn.tooltip)throw new Error("Popover requires tooltip.js");c.VERSION="3.3.5",c.DEFAULTS=a.extend({},a.fn.tooltip.Constructor.DEFAULTS,{placement:"right",trigger:"click",content:"",t
 emplate:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'}),c.prototype=a.extend({},a.fn.tooltip.Constructor.prototype),c.prototype.constructor=c,c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle(),c=this.getContent();a.find(".popover-title")[this.options.html?"html":"text"](b),a.find(".popover-content").children().detach().end()[this.options.html?"string"==typeof c?"html":"append":"text"](c),a.removeClass("fade top bottom left right in"),a.find(".popover-title").html()||a.find(".popover-title").hide()},c.prototype.hasContent=function(){return this.getTitle()||this.getContent()},c.prototype.getContent=function(){var a=this.$element,b=this.options;return a.attr("data-content")||("function"==typeof b.content?b.content.call(a[0]):b.content)},c.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".arrow")};var
  d=a.fn.popover;a.fn.popover=b,a.fn.popover.Constructor=c,a.fn.popover.noConflict=function(){return a.fn.popover=d,this}}(jQuery),+function(a){"use strict";function b(c,d){this.$body=a(document.body),this.$scrollElement=a(a(c).is(document.body)?window:c),this.options=a.extend({},b.DEFAULTS,d),this.selector=(this.options.target||"")+" .nav li > a",this.offsets=[],this.targets=[],this.activeTarget=null,this.scrollHeight=0,this.$scrollElement.on("scroll.bs.scrollspy",a.proxy(this.process,this)),this.refresh(),this.process()}function c(c){return this.each(function(){var d=a(this),e=d.data("bs.scrollspy"),f="object"==typeof c&&c;e||d.data("bs.scrollspy",e=new b(this,f)),"string"==typeof c&&e[c]()})}b.VERSION="3.3.5",b.DEFAULTS={offset:10},b.prototype.getScrollHeight=function(){return this.$scrollElement[0].scrollHeight||Math.max(this.$body[0].scrollHeight,document.documentElement.scrollHeight)},b.prototype.refresh=function(){var b=this,c="offset",d=0;this.offsets=[],this.targets=[],this.
 scrollHeight=this.getScrollHeight(),a.isWindow(this.$scrollElement[0])||(c="position",d=this.$scrollElement.scrollTop()),this.$body.find(this.selector).map(function(){var b=a(this),e=b.data("target")||b.attr("href"),f=/^#./.test(e)&&a(e);return f&&f.length&&f.is(":visible")&&[[f[c]().top+d,e]]||null}).sort(function(a,b){return a[0]-b[0]}).each(function(){b.offsets.push(this[0]),b.targets.push(this[1])})},b.prototype.process=function(){var a,b=this.$scrollElement.scrollTop()+this.options.offset,c=this.getScrollHeight(),d=this.options.offset+c-this.$scrollElement.height(),e=this.offsets,f=this.targets,g=this.activeTarget;if(this.scrollHeight!=c&&this.refresh(),b>=d)return g!=(a=f[f.length-1])&&this.activate(a);if(g&&b<e[0])return this.activeTarget=null,this.clear();for(a=e.length;a--;)g!=f[a]&&b>=e[a]&&(void 0===e[a+1]||b<e[a+1])&&this.activate(f[a])},b.prototype.activate=function(b){this.activeTarget=b,this.clear();var c=this.selector+'[data-target="'+b+'"],'+this.selector+'[href="'+
 b+'"]',d=a(c).parents("li").addClass("active");d.parent(".dropdown-menu").length&&(d=d.closest("li.dropdown").addClass("active")),
+d.trigger("activate.bs.scrollspy")},b.prototype.clear=function(){a(this.selector).parentsUntil(this.options.target,".active").removeClass("active")};var d=a.fn.scrollspy;a.fn.scrollspy=c,a.fn.scrollspy.Constructor=b,a.fn.scrollspy.noConflict=function(){return a.fn.scrollspy=d,this},a(window).on("load.bs.scrollspy.data-api",function(){a('[data-spy="scroll"]').each(function(){var b=a(this);c.call(b,b.data())})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tab");e||d.data("bs.tab",e=new c(this)),"string"==typeof b&&e[b]()})}var c=function(b){this.element=a(b)};c.VERSION="3.3.5",c.TRANSITION_DURATION=150,c.prototype.show=function(){var b=this.element,c=b.closest("ul:not(.dropdown-menu)"),d=b.data("target");if(d||(d=b.attr("href"),d=d&&d.replace(/.*(?=#[^\s]*$)/,"")),!b.parent("li").hasClass("active")){var e=c.find(".active:last a"),f=a.Event("hide.bs.tab",{relatedTarget:b[0]}),g=a.Event("show.bs.tab",{relatedTarget:e[0]});if(e.
 trigger(f),b.trigger(g),!g.isDefaultPrevented()&&!f.isDefaultPrevented()){var h=a(d);this.activate(b.closest("li"),c),this.activate(h,h.parent(),function(){e.trigger({type:"hidden.bs.tab",relatedTarget:b[0]}),b.trigger({type:"shown.bs.tab",relatedTarget:e[0]})})}}},c.prototype.activate=function(b,d,e){function f(){g.removeClass("active").find("> .dropdown-menu > .active").removeClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!1),b.addClass("active").find('[data-toggle="tab"]').attr("aria-expanded",!0),h?(b[0].offsetWidth,b.addClass("in")):b.removeClass("fade"),b.parent(".dropdown-menu").length&&b.closest("li.dropdown").addClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!0),e&&e()}var g=d.find("> .active"),h=e&&a.support.transition&&(g.length&&g.hasClass("fade")||!!d.find("> .fade").length);g.length&&h?g.one("bsTransitionEnd",f).emulateTransitionEnd(c.TRANSITION_DURATION):f(),g.removeClass("in")};var d=a.fn.tab;a.fn.tab=b,a.fn.tab.Constru
 ctor=c,a.fn.tab.noConflict=function(){return a.fn.tab=d,this};var e=function(c){c.preventDefault(),b.call(a(this),"show")};a(document).on("click.bs.tab.data-api",'[data-toggle="tab"]',e).on("click.bs.tab.data-api",'[data-toggle="pill"]',e)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.affix"),f="object"==typeof b&&b;e||d.data("bs.affix",e=new c(this,f)),"string"==typeof b&&e[b]()})}var c=function(b,d){this.options=a.extend({},c.DEFAULTS,d),this.$target=a(this.options.target).on("scroll.bs.affix.data-api",a.proxy(this.checkPosition,this)).on("click.bs.affix.data-api",a.proxy(this.checkPositionWithEventLoop,this)),this.$element=a(b),this.affixed=null,this.unpin=null,this.pinnedOffset=null,this.checkPosition()};c.VERSION="3.3.5",c.RESET="affix affix-top affix-bottom",c.DEFAULTS={offset:0,target:window},c.prototype.getState=function(a,b,c,d){var e=this.$target.scrollTop(),f=this.$element.offset(),g=this.$target.height();if(null!=
 c&&"top"==this.affixed)return c>e?"top":!1;if("bottom"==this.affixed)return null!=c?e+this.unpin<=f.top?!1:"bottom":a-d>=e+g?!1:"bottom";var h=null==this.affixed,i=h?e:f.top,j=h?g:b;return null!=c&&c>=e?"top":null!=d&&i+j>=a-d?"bottom":!1},c.prototype.getPinnedOffset=function(){if(this.pinnedOffset)return this.pinnedOffset;this.$element.removeClass(c.RESET).addClass("affix");var a=this.$target.scrollTop(),b=this.$element.offset();return this.pinnedOffset=b.top-a},c.prototype.checkPositionWithEventLoop=function(){setTimeout(a.proxy(this.checkPosition,this),1)},c.prototype.checkPosition=function(){if(this.$element.is(":visible")){var b=this.$element.height(),d=this.options.offset,e=d.top,f=d.bottom,g=Math.max(a(document).height(),a(document.body).height());"object"!=typeof d&&(f=e=d),"function"==typeof e&&(e=d.top(this.$element)),"function"==typeof f&&(f=d.bottom(this.$element));var h=this.getState(g,b,e,f);if(this.affixed!=h){null!=this.unpin&&this.$element.css("top","");var i="affix
 "+(h?"-"+h:""),j=a.Event(i+".bs.affix");if(this.$element.trigger(j),j.isDefaultPrevented())return;this.affixed=h,this.unpin="bottom"==h?this.getPinnedOffset():null,this.$element.removeClass(c.RESET).addClass(i).trigger(i.replace("affix","affixed")+".bs.affix")}"bottom"==h&&this.$element.offset({top:g-b-f})}};var d=a.fn.affix;a.fn.affix=b,a.fn.affix.Constructor=c,a.fn.affix.noConflict=function(){return a.fn.affix=d,this},a(window).on("load",function(){a('[data-spy="affix"]').each(function(){var c=a(this),d=c.data();d.offset=d.offset||{},null!=d.offsetBottom&&(d.offset.bottom=d.offsetBottom),null!=d.offsetTop&&(d.offset.top=d.offsetTop),b.call(c,d)})})}(jQuery);
\ No newline at end of file


[04/16] storm git commit: STORM-1617: Release Specific Documentation 0.9.x

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/assets/css/font-awesome.min.css
----------------------------------------------------------------------
diff --git a/docs/assets/css/font-awesome.min.css b/docs/assets/css/font-awesome.min.css
new file mode 100644
index 0000000..24fcc04
--- /dev/null
+++ b/docs/assets/css/font-awesome.min.css
@@ -0,0 +1,4 @@
+/*!
+ *  Font Awesome 4.3.0 by @davegandy - http://fontawesome.io - @fontawesome
+ *  License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
+ */@font-face{font-family:'FontAwesome';src:url('../fonts/fontawesome-webfont.eot?v=4.3.0');src:url('../fonts/fontawesome-webfont.eot?#iefix&v=4.3.0') format('embedded-opentype'),url('../fonts/fontawesome-webfont.woff2?v=4.3.0') format('woff2'),url('../fonts/fontawesome-webfont.woff?v=4.3.0') format('woff'),url('../fonts/fontawesome-webfont.ttf?v=4.3.0') format('truetype'),url('../fonts/fontawesome-webfont.svg?v=4.3.0#fontawesomeregular') format('svg');font-weight:normal;font-style:normal}.fa{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;transform:translate(0, 0)}.fa-lg{font-size:1.33333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571429em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14285714em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{p
 osition:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1);-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{filter:progid:DXImageTransform.Micr
 osoft.BasicImage(rotation=2);-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=3);-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);-webkit-transform:scale(-1, 1);-ms-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root .fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size
 :2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-remove:before,.fa-close:before,.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o
 -up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{content:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content:"\f030"}.fa-font:before{content:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:before{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:
 "\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video-camera:before{content:"\f03d"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}
 .fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before{content:"\f057"}.fa-check-circle:before{content:"\f058"}.fa-question-circle:before{content:"\f059"}.fa-info-circle:before{content:"\f05a"}.fa-crosshairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f066"}.fa-plus:before{content:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before{content:"\f069"}.fa-exclamation-circle:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-sl
 ash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before,.fa-bar-chart:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:be
 fore{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:before{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook-f:before,.fa-facebook:before{content:"\f09a"}.fa-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-rss:before{content:"\f09e"}.fa-hdd-o:before{content:"\f0a0"}.fa-bullhorn:before{content:"\f0a1"}.fa-bell:before{content:"\f0f3"}.fa-certificate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-h
 and-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{content:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-
 underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-desc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-asc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-linkedin:before{content:"\f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-
 flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell-o:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content
 :"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-o:before{content:"\f118"}.fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.fa-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content
 :"\f120"}.fa-code:before{content:"\f121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-info:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calendar-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-c
 ircle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-minus-square:before{content:"\f146"}.fa-minus-square-o:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-square:before{content:"\f14d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{conten
 t:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-asc:before{content:"\f160"}.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-desc:before{content:"\f163"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-youtube-play:bef
 ore{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arrow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-foursquare:before{content:"\f180"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.fa-gittip:before,.fa-gratipay:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:before{content:"\f186"}.fa-arc
 hive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before{content:"\f196"}.fa-space-shuttle:before{content:"\f197"}.fa-slack:before{content:"\f198"}.fa-envelope-square:before{content:"\f199"}.fa-wordpress:before{content:"\f19a"}.fa-openid:before{content:"\f19b"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:"\f19c"}.fa-mortar-board:before,.fa-graduation-cap:before{content:"\f19d"}.fa-yahoo:before{content:"\f19e"}.fa-google:before{c
 ontent:"\f1a0"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-square:before{content:"\f1a2"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-stumbleupon:before{content:"\f1a4"}.fa-delicious:before{content:"\f1a5"}.fa-digg:before{content:"\f1a6"}.fa-pied-piper:before{content:"\f1a7"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-drupal:before{content:"\f1a9"}.fa-joomla:before{content:"\f1aa"}.fa-language:before{content:"\f1ab"}.fa-fax:before{content:"\f1ac"}.fa-building:before{content:"\f1ad"}.fa-child:before{content:"\f1ae"}.fa-paw:before{content:"\f1b0"}.fa-spoon:before{content:"\f1b1"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-recycle:before{content:"\f1b8"}.fa-automobile:before,.fa-car:before{content:"\f1b9"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-tree:before{content:"\f1bb"}.fa-spotify:before{cont
 ent:"\f1bc"}.fa-deviantart:before{content:"\f1bd"}.fa-soundcloud:before{content:"\f1be"}.fa-database:before{content:"\f1c0"}.fa-file-pdf-o:before{content:"\f1c1"}.fa-file-word-o:before{content:"\f1c2"}.fa-file-excel-o:before{content:"\f1c3"}.fa-file-powerpoint-o:before{content:"\f1c4"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:"\f1c5"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:"\f1c6"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:"\f1c7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\f1c8"}.fa-file-code-o:before{content:"\f1c9"}.fa-vine:before{content:"\f1ca"}.fa-codepen:before{content:"\f1cb"}.fa-jsfiddle:before{content:"\f1cc"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:"\f1cd"}.fa-circle-o-notch:before{content:"\f1ce"}.fa-ra:before,.fa-rebel:before{content:"\f1d0"}.fa-ge:before,.fa-empire:before{content:"\f1d1"}.fa-git-square:before{content:
 "\f1d2"}.fa-git:before{content:"\f1d3"}.fa-hacker-news:before{content:"\f1d4"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-qq:before{content:"\f1d6"}.fa-wechat:before,.fa-weixin:before{content:"\f1d7"}.fa-send:before,.fa-paper-plane:before{content:"\f1d8"}.fa-send-o:before,.fa-paper-plane-o:before{content:"\f1d9"}.fa-history:before{content:"\f1da"}.fa-genderless:before,.fa-circle-thin:before{content:"\f1db"}.fa-header:before{content:"\f1dc"}.fa-paragraph:before{content:"\f1dd"}.fa-sliders:before{content:"\f1de"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-bomb:before{content:"\f1e2"}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:"\f1e3"}.fa-tty:before{content:"\f1e4"}.fa-binoculars:before{content:"\f1e5"}.fa-plug:before{content:"\f1e6"}.fa-slideshare:before{content:"\f1e7"}.fa-twitch:before{content:"\f1e8"}.fa-yelp:before{content:"\f1e9"}.fa-newspaper-o:before{content:"\f1ea"}.fa-wifi:before{content:"\f1eb"}.fa-calculator:before{content:
 "\f1ec"}.fa-paypal:before{content:"\f1ed"}.fa-google-wallet:before{content:"\f1ee"}.fa-cc-visa:before{content:"\f1f0"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-bell-slash:before{content:"\f1f6"}.fa-bell-slash-o:before{content:"\f1f7"}.fa-trash:before{content:"\f1f8"}.fa-copyright:before{content:"\f1f9"}.fa-at:before{content:"\f1fa"}.fa-eyedropper:before{content:"\f1fb"}.fa-paint-brush:before{content:"\f1fc"}.fa-birthday-cake:before{content:"\f1fd"}.fa-area-chart:before{content:"\f1fe"}.fa-pie-chart:before{content:"\f200"}.fa-line-chart:before{content:"\f201"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-bicycle:before{content:"\f206"}.fa-bus:before{content:"\f207"}.fa-ioxhost:before{content:"\f208"}.fa-angellist:before{content:"
 \f209"}.fa-cc:before{content:"\f20a"}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:"\f20b"}.fa-meanpath:before{content:"\f20c"}.fa-buysellads:before{content:"\f20d"}.fa-connectdevelop:before{content:"\f20e"}.fa-dashcube:before{content:"\f210"}.fa-forumbee:before{content:"\f211"}.fa-leanpub:before{content:"\f212"}.fa-sellsy:before{content:"\f213"}.fa-shirtsinbulk:before{content:"\f214"}.fa-simplybuilt:before{content:"\f215"}.fa-skyatlas:before{content:"\f216"}.fa-cart-plus:before{content:"\f217"}.fa-cart-arrow-down:before{content:"\f218"}.fa-diamond:before{content:"\f219"}.fa-ship:before{content:"\f21a"}.fa-user-secret:before{content:"\f21b"}.fa-motorcycle:before{content:"\f21c"}.fa-street-view:before{content:"\f21d"}.fa-heartbeat:before{content:"\f21e"}.fa-venus:before{content:"\f221"}.fa-mars:before{content:"\f222"}.fa-mercury:before{content:"\f223"}.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-venus-double:before{content:"\f226"
 }.fa-mars-double:before{content:"\f227"}.fa-venus-mars:before{content:"\f228"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-neuter:before{content:"\f22c"}.fa-facebook-official:before{content:"\f230"}.fa-pinterest-p:before{content:"\f231"}.fa-whatsapp:before{content:"\f232"}.fa-server:before{content:"\f233"}.fa-user-plus:before{content:"\f234"}.fa-user-times:before{content:"\f235"}.fa-hotel:before,.fa-bed:before{content:"\f236"}.fa-viacoin:before{content:"\f237"}.fa-train:before{content:"\f238"}.fa-subway:before{content:"\f239"}.fa-medium:before{content:"\f23a"}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/assets/css/main.scss
----------------------------------------------------------------------
diff --git a/docs/assets/css/main.scss b/docs/assets/css/main.scss
new file mode 100644
index 0000000..352e13a
--- /dev/null
+++ b/docs/assets/css/main.scss
@@ -0,0 +1,48 @@
+---
+# Only the main Sass file needs front matter (the dashes are enough)
+---
+@charset "utf-8";
+
+
+$spacing-unit:     30px;
+
+@import
+        "syntax-highlighting"
+;
+
+#sidebar {
+  float: left;
+  background: #F6F9FB;
+	border: 1px solid #E1E1E1;
+	padding: 0px 10px 10px 0px;
+	margin-left: 0px;
+
+}
+
+#sidebar a.current {
+  color: #E00000;
+}
+
+#sidebar ul {
+  list-style: none;
+  font-size: 0.83em;
+  padding-left: 20px;
+}
+
+#sidebar ul li {
+  margin-top: 3px;
+}
+
+
+#aboutcontent {
+	width: 70%;
+	margin-top:0px;
+	margin-left: 220px;
+}
+
+#footer {
+	//margin: 100px;
+	//padding: 0;
+	font-size: 0.7em;
+}
+

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/assets/css/owl.carousel.css
----------------------------------------------------------------------
diff --git a/docs/assets/css/owl.carousel.css b/docs/assets/css/owl.carousel.css
new file mode 100644
index 0000000..4e3c17c
--- /dev/null
+++ b/docs/assets/css/owl.carousel.css
@@ -0,0 +1,71 @@
+/* 
+ * 	Core Owl Carousel CSS File
+ *	v1.3.3
+ */
+
+/* clearfix */
+.owl-carousel .owl-wrapper:after {
+	content: ".";
+	display: block;
+	clear: both;
+	visibility: hidden;
+	line-height: 0;
+	height: 0;
+}
+/* display none until init */
+.owl-carousel{
+	display: none;
+	position: relative;
+	width: 100%;
+	-ms-touch-action: pan-y;
+}
+.owl-carousel .owl-wrapper{
+	display: none;
+	position: relative;
+	-webkit-transform: translate3d(0px, 0px, 0px);
+}
+.owl-carousel .owl-wrapper-outer{
+	overflow: hidden;
+	position: relative;
+	width: 100%;
+}
+.owl-carousel .owl-wrapper-outer.autoHeight{
+	-webkit-transition: height 500ms ease-in-out;
+	-moz-transition: height 500ms ease-in-out;
+	-ms-transition: height 500ms ease-in-out;
+	-o-transition: height 500ms ease-in-out;
+	transition: height 500ms ease-in-out;
+}
+	
+.owl-carousel .owl-item{
+	float: left;
+}
+.owl-controls .owl-page,
+.owl-controls .owl-buttons div{
+	cursor: pointer;
+}
+.owl-controls {
+	-webkit-user-select: none;
+	-khtml-user-select: none;
+	-moz-user-select: none;
+	-ms-user-select: none;
+	user-select: none;
+	-webkit-tap-highlight-color: rgba(0, 0, 0, 0);
+}
+
+/* mouse grab icon */
+.grabbing { 
+    cursor:url(grabbing.png) 8 8, move;
+}
+
+/* fix */
+.owl-carousel  .owl-wrapper,
+.owl-carousel  .owl-item{
+	-webkit-backface-visibility: hidden;
+	-moz-backface-visibility:    hidden;
+	-ms-backface-visibility:     hidden;
+  -webkit-transform: translate3d(0,0,0);
+  -moz-transform: translate3d(0,0,0);
+  -ms-transform: translate3d(0,0,0);
+}
+

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/assets/css/owl.theme.css
----------------------------------------------------------------------
diff --git a/docs/assets/css/owl.theme.css b/docs/assets/css/owl.theme.css
new file mode 100644
index 0000000..9772975
--- /dev/null
+++ b/docs/assets/css/owl.theme.css
@@ -0,0 +1,79 @@
+/*
+* 	Owl Carousel Owl Demo Theme 
+*	v1.3.3
+*/
+
+.owl-theme .owl-controls{
+	margin-top: 10px;
+	text-align: center;
+}
+
+/* Styling Next and Prev buttons */
+
+.owl-theme .owl-controls .owl-buttons div{
+	color: #FFF;
+	display: inline-block;
+	zoom: 1;
+	*display: inline;/*IE7 life-saver */
+	margin: 5px;
+	padding: 3px 10px;
+	font-size: 12px;
+	-webkit-border-radius: 30px;
+	-moz-border-radius: 30px;
+	border-radius: 30px;
+	background: #869791;
+	filter: Alpha(Opacity=50);/*IE7 fix*/
+	opacity: 0.5;
+}
+/* Clickable class fix problem with hover on touch devices */
+/* Use it for non-touch hover action */
+.owl-theme .owl-controls.clickable .owl-buttons div:hover{
+	filter: Alpha(Opacity=100);/*IE7 fix*/
+	opacity: 1;
+	text-decoration: none;
+}
+
+/* Styling Pagination*/
+
+.owl-theme .owl-controls .owl-page{
+	display: inline-block;
+	zoom: 1;
+	*display: inline;/*IE7 life-saver */
+}
+.owl-theme .owl-controls .owl-page span{
+	display: block;
+	width: 12px;
+	height: 12px;
+	margin: 5px 7px;
+	filter: Alpha(Opacity=50);/*IE7 fix*/
+	opacity: 0.5;
+	-webkit-border-radius: 20px;
+	-moz-border-radius: 20px;
+	border-radius: 20px;
+	background: #869791;
+}
+
+.owl-theme .owl-controls .owl-page.active span,
+.owl-theme .owl-controls.clickable .owl-page:hover span{
+	filter: Alpha(Opacity=100);/*IE7 fix*/
+	opacity: 1;
+}
+
+/* If PaginationNumbers is true */
+
+.owl-theme .owl-controls .owl-page span.owl-numbers{
+	height: auto;
+	width: auto;
+	color: #FFF;
+	padding: 2px 10px;
+	font-size: 12px;
+	-webkit-border-radius: 30px;
+	-moz-border-radius: 30px;
+	border-radius: 30px;
+}
+
+/* preloading images */
+.owl-item.loading{
+	min-height: 150px;
+	background: url(AjaxLoader.gif) no-repeat center center
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/assets/css/style.css
----------------------------------------------------------------------
diff --git a/docs/assets/css/style.css b/docs/assets/css/style.css
new file mode 100644
index 0000000..80e0614
--- /dev/null
+++ b/docs/assets/css/style.css
@@ -0,0 +1,503 @@
+/*
+	Theme: Apache Storm
+*/
+
+/* Fonts */
+@import url(http://fonts.googleapis.com/css?family=Lato:300,400,700,300italic,400italic,700italic);
+@import url(http://fonts.googleapis.com/css?family=Oswald:400,300);
+
+/* Generic */
+body {
+	font-family: "Lato", sans-serif;
+	font-weight: 400;
+	font-size: 14px;
+}
+header {
+	background: #fdfbfb url(../images/header-bg.png);
+	border-top: 3px #328fbf solid;
+}
+footer {
+	background: #222 url(../images/footer-bg.png);
+	padding-top: 10px;
+	color: #ddd;
+	border-top: 3px #328fbf solid;
+	font-size: 12px;
+}
+p {
+	line-height: 24px;
+}
+h1, h2, h3, h4, h5, h6{
+	font-family: "Oswald", sans-serif;
+	font-weight: 400;
+}
+
+/* Bootstrap Extended */
+.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 {
+	padding-top: 15px;
+	padding-bottom: 15px;
+}
+.navbar{
+	background: #328fbf;
+	border-top:1px solid #235693;
+	border-bottom:1px solid #235693;
+	border-radius:0;
+	margin-bottom:10px; 
+}
+.navbar.affix {
+    position: fixed;
+    top: 0;
+    width: 100%;
+    z-index:10;
+}
+.nav .open>a, .nav .open>a:hover, .nav .open>a:focus {
+   border-color: #235693;
+}
+.navbar button{
+	background:#29a1c4;  
+}
+.navbar button:hover{
+	background:#29a1c4;
+}
+.navbar-toggle .icon-bar {
+  background: #fff;
+}
+.navbar .nav{
+	border-right:1px solid #235693;
+}
+.navbar .nav > li > a{
+	color: #ffffff;
+	background: #328fbf !important; 
+	border-left:1px solid #235693;
+	font-weight: 400;
+	
+	-webkit-transition:background 1s ease;
+	-moz-transition:background 1s ease;
+	-o-transition:background 1s ease;	
+	transition:background 1s ease;
+}
+.navbar .nav > li > a:hover, .navbar .nav > li > a.current {
+	background:#235693 !important;
+}
+.navbar .nav .active > a,
+.navbar .nav .active > a:hover,
+.navbar .nav .active > a:focus {
+	background: #235693;
+}
+.dropdown-toggle{
+	background: #328fbf;
+}
+.dropdown-menu{
+	padding:0px;
+	background:#328fbf;
+	border:1px solid #235693;
+}
+.dropdown-menu a{
+	padding: 8px 10px !important;
+	color:#ffffff !important;
+	background:#328fbf;
+	border-bottom:1px solid #235693;
+	
+	-webkit-transition:background 1s ease;
+	-moz-transition:background 1s ease;
+	-o-transition:background 1s ease;
+	transition:background 1s ease;
+}
+.dropdown-menu a:hover,.dropdown-menu a:focus{
+	background:#235693 !important;
+}
+
+/* Theme Style */
+.row-margin-bottom {
+	margin-bottom: 25px;
+}
+.remove-custom-padding {
+	padding-top: 0px;
+	padding-bottom: 0px;
+}
+.no-margin-top {
+	margin-top: 0px;
+}
+.logo {
+	margin-top: 20px;
+	margin-bottom: 20px;
+}
+.page-title {
+	margin-top: 0;
+	margin-bottom: 20px;
+	padding-bottom: 5px;
+	color: #235693;
+	border-bottom: 2px solid #e99941;
+	font-weight: 300;
+}
+.page-title span {
+	font-family: "Lato", sans-serif;
+	font-weight: 400;
+	font-size: 14px;
+	color: #328fbf;
+	margin-top: 5px;
+}
+.btn-std {
+	background-color: #328fbf;
+	border-bottom: 2px solid #235693;
+	color: #ffffff;
+	padding: 4px 12px;
+	font-size: 12px;
+	text-align: center;
+	border-radius: 5px;
+}
+.btn-std:hover {
+	background-color: #235693;
+	color: #ffffff;
+	text-decoration: none;
+}
+.btn-download {
+	background-color: #32bf61;
+	border-bottom: 2px solid #239329;
+	font-size: 16px;
+	font-weight: 700;
+	margin-top: 35px;
+	margin-bottom: 35px;
+	padding: 12px 36px;	
+}
+.btn-download:hover {
+	background-color: #239329;
+}
+.box-primary {
+	padding: 8px 10px;
+	background: #fbfbfb;
+	border: 1px solid #ccc;
+	border-bottom: 3px solid #ccc;
+	border-radius: 10px;
+}
+.box-primary h4 {
+	border-bottom: 1px solid #ccc;
+	padding-bottom: 10px;
+}
+.box-warning {
+	padding: 8px 10px;
+	background: #FFFAF3;
+	border: 1px solid #EB9A35;
+	border-bottom: 3px solid #EB9A35;
+	border-radius: 10px;
+}
+.box-warning h4 {
+	border-bottom: 1px solid #EB9A35;
+	padding-bottom: 10px;
+}
+.box-info {
+	padding: 8px 10px;
+	background: #F8FFFF;
+	border: 1px solid #328fbf;
+	border-bottom: 3px solid #328fbf;
+	/*border-radius: 10px;*/
+}
+.box-info h4 {
+	border-bottom: 1px solid #328fbf;
+	padding-bottom: 10px;
+}
+
+/* Footer */
+footer hr {
+	margin: 0;
+	border-top: 1px solid #555;
+	border-bottom: 1px solid #111;
+}
+.footer-widget h5 {
+	padding-bottom: 10px;
+	border-bottom: 1px solid #555;
+	margin-bottom: 10px;
+}
+.footer-list {
+	list-style: none;
+	padding-left: 20px;
+}
+.footer-list li {
+	line-height: 32px;
+}
+.footer-list li:before {
+	content: "\f105";
+	font-family: 'FontAwesome';
+	margin-right: 10px;
+}
+.footer-list li a {
+	color: #ddd;
+}
+
+.tweet {margin-bottom: 10px;}
+
+.social a, .social a:visited, .social a:hover{
+	color:#fff;
+	text-decoration:none;
+}
+.social i{
+	display:inline-block;
+	height:30px;
+	width:30px;
+	font-size:15px;
+	text-align:center;
+	line-height:30px;
+	-webkit-transition:background 1s ease;
+	-moz-transition:background 1s ease;
+	-o-transition:background 1s ease;
+	transition:background 1s ease;
+	border-radius:30px;
+	margin-right:5px;
+}
+.social i:hover{
+	-webkit-transition:background 1s ease;
+	-moz-transition:background 1s ease;
+	-o-transition:background 1s ease;
+	transition:background 1s ease;
+}
+.facebook { background: #1e78ae !important; }
+.twitter { background: #1ba1e2 !important; }
+.google-plus { background: #f22d0c !important; }
+.linkedin { background: #2d93cf !important; }
+.pinterest { background:	#dd1617 !important; }
+
+.facebook:hover, .twitter:hover, .google-plus:hover, .linkedin:hover, .pinterest:hover { background: #666 !important; }
+
+/* Scroll to top */
+.totop {
+	position: fixed;
+	bottom: 10px;
+	right: 10px;
+	z-index: 104400;
+}
+.totop a i{
+	display: block;
+	width: 40px;
+	height: 40px;
+	line-height: 40px;
+	text-align: center;
+	font-size: 25px;
+	background: rgba(60,60,60,0.7);
+}
+.totop a:hover i { background: rgba(60,60,60,1); }
+.totop a, .totop a:visited{
+	color: #fff;
+}
+.totop a:hover {
+	color: #eee;
+	text-decoration: none;
+}
+
+/* Page Specific - Home */
+.latest-news {
+	list-style: none;
+	padding-left: 20px;
+}
+.latest-news li {
+	line-height: 28px;
+}
+.latest-news li:before {
+	content: "\f101";
+	font-family: 'FontAwesome';
+	margin-right: 10px;
+}
+.latest-news li span.small {
+	font-size:10px;
+}
+
+/* Page Specific - Download */
+.download-block {
+	margin-bottom: 15px;
+	border-bottom: 1px solid #eee;
+}
+.download-block h4 {
+	background: #328fbf;
+	color: #fff;
+	padding: 10px;
+}
+.download-info {
+	border-left: 4px solid #eee;
+	padding-left: 15px;
+	border-bottom: 1px solid #eee;
+	padding-bottom: 10px;
+}
+.arrow-list {
+	list-style: none;
+	padding-left: 20px;
+}
+.arrow-list li {
+	line-height: 28px;
+}
+.arrow-list li:before {
+	content: "\f105";
+	font-family: 'FontAwesome';
+	margin-right: 10px;
+}
+
+/* Page Specific - News */
+.news {
+	list-style: none;
+	padding-left: 0px;
+}
+.news li a {
+	display: block;
+	padding: 7px;
+	margin-bottom: 2px;
+	font-family: "Oswald", sans-serif;
+	letter-spacing: 1px;
+	color: #328fbf;
+	border: 1px #328fbf solid;
+	-webkit-transition:background 1s ease;
+	-moz-transition:background 1s ease;
+	-o-transition:background 1s ease;	
+	transition:background 1s ease;
+}
+.news li a:hover, .news li a.current {
+	text-decoration: none;
+	border: 1px #235693 solid;
+	background-color: #328fbf;
+	color:white;
+}
+.news-title {
+	color: #235693;
+	margin-top: 0;
+}
+.news-meta {
+	padding: 5px 0;
+	margin-bottom: 20px;
+	border-top: 1px #328fbf solid;
+	border-bottom: 1px #328fbf solid;
+	color: #235693;
+	font-weight: 400;
+}
+.news-meta .fa-user {
+	margin-left: 25px;
+}
+
+/* Page Specific - FAQ */
+.faq .nav-tabs {
+	border-bottom: 1px solid #328fbf;
+}
+.faq .nav-tabs > li > a {
+	margin-right: 2px;
+	font-family: "Oswald", sans-serif;
+	font-size: 18px;
+	letter-spacing: 1px;
+	border: none;
+	border-radius: 0;
+}
+.faq .nav-tabs > li > a:hover {
+	border-bottom-color: #328fbf;
+	background-color: #FFF0DF;
+}
+.faq .nav-tabs > li.active > a, 
+.faq .nav-tabs > li.active > a:hover, 
+.faq .nav-tabs > li.active > a:focus {
+	color: #235693;
+	cursor: default;
+	background-color: #fff;
+	border: 1px solid #328fbf;
+	border-bottom-color: transparent;
+}
+.faq .tab-content {
+	padding: 15px;
+	border: 1px #328fbf solid;
+	border-top: none;
+}
+
+/* Syntax Highlighting Styles */
+.highlight {background: #fff;}
+.highlight .c {color: #998; font-style: italic;}
+.highlight .err {color: #a61717; background-color: #e3d2d2;}
+.highlight .k {font-weight: bold;}
+.highlight .o {font-weight: bold;}
+.highlight .cm {color: #998; font-style: italic;}
+.highlight .cp {color: #999; font-weight: bold;}
+.highlight .c1 {color: #998; font-style: italic;}
+.highlight .cs {color: #999; font-weight: bold; font-style: italic;}
+.highlight .gd {color: #000; background-color: #fdd;}
+.highlight .gd .x {color: #000; background-color: #faa;}
+.highlight .ge {font-style: italic;}
+.highlight .gr {color: #a00;}
+.highlight .gh {color: #999;}
+.highlight .gi {color: #000; background-color: #dfd;}
+.highlight .gi .x {color: #000; background-color: #afa;}
+.highlight .go {color: #888;}
+.highlight .gp {color: #555;}
+.highlight .gs {font-weight: bold;}
+.highlight .gu {color: #aaa;}
+.highlight .gt {color: #a00;}
+.highlight .kc {font-weight: bold;}
+.highlight .kd {font-weight: bold;}
+.highlight .kp {font-weight: bold;}
+.highlight .kr {font-weight: bold;}
+.highlight .kt {color: #458; font-weight: bold;}
+.highlight .m {color: #099;}
+.highlight .s {color: #d14;}
+.highlight .na {color: #008080;}
+.highlight .nb {color: #0086B3;}
+.highlight .nc {color: #458; font-weight: bold;}
+.highlight .no {color: #008080;}
+.highlight .ni {color: #800080;}
+.highlight .ne {color: #900; font-weight: bold;}
+.highlight .nf {color: #900; font-weight: bold;}
+.highlight .nn {color: #555;}
+.highlight .nt {color: #000080;}
+.highlight .nv {color: #008080;}
+.highlight .ow {font-weight: bold;}
+.highlight .w {color: #bbb;}
+.highlight .mf {color: #099;}
+.highlight .mh {color: #099;}
+.highlight .mi {color: #099;}
+.highlight .mo {color: #099;}
+.highlight .sb {color: #d14;}
+.highlight .sc {color: #d14;}
+.highlight .sd {color: #d14;}
+.highlight .s2 {color: #d14;}
+.highlight .se {color: #d14;}
+.highlight .sh {color: #d14;}
+.highlight .si {color: #d14;}
+.highlight .sx {color: #d14;}
+.highlight .sr {color: #009926;}
+.highlight .s1 {color: #d14;}
+.highlight .ss {color: #990073;}
+.highlight .bp {color: #999;}
+.highlight .vc {color: #008080;}
+.highlight .vg {color: #008080;}
+.highlight .vi {color: #008080;}
+.highlight .il {color: #099;}
+
+/* Page Specific - Documentation Index */
+.documentation-list {
+	list-style: none;
+	padding-left: 0px;
+}
+.documentation-list li {
+	line-height: 28px;
+}
+.documentation-list li:before {
+	content: "\f101";
+	font-family: 'FontAwesome';
+	margin-right: 5px;
+	color: #337ab7;
+}
+
+
+/* Responsive */
+@media screen and (max-width: 992px) {
+	.logo, .btn-download {margin-top:0; margin-bottom:0;}
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+


[15/16] storm git commit: STORM-1617: 0.10.x release docs

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Running-topologies-on-a-production-cluster.md
----------------------------------------------------------------------
diff --git a/docs/Running-topologies-on-a-production-cluster.md b/docs/Running-topologies-on-a-production-cluster.md
index 248c929..76c43dd 100644
--- a/docs/Running-topologies-on-a-production-cluster.md
+++ b/docs/Running-topologies-on-a-production-cluster.md
@@ -1,5 +1,7 @@
 ---
+title: Running Topologies on a Production Cluster
 layout: documentation
+documentation: true
 ---
 Running topologies on a production cluster is similar to running in [Local mode](Local-mode.html). Here are the steps:
 
@@ -48,7 +50,7 @@ You can find out how to configure your `storm` client to talk to a Storm cluster
 There are a variety of configurations you can set per topology. A list of all the configurations you can set can be found [here](javadocs/backtype/storm/Config.html). The ones prefixed with "TOPOLOGY" can be overridden on a topology-specific basis (the other ones are cluster configurations and cannot be overridden). Here are some common ones that are set for a topology:
 
 1. **Config.TOPOLOGY_WORKERS**: This sets the number of worker processes to use to execute the topology. For example, if you set this to 25, there will be 25 Java processes across the cluster executing all the tasks. If you had a combined 150 parallelism across all components in the topology, each worker process will have 6 tasks running within it as threads.
-2. **Config.TOPOLOGY_ACKERS**: This sets the number of tasks that will track tuple trees and detect when a spout tuple has been fully processed. Ackers are an integral part of Storm's reliability model and you can read more about them on [Guaranteeing message processing](Guaranteeing-message-processing.html).
+2. **Config.TOPOLOGY_ACKER_EXECUTORS**: This sets the number of executors that will track tuple trees and detect when a spout tuple has been fully processed. Ackers are an integral part of Storm's reliability model and you can read more about them on [Guaranteeing message processing](Guaranteeing-message-processing.html). If this variable is not set or is set to null, Storm will set the number of acker executors equal to the number of workers configured for this topology. If this variable is set to 0, then Storm will immediately ack tuples as soon as they come off the spout, effectively disabling reliability.
 3. **Config.TOPOLOGY_MAX_SPOUT_PENDING**: This sets the maximum number of spout tuples that can be pending on a single spout task at once (pending means the tuple has not been acked or failed yet). It is highly recommended you set this config to prevent queue explosion.
 4. **Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS**: This is the maximum amount of time a spout tuple has to be fully completed before it is considered failed. This value defaults to 30 seconds, which is sufficient for most topologies. See [Guaranteeing message processing](Guaranteeing-message-processing.html) for more information on how Storm's reliability model works.
 5. **Config.TOPOLOGY_SERIALIZATIONS**: You can register more serializers to Storm using this config so that you can use custom types within tuples.
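
As a minimal sketch of how these settings are typically applied when submitting a topology (the concrete values, the topology name, and the builder wiring below are illustrative assumptions, not recommendations):

```java
import backtype.storm.Config;
import backtype.storm.StormSubmitter;
import backtype.storm.topology.TopologyBuilder;

public class SubmitExample {
    public static void main(String[] args) throws Exception {
        // Assemble the topology; spout/bolt wiring omitted for brevity.
        TopologyBuilder builder = new TopologyBuilder();

        Config conf = new Config();
        conf.setNumWorkers(25);         // Config.TOPOLOGY_WORKERS
        conf.setNumAckers(25);          // Config.TOPOLOGY_ACKER_EXECUTORS
        conf.setMaxSpoutPending(1000);  // Config.TOPOLOGY_MAX_SPOUT_PENDING
        conf.setMessageTimeoutSecs(30); // Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS

        StormSubmitter.submitTopology("example-topology", conf, builder.createTopology());
    }
}
```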

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/SECURITY.md
----------------------------------------------------------------------
diff --git a/docs/SECURITY.md b/docs/SECURITY.md
index 495061a..353cb86 100644
--- a/docs/SECURITY.md
+++ b/docs/SECURITY.md
@@ -3,19 +3,23 @@ title: Running Apache Storm Securely
 layout: documentation
 documentation: true
 ---
+
 # Running Apache Storm Securely
 
-The current release of Apache Storm offers no authentication or authorization.
-It does not encrypt any data being sent across the network, and does not 
-attempt to restrict access to data stored on the local file system or in
-Apache Zookeeper.  As such there are a number of different precautions you may
-want to enact outside of storm itself to be sure storm is running securely.
+Apache Storm offers a range of configuration options for securing your
+cluster.  By default all authentication and authorization are disabled, but
+they can be turned on as needed.
+
+## Firewall/OS level Security
+
+You can still have a secure storm cluster without turning on formal
+authentication and authorization, but doing so usually requires
+configuring your operating system to restrict the operations that can be performed.
+This is generally a good idea even if you plan to run your cluster with auth enabled.
 
 The exact details of how to set up these precautions vary a lot and are beyond
 the scope of this document.
 
-## Network Security
-
 It is generally a good idea to enable a firewall and restrict incoming network
 connections to only those originating from the cluster itself and from trusted
 hosts and services; a complete list of the ports storm uses is below. 
@@ -33,47 +37,442 @@ IPsec to encrypt all traffic being sent between the hosts in the cluster.
 | 8000 | `logviewer.port` | Client Web Browsers | Logviewer |
 | 3772 | `drpc.port` | External DRPC Clients | DRPC |
 | 3773 | `drpc.invocations.port` | Worker Processes | DRPC |
+| 3774 | `drpc.http.port` | External HTTP DRPC Clients | DRPC |
 | 670{0,1,2,3} | `supervisor.slots.ports` | Worker Processes | Worker Processes |
 
+
 ### UI/Logviewer
 
 The UI and logviewer processes provide a way to not only see what a cluster is
 doing, but also manipulate running topologies.  In general these processes should
-not be exposed except to users of the cluster.  It is often simplest to restrict
-these ports to only accept connections from local hosts, and then front them with another web server,
-like Apache httpd, that can authenticate/authorize incoming connections and
+not be exposed except to users of the cluster.
+
+Some form of authentication is typically required, either by using java servlet filters 
+
+```yaml
+ui.filter: "filter.class"
+ui.filter.params:
+   "param1": "value1"
+```
+or by restricting the UI/logviewer ports to only accept connections from local
+hosts, and then front them with another web server, like Apache httpd, that can
+authenticate/authorize incoming connections and
 proxy the connection to the storm process.  To make this work the ui process must have
 logviewer.port set to the port of the proxy in its storm.yaml, while the logviewers
 must have it set to the actual port that they are going to bind to.
 
-### Nimbus
+The servlet filters are preferred because they allow individual topologies to
+specify who is and who is not allowed to access the pages associated with
+them.  
+
+Storm UI can be configured to use AuthenticationFilter from hadoop-auth.
+```yaml
+ui.filter: "org.apache.hadoop.security.authentication.server.AuthenticationFilter"
+ui.filter.params:
+   "type": "kerberos"
+   "kerberos.principal": "HTTP/nimbus.witzend.com"
+   "kerberos.keytab": "/vagrant/keytabs/http.keytab"
+   "kerberos.name.rules": "RULE:[2:$1@$0]([jt]t@.*EXAMPLE.COM)s/.*/$MAPRED_USER/ RULE:[2:$1@$0]([nd]n@.*EXAMPLE.COM)s/.*/$HDFS_USER/DEFAULT"
+```
+Make sure to create a principal 'HTTP/{hostname}' (here hostname should be the one where the UI daemon runs).
+
+Once configured, users need to run kinit before accessing the UI.
+For example:
+curl -i --negotiate -u:anyUser -b ~/cookiejar.txt -c ~/cookiejar.txt http://storm-ui-hostname:8080/api/v1/cluster/summary
+
+1. Firefox: Go to about:config and search for network.negotiate-auth.trusted-uris; double-click to add the value "http://storm-ui-hostname:8080"
+2. Google Chrome: start from the command line with: google-chrome --auth-server-whitelist="*storm-ui-hostname" --auth-negotiate-delegate-whitelist="*storm-ui-hostname"
+3. IE: Configure trusted websites to include "storm-ui-hostname" and allow negotiation for that website
+
+**Caution**: In an AD MIT Kerberos setup the key size is bigger than the default UI jetty server request header size. Make sure you set ui.header.buffer.bytes to 65536 in storm.yaml. More details are on [STORM-633](https://issues.apache.org/jira/browse/STORM-633)
+
+
+## UI / DRPC SSL 
+
+Both the UI and DRPC allow users to configure SSL.
+
+### UI
+
+For the UI, users need to set the following configs in storm.yaml (a sketch with illustrative values follows the list). Generating keystores with the proper keys and certs should be taken care of by the user before this step.
+
+1. ui.https.port
+2. ui.https.keystore.type (example "jks")
+3. ui.https.keystore.path (example "/etc/ssl/storm_keystore.jks")
+4. ui.https.keystore.password (keystore password)
+5. ui.https.key.password (private key password)
+
+Optional configs:
+
+6. ui.https.truststore.path (example "/etc/ssl/storm_truststore.jks")
+7. ui.https.truststore.password (truststore password)
+8. ui.https.truststore.type (example "jks")
+
+If users want to set up 2-way auth:
+
+9. ui.https.want.client.auth (if set to true, the server requests client certificate authentication but keeps the connection open even if no authentication is provided)
+10. ui.https.need.client.auth (if set to true, the server requires the client to provide authentication)
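+
+A minimal sketch of the required configs, assuming a keystore at /etc/ssl/storm_keystore.jks (the port and passwords are illustrative):
+
+```yaml
+ui.https.port: 8443
+ui.https.keystore.type: "jks"
+ui.https.keystore.path: "/etc/ssl/storm_keystore.jks"
+ui.https.keystore.password: "keystore_password"
+ui.https.key.password: "key_password"
+```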
+
+
 
-Nimbus's Thrift port should be locked down as it can be used to control the entire
-cluster including running arbitrary user code on different nodes in the cluster.
-Ideally access to it is restricted to nodes within the cluster and possibly some gateway
-nodes that allow authorized users to log into them and run storm client commands.
 
 ### DRPC
+Similarly to the UI, users need to configure the following for DRPC (a parallel sketch follows the list):
+
+1. drpc.https.port
+2. drpc.https.keystore.type (example "jks")
+3. drpc.https.keystore.path (example "/etc/ssl/storm_keystore.jks")
+4. drpc.https.keystore.password (keystore password)
+5. drpc.https.key.password (private key password)
+
+Optional configs:
+
+6. drpc.https.truststore.path (example "/etc/ssl/storm_truststore.jks")
+7. drpc.https.truststore.password (truststore password)
+8. drpc.https.truststore.type (example "jks")
+
+If users want to set up 2-way auth:
+
+9. drpc.https.want.client.auth (if set to true, the server requests client certificate authentication but keeps the connection open even if no authentication is provided)
+10. drpc.https.need.client.auth (if set to true, the server requires the client to provide authentication)
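+
+A minimal sketch, parallel to the UI example above (the port and passwords are illustrative):
+
+```yaml
+drpc.https.port: 8445
+drpc.https.keystore.type: "jks"
+drpc.https.keystore.path: "/etc/ssl/storm_keystore.jks"
+drpc.https.keystore.password: "keystore_password"
+drpc.https.key.password: "key_password"
+```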
+
+
+
+
+
+## Authentication (Kerberos)
+
+Storm offers pluggable authentication support through Thrift and SASL.  This
+example covers only Kerberos, as it is a common setup for most big data
+projects.
+
+Setting up a KDC and configuring kerberos on each node is beyond the scope of
+this document and it is assumed that you have done that already.
+
+### Create Headless Principals and keytabs
+
+Each Zookeeper Server, Nimbus, and DRPC server will need a service principal, which, by convention, includes the FQDN of the host it will run on.  Be aware that the zookeeper user *MUST* be zookeeper.  
+The supervisors and UI also need a principal to run as, but because they make outgoing connections they do not need to be service principals. 
+The following is an example of how to set up kerberos principals, but the
+details may vary depending on your KDC and OS.
+
+
+```bash
+# Zookeeper (you will need one of these for each box in the ZK ensemble)
+sudo kadmin.local -q 'addprinc zookeeper/zk1.example.com@STORM.EXAMPLE.COM'
+sudo kadmin.local -q "ktadd -k /tmp/zk.keytab  zookeeper/zk1.example.com@STORM.EXAMPLE.COM"
+# Nimbus and DRPC
+sudo kadmin.local -q 'addprinc storm/storm.example.com@STORM.EXAMPLE.COM'
+sudo kadmin.local -q "ktadd -k /tmp/storm.keytab storm/storm.example.com@STORM.EXAMPLE.COM"
+# All UI logviewer and Supervisors
+sudo kadmin.local -q 'addprinc storm@STORM.EXAMPLE.COM'
+sudo kadmin.local -q "ktadd -k /tmp/storm.keytab storm@STORM.EXAMPLE.COM"
+```
+
+Be sure to distribute the keytab(s) to the appropriate boxes and set the FS permissions so that only the headless user running ZK or storm has access to them.
+
+#### Storm Kerberos Configuration
+
+Both storm and Zookeeper use jaas configuration files to log the user in.
+Each jaas file may have multiple sections for different interfaces being used.
+
+To enable Kerberos authentication in storm you need to set the following storm.yaml configs
+```yaml
+storm.thrift.transport: "backtype.storm.security.auth.kerberos.KerberosSaslTransportPlugin"
+java.security.auth.login.config: "/path/to/jaas.conf"
+```
+
+Nimbus and the supervisor processes will also connect to ZooKeeper (ZK), and we want to configure them to use Kerberos for authentication with ZK. To do this, append 
+```
+-Djava.security.auth.login.config=/path/to/jaas.conf
+```
+
+to the childopts of nimbus, ui, and supervisor.  Here is an example given the default childopts settings at the time of writing:
+
+```yaml
+nimbus.childopts: "-Xmx1024m -Djava.security.auth.login.config=/path/to/jaas.conf"
+ui.childopts: "-Xmx768m -Djava.security.auth.login.config=/path/to/jaas.conf"
+supervisor.childopts: "-Xmx256m -Djava.security.auth.login.config=/path/to/jaas.conf"
+```
+
+The jaas.conf file should look something like the following for the storm nodes.
+The StormServer section is used by nimbus and the DRPC Nodes.  It does not need to be included on supervisor nodes.
+The StormClient section is used by all storm clients that want to talk to nimbus, including the ui, logviewer, and supervisor.  We will use this section on the gateways as well but the structure of that will be a bit different.
+The Client section is used by processes wanting to talk to zookeeper and really only needs to be included with nimbus and the supervisors.
+The Server section is used by the zookeeper servers.
+Having unused sections in the jaas is not a problem.
+
+```
+StormServer {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="$keytab"
+   storeKey=true
+   useTicketCache=false
+   principal="$principal";
+};
+StormClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="$keytab"
+   storeKey=true
+   useTicketCache=false
+   serviceName="$nimbus_user"
+   principal="$principal";
+};
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="$keytab"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="$principal";
+};
+Server {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="$keytab"
+   storeKey=true
+   useTicketCache=false
+   principal="$principal";
+};
+```
+
+The following is an example based on the keytabs generated above:
+```
+StormServer {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="/keytabs/storm.keytab"
+   storeKey=true
+   useTicketCache=false
+   principal="storm/storm.example.com@STORM.EXAMPLE.COM";
+};
+StormClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="/keytabs/storm.keytab"
+   storeKey=true
+   useTicketCache=false
+   serviceName="storm"
+   principal="storm@STORM.EXAMPLE.COM";
+};
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="/keytabs/storm.keytab"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="storm@STORM.EXAMPLE.COM";
+};
+Server {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="/keytabs/zk.keytab"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="zookeeper/zk1.example.com@STORM.EXAMPLE.COM";
+};
+```
+
+Nimbus will also translate the principal into a local user name, so that other services can use this name.  To configure this for Kerberos authentication set
+
+```
+storm.principal.tolocal: "backtype.storm.security.auth.KerberosPrincipalToLocal"
+```
 
-Each DRPC server has two different ports.  The invocations port is accessed by worker
-processes within the cluster.  The other port is accessed by external clients that
-want to query the topology.  The external port should be restricted to hosts that you
-want to be able to do queries.
+This only needs to be done on nimbus, but it will not hurt on any node.
+We also need to inform the topology who the supervisor daemon and the nimbus daemon are running as from a ZooKeeper perspective.
 
-### Supervisors
+```
+storm.zookeeper.superACL: "sasl:${nimbus-user}"
+```
 
-Supervisors are only clients they are not servers, and as such don't need special restrictions.
+Here *nimbus-user* is the Kerberos user that nimbus uses to authenticate with ZooKeeper.  If ZooKeeper is stripping host and realm then this needs to have host and realm stripped too.
 
-### Workers
+#### ZooKeeper Ensemble
 
-Worker processes receive data from each other.  There is the option to encrypt this data using
-Blowfish by setting `topology.tuple.serializer` to `backtype.storm.security.serialization.BlowfishTupleSerializer`
-and setting `topology.tuple.serializer.blowfish.key` to a secret key you want your topology to use.
+Complete details of how to set up a secure ZK are beyond the scope of this document.  But in general you want to enable SASL authentication on each server, and optionally strip off host and realm:
 
-### Zookeeper
+```
+authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
+kerberos.removeHostFromPrincipal = true
+kerberos.removeRealmFromPrincipal = true
+```
+
+And you want to include the jaas.conf on the command line when launching the server so it can find the keytab.
+```
+-Djava.security.auth.login.config=/jaas/zk_jaas.conf
+```
+
+#### Gateways
+
+Ideally the end user will only need to run kinit before interacting with storm.  To make this happen seamlessly we need the default jaas.conf on the gateways to be something like
+
+```
+StormClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   doNotPrompt=false
+   useTicketCache=true
+   serviceName="$nimbus_user";
+};
+```
+
+The end user can override this if they have a headless user that has a keytab.
+
+### Authorization Setup
+
+*Authentication* does the job of verifying who the user is, but we also need *authorization* to do the job of enforcing what each user can do.
+
+The preferred authorization plug-in for nimbus is the *SimpleACLAuthorizer*.  To use the *SimpleACLAuthorizer*, set the following:
+
+```yaml
+nimbus.authorizer: "backtype.storm.security.auth.authorizer.SimpleACLAuthorizer"
+```
+
+DRPC has a separate authorizer configuration.  Do not use SimpleACLAuthorizer for DRPC.
+
+The *SimpleACLAuthorizer* plug-in needs to know who the supervisor users are, and it needs to know about all of the administrator users, including the user running the ui daemon. 
+
+These are set through *nimbus.supervisor.users* and *nimbus.admins* respectively.  Each can either be a full Kerberos principal name, or the name of the user with host and realm stripped off.
+
+The Log servers have their own authorization configurations.  These are set through *logs.users* and *logs.groups*.  These should be set to the admin users or groups for all of the nodes in the cluster.  
+
+When a topology is submitted, the submitting user can specify users in this list as well.  The users and groups specified, in addition to the users in the cluster-wide setting, will be granted access to the submitted topology's worker logs in the logviewers.
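+
+A minimal sketch of these settings, assuming a headless supervisor user named "storm" and an admin named "admin" (both names are illustrative):
+
+```yaml
+nimbus.supervisor.users:
+    - "storm"
+nimbus.admins:
+    - "admin"
+logs.users:
+    - "admin"
+```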
+
+### Supervisor headless User and group Setup
+
+To ensure isolation of users in multi-tenancy, the supervisors should run as a headless user and group unique to the supervisor nodes.  To enable this, follow the steps below.
+1. Add the headless user to all supervisor hosts.
+2. Create a unique group and make it the primary group for the headless user on the supervisor nodes.
+3. Then set the following properties on storm for these supervisor nodes.
+
+### Multi-tenant Scheduler
+
+To support multi-tenancy better we have written a new scheduler.  To enable this scheduler, set:
+```yaml
+storm.scheduler: "backtype.storm.scheduler.multitenant.MultitenantScheduler"
+```
+Be aware that many of the features of this scheduler rely on storm authentication.  Without them the scheduler will not know what the user is and will not isolate topologies properly.
+
+The goal of the multi-tenant scheduler is to provide a way to isolate topologies from one another, but to also limit the resources that an individual user can have in the cluster.
+
+The scheduler currently has one config that can be set either through `storm.yaml` or through a separate config file called `multitenant-scheduler.yaml` that should be placed in the same directory as `storm.yaml`.  It is preferable to use `multitenant-scheduler.yaml` because it can be updated without needing to restart nimbus.
+
+There is currently only one config in `multitenant-scheduler.yaml`: `multitenant.scheduler.user.pools` is a map from the user name to the maximum number of nodes that user is guaranteed to be able to use for their topologies.
+
+For example:
+
+```yaml
+multitenant.scheduler.user.pools: 
+    "evans": 10
+    "derek": 10
+```
+
+### Run worker processes as user who submitted the topology
+By default storm runs workers as the user that is running the supervisor.  This is not ideal for security.  To make storm run the topologies as the user that launched them, set:
+
+```yaml
+supervisor.run.worker.as.user: true
+```
+
+There are several files that go along with this that need to be configured properly to make storm secure.
+
+The worker-launcher executable is a special program that allows the supervisor to launch workers as different users.  For this to work it needs to be owned by root, but with the group set to a group that only the supervisor headless user is a part of.
+It also needs to have 6550 permissions.
+There is also a worker-launcher.cfg file, usually located under /etc/, that should look something like the following:
+
+```
+storm.worker-launcher.group=$(worker_launcher_group)
+min.user.id=$(min_user_id)
+```
+where worker_launcher_group is the same group the supervisor is a part of, and min.user.id is set to the first real user id on the system (a sketch with illustrative values follows).
+This config file also needs to be owned by root and must not have world or group write permissions.
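+
+For example, assuming the supervisor's primary group is "stormgrp" and regular user ids on the system start at 1000 (both values are illustrative):
+
+```
+storm.worker-launcher.group=stormgrp
+min.user.id=1000
+```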
+
+### Impersonating a user
+A storm client may submit requests on behalf of another user. For example, if `userX` submits an oozie workflow, and as part of the workflow execution the user `oozie` wants to submit a topology on behalf of `userX`,
+it can do so by leveraging the impersonation feature. In order to submit a topology as some other user, you can use the `StormSubmitter.submitTopologyAs` API. Alternatively you can use `NimbusClient.getConfiguredClientAs` 
+to get a nimbus client as some other user and perform any nimbus action (i.e. kill/rebalance/activate/deactivate) using this client. 
+
+To ensure only authorized users can perform impersonation you should start nimbus with `nimbus.impersonation.authorizer` set to `backtype.storm.security.auth.authorizer.ImpersonationAuthorizer`. 
+The `ImpersonationAuthorizer` uses `nimbus.impersonation.acl` as the ACL to authorize users. The following is a sample nimbus config for supporting impersonation:
+
+```yaml
+nimbus.impersonation.authorizer: backtype.storm.security.auth.authorizer.ImpersonationAuthorizer
+nimbus.impersonation.acl:
+    impersonating_user1:
+        hosts:
+            [comma separated list of hosts from which impersonating_user1 is allowed to impersonate other users]
+        groups:
+            [comma separated list of groups whose users impersonating_user1 is allowed to impersonate]
+    impersonating_user2:
+        hosts:
+            [comma separated list of hosts from which impersonating_user2 is allowed to impersonate other users]
+        groups:
+            [comma separated list of groups whose users impersonating_user2 is allowed to impersonate]
+```
+
+To support the oozie use case, the following config can be supplied:
+```yaml
+nimbus.impersonation.acl:
+    oozie:
+        hosts:
+            [oozie-host1, oozie-host2, 127.0.0.1]
+        groups:
+            [some-group-that-userX-is-part-of]
+```
+
+### Automatic Credentials Push and Renewal
+Individual topologies have the ability to push credentials (tickets and tokens) to workers so that they can access secure services.  Exposing this to all of the users can be a pain for them.
+To hide this from them in the common case, plugins can be used to populate the credentials, unpack them on the other side into a java Subject, and also allow Nimbus to renew the credentials if needed.
+These are controlled by the following configs (a sketch follows). `topology.auto-credentials` is a list of java plugins, all of which must implement the IAutoCredentials interface, that populate the credentials on the gateway 
+and unpack them on the worker side. On a kerberos secure cluster they should be set by default to point to backtype.storm.security.auth.kerberos.AutoTGT.  
+`nimbus.credential.renewers.classes` should also be set to this value so that nimbus can periodically renew the TGT on behalf of the user.
+
+`nimbus.credential.renewers.freq.secs` controls how often the renewer will poll to see if anything needs to be renewed, but the default should be fine.
+
+In addition, Nimbus itself can be used to get credentials on behalf of the user submitting topologies. This can be configured using `nimbus.autocredential.plugins.classes`, which is a list 
+of fully qualified class names, all of which must implement INimbusCredentialPlugin.  Nimbus will invoke the populateCredentials method of all the configured implementations as part of topology
+submission. You should use this config with `topology.auto-credentials` and `nimbus.credential.renewers.classes` so the credentials can be populated on the worker side and nimbus can automatically renew
+them. Currently there are 2 examples of using this config, AutoHDFS and AutoHBase, which auto-populate hdfs and hbase delegation tokens for the topology submitter so they don't have to distribute keytabs
+on all possible worker hosts.
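+
+A minimal sketch for a Kerberos-secured cluster, using the AutoTGT plugin named above (the renewal frequency is illustrative):
+
+```yaml
+topology.auto-credentials:
+    - "backtype.storm.security.auth.kerberos.AutoTGT"
+nimbus.credential.renewers.classes:
+    - "backtype.storm.security.auth.kerberos.AutoTGT"
+nimbus.credential.renewers.freq.secs: 600
+```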
+
+### Limits
+By default storm allows a topology of any size to be submitted. But ZK and others have limitations on how big a topology can actually be.  The following configs allow you to limit the maximum size of a topology.
+
+| YAML Setting | Description |
+|------------|----------------------|
+| nimbus.slots.perTopology | The maximum number of slots/workers a topology can use. |
+| nimbus.executors.perTopology | The maximum number of executors/threads a topology can use. |
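+
+A sketch with illustrative limits:
+
+```yaml
+nimbus.slots.perTopology: 50
+nimbus.executors.perTopology: 200
+```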
+
+### Log Cleanup
+The Logviewer daemon is now also responsible for cleaning up old log files for dead topologies.
+
+| YAML Setting | Description |
+|--------------|-------------------------------------|
+| logviewer.cleanup.age.mins | How old (by last modification time) must a worker's log be before that log is considered for clean-up. (Living workers' logs are never cleaned up by the logviewer: Their logs are rolled via logback.) |
+| logviewer.cleanup.interval.secs | Interval of time in seconds that the logviewer cleans up worker logs. |
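+
+A sketch with illustrative values (clean up logs older than one week, checking every ten minutes):
+
+```yaml
+logviewer.cleanup.age.mins: 10080
+logviewer.cleanup.interval.secs: 600
+```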
+
+
+### Allowing specific users or groups to access storm
+
+ With SimpleACLAuthorizer, any user with a valid kerberos ticket can deploy a topology or perform further operations such as activate, deactivate, or access cluster information.
+ One can restrict this access by specifying nimbus.users or nimbus.groups. If nimbus.users is configured, only the users in the list can deploy a topology or access the cluster.
+ Similarly, nimbus.groups restricts storm cluster access to users who belong to those groups.
+ 
+ To configure this, specify the following config in storm.yaml:
+
+```yaml
+nimbus.users: 
+   - "testuser"
+```
+
+or 
+
+```yaml
+nimbus.groups: 
+   - "storm"
+```
+ 
+
+### DRPC
+Hopefully more on this soon
 
-Zookeeper uses other ports for communications within the ensemble the details of which
-are beyond the scope of this document.  You should look at restricting Zookeeper access
-as well, because storm does not set up any ACLs for the data it write to Zookeeper.
 
-  

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/STORM-UI-REST-API.md
----------------------------------------------------------------------
diff --git a/docs/STORM-UI-REST-API.md b/docs/STORM-UI-REST-API.md
index 2109ab2..26dbf07 100644
--- a/docs/STORM-UI-REST-API.md
+++ b/docs/STORM-UI-REST-API.md
@@ -1,5 +1,5 @@
 ---
-title: Storm UI REST API
+title: Storm REST API
 layout: documentation
 documentation: true
 ---

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Serialization.md
----------------------------------------------------------------------
diff --git a/docs/Serialization.md b/docs/Serialization.md
index 4c271b4..ac8efe1 100644
--- a/docs/Serialization.md
+++ b/docs/Serialization.md
@@ -1,11 +1,13 @@
 ---
+title: Serialization
 layout: documentation
+documentation: true
 ---
 This page is about how the serialization system in Storm works for versions 0.6.0 and onwards. Storm used a different serialization system prior to 0.6.0 which is documented on [Serialization (prior to 0.6.0)](Serialization-\(prior-to-0.6.0\).html). 
 
 Tuples can be comprised of objects of any types. Since Storm is a distributed system, it needs to know how to serialize and deserialize objects when they're passed between tasks.
 
-Storm uses [Kryo](http://code.google.com/p/kryo/) for serialization. Kryo is a flexible and fast serialization library that produces small serializations.
+Storm uses [Kryo](https://github.com/EsotericSoftware/kryo) for serialization. Kryo is a flexible and fast serialization library that produces small serializations.
 
 By default, Storm can serialize primitive types, strings, byte arrays, ArrayList, HashMap, HashSet, and the Clojure collection types. If you want to use another type in your tuples, you'll need to register a custom serializer.
 
@@ -21,12 +23,12 @@ Finally, another reason for using dynamic typing is so Storm can be used in a st
 
 ### Custom serialization
 
-As mentioned, Storm uses Kryo for serialization. To implement custom serializers, you need to register new serializers with Kryo. It's highly recommended that you read over [Kryo's home page](http://code.google.com/p/kryo/) to understand how it handles custom serialization.
+As mentioned, Storm uses Kryo for serialization. To implement custom serializers, you need to register new serializers with Kryo. It's highly recommended that you read over [Kryo's home page](https://github.com/EsotericSoftware/kryo) to understand how it handles custom serialization.
 
 Adding custom serializers is done through the "topology.kryo.register" property in your topology config. It takes a list of registrations, where each registration can take one of two forms:
 
 1. The name of a class to register. In this case, Storm will use Kryo's `FieldsSerializer` to serialize the class. This may or may not be optimal for the class -- see the Kryo docs for more details.
-2. A map from the name of a class to register to an implementation of [com.esotericsoftware.kryo.Serializer](http://code.google.com/p/kryo/source/browse/trunk/src/com/esotericsoftware/kryo/Serializer.java).
+2. A map from the name of a class to register to an implementation of [com.esotericsoftware.kryo.Serializer](https://github.com/EsotericSoftware/kryo/blob/master/src/com/esotericsoftware/kryo/Serializer.java).
 
 Let's look at an example.
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Setting-up-a-Storm-cluster.md
----------------------------------------------------------------------
diff --git a/docs/Setting-up-a-Storm-cluster.md b/docs/Setting-up-a-Storm-cluster.md
index e139523..2b58703 100644
--- a/docs/Setting-up-a-Storm-cluster.md
+++ b/docs/Setting-up-a-Storm-cluster.md
@@ -1,5 +1,7 @@
 ---
+title: Setting up a Storm Cluster
 layout: documentation
+documentation: true
 ---
 This page outlines the steps for getting a Storm cluster up and running. If you're on AWS, you should check out the [storm-deploy](https://github.com/nathanmarz/storm-deploy/wiki) project. [storm-deploy](https://github.com/nathanmarz/storm-deploy/wiki) completely automates the provisioning, configuration, and installation of Storm clusters on EC2. It also sets up Ganglia for you so you can monitor CPU, disk, and network usage.
 
@@ -34,11 +36,11 @@ These are the versions of the dependencies that have been tested with Storm. Sto
 
 ### Download and extract a Storm release to Nimbus and worker machines
 
-Next, download a Storm release and extract the zip file somewhere on Nimbus and each of the worker machines. The Storm releases can be downloaded [from here](http://github.com/apache/incubator-storm/downloads).
+Next, download a Storm release and extract the zip file somewhere on Nimbus and each of the worker machines. The Storm releases can be downloaded [from here](http://github.com/apache/storm/releases).
 
 ### Fill in mandatory configurations into storm.yaml
 
-The Storm release contains a file at `conf/storm.yaml` that configures the Storm daemons. You can see the default configuration values [here](https://github.com/apache/incubator-storm/blob/master/conf/defaults.yaml). storm.yaml overrides anything in defaults.yaml. There's a few configurations that are mandatory to get a working cluster:
+The Storm release contains a file at `conf/storm.yaml` that configures the Storm daemons. You can see the default configuration values [here]({{page.git-blob-base}}/conf/defaults.yaml). storm.yaml overrides anything in defaults.yaml. There are a few configurations that are mandatory to get a working cluster:
 
 1) **storm.zookeeper.servers**: This is a list of the hosts in the Zookeeper cluster for your Storm cluster. It should look something like:
 
@@ -50,17 +52,25 @@ storm.zookeeper.servers:
 
 If the port that your Zookeeper cluster uses is different than the default, you should set **storm.zookeeper.port** as well.
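+
+For example, if your ensemble listens on a non-default port such as 2999 (the value is illustrative):
+
+```yaml
+storm.zookeeper.port: 2999
+```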
 
-2) **storm.local.dir**: The Nimbus and Supervisor daemons require a directory on the local disk to store small amounts of state (like jars, confs, and things like that). You should create that directory on each machine, give it proper permissions, and then fill in the directory location using this config. For example:
+2) **storm.local.dir**: The Nimbus and Supervisor daemons require a directory on the local disk to store small amounts of state (like jars, confs, and things like that).
+ You should create that directory on each machine, give it proper permissions, and then fill in the directory location using this config. For example:
 
 ```yaml
 storm.local.dir: "/mnt/storm"
 ```
+If you run storm on Windows, it could be:
+```yaml
+storm.local.dir: "C:\\storm-local"
+```
+If you use a relative path, it will be relative to where you installed storm (STORM_HOME).
+You can also leave it empty, in which case it defaults to `$STORM_HOME/storm-local`.
 
-3) **nimbus.host**: The worker nodes need to know which machine is the master in order to download topology jars and confs. For example:
+3) **nimbus.seeds**: The worker nodes need to know which machines are the candidate masters in order to download topology jars and confs. For example:
 
 ```yaml
-nimbus.host: "111.222.333.44"
+nimbus.seeds: ["111.222.333.44"]
 ```
+You're encouraged to fill in this value with the list of **machines' FQDNs**. If you want to set up Nimbus H/A, you have to list the FQDNs of all machines that run nimbus. You may want to leave it at the default value when you just want to set up a 'pseudo-distributed' cluster, but you're still encouraged to fill in the FQDN (a multi-seed sketch follows).
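+
+For example, an H/A setup with two nimbus machines (the hostnames are illustrative):
+
+```yaml
+nimbus.seeds: ["nimbus1.example.com", "nimbus2.example.com"]
+```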
 
 4) **supervisor.slots.ports**: For each worker machine, you configure how many workers run on that machine with this config. Each worker uses a single port for receiving messages, and this setting defines which ports are open for use. If you define five ports here, then Storm will allocate up to five workers to run on this machine. If you define three ports, Storm will only run up to three. By default, this setting is configured to run 4 workers on the ports 6700, 6701, 6702, and 6703. For example:
 
@@ -72,12 +82,36 @@ supervisor.slots.ports:
     - 6703
 ```
 
+### Monitoring Health of Supervisors
+
+Storm provides a mechanism by which administrators can configure the supervisor to run administrator supplied scripts periodically to determine if a node is healthy or not. Administrators can have the supervisor determine if the node is in a healthy state by performing any checks of their choice in scripts located in storm.health.check.dir. If a script detects the node to be in an unhealthy state, it must print a line to standard output beginning with the string ERROR. The supervisor will periodically run the scripts in the health check dir and check the output. If the script’s output contains the string ERROR, as described above, the supervisor will shut down any workers and exit. 
+
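+A minimal sketch of such a script (the check and the path are hypothetical); it prints a line starting with ERROR when the node is unhealthy:
+
+```bash
+#!/bin/bash
+# Hypothetical check: report unhealthy if the disk holding the storm local dir is over 95% full.
+usage=$(df -P /mnt/storm | awk 'NR==2 {gsub("%","",$5); print $5}')
+if [ "$usage" -gt 95 ]; then
+    echo "ERROR: disk is ${usage}% full"
+fi
+exit 0
+```
+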
+If the supervisor is running under supervision, "/bin/storm node-health-check" can be called to determine if the supervisor should be launched or if the node is unhealthy.
+
+The health check directory location can be configured with:
+
+```yaml
+storm.health.check.dir: "healthchecks"
+```
+The scripts must have execute permissions.
+The time to allow any given health check script to run before it is marked as failed due to timeout can be configured with:
+
+```yaml
+storm.health.check.timeout.ms: 5000
+```
+
+### Configure external libraries and environment variables (optional)
+
+If you need support from external libraries or custom plugins, you can place such jars into the extlib/ and extlib-daemon/ directories. Note that the extlib-daemon/ directory stores jars used only by daemons (Nimbus, Supervisor, DRPC, UI, Logviewer), e.g., HDFS and customized scheduling libraries. Accordingly, two environment variables, STORM_EXT_CLASSPATH and STORM_EXT_CLASSPATH_DAEMON, can be configured by users to include the external classpath and the daemon-only external classpath (see the sketch below).
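+
+For example (the paths are hypothetical):
+
+```bash
+export STORM_EXT_CLASSPATH="/opt/storm/extlib/*"
+export STORM_EXT_CLASSPATH_DAEMON="/opt/storm/extlib-daemon/*"
+```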
+
+
 ### Launch daemons under supervision using "storm" script and a supervisor of your choice
 
 The last step is to launch all the Storm daemons. It is critical that you run each of these daemons under supervision. Storm is a __fail-fast__ system which means the processes will halt whenever an unexpected error is encountered. Storm is designed so that it can safely halt at any point and recover correctly when the process is restarted. This is why Storm keeps no state in-process -- if Nimbus or the Supervisors restart, the running topologies are unaffected. Here's how to run the Storm daemons:
 
 1. **Nimbus**: Run the command "bin/storm nimbus" under supervision on the master machine.
 2. **Supervisor**: Run the command "bin/storm supervisor" under supervision on each worker machine. The supervisor daemon is responsible for starting and stopping worker processes on that machine.
-3. **UI**: Run the Storm UI (a site you can access from the browser that gives diagnostics on the cluster and topologies) by running the command "bin/storm ui" under supervision. The UI can be accessed by navigating your web browser to http://{nimbus host}:8080. 
+3. **UI**: Run the Storm UI (a site you can access from the browser that gives diagnostics on the cluster and topologies) by running the command "bin/storm ui" under supervision. The UI can be accessed by navigating your web browser to http://{ui host}:8080. 
 
 As you can see, running the daemons is very straightforward. The daemons will log to the logs/ directory in wherever you extracted the Storm release.

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Setting-up-development-environment.md
----------------------------------------------------------------------
diff --git a/docs/Setting-up-development-environment.md b/docs/Setting-up-development-environment.md
index 07ba670..bfa98a2 100644
--- a/docs/Setting-up-development-environment.md
+++ b/docs/Setting-up-development-environment.md
@@ -1,9 +1,11 @@
 ---
+title: Setting Up a Development Environment
 layout: documentation
+documentation: true
 ---
 This page outlines what you need to do to get a Storm development environment set up. In summary, the steps are:
 
-1. Download a [Storm release](/releases.html) , unpack it, and put the unpacked `bin/` directory on your PATH
+1. Download a [Storm release](../downloads.html), unpack it, and put the unpacked `bin/` directory on your PATH
 2. To be able to start and stop topologies on a remote cluster, put the cluster information in `~/.storm/storm.yaml`
 
 More detail on each of these steps is below.
@@ -18,7 +20,7 @@ Let's quickly go over the relationship between your machine and a remote cluster
 
 ### Installing a Storm release locally
 
-If you want to be able to submit topologies to a remote cluster from your machine, you should install a Storm release locally. Installing a Storm release will give you the `storm` client that you can use to interact with remote clusters. To install Storm locally, download a release [from here](/releases.html) and unzip it somewhere on your computer. Then add the unpacked `bin/` directory onto your `PATH` and make sure the `bin/storm` script is executable.
+If you want to be able to submit topologies to a remote cluster from your machine, you should install a Storm release locally. Installing a Storm release will give you the `storm` client that you can use to interact with remote clusters. To install Storm locally, download a release [from here](https://github.com/apache/storm/releases) and unzip it somewhere on your computer. Then add the unpacked `bin/` directory onto your `PATH` and make sure the `bin/storm` script is executable.
 
 Installing a Storm release locally is only for interacting with remote clusters. For developing and testing topologies in local mode, it is recommended that you use Maven to include Storm as a dev dependency for your project. You can read more about using Maven for this purpose on [Maven](Maven.html). 
 
@@ -27,13 +29,5 @@ Installing a Storm release locally is only for interacting with remote clusters.
 The previous step installed the `storm` client on your machine which is used to communicate with remote Storm clusters. Now all you have to do is tell the client which Storm cluster to talk to. To do this, all you have to do is put the host address of the master in the `~/.storm/storm.yaml` file. It should look something like this:
 
 ```
-nimbus.host: "123.45.678.890"
+nimbus.seeds: ["123.45.678.890"]
 ```
-
-Alternatively, if you use the [storm-deploy](https://github.com/nathanmarz/storm-deploy) project to provision Storm clusters on AWS, it will automatically set up your ~/.storm/storm.yaml file. You can manually attach to a Storm cluster (or switch between multiple clusters) using the "attach" command, like so:
-
-```
-lein run :deploy --attach --name mystormcluster
-```
-
-More information is on the storm-deploy [wiki](https://github.com/nathanmarz/storm-deploy/wiki)

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Spout-implementations.md
----------------------------------------------------------------------
diff --git a/docs/Spout-implementations.md b/docs/Spout-implementations.md
index 10ddd42..f52e662 100644
--- a/docs/Spout-implementations.md
+++ b/docs/Spout-implementations.md
@@ -1,5 +1,7 @@
 ---
+title: Spout Implementations
 layout: documentation
+documentation: true
 ---
 * [storm-kestrel](https://github.com/nathanmarz/storm-kestrel): Adapter to use Kestrel as a spout
 * [storm-amqp-spout](https://github.com/rapportive-oss/storm-amqp-spout): Adapter to use AMQP source as a spout

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Structure-of-the-codebase.md
----------------------------------------------------------------------
diff --git a/docs/Structure-of-the-codebase.md b/docs/Structure-of-the-codebase.md
index 8ac66f4..573a93c 100644
--- a/docs/Structure-of-the-codebase.md
+++ b/docs/Structure-of-the-codebase.md
@@ -1,5 +1,7 @@
 ---
+title: Structure of the Codebase
 layout: documentation
+documentation: true
 ---
 There are three distinct layers to Storm's codebase.
 
@@ -13,18 +15,18 @@ The following sections explain each of these layers in more detail.
 
 ### storm.thrift
 
-The first place to look to understand the structure of Storm's codebase is the [storm.thrift](https://github.com/apache/incubator-storm/blob/master/storm-core/src/storm.thrift) file.
+The first place to look to understand the structure of Storm's codebase is the [storm.thrift]({{page.git-blob-base}}/storm-core/src/storm.thrift) file.
 
 Storm uses [this fork](https://github.com/nathanmarz/thrift/tree/storm) of Thrift (branch 'storm') to produce the generated code. This "fork" is actually Thrift 7 with all the Java packages renamed to be `org.apache.thrift7`. Otherwise, it's identical to Thrift 7. This fork was done because of the lack of backwards compatibility in Thrift and the need for many people to use other versions of Thrift in their Storm topologies.
 
-Every spout or bolt in a topology is given a user-specified identifier called the "component id". The component id is used to specify subscriptions from a bolt to the output streams of other spouts or bolts. A [StormTopology](https://github.com/apache/incubator-storm/blob/master/storm-core/src/storm.thrift#L91) structure contains a map from component id to component for each type of component (spouts and bolts).
+Every spout or bolt in a topology is given a user-specified identifier called the "component id". The component id is used to specify subscriptions from a bolt to the output streams of other spouts or bolts. A [StormTopology]({{page.git-blob-base}}/storm-core/src/storm.thrift#L91) structure contains a map from component id to component for each type of component (spouts and bolts).
 
-Spouts and bolts have the same Thrift definition, so let's just take a look at the [Thrift definition for bolts](https://github.com/apache/incubator-storm/blob/master/storm-core/src/storm.thrift#L79). It contains a `ComponentObject` struct and a `ComponentCommon` struct.
+Spouts and bolts have the same Thrift definition, so let's just take a look at the [Thrift definition for bolts]({{page.git-blob-base}}/storm-core/src/storm.thrift#L79). It contains a `ComponentObject` struct and a `ComponentCommon` struct.
 
 The `ComponentObject` defines the implementation for the bolt. It can be one of three types:
 
-1. A serialized java object (that implements [IBolt](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/task/IBolt.java))
-2. A `ShellComponent` object that indicates the implementation is in another language. Specifying a bolt this way will cause Storm to instantiate a [ShellBolt](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/task/ShellBolt.java) object to handle the communication between the JVM-based worker process and the non-JVM-based implementation of the component.
+1. A serialized java object (that implements [IBolt]({{page.git-blob-base}}/storm-core/src/jvm/backtype/storm/task/IBolt.java))
+2. A `ShellComponent` object that indicates the implementation is in another language. Specifying a bolt this way will cause Storm to instantiate a [ShellBolt]({{page.git-blob-base}}/storm-core/src/jvm/backtype/storm/task/ShellBolt.java) object to handle the communication between the JVM-based worker process and the non-JVM-based implementation of the component.
 3. A `JavaObject` structure which tells Storm the classname and constructor arguments to use to instantiate that bolt. This is useful if you want to define a topology in a non-JVM language. This way, you can make use of JVM-based spouts and bolts without having to create and serialize a Java object yourself.
 
 `ComponentCommon` defines everything else for this component. This includes:
@@ -32,9 +34,9 @@ The `ComponentObject` defines the implementation for the bolt. It can be one of
 1. What streams this component emits and the metadata for each stream (whether it's a direct stream, the fields declaration)
 2. What streams this component consumes (specified as a map from component_id:stream_id to the stream grouping to use)
 3. The parallelism for this component
-4. The component-specific [configuration](https://github.com/apache/incubator-storm/wiki/Configuration) for this component
+4. The component-specific [configuration](Configuration.html) for this component
 
-Note that the structure spouts also have a `ComponentCommon` field, and so spouts can also have declarations to consume other input streams. Yet the Storm Java API does not provide a way for spouts to consume other streams, and if you put any input declarations there for a spout you would get an error when you tried to submit the topology. The reason that spouts have an input declarations field is not for users to use, but for Storm itself to use. Storm adds implicit streams and bolts to the topology to set up the [acking framework](https://github.com/apache/incubator-storm/wiki/Guaranteeing-message-processing), and two of these implicit streams are from the acker bolt to each spout in the topology. The acker sends "ack" or "fail" messages along these streams whenever a tuple tree is detected to be completed or failed. The code that transforms the user's topology into the runtime topology is located [here](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/common.clj#L279).
+Note that the structure spouts also have a `ComponentCommon` field, and so spouts can also have declarations to consume other input streams. Yet the Storm Java API does not provide a way for spouts to consume other streams, and if you put any input declarations there for a spout you would get an error when you tried to submit the topology. The reason that spouts have an input declarations field is not for users to use, but for Storm itself to use. Storm adds implicit streams and bolts to the topology to set up the [acking framework](https://github.com/apache/storm/wiki/Guaranteeing-message-processing), and two of these implicit streams are from the acker bolt to each spout in the topology. The acker sends "ack" or "fail" messages along these streams whenever a tuple tree is detected to be completed or failed. The code that transforms the user's topology into the runtime topology is located [here]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/daemon/common.clj#L279).
 
 ### Java interfaces
 
@@ -49,92 +51,92 @@ The strategy for the majority of the interfaces is to:
 1. Specify the interface using a Java interface
 2. Provide a base class that provides default implementations when appropriate
 
-You can see this strategy at work with the [BaseRichSpout](javadocs/backtype/storm/topology/base/BaseRichSpout.html) class. 
+You can see this strategy at work with the [BaseRichSpout](javadocs/backtype/storm/topology/base/BaseRichSpout.html) class.
 
 Spouts and bolts are serialized into the Thrift definition of the topology as described above. 
 
-One subtle aspect of the interfaces is the difference between `IBolt` and `ISpout` vs. `IRichBolt` and `IRichSpout`. The main difference between them is the addition of the `declareOutputFields` method in the "Rich" versions of the interfaces. The reason for the split is that the output fields declaration for each output stream needs to be part of the Thrift struct (so it can be specified from any language), but as a user you want to be able to declare the streams as part of your class. What `TopologyBuilder` does when constructing the Thrift representation is call `declareOutputFields` to get the declaration and convert it into the Thrift structure. The conversion happens [at this portion](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/topology/TopologyBuilder.java#L205) of the `TopologyBuilder` code. 
+One subtle aspect of the interfaces is the difference between `IBolt` and `ISpout` vs. `IRichBolt` and `IRichSpout`. The main difference between them is the addition of the `declareOutputFields` method in the "Rich" versions of the interfaces. The reason for the split is that the output fields declaration for each output stream needs to be part of the Thrift struct (so it can be specified from any language), but as a user you want to be able to declare the streams as part of your class. What `TopologyBuilder` does when constructing the Thrift representation is call `declareOutputFields` to get the declaration and convert it into the Thrift structure. The conversion happens [at this portion]({{page.git-blob-base}}/storm-core/src/jvm/backtype/storm/topology/TopologyBuilder.java#L205) of the `TopologyBuilder` code.
 
 
 ### Implementation
 
 Specifying all the functionality via Java interfaces ensures that every feature of Storm is available via Java. Moreover, the focus on Java interfaces ensures that the user experience from Java-land is pleasant as well.
 
-The implementation of Storm, on the other hand, is primarily in Clojure. While the codebase is about 50% Java and 50% Clojure in terms of LOC, most of the implementation logic is in Clojure. There are two notable exceptions to this, and that is the [DRPC](https://github.com/apache/incubator-storm/wiki/Distributed-RPC) and [transactional topologies](https://github.com/apache/incubator-storm/wiki/Transactional-topologies) implementations. These are implemented purely in Java. This was done to serve as an illustration for how to implement a higher level abstraction on Storm. The DRPC and transactional topologies implementations are in the [backtype.storm.coordination](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/coordination), [backtype.storm.drpc](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/drpc), and [backtype.storm.transactional](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/transactional) packages.
+The implementation of Storm, on the other hand, is primarily in Clojure. While the codebase is about 50% Java and 50% Clojure in terms of LOC, most of the implementation logic is in Clojure. There are two notable exceptions to this, and that is the [DRPC](https://github.com/apache/storm/wiki/Distributed-RPC) and [transactional topologies](https://github.com/apache/storm/wiki/Transactional-topologies) implementations. These are implemented purely in Java. This was done to serve as an illustration for how to implement a higher level abstraction on Storm. The DRPC and transactional topologies implementations are in the [backtype.storm.coordination]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/coordination), [backtype.storm.drpc]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/drpc), and [backtype.storm.transactional]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/transactional) packages.
 
 Here's a summary of the purpose of the main Java packages and Clojure namespace:
 
 #### Java packages
 
-[backtype.storm.coordination](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/coordination): Implements the pieces required to coordinate batch-processing on top of Storm, which both DRPC and transactional topologies use. `CoordinatedBolt` is the most important class here.
+[backtype.storm.coordination]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/coordination): Implements the pieces required to coordinate batch-processing on top of Storm, which both DRPC and transactional topologies use. `CoordinatedBolt` is the most important class here.
 
-[backtype.storm.drpc](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/drpc): Implementation of the DRPC higher level abstraction
+[backtype.storm.drpc]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/drpc): Implementation of the DRPC higher level abstraction
 
-[backtype.storm.generated](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/generated): The generated Thrift code for Storm (generated using [this fork](https://github.com/nathanmarz/thrift) of Thrift, which simply renames the packages to org.apache.thrift7 to avoid conflicts with other Thrift versions)
+[backtype.storm.generated]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/generated): The generated Thrift code for Storm (generated using [this fork](https://github.com/nathanmarz/thrift) of Thrift, which simply renames the packages to org.apache.thrift7 to avoid conflicts with other Thrift versions)
 
-[backtype.storm.grouping](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/grouping): Contains interface for making custom stream groupings
+[backtype.storm.grouping]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/grouping): Contains interface for making custom stream groupings
 
-[backtype.storm.hooks](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/hooks): Interfaces for hooking into various events in Storm, such as when tasks emit tuples, when tuples are acked, etc. User guide for hooks is [here](https://github.com/apache/incubator-storm/wiki/Hooks).
+[backtype.storm.hooks]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/hooks): Interfaces for hooking into various events in Storm, such as when tasks emit tuples, when tuples are acked, etc. User guide for hooks is [here](https://github.com/apache/storm/wiki/Hooks).
 
-[backtype.storm.serialization](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/serialization): Implementation of how Storm serializes/deserializes tuples. Built on top of [Kryo](http://code.google.com/p/kryo/).
+[backtype.storm.serialization]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/serialization): Implementation of how Storm serializes/deserializes tuples. Built on top of [Kryo](https://github.com/EsotericSoftware/kryo).
 
-[backtype.storm.spout](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/spout): Definition of spout and associated interfaces (like the `SpoutOutputCollector`). Also contains `ShellSpout` which implements the protocol for defining spouts in non-JVM languages.
+[backtype.storm.spout]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/spout): Definition of spout and associated interfaces (like the `SpoutOutputCollector`). Also contains `ShellSpout` which implements the protocol for defining spouts in non-JVM languages.
 
-[backtype.storm.task](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/task): Definition of bolt and associated interfaces (like `OutputCollector`). Also contains `ShellBolt` which implements the protocol for defining bolts in non-JVM languages. Finally, `TopologyContext` is defined here as well, which is provided to spouts and bolts so they can get data about the topology and its execution at runtime.
+[backtype.storm.task]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/task): Definition of bolt and associated interfaces (like `OutputCollector`). Also contains `ShellBolt` which implements the protocol for defining bolts in non-JVM languages. Finally, `TopologyContext` is defined here as well, which is provided to spouts and bolts so they can get data about the topology and its execution at runtime.
 
-[backtype.storm.testing](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/testing): Contains a variety of test bolts and utilities used in Storm's unit tests.
+[backtype.storm.testing]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/testing): Contains a variety of test bolts and utilities used in Storm's unit tests.
 
-[backtype.storm.topology](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/topology): Java layer over the underlying Thrift structure to provide a clean, pure-Java API to Storm (users don't have to know about Thrift). `TopologyBuilder` is here as well as the helpful base classes for the different spouts and bolts. The slightly-higher level `IBasicBolt` interface is here, which is a simpler way to write certain kinds of bolts.
+[backtype.storm.topology]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/topology): Java layer over the underlying Thrift structure to provide a clean, pure-Java API to Storm (users don't have to know about Thrift). `TopologyBuilder` is here as well as the helpful base classes for the different spouts and bolts. The slightly-higher level `IBasicBolt` interface is here, which is a simpler way to write certain kinds of bolts.
 
-[backtype.storm.transactional](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/transactional): Implementation of transactional topologies.
+[backtype.storm.transactional]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/transactional): Implementation of transactional topologies.
 
-[backtype.storm.tuple](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/tuple): Implementation of Storm's tuple data model.
+[backtype.storm.tuple]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/tuple): Implementation of Storm's tuple data model.
 
-[backtype.storm.utils](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/tuple): Data structures and miscellaneous utilities used throughout the codebase.
+[backtype.storm.utils]({{page.git-tree-base}}/storm-core/src/jvm/backtype/storm/utils): Data structures and miscellaneous utilities used throughout the codebase.
 
 
 #### Clojure namespaces
 
-[backtype.storm.bootstrap](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/bootstrap.clj): Contains a helpful macro to import all the classes and namespaces that are used throughout the codebase.
+[backtype.storm.bootstrap]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/bootstrap.clj): Contains a helpful macro to import all the classes and namespaces that are used throughout the codebase.
 
-[backtype.storm.clojure](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/clojure.clj): Implementation of the Clojure DSL for Storm.
+[backtype.storm.clojure]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/clojure.clj): Implementation of the Clojure DSL for Storm.
 
-[backtype.storm.cluster](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/cluster.clj): All Zookeeper logic used in Storm daemons is encapsulated in this file. This code manages how cluster state (like what tasks are running where, what spout/bolt each task runs as) is mapped to the Zookeeper "filesystem" API.
+[backtype.storm.cluster]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/cluster.clj): All Zookeeper logic used in Storm daemons is encapsulated in this file. This code manages how cluster state (like what tasks are running where, what spout/bolt each task runs as) is mapped to the Zookeeper "filesystem" API.
 
-[backtype.storm.command.*](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/command): These namespaces implement various commands for the `storm` command line client. These implementations are very short.
+[backtype.storm.command.*]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/command): These namespaces implement various commands for the `storm` command line client. These implementations are very short.
 
-[backtype.storm.config](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/config.clj): Implementation of config reading/parsing code for Clojure. Also has utility functions for determining what local path nimbus/supervisor/daemons should be using for various things. e.g. the `master-inbox` function will return the local path that Nimbus should use when jars are uploaded to it.
+[backtype.storm.config]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/config.clj): Implementation of config reading/parsing code for Clojure. Also has utility functions for determining what local path nimbus/supervisor/daemons should be using for various things. e.g. the `master-inbox` function will return the local path that Nimbus should use when jars are uploaded to it.
 
-[backtype.storm.daemon.acker](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/acker.clj): Implementation of the "acker" bolt, which is a key part of how Storm guarantees data processing.
+[backtype.storm.daemon.acker]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/daemon/acker.clj): Implementation of the "acker" bolt, which is a key part of how Storm guarantees data processing.
 
-[backtype.storm.daemon.common](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/common.clj): Implementation of common functions used in Storm daemons, like getting the id for a topology based on the name, mapping a user's topology into the one that actually executes (with implicit acking streams and acker bolt added - see `system-topology!` function), and definitions for the various heartbeat and other structures persisted by Storm.
+[backtype.storm.daemon.common]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/daemon/common.clj): Implementation of common functions used in Storm daemons, like getting the id for a topology based on the name, mapping a user's topology into the one that actually executes (with implicit acking streams and acker bolt added - see `system-topology!` function), and definitions for the various heartbeat and other structures persisted by Storm.
 
-[backtype.storm.daemon.drpc](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/drpc.clj): Implementation of the DRPC server for use with DRPC topologies.
+[backtype.storm.daemon.drpc]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/daemon/drpc.clj): Implementation of the DRPC server for use with DRPC topologies.
 
-[backtype.storm.daemon.nimbus](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/nimbus.clj): Implementation of Nimbus.
+[backtype.storm.daemon.nimbus]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/daemon/nimbus.clj): Implementation of Nimbus.
 
-[backtype.storm.daemon.supervisor](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/supervisor.clj): Implementation of Supervisor.
+[backtype.storm.daemon.supervisor]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/daemon/supervisor.clj): Implementation of Supervisor.
 
-[backtype.storm.daemon.task](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/task.clj): Implementation of an individual task for a spout or bolt. Handles message routing, serialization, stats collection for the UI, as well as the spout-specific and bolt-specific execution implementations.
+[backtype.storm.daemon.task]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/daemon/task.clj): Implementation of an individual task for a spout or bolt. Handles message routing, serialization, stats collection for the UI, as well as the spout-specific and bolt-specific execution implementations.
 
-[backtype.storm.daemon.worker](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/worker.clj): Implementation of a worker process (which will contain many tasks within). Implements message transferring and task launching.
+[backtype.storm.daemon.worker]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/daemon/worker.clj): Implementation of a worker process (which will contain many tasks within). Implements message transferring and task launching.
 
-[backtype.storm.event](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/event.clj): Implements a simple asynchronous function executor. Used in various places in Nimbus and Supervisor to make functions execute in serial to avoid any race conditions.
+[backtype.storm.event]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/event.clj): Implements a simple asynchronous function executor. Used in various places in Nimbus and Supervisor to make functions execute in serial to avoid any race conditions.
 
-[backtype.storm.log](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/log.clj): Defines the functions used to log messages to log4j.
+[backtype.storm.log]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/log.clj): Defines the functions used to log messages to log4j.
 
-[backtype.storm.messaging.*](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/messaging): Defines a higher level interface to implementing point to point messaging. In local mode Storm uses in-memory Java queues to do this; on a cluster, it uses ZeroMQ. The generic interface is defined in protocol.clj.
+[backtype.storm.messaging.*]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/messaging): Defines a higher level interface to implementing point to point messaging. In local mode Storm uses in-memory Java queues to do this; on a cluster, it uses ZeroMQ. The generic interface is defined in protocol.clj.
 
-[backtype.storm.stats](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/stats.clj): Implementation of stats rollup routines used when sending stats to ZK for use by the UI. Does things like windowed and rolling aggregations at multiple granularities.
+[backtype.storm.stats]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/stats.clj): Implementation of stats rollup routines used when sending stats to ZK for use by the UI. Does things like windowed and rolling aggregations at multiple granularities.
 
-[backtype.storm.testing](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/testing.clj): Implementation of facilities used to test Storm topologies. Includes time simulation, `complete-topology` for running a fixed set of tuples through a topology and capturing the output, tracker topologies for having fine grained control over detecting when a cluster is "idle", and other utilities.
+[backtype.storm.testing]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/testing.clj): Implementation of facilities used to test Storm topologies. Includes time simulation, `complete-topology` for running a fixed set of tuples through a topology and capturing the output, tracker topologies for having fine grained control over detecting when a cluster is "idle", and other utilities.
 
-[backtype.storm.thrift](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/thrift.clj): Clojure wrappers around the generated Thrift API to make working with Thrift structures more pleasant.
+[backtype.storm.thrift]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/thrift.clj): Clojure wrappers around the generated Thrift API to make working with Thrift structures more pleasant.
 
-[backtype.storm.timer](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/timer.clj): Implementation of a background timer to execute functions in the future or on a recurring interval. Storm couldn't use the [Timer](http://docs.oracle.com/javase/1.4.2/docs/api/java/util/Timer.html) class because it needed integration with time simulation in order to be able to unit test Nimbus and the Supervisor.
+[backtype.storm.timer]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/timer.clj): Implementation of a background timer to execute functions in the future or on a recurring interval. Storm couldn't use the [Timer](http://docs.oracle.com/javase/1.4.2/docs/api/java/util/Timer.html) class because it needed integration with time simulation in order to be able to unit test Nimbus and the Supervisor.
 
-[backtype.storm.ui.*](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/ui): Implementation of Storm UI. Completely independent from rest of code base and uses the Nimbus Thrift API to get data.
+[backtype.storm.ui.*]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/ui): Implementation of Storm UI. Completely independent from rest of code base and uses the Nimbus Thrift API to get data.
 
-[backtype.storm.util](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/util.clj): Contains generic utility functions used throughout the code base.
+[backtype.storm.util]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/util.clj): Contains generic utility functions used throughout the code base.
  
-[backtype.storm.zookeeper](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/zookeeper.clj): Clojure wrapper around the Zookeeper API and implements some "high-level" stuff like "mkdirs" and "delete-recursive".
+[backtype.storm.zookeeper]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/zookeeper.clj): Clojure wrapper around the Zookeeper API that also implements some "high-level" operations like "mkdirs" and "delete-recursive".

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Support-for-non-java-languages.md
----------------------------------------------------------------------
diff --git a/docs/Support-for-non-java-languages.md b/docs/Support-for-non-java-languages.md
index 724d106..d03dcad 100644
--- a/docs/Support-for-non-java-languages.md
+++ b/docs/Support-for-non-java-languages.md
@@ -1,5 +1,7 @@
 ---
+title: Support for Non-Java Languages
 layout: documentation
+documentation: true
 ---
 * [Scala DSL](https://github.com/velvia/ScalaStorm)
 * [JRuby DSL](https://github.com/colinsurprenant/storm-jruby)

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Transactional-topologies.md
----------------------------------------------------------------------
diff --git a/docs/Transactional-topologies.md b/docs/Transactional-topologies.md
index 1271a21..df2a0e3 100644
--- a/docs/Transactional-topologies.md
+++ b/docs/Transactional-topologies.md
@@ -1,5 +1,7 @@
 ---
+title: Transactional Topologies
 layout: documentation
+documentation: true
 ---
 **NOTE**: Transactional topologies have been deprecated -- use the [Trident](Trident-tutorial.html) framework instead.
 
@@ -75,11 +77,11 @@ When using transactional topologies, Storm does the following for you:
 3. *Fault detection:* Storm leverages the acking framework to efficiently determine when a batch has successfully processed, successfully committed, or failed. Storm will then replay batches appropriately. You don't have to do any acking or anchoring -- Storm manages all of this for you.
 4. *First class batch processing API*: Storm layers an API on top of regular bolts to allow for batch processing of tuples. Storm manages all the coordination for determining when a task has received all the tuples for that particular transaction. Storm will also take care of cleaning up any accumulated state for each transaction (like the partial counts).
 
-Finally, another thing to note is that transactional topologies require a source queue that can replay an exact batch of messages. Technologies like [Kestrel](https://github.com/robey/kestrel) can't do this. [Apache Kafka](http://incubator.apache.org/kafka/index.html) is a perfect fit for this kind of spout, and [storm-kafka](https://github.com/nathanmarz/storm-contrib/tree/master/storm-kafka) in [storm-contrib](https://github.com/nathanmarz/storm-contrib) contains a transactional spout implementation for Kafka.
+Finally, another thing to note is that transactional topologies require a source queue that can replay an exact batch of messages. Technologies like [Kestrel](https://github.com/robey/kestrel) can't do this. [Apache Kafka](http://incubator.apache.org/kafka/index.html) is a perfect fit for this kind of spout, and [storm-kafka](https://github.com/apache/storm/tree/master/external/storm-kafka) contains a transactional spout implementation for Kafka.
 
 ## The basics through example
 
-You build transactional topologies by using [TransactionalTopologyBuilder](javadocs/backtype/storm/transactional/TransactionalTopologyBuilder.html). Here's the transactional topology definition for a topology that computes the global count of tuples from the input stream. This code comes from [TransactionalGlobalCount](https://github.com/nathanmarz/storm-starter/blob/master/src/jvm/storm/starter/TransactionalGlobalCount.java) in storm-starter.
+You build transactional topologies by using [TransactionalTopologyBuilder](javadocs/backtype/storm/transactional/TransactionalTopologyBuilder.html). Here's the transactional topology definition for a topology that computes the global count of tuples from the input stream. This code comes from [TransactionalGlobalCount]({{page.git-blob-base}}/examples/storm-starter/src/jvm/storm/starter/TransactionalGlobalCount.java) in storm-starter.
 
 ```java
 MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
@@ -128,7 +130,7 @@ public static class BatchCount extends BaseBatchBolt {
 }
 ```
 
-A new instance of this object is created for every batch that's being processed. The actual bolt this runs within is called [BatchBoltExecutor](https://github.com/apache/incubator-storm/blob/0.7.0/src/jvm/backtype/storm/coordination/BatchBoltExecutor.java) and manages the creation and cleanup for these objects.
+A new instance of this object is created for every batch that's being processed. The actual bolt this runs within is called [BatchBoltExecutor](https://github.com/apache/storm/blob/0.7.0/src/jvm/backtype/storm/coordination/BatchBoltExecutor.java) and manages the creation and cleanup for these objects.
 
 The `prepare` method parameterizes this batch bolt with the Storm config, the topology context, an output collector, and the id for this batch of tuples. In the case of transactional topologies, the id will be a [TransactionAttempt](javadocs/backtype/storm/transactional/TransactionAttempt.html) object. The batch bolt abstraction can be used in Distributed RPC as well which uses a different type of id for the batches. `BatchBolt` can actually be parameterized with the type of the id, so if you only intend to use the batch bolt for transactional topologies, you can extend `BaseTransactionalBolt` which has this definition:
 
@@ -199,7 +201,7 @@ First, notice that this bolt implements the `ICommitter` interface. This tells S
 
 The code for `finishBatch` in `UpdateGlobalCount` gets the current value from the database and compares its transaction id to the transaction id for this batch. If they are the same, it does nothing. Otherwise, it increments the value in the database by the partial count for this batch.
 
-A more involved transactional topology example that updates multiple databases idempotently can be found in storm-starter in the [TransactionalWords](https://github.com/nathanmarz/storm-starter/blob/master/src/jvm/storm/starter/TransactionalWords.java) class.
+A more involved transactional topology example that updates multiple databases idempotently can be found in storm-starter in the [TransactionalWords]({{page.git-blob-base}}/examples/storm-starter/src/jvm/storm/starter/TransactionalWords.java) class.
 
 ## Transactional Topology API
 
@@ -253,7 +255,7 @@ The details of implementing a `TransactionalSpout` are in [the Javadoc](javadocs
 
 #### Partitioned Transactional Spout
 
-A common kind of transactional spout is one that reads the batches from a set of partitions across many queue brokers. For example, this is how [TransactionalKafkaSpout](https://github.com/nathanmarz/storm-contrib/blob/master/storm-kafka/src/jvm/storm/kafka/TransactionalKafkaSpout.java) works. An `IPartitionedTransactionalSpout` automates the bookkeeping work of managing the state for each partition to ensure idempotent replayability. See [the Javadoc](javadocs/backtype/storm/transactional/partitioned/IPartitionedTransactionalSpout.html) for more details.
+A common kind of transactional spout is one that reads the batches from a set of partitions across many queue brokers. For example, this is how [TransactionalKafkaSpout]({{page.git-blob-base}}/external/storm-kafka/src/jvm/storm/kafka/TransactionalKafkaSpout.java) works. An `IPartitionedTransactionalSpout` automates the bookkeeping work of managing the state for each partition to ensure idempotent replayability. See [the Javadoc](javadocs/backtype/storm/transactional/partitioned/IPartitionedTransactionalSpout.html) for more details.
 
 ### Configuration
 
@@ -323,7 +325,7 @@ In this scenario, tuples 41-50 are skipped. By failing all subsequent transactio
 
 By failing all subsequent transactions on failure, no tuples are skipped. This also shows that a requirement of transactional spouts is that they always emit where the last transaction left off.
 
-A non-idempotent transactional spout is more concisely referred to as an "OpaqueTransactionalSpout" (opaque is the opposite of idempotent). [IOpaquePartitionedTransactionalSpout](javadocs/backtype/storm/transactional/partitioned/IOpaquePartitionedTransactionalSpout.html) is an interface for implementing opaque partitioned transactional spouts, of which [OpaqueTransactionalKafkaSpout](https://github.com/nathanmarz/storm-contrib/blob/kafka0.7/storm-kafka/src/jvm/storm/kafka/OpaqueTransactionalKafkaSpout.java) is an example. `OpaqueTransactionalKafkaSpout` can withstand losing individual Kafka nodes without sacrificing accuracy as long as you use the update strategy as explained in this section.
+A non-idempotent transactional spout is more concisely referred to as an "OpaqueTransactionalSpout" (opaque is the opposite of idempotent). [IOpaquePartitionedTransactionalSpout](javadocs/backtype/storm/transactional/partitioned/IOpaquePartitionedTransactionalSpout.html) is an interface for implementing opaque partitioned transactional spouts, of which [OpaqueTransactionalKafkaSpout]({{page.git-blob-base}}/external/storm-kafka/src/jvm/storm/kafka/OpaqueTransactionalKafkaSpout.java) is an example. `OpaqueTransactionalKafkaSpout` can withstand losing individual Kafka nodes without sacrificing accuracy as long as you use the update strategy as explained in this section.
 
 ## Implementation
 


[11/16] storm git commit: STORM-1617: Release Specific Documentation 0.9.x

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Defining-a-non-jvm-language-dsl-for-storm.md
----------------------------------------------------------------------
diff --git a/docs/Defining-a-non-jvm-language-dsl-for-storm.md b/docs/Defining-a-non-jvm-language-dsl-for-storm.md
new file mode 100644
index 0000000..f52f4ab
--- /dev/null
+++ b/docs/Defining-a-non-jvm-language-dsl-for-storm.md
@@ -0,0 +1,36 @@
+---
+layout: documentation
+---
+The right place to start to learn how to make a non-JVM DSL for Storm is [storm-core/src/storm.thrift](https://github.com/apache/storm/blob/master/storm-core/src/storm.thrift). Since Storm topologies are just Thrift structures, and Nimbus is a Thrift daemon, you can create and submit topologies in any language.
+
+When you create the Thrift structs for spouts and bolts, the code for the spout or bolt is specified in the ComponentObject struct:
+
+```
+union ComponentObject {
+  1: binary serialized_java;
+  2: ShellComponent shell;
+  3: JavaObject java_object;
+}
+```
+
+For a Python DSL, you would want to make use of options "2" and "3". `ShellComponent` lets you specify a script to run that component (e.g., your Python code), and `JavaObject` lets you specify a native Java spout or bolt for the component (Storm will use reflection to create that spout or bolt).
+
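+As a rough sketch (assuming the Thrift-generated Java classes in `backtype.storm.generated`, and a hypothetical `splitsentence.py` script), declaring a shell-based component from Java might look something like this:
+
+```java
+import backtype.storm.generated.ComponentObject;
+import backtype.storm.generated.ShellComponent;
+
+// Run the component as an external process: "python splitsentence.py".
+ShellComponent shell = new ShellComponent("python", "splitsentence.py");
+ComponentObject component = ComponentObject.shell(shell);
+// This ComponentObject is then embedded in the Bolt/SpoutSpec structs of the StormTopology.
+```
+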
+There's a "storm shell" command that will help with submitting a topology. Its usage is like this:
+
+```
+storm shell resources/ python topology.py arg1 arg2
+```
+
+storm shell will then package resources/ into a jar, upload the jar to Nimbus, and call your topology.py script like this:
+
+```
+python topology.py arg1 arg2 {nimbus-host} {nimbus-port} {uploaded-jar-location}
+```
+
+Then you can connect to Nimbus using the Thrift API and submit the topology, passing {uploaded-jar-location} into the submitTopology method. For reference, here's the submitTopology definition:
+
+```java
+void submitTopology(1: string name, 2: string uploadedJarLocation, 3: string jsonConf, 4: StormTopology topology) throws (1: AlreadyAliveException e, 2: InvalidTopologyException ite);
+```
+
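+As a minimal sketch (assuming the `NimbusClient` helper from `backtype.storm.utils` and a hypothetical `buildTopologyFromDsl()` function supplied by your DSL), the submission step might look like this in Java:
+
+```java
+import backtype.storm.generated.StormTopology;
+import backtype.storm.utils.NimbusClient;
+import backtype.storm.utils.Utils;
+
+// The host, port, and uploaded-jar-location are handed to the script by "storm shell".
+StormTopology topology = buildTopologyFromDsl();
+NimbusClient nimbus = new NimbusClient(Utils.readStormConfig(), "nimbus-host", 6627);
+nimbus.getClient().submitTopology("my-topology", "/tmp/uploaded-topology.jar", "{}", topology);
+```
+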
+Finally, one of the key things to do in a non-JVM DSL is to make it easy to define the entire topology in one file (the bolts, the spouts, and the definition of the topology).

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Distributed-RPC.md
----------------------------------------------------------------------
diff --git a/docs/Distributed-RPC.md b/docs/Distributed-RPC.md
new file mode 100644
index 0000000..fc75ee4
--- /dev/null
+++ b/docs/Distributed-RPC.md
@@ -0,0 +1,197 @@
+---
+layout: documentation
+---
+The idea behind distributed RPC (DRPC) is to parallelize the computation of really intense functions on the fly using Storm. The Storm topology takes in as input a stream of function arguments, and it emits an output stream of the results for each of those function calls. 
+
+DRPC is not so much a feature of Storm as it is a pattern expressed from Storm's primitives of streams, spouts, bolts, and topologies. DRPC could have been packaged as a separate library from Storm, but it's so useful that it's bundled with Storm.
+
+### High level overview
+
+Distributed RPC is coordinated by a "DRPC server" (Storm comes packaged with an implementation of this). The DRPC server coordinates receiving an RPC request, sending the request to the Storm topology, receiving the results from the Storm topology, and sending the results back to the waiting client. From a client's perspective, a distributed RPC call looks just like a regular RPC call. For example, here's how a client would compute the results for the "reach" function with the argument "http://twitter.com":
+
+```java
+DRPCClient client = new DRPCClient("drpc-host", 3772);
+String result = client.execute("reach", "http://twitter.com");
+```
+
+The distributed RPC workflow looks like this:
+
+![Tasks in a topology](images/drpc-workflow.png)
+
+A client sends the DRPC server the name of the function to execute and the arguments to that function. The topology implementing that function uses a `DRPCSpout` to receive a function invocation stream from the DRPC server. Each function invocation is tagged with a unique id by the DRPC server. The topology then computes the result and at the end of the topology a bolt called `ReturnResults` connects to the DRPC server and gives it the result for the function invocation id. The DRPC server then uses the id to match up that result with which client is waiting, unblocks the waiting client, and sends it the result.
+
+### LinearDRPCTopologyBuilder
+
+Storm comes with a topology builder called [LinearDRPCTopologyBuilder](javadocs/backtype/storm/drpc/LinearDRPCTopologyBuilder.html) that automates almost all the steps involved for doing DRPC. These include:
+
+1. Setting up the spout
+2. Returning the results to the DRPC server
+3. Providing functionality to bolts for doing finite aggregations over groups of tuples
+
+Let's look at a simple example. Here's the implementation of a DRPC topology that returns its input argument with a "!" appended:
+
+```java
+public static class ExclaimBolt extends BaseBasicBolt {
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+        String input = tuple.getString(1);
+        collector.emit(new Values(tuple.getValue(0), input + "!"));
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("id", "result"));
+    }
+}
+
+public static void main(String[] args) throws Exception {
+    LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("exclamation");
+    builder.addBolt(new ExclaimBolt(), 3);
+    // ...
+}
+```
+
+As you can see, there's very little to it. When creating the `LinearDRPCTopologyBuilder`, you tell it the name of the DRPC function for the topology. A single DRPC server can coordinate many functions, and the function name distinguishes the functions from one another. The first bolt you declare will take in as input 2-tuples, where the first field is the request id and the second field is the arguments for that request. `LinearDRPCTopologyBuilder` expects the last bolt to emit an output stream containing 2-tuples of the form [id, result]. Finally, all intermediate tuples must contain the request id as the first field.
+
+In this example, `ExclaimBolt` simply appends a "!" to the second field of the tuple. `LinearDRPCTopologyBuilder` handles the rest of the coordination of connecting to the DRPC server and sending results back.
+
+### Local mode DRPC
+
+DRPC can be run in local mode. Here's how to run the above example in local mode:
+
+```java
+Config conf = new Config();
+LocalDRPC drpc = new LocalDRPC();
+LocalCluster cluster = new LocalCluster();
+
+cluster.submitTopology("drpc-demo", conf, builder.createLocalTopology(drpc));
+
+System.out.println("Results for 'hello':" + drpc.execute("exclamation", "hello"));
+
+cluster.shutdown();
+drpc.shutdown();
+```
+
+First you create a `LocalDRPC` object. This object simulates a DRPC server in process, just like how `LocalCluster` simulates a Storm cluster in process. Then you create the `LocalCluster` to run the topology in local mode. `LinearDRPCTopologyBuilder` has separate methods for creating local topologies and remote topologies. In local mode the `LocalDRPC` object does not bind to any ports so the topology needs to know about the object to communicate with it. This is why `createLocalTopology` takes in the `LocalDRPC` object as input.
+
+After launching the topology, you can do DRPC invocations using the `execute` method on `LocalDRPC`.
+
+### Remote mode DRPC
+
+Using DRPC on an actual cluster is also straightforward. There are three steps:
+
+1. Launch DRPC server(s)
+2. Configure the locations of the DRPC servers
+3. Submit DRPC topologies to Storm cluster
+
+Launching a DRPC server can be done with the `storm` script and is just like launching Nimbus or the UI:
+
+```
+bin/storm drpc
+```
+
+Next, you need to configure your Storm cluster to know the locations of the DRPC server(s). This is how `DRPCSpout` knows from where to read function invocations. This can be done through the `storm.yaml` file or the topology configurations. Configuring this through the `storm.yaml` looks something like this:
+
+```yaml
+drpc.servers:
+  - "drpc1.foo.com"
+  - "drpc2.foo.com"
+```
+
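+Alternatively (a sketch, using the standard `Config` constants), the DRPC server locations can be set on the topology configuration itself:
+
+```java
+import java.util.Arrays;
+import backtype.storm.Config;
+
+Config conf = new Config();
+conf.put(Config.DRPC_SERVERS, Arrays.asList("drpc1.foo.com", "drpc2.foo.com"));
+conf.put(Config.DRPC_PORT, 3772);
+```
+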
+Finally, you launch DRPC topologies using `StormSubmitter` just like you launch any other topology. To run the above example in remote mode, you do something like this:
+
+```java
+StormSubmitter.submitTopology("exclamation-drpc", conf, builder.createRemoteTopology());
+```
+
+`createRemoteTopology` is used to create topologies suitable for Storm clusters.
+
+### A more complex example
+
+The exclamation DRPC example was a toy example for illustrating the concepts of DRPC. Let's look at a more complex example which really needs the parallelism a Storm cluster provides for computing the DRPC function. The example we'll look at is computing the reach of a URL on Twitter.
+
+The reach of a URL is the number of unique people exposed to a URL on Twitter. To compute reach, you need to:
+
+1. Get all the people who tweeted the URL
+2. Get all the followers of all those people
+3. Unique the set of followers
+4. Count the unique set of followers
+
+A single reach computation can involve thousands of database calls and tens of millions of follower records during the computation. It's a really, really intense computation. As you're about to see, implementing this function on top of Storm is dead simple. On a single machine, reach can take minutes to compute; on a Storm cluster, you can compute reach for even the hardest URLs in a couple seconds.
+
+A sample reach topology is defined in storm-starter [here](https://github.com/apache/storm/blob/master/examples/storm-starter/src/jvm/storm/starter/ReachTopology.java). Here's how you define the reach topology:
+
+```java
+LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("reach");
+builder.addBolt(new GetTweeters(), 3);
+builder.addBolt(new GetFollowers(), 12)
+        .shuffleGrouping();
+builder.addBolt(new PartialUniquer(), 6)
+        .fieldsGrouping(new Fields("id", "follower"));
+builder.addBolt(new CountAggregator(), 2)
+        .fieldsGrouping(new Fields("id"));
+```
+
+The topology executes as four steps:
+
+1. `GetTweeters` gets the users who tweeted the URL. It transforms an input stream of `[id, url]` into an output stream of `[id, tweeter]`. Each `url` tuple will map to many `tweeter` tuples.
+2. `GetFollowers` gets the followers for the tweeters. It transforms an input stream of `[id, tweeter]` into an output stream of `[id, follower]`. Across all the tasks, there may of course be duplication of follower tuples when someone follows multiple people who tweeted the same URL.
+3. `PartialUniquer` groups the followers stream by the follower id. This has the effect of the same follower going to the same task. So each task of `PartialUniquer` will receive mutually independent sets of followers. Once `PartialUniquer` receives all the follower tuples directed at it for the request id, it emits the unique count of its subset of followers.
+4. Finally, `CountAggregator` receives the partial counts from each of the `PartialUniquer` tasks and sums them up to complete the reach computation.
+
+Let's take a look at the `PartialUniquer` bolt:
+
+```java
+public class PartialUniquer extends BaseBatchBolt {
+    BatchOutputCollector _collector;
+    Object _id;
+    Set<String> _followers = new HashSet<String>();
+    
+    @Override
+    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
+        _collector = collector;
+        _id = id;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+        _followers.add(tuple.getString(1));
+    }
+    
+    @Override
+    public void finishBatch() {
+        _collector.emit(new Values(_id, _followers.size()));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("id", "partial-count"));
+    }
+}
+```
+
+`PartialUniquer` implements `IBatchBolt` by extending `BaseBatchBolt`. A batch bolt provides a first-class API for processing a batch of tuples as a concrete unit. A new instance of the batch bolt is created for each request id, and Storm takes care of cleaning up the instances when appropriate. 
+
+When `PartialUniquer` receives a follower tuple in the `execute` method, it adds the follower id to the internal `HashSet` it keeps for that request id. 
+
+Batch bolts provide the `finishBatch` method which is called after all the tuples for this batch targeted at this task have been processed. In the callback, `PartialUniquer` emits a single tuple containing the unique count for its subset of follower ids.
+
+Under the hood, `CoordinatedBolt` is used to detect when a given bolt has received all of the tuples for any given request id. `CoordinatedBolt` makes use of direct streams to manage this coordination.
+
+The rest of the topology should be self-explanatory. As you can see, every single step of the reach computation is done in parallel, and defining the DRPC topology was extremely simple.
+
+### Non-linear DRPC topologies
+
+`LinearDRPCTopologyBuilder` only handles "linear" DRPC topologies, where the computation is expressed as a sequence of steps (like reach). It's not hard to imagine functions that would require a more complicated topology with branching and merging of the bolts. For now, to do this you'll need to drop down into using `CoordinatedBolt` directly. Be sure to talk about your use case for non-linear DRPC topologies on the mailing list to inform the construction of more general abstractions for DRPC topologies.
+
+### How LinearDRPCTopologyBuilder works
+
+* DRPCSpout emits [args, return-info]. return-info is the host and port of the DRPC server as well as the id generated by the DRPC server
+* constructs a topology comprising:
+  * DRPCSpout
+  * PrepareRequest (generates a request id and creates a stream for the return info and a stream for the args)
+  * CoordinatedBolt wrappers and direct groupings
+  * JoinResult (joins the result with the return info)
+  * ReturnResult (connects to the DRPC server and returns the result)
+* LinearDRPCTopologyBuilder is a good example of a higher level abstraction built on top of Storm's primitives
+
+### Advanced
+* KeyedFairBolt for weaving the processing of multiple requests at the same time
+* How to use `CoordinatedBolt` directly

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Documentation.md
----------------------------------------------------------------------
diff --git a/docs/Documentation.md b/docs/Documentation.md
new file mode 100644
index 0000000..8da874c
--- /dev/null
+++ b/docs/Documentation.md
@@ -0,0 +1,50 @@
+---
+layout: documentation
+---
+### Basics of Storm
+
+* [Javadoc](javadocs/index.html)
+* [Concepts](Concepts.html)
+* [Configuration](Configuration.html)
+* [Guaranteeing message processing](Guaranteeing-message-processing.html)
+* [Fault-tolerance](Fault-tolerance.html)
+* [Command line client](Command-line-client.html)
+* [Understanding the parallelism of a Storm topology](Understanding-the-parallelism-of-a-Storm-topology.html)
+* [FAQ](FAQ.html)
+
+### Trident
+
+Trident is an alternative interface to Storm. It provides exactly-once processing, "transactional" datastore persistence, and a set of common stream analytics operations.
+
+* [Trident Tutorial](Trident-tutorial.html)     -- basic concepts and walkthrough
+* [Trident API Overview](Trident-API-Overview.html) -- operations for transforming and orchestrating data
+* [Trident State](Trident-state.html)        -- exactly-once processing and fast, persistent aggregation
+* [Trident spouts](Trident-spouts.html)       -- transactional and non-transactional data intake
+
+### Setup and deploying
+
+* [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html)
+* [Local mode](Local-mode.html)
+* [Troubleshooting](Troubleshooting.html)
+* [Running topologies on a production cluster](Running-topologies-on-a-production-cluster.html)
+* [Building Storm](Maven.html) with Maven
+
+### Intermediate
+
+* [Serialization](Serialization.html)
+* [Common patterns](Common-patterns.html)
+* [Clojure DSL](Clojure-DSL.html)
+* [Using non-JVM languages with Storm](Using-non-JVM-languages-with-Storm.html)
+* [Distributed RPC](Distributed-RPC.html)
+* [Transactional topologies](Transactional-topologies.html)
+* [Kestrel and Storm](Kestrel-and-Storm.html)
+* [Direct groupings](Direct-groupings.html)
+* [Hooks](Hooks.html)
+* [Metrics](Metrics.html)
+* [Lifecycle of a trident tuple]()
+
+### Advanced
+
+* [Defining a non-JVM language DSL for Storm](Defining-a-non-jvm-language-dsl-for-storm.html)
+* [Multilang protocol](Multilang-protocol.html) (how to provide support for another language)
+* [Implementation docs](Implementation-docs.html)

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/FAQ.md
----------------------------------------------------------------------
diff --git a/docs/FAQ.md b/docs/FAQ.md
new file mode 100644
index 0000000..8ff7a6f
--- /dev/null
+++ b/docs/FAQ.md
@@ -0,0 +1,121 @@
+---
+layout: documentation
+---
+
+## Best Practices
+
+### What rules of thumb can you give me for configuring Storm+Trident?
+
+* Number of workers should be a multiple of number of machines; parallelism a multiple of number of workers; number of Kafka partitions a multiple of spout parallelism.
+* Use one worker per topology per machine.
+* Start with fewer, larger aggregators, one per machine with workers on it.
+* Use the isolation scheduler.
+* Use one acker per worker -- 0.9 makes that the default, but earlier versions do not.
+* Enable GC logging; you should see very few major GCs if things are in reasonable shape.
+* Set the trident batch millis to about 50% of your typical end-to-end latency.
+* Start with a max spout pending that is for sure too small -- one for trident, or the number of executors for storm -- and increase it until you stop seeing changes in the flow. You'll probably end up with something near `2*(throughput in recs/sec)*(end-to-end latency)` (2x the Little's law capacity). Several of these knobs are topology configs, as sketched below.
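+
+A sketch of how several of these settings map to the topology `Config` (the property name for the trident batch interval is taken from `defaults.yaml`; the values shown are illustrative, not recommendations):
+
+```java
+import backtype.storm.Config;
+
+Config conf = new Config();
+conf.setNumWorkers(4);          // a multiple of the number of machines
+conf.setNumAckers(4);           // one acker per worker
+conf.setMaxSpoutPending(1);     // start deliberately small, then increase
+// Trident batch interval: ~50% of typical end-to-end latency.
+conf.put("topology.trident.batch.emit.interval.millis", 500);
+```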
+
+### What are some of the best ways to get a worker to mysteriously and bafflingly die?
+
+* Do you have write access to the log directory?
+* Are you blowing out your heap?
+* Are all the right libraries installed on all of the workers?
+* Is the zookeeper hostname still set to localhost?
+* Did you supply a correct, unique hostname -- one that resolves back to the machine -- to each worker, and put it in the storm conf file?
+* Have you opened firewall/securitygroup permissions _bidirectionally_ among a) all the workers, b) the storm master, c) zookeeper? Also, from the workers to any kafka/kestrel/database/etc that your topology accesses? Use netcat to poke the appropriate ports and be sure. 
+
+### Halp! I cannot see:
+
+* **my logs** Logs by default go to $STORM_HOME/logs. Check that you have write permissions to that directory. They are configured in the logback/cluster.xml (0.9) and log4j/*.properties in earlier versions.
+* **final JVM settings** Add the `-XX:+PrintFlagsFinal` commandline option in the childopts (see the conf file)
+* **final Java system properties** Add `Properties props = System.getProperties(); props.list(System.out);` near where you build your topology.
+
+### How many Workers should I use?
+
+The total number of workers is set by the supervisors -- there's some number of JVM slots each supervisor will superintend. The thing you set on the topology is how many worker slots it will try to claim.
+
+There's no great reason to use more than one worker per topology per machine.
+
+With one topology running on three 8-core nodes and a parallelism hint of 24, each bolt gets 8 executors per machine, i.e. one for each core. There are three big benefits to running three workers (with 8 assigned executors each) compared to running, say, 24 workers (one assigned executor each).
+
+First, data that is repartitioned (shuffles or group-bys) to executors in the same worker will not have to hit the transfer buffer. Instead, tuples are deposited directly from send to receive buffer. That's a big win. By contrast, if the destination executor were on the same machine in a different worker, it would have to go send -> worker transfer -> local socket -> worker recv -> exec recv buffer. It doesn't hit the network card, but it's not as big a win as when executors are in the same worker.
+
+Second, you're typically better off with three aggregators having very large backing cache than having twenty-four aggregators having small backing caches. This reduces the effect of skew, and improves LRU efficiency.
+
+Lastly, fewer workers reduces control flow chatter.
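+
+A sketch of the three-node scenario above (`MySpout` and `MyBolt` are hypothetical placeholders):
+
+```java
+import backtype.storm.Config;
+import backtype.storm.topology.TopologyBuilder;
+
+Config conf = new Config();
+conf.setNumWorkers(3);  // one worker per machine on a three-node cluster
+
+TopologyBuilder builder = new TopologyBuilder();
+builder.setSpout("spout", new MySpout(), 3);
+// Parallelism hint 24 -> 8 executors in each of the 3 workers.
+builder.setBolt("bolt", new MyBolt(), 24).shuffleGrouping("spout");
+```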
+
+## Topology
+
+### Can a Trident topology have Multiple Streams?
+
+> Can a Trident Topology work like a workflow with conditional paths (if-else)? e.g. A Spout (S1) connects to a bolt (B0) which based on certain values in the incoming tuple routes them to either bolt (B1) or bolt (B2) but not both.
+
+A Trident "each" operator returns a Stream object, which you can store in a variable. You can then run multiple eaches on the same Stream to split it, e.g.: 
+
+        Stream s = topology.each(...).groupBy(...).aggregate(...);
+        Stream branch1 = s.each(..., FilterA);
+        Stream branch2 = s.each(..., FilterB);
+
+You can join streams with join, merge or multiReduce.
+
+At time of writing, you can't emit to multiple output streams from Trident -- see [STORM-68](https://issues.apache.org/jira/browse/STORM-68)
+
+## Spouts
+
+### What is a coordinator, and why are there several?
+
+A trident-spout is actually run within a storm _bolt_. The storm-spout of a trident topology is the MasterBatchCoordinator -- it coordinates trident batches and is the same no matter what spouts you use. A batch is born when the MBC dispenses a seed tuple to each of the spout-coordinators. The spout-coordinator bolts know how your particular spouts should cooperate -- so in the kafka case, it's what helps figure out what partition and offset range each spout should pull from.
+
+### What can I store into the spout's metadata record?
+
+You should only store static data, and as little of it as possible, into the metadata record (note: maybe you _can_ store more interesting things; you shouldn't, though)
+
+### How often is the 'emitPartitionBatchNew' function called?
+
+Since the MBC is the actual spout, all the tuples in a batch are just members of its tuple tree. That means storm's "max spout pending" config effectively defines the number of concurrent batches trident runs. The MBC emits a new batch if it has fewer than max-spout-pending tuples pending and if at least one [trident batch interval](https://github.com/apache/storm/blob/master/conf/defaults.yaml#L115)'s worth of seconds has passed since the last batch.
+
+### If nothing was emitted does Trident slow down the calls?
+
+Yes, there's a pluggable "spout wait strategy"; the default is to sleep for a [configurable amount of time](https://github.com/apache/storm/blob/master/conf/defaults.yaml#L110).
+
+### OK, then what is the trident batch interval for?
+
+You know how computers of the 486 era had a [turbo button](http://en.wikipedia.org/wiki/Turbo_button) on them? It's like that. 
+
+Actually, it has two practical uses. One is to throttle spouts that poll a remote source without throttling processing. For example, we have a spout that looks in a given S3 bucket for a new batch-uploaded file to read, linebreak and emit. We don't want it hitting S3 more than every few seconds: files don't show up more than once every few minutes, and a batch takes a few seconds to process.
+
+The other is to limit overpressure on the internal queues during startup or under a heavy burst load -- if the spouts spring to life and suddenly jam ten batches' worth of records into the system, you could have a mass of less-urgent tuples from batch 7 clog up the transfer buffer and prevent the $commit tuple from batch 3 from getting through (or even just the regular old tuples from batch 3). What we do is set the trident batch interval to about half the typical end-to-end processing latency -- if it takes 600ms to process a batch, it's OK to only kick off a batch every 300ms.
+
+Note that this is a cap, not an additional delay -- with a period of 300ms, if your batch takes 258ms Trident will only delay an additional 42ms.
+
+### How do you set the batch size?
+
+Trident doesn't place its own limits on the batch count. In the case of the Kafka spout, the max fetch bytes size divided by the average record size defines an effective number of records per subbatch partition.
+
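+For example (an illustrative figure, not a measured one): with a 1 MB max fetch size and records averaging 1 KB, each Kafka partition would contribute roughly 1,000 records per batch.
+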
+### How do I resize a batch?
+
+The trident batch is a somewhat overloaded facility. Together with the number of partitions, the batch size is constrained by or serves to define
+
+1. the unit of transactional safety (tuples at risk vs time)
+2. per partition, an effective windowing mechanism for windowed stream analytics
+3. per partition, the number of simultaneous queries that will be made by a partitionQuery, partitionPersist, etc.
+4. per partition, the number of records convenient for the spout to dispatch at the same time
+
+You can't change the overall batch size once generated, but you can change the number of partitions -- do a shuffle and then change the parallelism hint.
+
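+In Trident that looks something like this (a sketch, assuming a Trident `Stream` named `stream`):
+
+```java
+stream.shuffle().parallelismHint(64);  // repartition, then run downstream ops with 64 executors
+```
+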
+## Time Series
+
+### How do I aggregate events by time?
+
+If you have records with an immutable timestamp, and you would like to count, average, or otherwise aggregate them into discrete time buckets, Trident is an excellent and scalable solution.
+
+Write an `Each` function that turns the timestamp into a time bucket: if the bucket size was "by hour", then the timestamp `2013-08-08 12:34:56` would be mapped to the `2013-08-08 12:00:00` time bucket, and so would everything else in the twelve o'clock hour. Then group on that timebucket and use a grouped persistentAggregate. The persistentAggregate uses a local cacheMap backed by a data store. Groups with many records require very few reads from the data store, and use efficient bulk reads and writes; as long as your data feed is relatively prompt Trident will make very efficient use of memory and network. Even if a server drops off line for a day, then delivers that full day's worth of data in a rush, the old results will be calmly retrieved and updated -- and without interfering with calculating the current results.
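+
+A sketch of such a bucketing function (assuming a tuple whose first field is an epoch-millis timestamp; the class and field names are illustrative):
+
+```java
+import backtype.storm.tuple.Values;
+import storm.trident.operation.BaseFunction;
+import storm.trident.operation.TridentCollector;
+import storm.trident.tuple.TridentTuple;
+
+public class HourBucket extends BaseFunction {
+    @Override
+    public void execute(TridentTuple tuple, TridentCollector collector) {
+        long ts = tuple.getLong(0);
+        long hourMillis = 60L * 60L * 1000L;
+        // Truncate to the start of the hour, e.g. 12:34:56 -> 12:00:00.
+        collector.emit(new Values(ts - (ts % hourMillis)));
+    }
+}
+```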
+
+### How can I know that all records for a time bucket have been received?
+
+You cannot know that all events are collected -- this is an epistemological challenge, not a distributed systems challenge. You can:
+
+* Set a time limit using domain knowledge
+* Introduce a _punctuation_: a record known to come after all records in the given time bucket. Trident uses this scheme to know when a batch is complete. If, for instance, you receive records from a set of sensors, each in order for that sensor, then once all sensors have sent you a timestamp of 3:02:xx or later you know you can commit everything through 3:02.
+* When possible, make your process incremental: each value that comes in makes the answer more and more true. A Trident ReducerAggregator is an operator that takes a prior result and a set of new records and returns a new result. This lets the result be cached and serialized to a datastore; if a server drops off line for a day and then comes back with a full day's worth of data in a rush, the old results will be calmly retrieved and updated.
+* Lambda architecture: Record all events into an archival store (S3, HBase, HDFS) on receipt. In the fast layer, once the time window is clear, process the bucket to get an actionable answer, and ignore everything older than the time window. Periodically run a global aggregation to calculate a "correct" answer.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Fault-tolerance.md
----------------------------------------------------------------------
diff --git a/docs/Fault-tolerance.md b/docs/Fault-tolerance.md
new file mode 100644
index 0000000..9a7a349
--- /dev/null
+++ b/docs/Fault-tolerance.md
@@ -0,0 +1,28 @@
+---
+layout: documentation
+---
+This page explains the design details of Storm that make it a fault-tolerant system.
+
+## What happens when a worker dies?
+
+When a worker dies, the supervisor will restart it. If it continuously fails on startup and is unable to heartbeat to Nimbus, Nimbus will reassign the worker to another machine.
+
+## What happens when a node dies?
+
+The tasks assigned to that machine will time-out and Nimbus will reassign those tasks to other machines.
+
+## What happens when Nimbus or Supervisor daemons die?
+
+The Nimbus and Supervisor daemons are designed to be fail-fast (process self-destructs whenever any unexpected situation is encountered) and stateless (all state is kept in Zookeeper or on disk). As described in [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html), the Nimbus and Supervisor daemons must be run under supervision using a tool like daemontools or monit. So if the Nimbus or Supervisor daemons die, they restart like nothing happened.
+
+Most notably, no worker processes are affected by the death of Nimbus or the Supervisors. This is in contrast to Hadoop, where if the JobTracker dies, all the running jobs are lost. 
+
+## Is Nimbus a single point of failure?
+
+If you lose the Nimbus node, the workers will still continue to function. Additionally, supervisors will continue to restart workers if they die. However, without Nimbus, workers won't be reassigned to other machines when necessary (like if you lose a worker machine). 
+
+So the answer is that Nimbus is "sort of" a SPOF. In practice, it's not a big deal since nothing catastrophic happens when the Nimbus daemon dies. There are plans to make Nimbus highly available in the future.
+
+## How does Storm guarantee data processing?
+
+Storm provides mechanisms to guarantee data processing even if nodes die or messages are lost. See [Guaranteeing message processing](Guaranteeing-message-processing.html) for the details.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Guaranteeing-message-processing.md
----------------------------------------------------------------------
diff --git a/docs/Guaranteeing-message-processing.md b/docs/Guaranteeing-message-processing.md
new file mode 100644
index 0000000..91d4384
--- /dev/null
+++ b/docs/Guaranteeing-message-processing.md
@@ -0,0 +1,179 @@
+---
+layout: documentation
+---
+Storm guarantees that each message coming off a spout will be fully processed. This page describes how Storm accomplishes this guarantee and what you have to do as a user to benefit from Storm's reliability capabilities.
+
+### What does it mean for a message to be "fully processed"?
+
+A tuple coming off a spout can trigger thousands of tuples to be created based on it. Consider, for example, the streaming word count topology:
+
+```java
+TopologyBuilder builder = new TopologyBuilder();
+builder.setSpout("sentences", new KestrelSpout("kestrel.backtype.com",
+                                               22133,
+                                               "sentence_queue",
+                                               new StringScheme()));
+builder.setBolt("split", new SplitSentence(), 10)
+        .shuffleGrouping("sentences");
+builder.setBolt("count", new WordCount(), 20)
+        .fieldsGrouping("split", new Fields("word"));
+```
+
+This topology reads sentences off of a Kestrel queue, splits the sentences into their constituent words, and then emits for each word the number of times it has seen that word before. A tuple coming off the spout triggers many tuples being created based on it: a tuple for each word in the sentence and a tuple for the updated count for each word. The tree of messages looks something like this:
+
+![Tuple tree](images/tuple_tree.png)
+
+Storm considers a tuple coming off a spout "fully processed" when the tuple tree has been exhausted and every message in the tree has been processed. A tuple is considered failed when its tree of messages fails to be fully processed within a specified timeout. This timeout can be configured on a topology-specific basis using the [Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS](javadocs/backtype/storm/Config.html#TOPOLOGY_MESSAGE_TIMEOUT_SECS) configuration and defaults to 30 seconds.
+
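+For example (a sketch), raising the timeout for a topology whose tuple trees legitimately take longer than the default 30 seconds:
+
+```java
+import backtype.storm.Config;
+
+Config conf = new Config();
+conf.setMessageTimeoutSecs(60);  // tuples not fully processed within 60s are failed and replayed
+```
+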
+### What happens if a message is fully processed or fails to be fully processed?
+
+To understand this question, let's take a look at the lifecycle of a tuple coming off of a spout. For reference, here is the interface that spouts implement (see the [Javadoc](javadocs/backtype/storm/spout/ISpout.html) for more information):
+
+```java
+public interface ISpout extends Serializable {
+    void open(Map conf, TopologyContext context, SpoutOutputCollector collector);
+    void close();
+    void nextTuple();
+    void ack(Object msgId);
+    void fail(Object msgId);
+}
+```
+
+First, Storm requests a tuple from the `Spout` by calling the `nextTuple` method on the `Spout`. The `Spout` uses the `SpoutOutputCollector` provided in the `open` method to emit a tuple to one of its output streams. When emitting a tuple, the `Spout` provides a "message id" that will be used to identify the tuple later. For example, the `KestrelSpout` reads a message off of the kestrel queue and emits as the "message id" the id provided by Kestrel for the message. Emitting a message to the `SpoutOutputCollector` looks like this:
+
+```java
+_collector.emit(new Values("field1", "field2", 3) , msgId);
+```
+
+Next, the tuple gets sent to consuming bolts and Storm takes care of tracking the tree of messages that is created. If Storm detects that a tuple is fully processed, Storm will call the `ack` method on the originating `Spout` task with the message id that the `Spout` provided to Storm. Likewise, if the tuple times out, Storm will call the `fail` method on the `Spout`. Note that a tuple will be acked or failed by the exact same `Spout` task that created it. So even if a `Spout` is executing many tasks across the cluster, a tuple won't be acked or failed by a different task than the one that created it.
+
+Let's use `KestrelSpout` again to see what a `Spout` needs to do to guarantee message processing. When `KestrelSpout` takes a message off the Kestrel queue, it "opens" the message. This means the message is not actually taken off the queue yet, but instead placed in a "pending" state waiting for acknowledgement that the message is completed. While in the pending state, a message will not be sent to other consumers of the queue. Additionally, if a client disconnects all pending messages for that client are put back on the queue. When a message is opened, Kestrel provides the client with the data for the message as well as a unique id for the message. The `KestrelSpout` uses that exact id as the "message id" for the tuple when emitting the tuple to the `SpoutOutputCollector`. Sometime later on, when `ack` or `fail` are called on the `KestrelSpout`, the `KestrelSpout` sends an ack or fail message to Kestrel with the message id to take the message off the queue or have it put back on.
+
+### What is Storm's reliability API?
+
+There are two things you have to do as a user to benefit from Storm's reliability capabilities. First, you need to tell Storm whenever you're creating a new link in the tree of tuples. Second, you need to tell Storm when you have finished processing an individual tuple. By doing both these things, Storm can detect when the tree of tuples is fully processed and can ack or fail the spout tuple appropriately. Storm's API provides a concise way of doing both of these tasks.
+
+Specifying a link in the tuple tree is called _anchoring_. Anchoring is done at the same time you emit a new tuple. Let's use the following bolt as an example. This bolt splits a tuple containing a sentence into a tuple for each word:
+
+```java
+public class SplitSentence extends BaseRichBolt {
+    OutputCollector _collector;
+
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+        _collector = collector;
+    }
+
+    public void execute(Tuple tuple) {
+        String sentence = tuple.getString(0);
+        for(String word: sentence.split(" ")) {
+            _collector.emit(tuple, new Values(word));
+        }
+        _collector.ack(tuple);
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("word"));
+    }
+}
+```
+
+Each word tuple is _anchored_ by specifying the input tuple as the first argument to `emit`. Since the word tuple is anchored, the spout tuple at the root of the tree will be replayed later on if the word tuple failed to be processed downstream. In contrast, let's look at what happens if the word tuple is emitted like this:
+
+```java
+_collector.emit(new Values(word));
+```
+
+Emitting the word tuple this way causes it to be _unanchored_. If the tuple fails to be processed downstream, the root tuple will not be replayed. Depending on the fault-tolerance guarantees you need in your topology, sometimes it's appropriate to emit an unanchored tuple.
+
+An output tuple can be anchored to more than one input tuple. This is useful when doing streaming joins or aggregations. A multi-anchored tuple failing to be processed will cause multiple tuples to be replayed from the spouts. Multi-anchoring is done by specifying a list of tuples rather than just a single tuple. For example:
+
+```java
+List<Tuple> anchors = new ArrayList<Tuple>();
+anchors.add(tuple1);
+anchors.add(tuple2);
+_collector.emit(anchors, new Values(1, 2, 3));
+```
+
+Multi-anchoring adds the output tuple into multiple tuple trees. Note that it's also possible for multi-anchoring to break the tree structure and create tuple DAGs, like so:
+
+![Tuple DAG](images/tuple-dag.png)
+
+Storm's implementation works for DAGs as well as trees (pre-release it only worked for trees, and the name "tuple tree" stuck).
+
+Anchoring is how you specify the tuple tree -- the next and final piece to Storm's reliability API is specifying when you've finished processing an individual tuple in the tuple tree. This is done by using the `ack` and `fail` methods on the `OutputCollector`. If you look back at the `SplitSentence` example, you can see that the input tuple is acked after all the word tuples are emitted.
+
+You can use the `fail` method on the `OutputCollector` to immediately fail the spout tuple at the root of the tuple tree. For example, your application may choose to catch an exception from a database client and explicitly fail the input tuple. By failing the tuple explicitly, the spout tuple can be replayed faster than if you waited for the tuple to time-out.
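+
+As a sketch, a bolt might fail the input tuple explicitly when a write to some external store throws (the `store` helper here is hypothetical):
+
+```java
+public void execute(Tuple tuple) {
+    try {
+        store(tuple.getString(0)); // hypothetical database write
+        _collector.ack(tuple);
+    } catch(Exception e) {
+        // fail immediately so the spout tuple is replayed without waiting for the timeout
+        _collector.fail(tuple);
+    }
+}
+```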
+
+Every tuple you process must be acked or failed. Storm uses memory to track each tuple, so if you don't ack/fail every tuple, the task will eventually run out of memory. 
+
+A lot of bolts follow a common pattern of reading an input tuple, emitting tuples based on it, and then acking the tuple at the end of the `execute` method. These bolts fall into the categories of filters and simple functions. Storm has an interface called `BasicBolt` that encapsulates this pattern for you. The `SplitSentence` example can be written as a `BasicBolt` as follows:
+
+```java
+public class SplitSentence extends BaseBasicBolt {
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+        String sentence = tuple.getString(0);
+        for(String word: sentence.split(" ")) {
+            collector.emit(new Values(word));
+        }
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("word"));
+    }
+}
+```
+
+This implementation is simpler than the implementation from before and is semantically identical. Tuples emitted to `BasicOutputCollector` are automatically anchored to the input tuple, and the input tuple is acked for you automatically when the execute method completes.
+
+In contrast, bolts that do aggregations or joins may delay acking a tuple until after it has computed a result based on a bunch of tuples. Aggregations and joins will commonly multi-anchor their output tuples as well. These things fall outside the simpler pattern of `IBasicBolt`.
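+
+For illustration only, a bolt that pairs up consecutive tuples might buffer one input, then emit a multi-anchored result and ack both inputs afterwards (the pairing logic is invented for this sketch):
+
+```java
+public class PairBolt extends BaseRichBolt {
+    OutputCollector _collector;
+    Tuple _pending; // the buffered first tuple of each pair
+
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+        _collector = collector;
+    }
+
+    public void execute(Tuple tuple) {
+        if(_pending == null) {
+            _pending = tuple; // delay acking until we have a pair
+        } else {
+            // anchor the output to both inputs so a downstream failure replays both roots
+            _collector.emit(Arrays.asList(_pending, tuple),
+                            new Values(_pending.getString(0) + " " + tuple.getString(0)));
+            _collector.ack(_pending);
+            _collector.ack(tuple);
+            _pending = null;
+        }
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("pair"));
+    }
+}
+```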
+
+### How do I make my applications work correctly given that tuples can be replayed?
+
+As always in software design, the answer is "it depends." Storm 0.7.0 introduced the "transactional topologies" feature, which enables you to get fully fault-tolerant exactly-once messaging semantics for most computations. Read more about transactional topologies [here](Transactional-topologies.html). 
+
+
+### How does Storm implement reliability in an efficient way?
+
+A Storm topology has a set of special "acker" tasks that track the DAG of tuples for every spout tuple. When an acker sees that a DAG is complete, it sends a message to the spout task that created the spout tuple to ack the message. You can set the number of acker tasks for a topology in the topology configuration using [Config.TOPOLOGY_ACKERS](javadocs/backtype/storm/Config.html#TOPOLOGY_ACKERS). Storm defaults TOPOLOGY_ACKERS to one task -- you will need to increase this number for topologies processing large amounts of messages. 
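+
+For example (the count of four is arbitrary):
+
+```java
+Config conf = new Config();
+conf.setNumAckers(4); // equivalent to setting Config.TOPOLOGY_ACKERS to 4
+```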
+
+The best way to understand Storm's reliability implementation is to look at the lifecycle of tuples and tuple DAGs. When a tuple is created in a topology, whether in a spout or a bolt, it is given a random 64 bit id. These ids are used by ackers to track the tuple DAG for every spout tuple.
+
+Every tuple knows the ids of all the spout tuples for which it exists in their tuple trees. When you emit a new tuple in a bolt, the spout tuple ids from the tuple's anchors are copied into the new tuple. When a tuple is acked, it sends a message to the appropriate acker tasks with information about how the tuple tree changed. In particular it tells the acker "I am now completed within the tree for this spout tuple, and here are the new tuples in the tree that were anchored to me". 
+
+For example, if tuples "D" and "E" were created based on tuple "C", here's how the tuple tree changes when "C" is acked: 
+
+![What happens on an ack](images/ack_tree.png)
+
+Since "C" is removed from the tree at the same time that "D" and "E" are added to it, the tree can never be prematurely completed.
+
+There are a few more details to how Storm tracks tuple trees. As mentioned already, you can have an arbitrary number of acker tasks in a topology. This leads to the following question: when a tuple is acked in the topology, how does it know to which acker task to send that information? 
+
+Storm uses mod hashing to map a spout tuple id to an acker task. Since every tuple carries with it the spout tuple ids of all the trees it exists within, it knows which acker tasks to communicate with.
+
+Another detail of Storm is how the acker tasks track which spout tasks are responsible for each spout tuple they're tracking. When a spout task emits a new tuple, it simply sends a message to the appropriate acker telling it that its task id is responsible for that spout tuple. Then when an acker sees a tree has been completed, it knows to which task id to send the completion message.
+
+Acker tasks do not track the tree of tuples explicitly. For large tuple trees with tens of thousands of nodes (or more), tracking all the tuple trees could overwhelm the memory used by the ackers. Instead, the ackers take a different strategy that only requires a fixed amount of space per spout tuple (about 20 bytes). This tracking algorithm is the key to how Storm works and is one of its major breakthroughs.
+
+An acker task stores a map from a spout tuple id to a pair of values. The first value is the task id that created the spout tuple which is used later on to send completion messages. The second value is a 64 bit number called the "ack val". The ack val is a representation of the state of the entire tuple tree, no matter how big or how small.  It is simply the xor of all tuple ids that have been created and/or acked in the tree.
+
+When an acker task sees that an "ack val" has become 0, then it knows that the tuple tree is completed. Since tuple ids are random 64 bit numbers, the chance of an "ack val" accidentally becoming 0 is extremely small. If you work out the math, at 10K acks per second, it will take 50,000,000 years until a mistake is made. And even then, it will only cause data loss if that tuple happens to fail in the topology.
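+
+To make the xor bookkeeping concrete, here is a small standalone sketch (with made-up tuple ids) showing how creating and acking tuples drives the ack val to zero exactly when the tree completes:
+
+```java
+long ackVal = 0;
+long rootId = 0x9f3cL, d = 0x51a2L, e = 0x77e8L; // made-up 64 bit tuple ids
+
+ackVal ^= rootId;          // the spout emits the root tuple
+ackVal ^= rootId ^ d ^ e;  // the root is acked; D and E were anchored to it
+ackVal ^= d;               // D is acked
+ackVal ^= e;               // E is acked
+// ackVal == 0, so the acker knows this tuple tree is fully processed
+```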
+
+Now that you understand the reliability algorithm, let's go over all the failure cases and see how in each case Storm avoids data loss:
+
+- **A tuple isn't acked because the task died**: In this case the spout tuple ids at the root of the trees for the failed tuple will time out and be replayed.
+- **Acker task dies**: In this case all the spout tuples the acker was tracking will time out and be replayed.
+- **Spout task dies**: In this case the source that the spout talks to is responsible for replaying the messages. For example, queues like Kestrel and RabbitMQ will place all pending messages back on the queue when a client disconnects.
+
+As you have seen, Storm's reliability mechanisms are completely distributed, scalable, and fault-tolerant. 
+
+### Tuning reliability
+
+Acker tasks are lightweight, so you don't need very many of them in a topology. You can track their performance through the Storm UI (component id "__acker"). If the throughput doesn't look right, you'll need to add more acker tasks. 
+
+If reliability isn't important to you -- that is, you don't care about losing tuples in failure situations -- then you can improve performance by not tracking the tuple tree for spout tuples. Not tracking a tuple tree halves the number of messages transferred since normally there's an ack message for every tuple in the tuple tree. Additionally, it requires fewer ids to be kept in each downstream tuple, reducing bandwidth usage.
+
+There are three ways to remove reliability. The first is to set Config.TOPOLOGY_ACKERS to 0. In this case, Storm will call the `ack` method on the spout immediately after the spout emits a tuple. The tuple tree won't be tracked.
+
+The second way is to remove reliability on a message-by-message basis. You can turn off tracking for an individual spout tuple by omitting a message id in the `SpoutOutputCollector.emit` method.
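+
+For example, here is a sketch of the difference inside a spout:
+
+```java
+// tracked: Storm builds a tuple tree and will later call ack or fail with msgId
+_collector.emit(new Values("field1", "field2", 3), msgId);
+
+// untracked: no message id, so no tuple tree is tracked for this tuple
+_collector.emit(new Values("field1", "field2", 3));
+```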
+
+Finally, if you don't care if a particular subset of the tuples downstream in the topology fail to be processed, you can emit them as unanchored tuples. Since they're not anchored to any spout tuples, they won't cause any spout tuples to fail if they aren't acked.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Hooks.md
----------------------------------------------------------------------
diff --git a/docs/Hooks.md b/docs/Hooks.md
new file mode 100644
index 0000000..bbe87a9
--- /dev/null
+++ b/docs/Hooks.md
@@ -0,0 +1,7 @@
+---
+layout: documentation
+---
+Storm provides hooks with which you can insert custom code to run on any number of events within Storm. You create a hook by extending the [BaseTaskHook](javadocs/backtype/storm/hooks/BaseTaskHook.html) class and overriding the appropriate method for the event you want to catch. There are two ways to register your hook:
+
+1. In the open method of your spout or prepare method of your bolt using the [TopologyContext#addTaskHook](javadocs/backtype/storm/task/TopologyContext.html) method.
+2. Through the Storm configuration using the ["topology.auto.task.hooks"](javadocs/backtype/storm/Config.html#TOPOLOGY_AUTO_TASK_HOOKS) config. These hooks are automatically registered in every spout or bolt, and are useful for doing things like integrating with a custom monitoring system.
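+
+As a hedged sketch (the logging is invented for illustration and assumes the `backtype.storm.hooks.info.BoltAckInfo` event class), a hook that logs every acked tuple might look like:
+
+```java
+public class AckLoggingHook extends BaseTaskHook {
+    @Override
+    public void boltAck(BoltAckInfo info) {
+        // invented example: print the process latency of each acked tuple
+        // (the latency may be null when the tuple wasn't sampled)
+        System.out.println("acked tuple in " + info.processLatencyMs + " ms");
+    }
+}
+```
+
+With the first approach, you would register it in your bolt's `prepare` method via `context.addTaskHook(new AckLoggingHook());`.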

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Implementation-docs.md
----------------------------------------------------------------------
diff --git a/docs/Implementation-docs.md b/docs/Implementation-docs.md
new file mode 100644
index 0000000..f01083a
--- /dev/null
+++ b/docs/Implementation-docs.md
@@ -0,0 +1,18 @@
+---
+layout: documentation
+---
+This section of the wiki is dedicated to explaining how Storm is implemented. You should have a good grasp of how to use Storm before reading these sections. 
+
+- [Structure of the codebase](Structure-of-the-codebase.html)
+- [Lifecycle of a topology](Lifecycle-of-a-topology.html)
+- [Message passing implementation](Message-passing-implementation.html)
+- [Acking framework implementation](Acking-framework-implementation.html)
+- [Metrics](Metrics.html)
+- How transactional topologies work
+   - subtopology for TransactionalSpout
+   - how state is stored in ZK
+   - subtleties around what to do when emitting batches out of order
+- Unit testing
+  - time simulation
+  - complete-topology
+  - tracker clusters

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Installing-native-dependencies.md
----------------------------------------------------------------------
diff --git a/docs/Installing-native-dependencies.md b/docs/Installing-native-dependencies.md
new file mode 100644
index 0000000..1937d4b
--- /dev/null
+++ b/docs/Installing-native-dependencies.md
@@ -0,0 +1,38 @@
+---
+layout: documentation
+---
+The native dependencies are only needed on actual Storm clusters. When running Storm in local mode, Storm uses a pure Java messaging system so that you don't need to install native dependencies on your development machine.
+
+Installing ZeroMQ and JZMQ is usually straightforward. Sometimes, however, people run into issues with autoconf and get strange errors. If you run into any issues, please email the [Storm mailing list](http://groups.google.com/group/storm-user) or come get help in the #storm-user room on freenode. 
+
+Storm has been tested with ZeroMQ 2.1.7, and this is the recommended ZeroMQ release that you install. You can download a ZeroMQ release [here](http://download.zeromq.org/). Installing ZeroMQ should look something like this:
+
+```
+wget http://download.zeromq.org/zeromq-2.1.7.tar.gz
+tar -xzf zeromq-2.1.7.tar.gz
+cd zeromq-2.1.7
+./configure
+make
+sudo make install
+```
+
+JZMQ is the set of Java bindings for ZeroMQ. JZMQ doesn't have any releases (we're working with them on that), so there is a risk of regressions if you always install from the master branch. To avoid that, you should instead install from [this fork](http://github.com/nathanmarz/jzmq), which is tested to work with Storm. Installing JZMQ should look something like this:
+
+```
+#install jzmq
+git clone https://github.com/nathanmarz/jzmq.git
+cd jzmq
+./autogen.sh
+./configure
+make
+sudo make install
+```
+
+To get the JZMQ build to work, you may need to do one or all of the following:
+
+1. Set JAVA_HOME environment variable appropriately
+2. Install Java dev package (more info [here](http://codeslinger.posterous.com/getting-zeromq-and-jzmq-running-on-mac-os-x) for Mac OSX users)
+3. Upgrade autoconf on your machine
+4. Follow the instructions in [this blog post](http://blog.pmorelli.com/getting-zeromq-and-jzmq-running-on-mac-os-x)
+
+If you run into any errors when running `./configure`, [this thread](http://stackoverflow.com/questions/3522248/how-do-i-compile-jzmq-for-zeromq-on-osx) may provide a solution.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Kestrel-and-Storm.md
----------------------------------------------------------------------
diff --git a/docs/Kestrel-and-Storm.md b/docs/Kestrel-and-Storm.md
new file mode 100644
index 0000000..e16b0d9
--- /dev/null
+++ b/docs/Kestrel-and-Storm.md
@@ -0,0 +1,198 @@
+---
+layout: documentation
+---
+This page explains how to use Storm to consume items from a Kestrel cluster.
+
+## Preliminaries
+### Storm
+This tutorial uses examples from the [storm-kestrel](https://github.com/nathanmarz/storm-kestrel) project and the [storm-starter](https://github.com/nathanmarz/storm-starter) project. It's recommended that you clone those projects and follow along with the examples. Read [Setting up development environment](https://github.com/apache/incubator-storm/wiki/Setting-up-development-environment) and [Creating a new Storm project](https://github.com/apache/incubator-storm/wiki/Creating-a-new-Storm-project) to get your machine set up.
+### Kestrel
+It assumes you are able to run a Kestrel server locally, as described [here](https://github.com/nathanmarz/storm-kestrel).
+
+## Kestrel Server and Queue
+A single kestrel server has a set of queues. A Kestrel queue is a very simple message queue that runs on the JVM and uses the memcache protocol (with some extensions) to talk to clients. For details, look at the implementation of the [KestrelThriftClient](https://github.com/nathanmarz/storm-kestrel/blob/master/src/jvm/backtype/storm/spout/KestrelThriftClient.java) class provided in [storm-kestrel](https://github.com/nathanmarz/storm-kestrel) project.
+
+Each queue is strictly ordered following the FIFO (first in, first out) principle. For performance, items are cached in system memory, though only the first 128MB is kept in memory. When the server is stopped, the queue state is stored in a journal file.
+
+Further details can be found [here](https://github.com/nathanmarz/kestrel/blob/master/docs/guide.md).
+
+Kestrel is:
+* fast
+* small
+* durable
+* reliable
+
+For instance, Twitter uses Kestrel as the backbone of its messaging infrastructure as described [here](http://bhavin.directi.com/notes-on-kestrel-the-open-source-twitter-queue/).
+
+## Add items to Kestrel
+First, we need a program that can add items to a Kestrel queue. The following method makes use of the KestrelClient implementation in [storm-kestrel](https://github.com/nathanmarz/storm-kestrel). It adds sentences, randomly chosen from an array of five possibilities, to a Kestrel queue.
+
+```
+    private static void queueSentenceItems(KestrelClient kestrelClient, String queueName)
+            throws ParseError, IOException {
+
+        String[] sentences = new String[] {
+                "the cow jumped over the moon",
+                "an apple a day keeps the doctor away",
+                "four score and seven years ago",
+                "snow white and the seven dwarfs",
+                "i am at two with nature"};
+
+        Random rand = new Random();
+
+        for(int i = 1; i <= 10; i++) {
+            // pick a random sentence and tag it with a sequence id
+            String sentence = sentences[rand.nextInt(sentences.length)];
+            String val = "ID " + i + " " + sentence;
+
+            boolean queueSuccess = kestrelClient.queue(queueName, val);
+            System.out.println("queueSuccess=" + queueSuccess + " [" + val + "]");
+        }
+    }
+```
+
+## Remove items from Kestrel
+
+This method dequeues items from a queue without removing them.
+```
+    private static void dequeueItems(KestrelClient kestrelClient, String queueName)
+            throws IOException, ParseError {
+        for(int i = 1; i <= 12; i++) {
+            Item item = kestrelClient.dequeue(queueName);
+
+            if(item == null) {
+                System.out.println("The queue (" + queueName + ") contains no items.");
+            }
+            else {
+                byte[] data = item._data;
+                String receivedVal = new String(data);
+                System.out.println("receivedItem=" + receivedVal);
+            }
+        }
+    }
+```
+
+This method dequeues items from a queue and then removes them.
+```
+    private static void dequeueAndRemoveItems(KestrelClient kestrelClient, String queueName)
+            throws IOException, ParseError {
+        for(int i = 1; i <= 12; i++) {
+            Item item = kestrelClient.dequeue(queueName);
+
+            if(item == null) {
+                System.out.println("The queue (" + queueName + ") contains no items.");
+            }
+            else {
+                int itemID = item._id;
+                byte[] data = item._data;
+                String receivedVal = new String(data);
+
+                // acking the item id permanently removes the item from the queue
+                kestrelClient.ack(queueName, itemID);
+
+                System.out.println("receivedItem=" + receivedVal);
+            }
+        }
+    }
+```
+
+## Add Items continuously to Kestrel
+
+This is our final program, which we run to continuously add sentence items to a queue called **sentence_queue** on a locally running Kestrel server.
+
+To stop it, type a closing bracket character ']' in the console and hit 'Enter'.
+
+```
+    import java.io.IOException;
+    import java.io.InputStream;
+    import java.util.Random;
+
+    import backtype.storm.spout.KestrelClient;
+    import backtype.storm.spout.KestrelClient.Item;
+    import backtype.storm.spout.KestrelClient.ParseError;
+
+    public class AddSentenceItemsToKestrel {
+
+        public static void main(String[] args) {
+
+            InputStream is = System.in;
+            char closingBracket = ']';
+            int val = closingBracket;
+            boolean running = true;
+
+            try {
+                KestrelClient kestrelClient = null;
+                String queueName = "sentence_queue";
+
+                while(running) {
+                    kestrelClient = new KestrelClient("localhost", 22133);
+                    queueSentenceItems(kestrelClient, queueName);
+                    kestrelClient.close();
+
+                    Thread.sleep(1000);
+
+                    // stop when a ']' character is read from the console
+                    if(is.available() > 0) {
+                        if(val == is.read())
+                            running = false;
+                    }
+                }
+            } catch (IOException e) {
+                e.printStackTrace();
+            } catch (ParseError e) {
+                e.printStackTrace();
+            } catch (InterruptedException e) {
+                e.printStackTrace();
+            }
+
+            System.out.println("end");
+        }
+    }
+```
+## Using KestrelSpout
+
+This topology reads sentences off of a Kestrel queue using KestrelSpout, splits the sentences into their constituent words (Bolt: SplitSentence), and then emits for each word the number of times it has seen that word before (Bolt: WordCount). How data is processed is described in detail in [Guaranteeing message processing](Guaranteeing-message-processing.html).
+
+```
+    TopologyBuilder builder = new TopologyBuilder();
+    builder.setSpout("sentences", new KestrelSpout("localhost", 22133, "sentence_queue", new StringScheme()));
+    builder.setBolt("split", new SplitSentence(), 10)
+            .shuffleGrouping("sentences");
+    builder.setBolt("count", new WordCount(), 20)
+            .fieldsGrouping("split", new Fields("word"));
+```
+
+## Execution
+
+First, start your local Kestrel server in production or development mode.
+
+Then, wait about 5 seconds to avoid a ConnectException.
+
+Now execute the program to add items to the queue and launch the Storm topology. The order in which you launch the programs is of no importance.
+
+If you run the topology with TOPOLOGY_DEBUG you should see tuples being emitted in the topology.
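+
+For instance, here is a sketch of submitting this topology to a local cluster with debugging enabled (the topology name is a placeholder):
+
+```
+    Config conf = new Config();
+    conf.setDebug(true); // sets TOPOLOGY_DEBUG so every emitted tuple is logged
+
+    LocalCluster cluster = new LocalCluster();
+    cluster.submitTopology("kestrel-wordcount", conf, builder.createTopology());
+```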

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Lifecycle-of-a-topology.md
----------------------------------------------------------------------
diff --git a/docs/Lifecycle-of-a-topology.md b/docs/Lifecycle-of-a-topology.md
new file mode 100644
index 0000000..4919be8
--- /dev/null
+++ b/docs/Lifecycle-of-a-topology.md
@@ -0,0 +1,80 @@
+---
+layout: documentation
+---
+(**NOTE**: this page is based on the 0.7.1 code; many things have changed since then, including a split between tasks and executors, and a reorganization of the code under `storm-core/src` rather than `src/`.)
+
+This page explains in detail the lifecycle of a topology from running the "storm jar" command to uploading the topology to Nimbus to the supervisors starting/stopping workers to workers and tasks setting themselves up. It also explains how Nimbus monitors topologies and how topologies are shut down when they are killed.
+
+First a couple of important notes about topologies:
+
+1. The actual topology that runs is different from the topology the user specifies. The actual topology has implicit streams and an implicit "acker" bolt added to manage the acking framework (used to guarantee data processing). The implicit topology is created via the [system-topology!](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/common.clj#L188) function.
+2. `system-topology!` is used in two places:
+  - when Nimbus is creating tasks for the topology [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L316)
+  - in the worker so it knows where it needs to route messages to [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L90)
+
+## Starting a topology
+
+- "storm jar" command executes your class with the specified arguments. The only special thing that "storm jar" does is set the "storm.jar" environment variable for use by `StormSubmitter` later. [code](https://github.com/apache/incubator-storm/blob/0.7.1/bin/storm#L101)
+- When your code uses `StormSubmitter.submitTopology`, `StormSubmitter` takes the following actions:
+  - First, `StormSubmitter` uploads the jar if it hasn't been uploaded before. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/jvm/backtype/storm/StormSubmitter.java#L83)
+    - Jar uploading is done via Nimbus's Thrift interface [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/storm.thrift#L200)
+    - `beginFileUpload` returns a path in Nimbus's inbox
+    - 15 kilobytes are uploaded at a time through `uploadChunk`
+    - `finishFileUpload` is called when it's finished uploading
+    - Here is Nimbus's implementation of those Thrift methods: [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L694)
+  - Second, `StormSubmitter` calls `submitTopology` on the Nimbus thrift interface [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/jvm/backtype/storm/StormSubmitter.java#L60)
+    - The topology config is serialized using JSON (JSON is used so that writing DSLs in any language is as easy as possible)
+    - Notice that the Thrift `submitTopology` call takes in the Nimbus inbox path where the jar was uploaded
+
+- Nimbus receives the topology submission. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L639)
+- Nimbus normalizes the topology configuration. The main purpose of normalization is to ensure that every single task will have the same serialization registrations, which is critical for getting serialization working correctly. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L557) 
+- Nimbus sets up the static state for the topology [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L661)
+    - Jars and configs are kept on local filesystem because they're too big for Zookeeper. The jar and configs are copied into the path {nimbus local dir}/stormdist/{topology id}
+    - `setup-storm-static` writes task -> component mapping into ZK
+    - `setup-heartbeats` creates a ZK "directory" in which tasks can heartbeat
+- Nimbus calls `mk-assignment` to assign tasks to machines [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L458) 
+    - Assignment record definition is here: [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/common.clj#L25)
+    - Assignment contains:
+      - `master-code-dir`: used by supervisors to download the correct jars/configs for the topology from Nimbus
+      - `task->node+port`: Map from a task id to the worker that task should be running on. (A worker is identified by a node/port pair)
+      - `node->host`: A map from node id to hostname. This is used so workers know which machines to connect to in order to communicate with other workers. Node ids are used to identify supervisors so that multiple supervisors can be run on one machine. One place this is done is with Mesos integration.
+      - `task->start-time-secs`: Contains a map from task id to the timestamp at which Nimbus launched that task. This is used by Nimbus when monitoring topologies, as tasks are given a longer timeout to heartbeat when they're first launched (the launch timeout is configured by "nimbus.task.launch.secs" config)
+- Once topologies are assigned, they're initially in a deactivated mode. `start-storm` writes data into Zookeeper so that the cluster knows the topology is active and can start emitting tuples from spouts. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L504)
+
+- TODO cluster state diagram (show all nodes and what's kept everywhere)
+
+- Supervisor runs two functions in the background:
+    - `synchronize-supervisor`: This is called whenever assignments in Zookeeper change and also every 10 seconds. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/supervisor.clj#L241)
+      - Downloads code from Nimbus for topologies assigned to this machine for which it doesn't have the code yet. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/supervisor.clj#L258)
+      - Writes into local filesystem what this node is supposed to be running. It writes a map from port -> LocalAssignment. LocalAssignment contains a topology id as well as the list of task ids for that worker. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/supervisor.clj#L13)
+    - `sync-processes`: Reads from the LFS what `synchronize-supervisor` wrote and compares that to what's actually running on the machine. It then starts/stops worker processes as necessary to synchronize. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/supervisor.clj#L177)
+    
+- Worker processes start up through the `mk-worker` function [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L67)
+  - Worker connects to other workers and starts a thread to monitor for changes. So if a worker gets reassigned, the worker will automatically reconnect to the other worker's new location. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L123)
+  - Monitors whether a topology is active or not and stores that state in the `storm-active-atom` variable. This variable is used by tasks to determine whether or not to call `nextTuple` on the spouts. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L155)
+  - The worker launches the actual tasks as threads within it [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L178)
+- Tasks are set up through the `mk-task` function [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L160)
+  - Tasks set up a routing function which takes in a stream and an output tuple and returns a list of task ids to send the tuple to [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L207) (there's also a 3-arity version used for direct streams)
+  - Tasks set up the spout-specific or bolt-specific code with [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L241)
+   
+## Topology Monitoring
+
+- Nimbus monitors the topology during its lifetime
+   - Schedules recurring task on the timer thread to check the topologies [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L623)
+   - Nimbus's behavior is represented as a finite state machine [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L98)
+   - The "monitor" event is called on a topology every "nimbus.monitor.freq.secs", which calls `reassign-topology` through `reassign-transition` [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L497)
+   - `reassign-topology` calls `mk-assignments`, the same function used to assign the topology the first time. `mk-assignments` is also capable of incrementally updating a topology
+      - `mk-assignments` checks heartbeats and reassigns workers as necessary
+      - Any reassignments change the state in ZK, which will trigger supervisors to synchronize and start/stop workers
+      
+## Killing a topology
+
+- "storm kill" command runs this code which just calls the Nimbus Thrift interface to kill the topology: [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/command/kill_topology.clj)
+- Nimbus receives the kill command [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L671)
+- Nimbus applies the "kill" transition to the topology [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L676)
+- The kill transition function changes the status of the topology to "killed" and schedules the "remove" event to run "wait time seconds" in the future. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L63)
+   - The wait time defaults to the topology message timeout but can be overridden with the -w flag in the "storm kill" command
+   - This causes the topology to be deactivated for the wait time before it's actually shut down. This gives the topology a chance to finish processing what it's currently processing before shutting down the workers
+   - Changing the status during the kill transition ensures that the kill protocol is fault-tolerant to Nimbus crashing. On startup, if the status of the topology is "killed", Nimbus schedules the remove event to run "wait time seconds" in the future [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L111)
+- Removing a topology cleans out the assignment and static information from ZK [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L116)
+- A separate cleanup thread runs the `do-cleanup` function which will clean up the heartbeat dir and the jars/configs stored locally. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L577)

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Local-mode.md
----------------------------------------------------------------------
diff --git a/docs/Local-mode.md b/docs/Local-mode.md
new file mode 100644
index 0000000..1f98e36
--- /dev/null
+++ b/docs/Local-mode.md
@@ -0,0 +1,27 @@
+---
+layout: documentation
+---
+Local mode simulates a Storm cluster in process and is useful for developing and testing topologies. Running topologies in local mode is similar to running topologies [on a cluster](Running-topologies-on-a-production-cluster.html). 
+
+To create an in-process cluster, simply use the `LocalCluster` class. For example:
+
+```java
+import backtype.storm.LocalCluster;
+
+LocalCluster cluster = new LocalCluster();
+```
+
+You can then submit topologies using the `submitTopology` method on the `LocalCluster` object. Just like the corresponding method on [StormSubmitter](javadocs/backtype/storm/StormSubmitter.html), `submitTopology` takes a name, a topology configuration, and the topology object. You can then kill a topology using the `killTopology` method which takes the topology name as an argument.
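+
+For example (the topology name, configuration, and builder are placeholders):
+
+```java
+TopologyBuilder builder = new TopologyBuilder();
+// ... set up spouts and bolts ...
+
+Config conf = new Config();
+
+LocalCluster cluster = new LocalCluster();
+cluster.submitTopology("test", conf, builder.createTopology());
+// ... let the topology run for a while ...
+cluster.killTopology("test");
+```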
+
+To shut down a local cluster, simply call:
+
+```java
+cluster.shutdown();
+```
+
+### Common configurations for local mode
+
+You can see a full list of configurations [here](javadocs/backtype/storm/Config.html).
+
+1. **Config.TOPOLOGY_MAX_TASK_PARALLELISM**: This config puts a ceiling on the number of threads spawned for a single component. Oftentimes production topologies have a lot of parallelism (hundreds of threads), which places unreasonable load when trying to test the topology in local mode. This config lets you easily control that parallelism.
+2. **Config.TOPOLOGY_DEBUG**: When this is set to true, Storm will log a message every time a tuple is emitted from any spout or bolt. This is extremely useful for debugging.
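+
+A sketch of applying both settings (the values are arbitrary):
+
+```java
+Config conf = new Config();
+conf.setMaxTaskParallelism(3); // Config.TOPOLOGY_MAX_TASK_PARALLELISM
+conf.setDebug(true);           // Config.TOPOLOGY_DEBUG
+```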

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Maven.md
----------------------------------------------------------------------
diff --git a/docs/Maven.md b/docs/Maven.md
new file mode 100644
index 0000000..85828da
--- /dev/null
+++ b/docs/Maven.md
@@ -0,0 +1,56 @@
+---
+layout: documentation
+---
+To develop topologies, you'll need the Storm jars on your classpath. You should either include the unpacked jars in the classpath for your project or use Maven to include Storm as a development dependency. Storm is hosted on Clojars (a Maven repository). To include Storm in your project as a development dependency, add the following to your pom.xml:
+
+```xml
+<repository>
+  <id>clojars.org</id>
+  <url>http://clojars.org/repo</url>
+</repository>
+```
+
+```xml
+<dependency>
+  <groupId>storm</groupId>
+  <artifactId>storm</artifactId>
+  <version>0.7.2</version>
+  <scope>test</scope>
+</dependency>
+```
+
+[Here's an example](https://github.com/nathanmarz/storm-starter/blob/master/m2-pom.xml) of a pom.xml for a Storm project.
+
+If Maven isn't your thing, check out [leiningen](https://github.com/technomancy/leiningen). Leiningen is a build tool for Clojure, but it can be used for pure Java projects as well. Leiningen makes builds and dependency management using Maven dead-simple. Here's an example project.clj for a pure-Java Storm project:
+
+```clojure
+(defproject storm-starter "0.0.1-SNAPSHOT"
+  :java-source-path "src/jvm"
+  :javac-options {:debug "true" :fork "true"}
+  :jvm-opts ["-Djava.library.path=/usr/local/lib:/opt/local/lib:/usr/lib"]
+  :dependencies []
+  :dev-dependencies [
+                     [storm "0.7.2"]
+                     ])
+```
+
+You can fetch dependencies using `lein deps`, build the project with `lein compile`, and make a jar suitable for submitting to a cluster with `lein uberjar`.
+
+### Using Storm as a library
+
+If you want to use Storm as a library (e.g., use the Distributed RPC client) and have the Storm dependency jars be distributed with your application, there's a separate Maven dependency called "storm/storm-lib". The only difference between this dependency and the usual "storm/storm" is that storm-lib does not have any logging configured.
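+
+A sketch of the dependency swap (reusing the 0.7.2 version from the example above):
+
+```xml
+<dependency>
+  <groupId>storm</groupId>
+  <artifactId>storm-lib</artifactId>
+  <version>0.7.2</version>
+</dependency>
+```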
+
+### Developing Storm
+
+You will want to
+
+	bash ./bin/install_zmq.sh   # install the jzmq dependency
+	lein sub install
+
+Build javadocs with
+
+	bash ./bin/javadoc.sh
+
+### Building a Storm Release
+
+Use the file `bin/build_release.sh` to make a zipfile like the ones you would download (and like what the bin files require in order to run daemons).

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Message-passing-implementation.md
----------------------------------------------------------------------
diff --git a/docs/Message-passing-implementation.md b/docs/Message-passing-implementation.md
new file mode 100644
index 0000000..f22a5aa
--- /dev/null
+++ b/docs/Message-passing-implementation.md
@@ -0,0 +1,28 @@
+---
+layout: documentation
+---
+(Note: this walkthrough is out of date as of 0.8.0. 0.8.0 revamped the message passing infrastructure to be based on the Disruptor)
+
+This page walks through how emitting and transferring tuples works in Storm.
+
+- Worker is responsible for message transfer
+   - `refresh-connections` is called every "task.refresh.poll.secs" or whenever assignment in ZK changes. It manages connections to other workers and maintains a mapping from task -> worker [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L123)
+   - Provides a "transfer function" that is used by tasks to send tuples to other tasks. The transfer function takes in a task id and a tuple, and it serializes the tuple and puts it onto a "transfer queue". There is a single transfer queue for each worker. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L56)
+   - The serializer is thread-safe [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/jvm/backtype/storm/serialization/KryoTupleSerializer.java#L26)
+   - The worker has a single thread which drains the transfer queue and sends the messages to other workers [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L185)
+   - Message sending happens through this protocol: [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/messaging/protocol.clj)
+   - The implementation for distributed mode uses ZeroMQ [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/messaging/zmq.clj)
+   - The implementation for local mode uses in-memory Java queues (so that it's easy to use Storm locally without needing to get ZeroMQ installed) [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/messaging/local.clj)
+- Receiving messages in tasks works differently in local mode and distributed mode
+   - In local mode, the tuple is sent directly to an in-memory queue for the receiving task [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/messaging/local.clj#L21)
+   - In distributed mode, each worker listens on a single TCP port for incoming messages and then routes those messages in-memory to tasks. The TCP port is called a "virtual port", because it receives [task id, message] and then routes it to the actual task. [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/worker.clj#L204)
+      - The virtual port implementation is here: [code](https://github.com/apache/incubator-storm/blob/master/src/clj/zilch/virtual_port.clj)
+      - Tasks listen on an in-memory ZeroMQ port for messages from the virtual port [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/task.clj#L201)
+        - Bolts listen here: [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/task.clj#L489)
+        - Spouts listen here: [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/task.clj#L382)
+- Tasks are responsible for message routing. A tuple is emitted either to a direct stream (where the task id is specified) or a regular stream. In direct streams, the message is only sent if that bolt subscribes to that direct stream. In regular streams, the stream grouping functions are used to determine the task ids to send the tuple to.
+  - Tasks have a routing map from {stream id} -> {component id} -> {stream grouping function} [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/task.clj#L198)
+  - The "tasks-fn" returns the task ids to send the tuples to for either regular stream emit or direct stream emit [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/task.clj#L207)
+  - After getting the output task ids, bolts and spouts use the transfer-fn provided by the worker to actually transfer the tuples
+      - Bolt transfer code here: [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/task.clj#L429)
+      - Spout transfer code here: [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/task.clj#L329)

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Metrics.md
----------------------------------------------------------------------
diff --git a/docs/Metrics.md b/docs/Metrics.md
new file mode 100644
index 0000000..f43f8c7
--- /dev/null
+++ b/docs/Metrics.md
@@ -0,0 +1,34 @@
+---
+layout: documentation
+---
+Storm exposes a metrics interface to report summary statistics across the full topology.
+It's used internally to track the numbers you see in the Nimbus UI console: counts of executes and acks; average process latency per bolt; worker heap usage; and so forth.
+
+### Metric Types
+
+Metrics have to implement just one method, `getValueAndReset` -- do any remaining work to find the summary value, and reset back to an initial state. For example, the MeanReducer divides the running total by its running count to find the mean, then initializes both values back to zero.
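+
+As a sketch, assuming the `IMetric` interface from `backtype.storm.metric.api`, a trivial maximum-tracking metric might look like:
+
+```java
+public class MaxMetric implements IMetric {
+    long _max = Long.MIN_VALUE;
+
+    public void update(long value) { // our own helper, not part of IMetric
+        if (value > _max) _max = value;
+    }
+
+    public Object getValueAndReset() {
+        long ret = _max;        // report the max seen during this time bucket
+        _max = Long.MIN_VALUE;  // reset back to the initial state
+        return ret;
+    }
+}
+```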
+
+Storm gives you these metric types:
+
+* [AssignableMetric](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/metric/api/AssignableMetric.java) -- set the metric to the explicit value you supply. Useful if it's an external value or in the case that you are already calculating the summary statistic yourself.
+* [CombinedMetric](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/metric/api/CombinedMetric.java) -- generic interface for metrics that can be updated associatively. 
+* [CountMetric](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/metric/api/CountMetric.java) -- a running total of the supplied values. Call `incr()` to increment by one, `incrBy(n)` to add/subtract the given number.
+  - [MultiCountMetric](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/metric/api/MultiCountMetric.java) -- a hashmap of count metrics.
+* [ReducedMetric](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/metric/api/ReducedMetric.java)
+  - [MeanReducer](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/metric/api/MeanReducer.java) -- track a running average of values given to its `reduce()` method. (It accepts `Double`, `Integer` or `Long` values, and maintains the internal average as a `Double`.) Despite his reputation, the MeanReducer is actually a pretty nice guy in person.
+  - [MultiReducedMetric](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/metric/api/MultiReducedMetric.java) -- a hashmap of reduced metrics.
+
+
+### Metric Consumer
+
+
+### Build your own metric
+
+
+
+### Builtin Metrics
+
+The [builtin metrics](https://github.com/apache/incubator-storm/blob/46c3ba7/storm-core/src/clj/backtype/storm/daemon/builtin_metrics.clj) instrument Storm itself.
+
+[builtin_metrics.clj](https://github.com/apache/incubator-storm/blob/46c3ba7/storm-core/src/clj/backtype/storm/daemon/builtin_metrics.clj) sets up data structures for the built-in metrics, and facade methods that the other framework components can use to update them. The metrics themselves are calculated in the calling code -- see for example [`ack-spout-msg`](https://github.com/apache/incubator-storm/blob/46c3ba7/storm-core/src/clj/backtype/storm/daemon/executor.clj#L358) in `clj/b/s/daemon/executor.clj`
+


[06/16] storm git commit: STORM-1617: Release Specific Documentation 0.9.x

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/assets/css/bootstrap.css
----------------------------------------------------------------------
diff --git a/docs/assets/css/bootstrap.css b/docs/assets/css/bootstrap.css
index c6f3d21..680e768 100644
--- a/docs/assets/css/bootstrap.css
+++ b/docs/assets/css/bootstrap.css
@@ -1,10 +1,9 @@
 /*!
- * Bootstrap v3.3.1 (http://getbootstrap.com)
- * Copyright 2011-2014 Twitter, Inc.
+ * Bootstrap v3.3.5 (http://getbootstrap.com)
+ * Copyright 2011-2015 Twitter, Inc.
  * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
  */
-
-/*! normalize.css v3.0.2 | MIT License | git.io/normalize */
+/*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */
 html {
   font-family: sans-serif;
   -webkit-text-size-adjust: 100%;
@@ -239,9 +238,6 @@ th {
   h3 {
     page-break-after: avoid;
   }
-  select {
-    background: #fff !important;
-  }
   .navbar {
     display: none;
   }
@@ -268,7 +264,7 @@ th {
   font-family: 'Glyphicons Halflings';
 
   src: url('../fonts/glyphicons-halflings-regular.eot');
-  src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg');
+  src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg');
 }
 .glyphicon {
   position: relative;
@@ -883,6 +879,192 @@ th {
 .glyphicon-tree-deciduous:before {
   content: "\e200";
 }
+.glyphicon-cd:before {
+  content: "\e201";
+}
+.glyphicon-save-file:before {
+  content: "\e202";
+}
+.glyphicon-open-file:before {
+  content: "\e203";
+}
+.glyphicon-level-up:before {
+  content: "\e204";
+}
+.glyphicon-copy:before {
+  content: "\e205";
+}
+.glyphicon-paste:before {
+  content: "\e206";
+}
+.glyphicon-alert:before {
+  content: "\e209";
+}
+.glyphicon-equalizer:before {
+  content: "\e210";
+}
+.glyphicon-king:before {
+  content: "\e211";
+}
+.glyphicon-queen:before {
+  content: "\e212";
+}
+.glyphicon-pawn:before {
+  content: "\e213";
+}
+.glyphicon-bishop:before {
+  content: "\e214";
+}
+.glyphicon-knight:before {
+  content: "\e215";
+}
+.glyphicon-baby-formula:before {
+  content: "\e216";
+}
+.glyphicon-tent:before {
+  content: "\26fa";
+}
+.glyphicon-blackboard:before {
+  content: "\e218";
+}
+.glyphicon-bed:before {
+  content: "\e219";
+}
+.glyphicon-apple:before {
+  content: "\f8ff";
+}
+.glyphicon-erase:before {
+  content: "\e221";
+}
+.glyphicon-hourglass:before {
+  content: "\231b";
+}
+.glyphicon-lamp:before {
+  content: "\e223";
+}
+.glyphicon-duplicate:before {
+  content: "\e224";
+}
+.glyphicon-piggy-bank:before {
+  content: "\e225";
+}
+.glyphicon-scissors:before {
+  content: "\e226";
+}
+.glyphicon-bitcoin:before {
+  content: "\e227";
+}
+.glyphicon-btc:before {
+  content: "\e227";
+}
+.glyphicon-xbt:before {
+  content: "\e227";
+}
+.glyphicon-yen:before {
+  content: "\00a5";
+}
+.glyphicon-jpy:before {
+  content: "\00a5";
+}
+.glyphicon-ruble:before {
+  content: "\20bd";
+}
+.glyphicon-rub:before {
+  content: "\20bd";
+}
+.glyphicon-scale:before {
+  content: "\e230";
+}
+.glyphicon-ice-lolly:before {
+  content: "\e231";
+}
+.glyphicon-ice-lolly-tasted:before {
+  content: "\e232";
+}
+.glyphicon-education:before {
+  content: "\e233";
+}
+.glyphicon-option-horizontal:before {
+  content: "\e234";
+}
+.glyphicon-option-vertical:before {
+  content: "\e235";
+}
+.glyphicon-menu-hamburger:before {
+  content: "\e236";
+}
+.glyphicon-modal-window:before {
+  content: "\e237";
+}
+.glyphicon-oil:before {
+  content: "\e238";
+}
+.glyphicon-grain:before {
+  content: "\e239";
+}
+.glyphicon-sunglasses:before {
+  content: "\e240";
+}
+.glyphicon-text-size:before {
+  content: "\e241";
+}
+.glyphicon-text-color:before {
+  content: "\e242";
+}
+.glyphicon-text-background:before {
+  content: "\e243";
+}
+.glyphicon-object-align-top:before {
+  content: "\e244";
+}
+.glyphicon-object-align-bottom:before {
+  content: "\e245";
+}
+.glyphicon-object-align-horizontal:before {
+  content: "\e246";
+}
+.glyphicon-object-align-left:before {
+  content: "\e247";
+}
+.glyphicon-object-align-vertical:before {
+  content: "\e248";
+}
+.glyphicon-object-align-right:before {
+  content: "\e249";
+}
+.glyphicon-triangle-right:before {
+  content: "\e250";
+}
+.glyphicon-triangle-left:before {
+  content: "\e251";
+}
+.glyphicon-triangle-bottom:before {
+  content: "\e252";
+}
+.glyphicon-triangle-top:before {
+  content: "\e253";
+}
+.glyphicon-console:before {
+  content: "\e254";
+}
+.glyphicon-superscript:before {
+  content: "\e255";
+}
+.glyphicon-subscript:before {
+  content: "\e256";
+}
+.glyphicon-menu-left:before {
+  content: "\e257";
+}
+.glyphicon-menu-right:before {
+  content: "\e258";
+}
+.glyphicon-menu-down:before {
+  content: "\e259";
+}
+.glyphicon-menu-up:before {
+  content: "\e260";
+}
 * {
   -webkit-box-sizing: border-box;
      -moz-box-sizing: border-box;
@@ -987,6 +1169,9 @@ hr {
   overflow: visible;
   clip: auto;
 }
+[role="button"] {
+  cursor: pointer;
+}
 h1,
 h2,
 h3,
@@ -1155,62 +1340,72 @@ mark,
 .text-primary {
   color: #337ab7;
 }
-a.text-primary:hover {
+a.text-primary:hover,
+a.text-primary:focus {
   color: #286090;
 }
 .text-success {
   color: #3c763d;
 }
-a.text-success:hover {
+a.text-success:hover,
+a.text-success:focus {
   color: #2b542c;
 }
 .text-info {
   color: #31708f;
 }
-a.text-info:hover {
+a.text-info:hover,
+a.text-info:focus {
   color: #245269;
 }
 .text-warning {
   color: #8a6d3b;
 }
-a.text-warning:hover {
+a.text-warning:hover,
+a.text-warning:focus {
   color: #66512c;
 }
 .text-danger {
   color: #a94442;
 }
-a.text-danger:hover {
+a.text-danger:hover,
+a.text-danger:focus {
   color: #843534;
 }
 .bg-primary {
   color: #fff;
   background-color: #337ab7;
 }
-a.bg-primary:hover {
+a.bg-primary:hover,
+a.bg-primary:focus {
   background-color: #286090;
 }
 .bg-success {
   background-color: #dff0d8;
 }
-a.bg-success:hover {
+a.bg-success:hover,
+a.bg-success:focus {
   background-color: #c1e2b3;
 }
 .bg-info {
   background-color: #d9edf7;
 }
-a.bg-info:hover {
+a.bg-info:hover,
+a.bg-info:focus {
   background-color: #afd9ee;
 }
 .bg-warning {
   background-color: #fcf8e3;
 }
-a.bg-warning:hover {
+a.bg-warning:hover,
+a.bg-warning:focus {
   background-color: #f7ecb5;
 }
 .bg-danger {
   background-color: #f2dede;
 }
-a.bg-danger:hover {
+a.bg-danger:hover,
+a.bg-danger:focus {
   background-color: #e4b9b9;
 }
 .page-header {
@@ -2123,7 +2318,7 @@ th {
 .table-bordered > thead > tr > td {
   border-bottom-width: 2px;
 }
-.table-striped > tbody > tr:nth-child(odd) {
+.table-striped > tbody > tr:nth-of-type(odd) {
   background-color: #f9f9f9;
 }
 .table-hover > tbody > tr:hover {
@@ -2390,10 +2585,13 @@ output {
 .form-control[disabled],
 .form-control[readonly],
 fieldset[disabled] .form-control {
-  cursor: not-allowed;
   background-color: #eee;
   opacity: 1;
 }
+.form-control[disabled],
+fieldset[disabled] .form-control {
+  cursor: not-allowed;
+}
 textarea.form-control {
   height: auto;
 }
@@ -2401,22 +2599,30 @@ input[type="search"] {
   -webkit-appearance: none;
 }
 @media screen and (-webkit-min-device-pixel-ratio: 0) {
-  input[type="date"],
-  input[type="time"],
-  input[type="datetime-local"],
-  input[type="month"] {
+  input[type="date"].form-control,
+  input[type="time"].form-control,
+  input[type="datetime-local"].form-control,
+  input[type="month"].form-control {
     line-height: 34px;
   }
   input[type="date"].input-sm,
   input[type="time"].input-sm,
   input[type="datetime-local"].input-sm,
-  input[type="month"].input-sm {
+  input[type="month"].input-sm,
+  .input-group-sm input[type="date"],
+  .input-group-sm input[type="time"],
+  .input-group-sm input[type="datetime-local"],
+  .input-group-sm input[type="month"] {
     line-height: 30px;
   }
   input[type="date"].input-lg,
   input[type="time"].input-lg,
   input[type="datetime-local"].input-lg,
-  input[type="month"].input-lg {
+  input[type="month"].input-lg,
+  .input-group-lg input[type="date"],
+  .input-group-lg input[type="time"],
+  .input-group-lg input[type="datetime-local"],
+  .input-group-lg input[type="month"] {
     line-height: 46px;
   }
 }
@@ -2452,6 +2658,7 @@ input[type="search"] {
 }
 .radio-inline,
 .checkbox-inline {
+  position: relative;
   display: inline-block;
   padding-left: 20px;
   margin-bottom: 0;
@@ -2485,6 +2692,7 @@ fieldset[disabled] .checkbox label {
   cursor: not-allowed;
 }
 .form-control-static {
+  min-height: 34px;
   padding-top: 7px;
   padding-bottom: 7px;
   margin-bottom: 0;
@@ -2494,44 +2702,80 @@ fieldset[disabled] .checkbox label {
   padding-right: 0;
   padding-left: 0;
 }
-.input-sm,
-.form-group-sm .form-control {
+.input-sm {
   height: 30px;
   padding: 5px 10px;
   font-size: 12px;
   line-height: 1.5;
   border-radius: 3px;
 }
-select.input-sm,
-select.form-group-sm .form-control {
+select.input-sm {
   height: 30px;
   line-height: 30px;
 }
 textarea.input-sm,
-textarea.form-group-sm .form-control,
-select[multiple].input-sm,
-select[multiple].form-group-sm .form-control {
+select[multiple].input-sm {
   height: auto;
 }
-.input-lg,
-.form-group-lg .form-control {
+.form-group-sm .form-control {
+  height: 30px;
+  padding: 5px 10px;
+  font-size: 12px;
+  line-height: 1.5;
+  border-radius: 3px;
+}
+.form-group-sm select.form-control {
+  height: 30px;
+  line-height: 30px;
+}
+.form-group-sm textarea.form-control,
+.form-group-sm select[multiple].form-control {
+  height: auto;
+}
+.form-group-sm .form-control-static {
+  height: 30px;
+  min-height: 32px;
+  padding: 6px 10px;
+  font-size: 12px;
+  line-height: 1.5;
+}
+.input-lg {
   height: 46px;
   padding: 10px 16px;
   font-size: 18px;
-  line-height: 1.33;
+  line-height: 1.3333333;
   border-radius: 6px;
 }
-select.input-lg,
-select.form-group-lg .form-control {
+select.input-lg {
   height: 46px;
   line-height: 46px;
 }
 textarea.input-lg,
-textarea.form-group-lg .form-control,
-select[multiple].input-lg,
-select[multiple].form-group-lg .form-control {
+select[multiple].input-lg {
   height: auto;
 }
+.form-group-lg .form-control {
+  height: 46px;
+  padding: 10px 16px;
+  font-size: 18px;
+  line-height: 1.3333333;
+  border-radius: 6px;
+}
+.form-group-lg select.form-control {
+  height: 46px;
+  line-height: 46px;
+}
+.form-group-lg textarea.form-control,
+.form-group-lg select[multiple].form-control {
+  height: auto;
+}
+.form-group-lg .form-control-static {
+  height: 46px;
+  min-height: 38px;
+  padding: 11px 16px;
+  font-size: 18px;
+  line-height: 1.3333333;
+}
 .has-feedback {
   position: relative;
 }
@@ -2550,12 +2794,16 @@ select[multiple].form-group-lg .form-control {
   text-align: center;
   pointer-events: none;
 }
-.input-lg + .form-control-feedback {
+.input-lg + .form-control-feedback,
+.input-group-lg + .form-control-feedback,
+.form-group-lg .form-control + .form-control-feedback {
   width: 46px;
   height: 46px;
   line-height: 46px;
 }
-.input-sm + .form-control-feedback {
+.input-sm + .form-control-feedback,
+.input-group-sm + .form-control-feedback,
+.form-group-sm .form-control + .form-control-feedback {
   width: 30px;
   height: 30px;
   line-height: 30px;
@@ -2740,12 +2988,14 @@ select[multiple].form-group-lg .form-control {
 }
 @media (min-width: 768px) {
   .form-horizontal .form-group-lg .control-label {
-    padding-top: 14.3px;
+    padding-top: 14.333333px;
+    font-size: 18px;
   }
 }
 @media (min-width: 768px) {
   .form-horizontal .form-group-sm .control-label {
     padding-top: 6px;
+    font-size: 12px;
   }
 }
 .btn {
@@ -2795,21 +3045,32 @@ select[multiple].form-group-lg .form-control {
 .btn.disabled,
 .btn[disabled],
 fieldset[disabled] .btn {
-  pointer-events: none;
   cursor: not-allowed;
   filter: alpha(opacity=65);
   -webkit-box-shadow: none;
           box-shadow: none;
   opacity: .65;
 }
+a.btn.disabled,
+fieldset[disabled] a.btn {
+  pointer-events: none;
+}
 .btn-default {
   color: #333;
   background-color: #fff;
   border-color: #ccc;
 }
-.btn-default:hover,
 .btn-default:focus,
-.btn-default.focus,
+.btn-default.focus {
+  color: #333;
+  background-color: #e6e6e6;
+  border-color: #8c8c8c;
+}
+.btn-default:hover {
+  color: #333;
+  background-color: #e6e6e6;
+  border-color: #adadad;
+}
 .btn-default:active,
 .btn-default.active,
 .open > .dropdown-toggle.btn-default {
@@ -2817,6 +3078,19 @@ fieldset[disabled] .btn {
   background-color: #e6e6e6;
   border-color: #adadad;
 }
+.btn-default:active:hover,
+.btn-default.active:hover,
+.open > .dropdown-toggle.btn-default:hover,
+.btn-default:active:focus,
+.btn-default.active:focus,
+.open > .dropdown-toggle.btn-default:focus,
+.btn-default:active.focus,
+.btn-default.active.focus,
+.open > .dropdown-toggle.btn-default.focus {
+  color: #333;
+  background-color: #d4d4d4;
+  border-color: #8c8c8c;
+}
 .btn-default:active,
 .btn-default.active,
 .open > .dropdown-toggle.btn-default {
@@ -2852,9 +3126,17 @@ fieldset[disabled] .btn-default.active {
   background-color: #337ab7;
   border-color: #2e6da4;
 }
-.btn-primary:hover,
 .btn-primary:focus,
-.btn-primary.focus,
+.btn-primary.focus {
+  color: #fff;
+  background-color: #286090;
+  border-color: #122b40;
+}
+.btn-primary:hover {
+  color: #fff;
+  background-color: #286090;
+  border-color: #204d74;
+}
 .btn-primary:active,
 .btn-primary.active,
 .open > .dropdown-toggle.btn-primary {
@@ -2862,6 +3144,19 @@ fieldset[disabled] .btn-default.active {
   background-color: #286090;
   border-color: #204d74;
 }
+.btn-primary:active:hover,
+.btn-primary.active:hover,
+.open > .dropdown-toggle.btn-primary:hover,
+.btn-primary:active:focus,
+.btn-primary.active:focus,
+.open > .dropdown-toggle.btn-primary:focus,
+.btn-primary:active.focus,
+.btn-primary.active.focus,
+.open > .dropdown-toggle.btn-primary.focus {
+  color: #fff;
+  background-color: #204d74;
+  border-color: #122b40;
+}
 .btn-primary:active,
 .btn-primary.active,
 .open > .dropdown-toggle.btn-primary {
@@ -2897,9 +3192,17 @@ fieldset[disabled] .btn-primary.active {
   background-color: #5cb85c;
   border-color: #4cae4c;
 }
-.btn-success:hover,
 .btn-success:focus,
-.btn-success.focus,
+.btn-success.focus {
+  color: #fff;
+  background-color: #449d44;
+  border-color: #255625;
+}
+.btn-success:hover {
+  color: #fff;
+  background-color: #449d44;
+  border-color: #398439;
+}
 .btn-success:active,
 .btn-success.active,
 .open > .dropdown-toggle.btn-success {
@@ -2907,6 +3210,19 @@ fieldset[disabled] .btn-primary.active {
   background-color: #449d44;
   border-color: #398439;
 }
+.btn-success:active:hover,
+.btn-success.active:hover,
+.open > .dropdown-toggle.btn-success:hover,
+.btn-success:active:focus,
+.btn-success.active:focus,
+.open > .dropdown-toggle.btn-success:focus,
+.btn-success:active.focus,
+.btn-success.active.focus,
+.open > .dropdown-toggle.btn-success.focus {
+  color: #fff;
+  background-color: #398439;
+  border-color: #255625;
+}
 .btn-success:active,
 .btn-success.active,
 .open > .dropdown-toggle.btn-success {
@@ -2942,9 +3258,17 @@ fieldset[disabled] .btn-success.active {
   background-color: #5bc0de;
   border-color: #46b8da;
 }
-.btn-info:hover,
 .btn-info:focus,
-.btn-info.focus,
+.btn-info.focus {
+  color: #fff;
+  background-color: #31b0d5;
+  border-color: #1b6d85;
+}
+.btn-info:hover {
+  color: #fff;
+  background-color: #31b0d5;
+  border-color: #269abc;
+}
 .btn-info:active,
 .btn-info.active,
 .open > .dropdown-toggle.btn-info {
@@ -2952,6 +3276,19 @@ fieldset[disabled] .btn-success.active {
   background-color: #31b0d5;
   border-color: #269abc;
 }
+.btn-info:active:hover,
+.btn-info.active:hover,
+.open > .dropdown-toggle.btn-info:hover,
+.btn-info:active:focus,
+.btn-info.active:focus,
+.open > .dropdown-toggle.btn-info:focus,
+.btn-info:active.focus,
+.btn-info.active.focus,
+.open > .dropdown-toggle.btn-info.focus {
+  color: #fff;
+  background-color: #269abc;
+  border-color: #1b6d85;
+}
 .btn-info:active,
 .btn-info.active,
 .open > .dropdown-toggle.btn-info {
@@ -2987,9 +3324,17 @@ fieldset[disabled] .btn-info.active {
   background-color: #f0ad4e;
   border-color: #eea236;
 }
-.btn-warning:hover,
 .btn-warning:focus,
-.btn-warning.focus,
+.btn-warning.focus {
+  color: #fff;
+  background-color: #ec971f;
+  border-color: #985f0d;
+}
+.btn-warning:hover {
+  color: #fff;
+  background-color: #ec971f;
+  border-color: #d58512;
+}
 .btn-warning:active,
 .btn-warning.active,
 .open > .dropdown-toggle.btn-warning {
@@ -2997,6 +3342,19 @@ fieldset[disabled] .btn-info.active {
   background-color: #ec971f;
   border-color: #d58512;
 }
+.btn-warning:active:hover,
+.btn-warning.active:hover,
+.open > .dropdown-toggle.btn-warning:hover,
+.btn-warning:active:focus,
+.btn-warning.active:focus,
+.open > .dropdown-toggle.btn-warning:focus,
+.btn-warning:active.focus,
+.btn-warning.active.focus,
+.open > .dropdown-toggle.btn-warning.focus {
+  color: #fff;
+  background-color: #d58512;
+  border-color: #985f0d;
+}
 .btn-warning:active,
 .btn-warning.active,
 .open > .dropdown-toggle.btn-warning {
@@ -3032,9 +3390,17 @@ fieldset[disabled] .btn-warning.active {
   background-color: #d9534f;
   border-color: #d43f3a;
 }
-.btn-danger:hover,
 .btn-danger:focus,
-.btn-danger.focus,
+.btn-danger.focus {
+  color: #fff;
+  background-color: #c9302c;
+  border-color: #761c19;
+}
+.btn-danger:hover {
+  color: #fff;
+  background-color: #c9302c;
+  border-color: #ac2925;
+}
 .btn-danger:active,
 .btn-danger.active,
 .open > .dropdown-toggle.btn-danger {
@@ -3042,6 +3408,19 @@ fieldset[disabled] .btn-warning.active {
   background-color: #c9302c;
   border-color: #ac2925;
 }
+.btn-danger:active:hover,
+.btn-danger.active:hover,
+.open > .dropdown-toggle.btn-danger:hover,
+.btn-danger:active:focus,
+.btn-danger.active:focus,
+.open > .dropdown-toggle.btn-danger:focus,
+.btn-danger:active.focus,
+.btn-danger.active.focus,
+.open > .dropdown-toggle.btn-danger.focus {
+  color: #fff;
+  background-color: #ac2925;
+  border-color: #761c19;
+}
 .btn-danger:active,
 .btn-danger.active,
 .open > .dropdown-toggle.btn-danger {
@@ -3109,7 +3488,7 @@ fieldset[disabled] .btn-link:focus {
 .btn-group-lg > .btn {
   padding: 10px 16px;
   font-size: 18px;
-  line-height: 1.33;
+  line-height: 1.3333333;
   border-radius: 6px;
 }
 .btn-sm,
@@ -3149,11 +3528,9 @@ input[type="button"].btn-block {
 }
 .collapse {
   display: none;
-  visibility: hidden;
 }
 .collapse.in {
   display: block;
-  visibility: visible;
 }
 tr.collapse.in {
   display: table-row;
@@ -3181,10 +3558,12 @@ tbody.collapse.in {
   height: 0;
   margin-left: 2px;
   vertical-align: middle;
-  border-top: 4px solid;
+  border-top: 4px dashed;
+  border-top: 4px solid \9;
   border-right: 4px solid transparent;
   border-left: 4px solid transparent;
 }
+.dropup,
 .dropdown {
   position: relative;
 }
@@ -3297,13 +3676,14 @@ tbody.collapse.in {
 .navbar-fixed-bottom .dropdown .caret {
   content: "";
   border-top: 0;
-  border-bottom: 4px solid;
+  border-bottom: 4px dashed;
+  border-bottom: 4px solid \9;
 }
 .dropup .dropdown-menu,
 .navbar-fixed-bottom .dropdown .dropdown-menu {
   top: auto;
   bottom: 100%;
-  margin-bottom: 1px;
+  margin-bottom: 2px;
 }
 @media (min-width: 768px) {
   .navbar-right .dropdown-menu {
@@ -3345,6 +3725,7 @@ tbody.collapse.in {
 .btn-toolbar {
   margin-left: -5px;
 }
+.btn-toolbar .btn,
 .btn-toolbar .btn-group,
 .btn-toolbar .input-group {
   float: left;
@@ -3375,12 +3756,12 @@ tbody.collapse.in {
 .btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {
   border-radius: 0;
 }
-.btn-group > .btn-group:first-child > .btn:last-child,
-.btn-group > .btn-group:first-child > .dropdown-toggle {
+.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child,
+.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle {
   border-top-right-radius: 0;
   border-bottom-right-radius: 0;
 }
-.btn-group > .btn-group:last-child > .btn:first-child {
+.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {
   border-top-left-radius: 0;
   border-bottom-left-radius: 0;
 }
@@ -3506,7 +3887,7 @@ tbody.collapse.in {
   height: 46px;
   padding: 10px 16px;
   font-size: 18px;
-  line-height: 1.33;
+  line-height: 1.3333333;
   border-radius: 6px;
 }
 select.input-group-lg > .form-control,
@@ -3635,6 +4016,7 @@ select[multiple].input-group-sm > .input-group-btn > .btn {
 }
 .input-group-btn:last-child > .btn,
 .input-group-btn:last-child > .btn-group {
+  z-index: 2;
   margin-left: -1px;
 }
 .nav {
@@ -3820,11 +4202,9 @@ select[multiple].input-group-sm > .input-group-btn > .btn {
 }
 .tab-content > .tab-pane {
   display: none;
-  visibility: hidden;
 }
 .tab-content > .active {
   display: block;
-  visibility: visible;
 }
 .nav-tabs .dropdown-menu {
   margin-top: -1px;
@@ -3871,7 +4251,6 @@ select[multiple].input-group-sm > .input-group-btn > .btn {
     height: auto !important;
     padding-bottom: 0;
     overflow: visible !important;
-    visibility: visible !important;
   }
   .navbar-collapse.in {
     overflow-y: visible;
@@ -4120,6 +4499,7 @@ select[multiple].input-group-sm > .input-group-btn > .btn {
   border-top-right-radius: 0;
 }
 .navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {
+  margin-bottom: 0;
   border-top-left-radius: 4px;
   border-top-right-radius: 4px;
   border-bottom-right-radius: 0;
@@ -4412,6 +4792,7 @@ fieldset[disabled] .navbar-inverse .btn-link:focus {
 .pagination > li > span:hover,
 .pagination > li > a:focus,
 .pagination > li > span:focus {
+  z-index: 3;
   color: #23527c;
   background-color: #eee;
   border-color: #ddd;
@@ -4443,6 +4824,7 @@ fieldset[disabled] .navbar-inverse .btn-link:focus {
 .pagination-lg > li > span {
   padding: 10px 16px;
   font-size: 18px;
+  line-height: 1.3333333;
 }
 .pagination-lg > li:first-child > a,
 .pagination-lg > li:first-child > span {
@@ -4458,6 +4840,7 @@ fieldset[disabled] .navbar-inverse .btn-link:focus {
 .pagination-sm > li > span {
   padding: 5px 10px;
   font-size: 12px;
+  line-height: 1.5;
 }
 .pagination-sm > li:first-child > a,
 .pagination-sm > li:first-child > span {
@@ -4584,7 +4967,7 @@ a.label:focus {
   color: #fff;
   text-align: center;
   white-space: nowrap;
-  vertical-align: baseline;
+  vertical-align: middle;
   background-color: #777;
   border-radius: 10px;
 }
@@ -4595,7 +4978,8 @@ a.label:focus {
   position: relative;
   top: -1px;
 }
-.btn-xs .badge {
+.btn-xs .badge,
+.btn-group-xs > .btn .badge {
   top: 0;
   padding: 1px 5px;
 }
@@ -4620,7 +5004,8 @@ a.badge:focus {
   margin-left: 3px;
 }
 .jumbotron {
-  padding: 30px 15px;
+  padding-top: 30px;
+  padding-bottom: 30px;
   margin-bottom: 30px;
   color: inherit;
   background-color: #eee;
@@ -4646,7 +5031,8 @@ a.badge:focus {
 }
 @media screen and (min-width: 768px) {
   .jumbotron {
-    padding: 48px 0;
+    padding-top: 48px;
+    padding-bottom: 48px;
   }
   .container .jumbotron,
   .container-fluid .jumbotron {
@@ -4859,6 +5245,20 @@ a.thumbnail.active {
 .media:first-child {
   margin-top: 0;
 }
+.media,
+.media-body {
+  overflow: hidden;
+  zoom: 1;
+}
+.media-body {
+  width: 10000px;
+}
+.media-object {
+  display: block;
+}
+.media-object.img-thumbnail {
+  max-width: none;
+}
 .media-right,
 .media > .pull-right {
   padding-left: 10px;
@@ -4908,18 +5308,26 @@ a.thumbnail.active {
   border-bottom-right-radius: 4px;
   border-bottom-left-radius: 4px;
 }
-a.list-group-item {
+a.list-group-item,
+button.list-group-item {
   color: #555;
 }
-a.list-group-item .list-group-item-heading {
+a.list-group-item .list-group-item-heading,
+button.list-group-item .list-group-item-heading {
   color: #333;
 }
 a.list-group-item:hover,
-a.list-group-item:focus {
+button.list-group-item:hover,
+a.list-group-item:focus,
+button.list-group-item:focus {
   color: #555;
   text-decoration: none;
   background-color: #f5f5f5;
 }
+button.list-group-item {
+  width: 100%;
+  text-align: left;
+}
 .list-group-item.disabled,
 .list-group-item.disabled:hover,
 .list-group-item.disabled:focus {
@@ -4965,20 +5373,27 @@ a.list-group-item:focus {
   color: #3c763d;
   background-color: #dff0d8;
 }
-a.list-group-item-success {
+a.list-group-item-success,
+button.list-group-item-success {
   color: #3c763d;
 }
-a.list-group-item-success .list-group-item-heading {
+a.list-group-item-success .list-group-item-heading,
+button.list-group-item-success .list-group-item-heading {
   color: inherit;
 }
 a.list-group-item-success:hover,
-a.list-group-item-success:focus {
+button.list-group-item-success:hover,
+a.list-group-item-success:focus,
+button.list-group-item-success:focus {
   color: #3c763d;
   background-color: #d0e9c6;
 }
 a.list-group-item-success.active,
+button.list-group-item-success.active,
 a.list-group-item-success.active:hover,
-a.list-group-item-success.active:focus {
+button.list-group-item-success.active:hover,
+a.list-group-item-success.active:focus,
+button.list-group-item-success.active:focus {
   color: #fff;
   background-color: #3c763d;
   border-color: #3c763d;
@@ -4987,20 +5402,27 @@ a.list-group-item-success.active:focus {
   color: #31708f;
   background-color: #d9edf7;
 }
-a.list-group-item-info {
+a.list-group-item-info,
+button.list-group-item-info {
   color: #31708f;
 }
-a.list-group-item-info .list-group-item-heading {
+a.list-group-item-info .list-group-item-heading,
+button.list-group-item-info .list-group-item-heading {
   color: inherit;
 }
 a.list-group-item-info:hover,
-a.list-group-item-info:focus {
+button.list-group-item-info:hover,
+a.list-group-item-info:focus,
+button.list-group-item-info:focus {
   color: #31708f;
   background-color: #c4e3f3;
 }
 a.list-group-item-info.active,
+button.list-group-item-info.active,
 a.list-group-item-info.active:hover,
-a.list-group-item-info.active:focus {
+button.list-group-item-info.active:hover,
+a.list-group-item-info.active:focus,
+button.list-group-item-info.active:focus {
   color: #fff;
   background-color: #31708f;
   border-color: #31708f;
@@ -5009,20 +5431,27 @@ a.list-group-item-info.active:focus {
   color: #8a6d3b;
   background-color: #fcf8e3;
 }
-a.list-group-item-warning {
+a.list-group-item-warning,
+button.list-group-item-warning {
   color: #8a6d3b;
 }
-a.list-group-item-warning .list-group-item-heading {
+a.list-group-item-warning .list-group-item-heading,
+button.list-group-item-warning .list-group-item-heading {
   color: inherit;
 }
 a.list-group-item-warning:hover,
-a.list-group-item-warning:focus {
+button.list-group-item-warning:hover,
+a.list-group-item-warning:focus,
+button.list-group-item-warning:focus {
   color: #8a6d3b;
   background-color: #faf2cc;
 }
 a.list-group-item-warning.active,
+button.list-group-item-warning.active,
 a.list-group-item-warning.active:hover,
-a.list-group-item-warning.active:focus {
+button.list-group-item-warning.active:hover,
+a.list-group-item-warning.active:focus,
+button.list-group-item-warning.active:focus {
   color: #fff;
   background-color: #8a6d3b;
   border-color: #8a6d3b;
@@ -5031,20 +5460,27 @@ a.list-group-item-warning.active:focus {
   color: #a94442;
   background-color: #f2dede;
 }
-a.list-group-item-danger {
+a.list-group-item-danger,
+button.list-group-item-danger {
   color: #a94442;
 }
-a.list-group-item-danger .list-group-item-heading {
+a.list-group-item-danger .list-group-item-heading,
+button.list-group-item-danger .list-group-item-heading {
   color: inherit;
 }
 a.list-group-item-danger:hover,
-a.list-group-item-danger:focus {
+button.list-group-item-danger:hover,
+a.list-group-item-danger:focus,
+button.list-group-item-danger:focus {
   color: #a94442;
   background-color: #ebcccc;
 }
 a.list-group-item-danger.active,
+button.list-group-item-danger.active,
 a.list-group-item-danger.active:hover,
-a.list-group-item-danger.active:focus {
+button.list-group-item-danger.active:hover,
+a.list-group-item-danger.active:focus,
+button.list-group-item-danger.active:focus {
   color: #fff;
   background-color: #a94442;
   border-color: #a94442;
@@ -5083,7 +5519,11 @@ a.list-group-item-danger.active:focus {
   font-size: 16px;
   color: inherit;
 }
-.panel-title > a {
+.panel-title > a,
+.panel-title > small,
+.panel-title > .small,
+.panel-title > small > a,
+.panel-title > .small > a {
   color: inherit;
 }
 .panel-footer {
@@ -5114,6 +5554,10 @@ a.list-group-item-danger.active:focus {
   border-bottom-right-radius: 3px;
   border-bottom-left-radius: 3px;
 }
+.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child {
+  border-top-left-radius: 0;
+  border-top-right-radius: 0;
+}
 .panel-heading + .list-group .list-group-item:first-child {
   border-top-width: 0;
 }
@@ -5412,10 +5856,10 @@ a.list-group-item-danger.active:focus {
   height: 100%;
   border: 0;
 }
-.embed-responsive.embed-responsive-16by9 {
+.embed-responsive-16by9 {
   padding-bottom: 56.25%;
 }
-.embed-responsive.embed-responsive-4by3 {
+.embed-responsive-4by3 {
   padding-bottom: 75%;
 }
 .well {
@@ -5474,7 +5918,7 @@ button.close {
   right: 0;
   bottom: 0;
   left: 0;
-  z-index: 1040;
+  z-index: 1050;
   display: none;
   overflow: hidden;
   -webkit-overflow-scrolling: touch;
@@ -5517,10 +5961,12 @@ button.close {
           box-shadow: 0 3px 9px rgba(0, 0, 0, .5);
 }
 .modal-backdrop {
-  position: absolute;
+  position: fixed;
   top: 0;
   right: 0;
+  bottom: 0;
   left: 0;
+  z-index: 1040;
   background-color: #000;
 }
 .modal-backdrop.fade {
@@ -5593,11 +6039,23 @@ button.close {
   display: block;
   font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
   font-size: 12px;
+  font-style: normal;
   font-weight: normal;
-  line-height: 1.4;
-  visibility: visible;
+  line-height: 1.42857143;
+  text-align: left;
+  text-align: start;
+  text-decoration: none;
+  text-shadow: none;
+  text-transform: none;
+  letter-spacing: normal;
+  word-break: normal;
+  word-spacing: normal;
+  word-wrap: normal;
+  white-space: normal;
   filter: alpha(opacity=0);
   opacity: 0;
+
+  line-break: auto;
 }
 .tooltip.in {
   filter: alpha(opacity=90);
@@ -5624,7 +6082,6 @@ button.close {
   padding: 3px 8px;
   color: #fff;
   text-align: center;
-  text-decoration: none;
   background-color: #000;
   border-radius: 4px;
 }
@@ -5701,9 +6158,18 @@ button.close {
   padding: 1px;
   font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
   font-size: 14px;
+  font-style: normal;
   font-weight: normal;
   line-height: 1.42857143;
   text-align: left;
+  text-align: start;
+  text-decoration: none;
+  text-shadow: none;
+  text-transform: none;
+  letter-spacing: normal;
+  word-break: normal;
+  word-spacing: normal;
+  word-wrap: normal;
   white-space: normal;
   background-color: #fff;
   -webkit-background-clip: padding-box;
@@ -5713,6 +6179,8 @@ button.close {
   border-radius: 6px;
   -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, .2);
           box-shadow: 0 5px 10px rgba(0, 0, 0, .2);
+
+  line-break: auto;
 }
 .popover.top {
   margin-top: -10px;
@@ -5840,8 +6308,8 @@ button.close {
 
     -webkit-backface-visibility: hidden;
             backface-visibility: hidden;
-    -webkit-perspective: 1000;
-            perspective: 1000;
+    -webkit-perspective: 1000px;
+            perspective: 1000px;
   }
   .carousel-inner > .item.next,
   .carousel-inner > .item.active.right {
@@ -5940,6 +6408,7 @@ button.close {
   top: 50%;
   z-index: 5;
   display: inline-block;
+  margin-top: -10px;
 }
 .carousel-control .icon-prev,
 .carousel-control .glyphicon-chevron-left {
@@ -5955,8 +6424,8 @@ button.close {
 .carousel-control .icon-next {
   width: 20px;
   height: 20px;
-  margin-top: -10px;
   font-family: serif;
+  line-height: 1;
 }
 .carousel-control .icon-prev:before {
   content: '\2039';
@@ -6114,7 +6583,6 @@ button.close {
 }
 .hidden {
   display: none !important;
-  visibility: hidden !important;
 }
 .affix {
   position: fixed;
@@ -6147,7 +6615,7 @@ button.close {
     display: block !important;
   }
   table.visible-xs {
-    display: table;
+    display: table !important;
   }
   tr.visible-xs {
     display: table-row !important;
@@ -6177,7 +6645,7 @@ button.close {
     display: block !important;
   }
   table.visible-sm {
-    display: table;
+    display: table !important;
   }
   tr.visible-sm {
     display: table-row !important;
@@ -6207,7 +6675,7 @@ button.close {
     display: block !important;
   }
   table.visible-md {
-    display: table;
+    display: table !important;
   }
   tr.visible-md {
     display: table-row !important;
@@ -6237,7 +6705,7 @@ button.close {
     display: block !important;
   }
   table.visible-lg {
-    display: table;
+    display: table !important;
   }
   tr.visible-lg {
     display: table-row !important;
@@ -6290,7 +6758,7 @@ button.close {
     display: block !important;
   }
   table.visible-print {
-    display: table;
+    display: table !important;
   }
   tr.visible-print {
     display: table-row !important;


[08/16] storm git commit: STORM-1617: Release Specific Documentation 0.9.x

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Transactional-topologies.md
----------------------------------------------------------------------
diff --git a/docs/Transactional-topologies.md b/docs/Transactional-topologies.md
new file mode 100644
index 0000000..1271a21
--- /dev/null
+++ b/docs/Transactional-topologies.md
@@ -0,0 +1,359 @@
+---
+layout: documentation
+---
+**NOTE**: Transactional topologies have been deprecated -- use the [Trident](Trident-tutorial.html) framework instead.
+
+__________________________________________________________________________
+
+Storm [guarantees data processing](Guaranteeing-message-processing.html) by providing an at-least-once processing guarantee. The most common question asked about Storm is "Given that tuples can be replayed, how do you do things like counting on top of Storm? Won't you overcount?"
+
+Storm 0.7.0 introduces transactional topologies, which enable you to get exactly-once messaging semantics for pretty much any computation. So you can do things like counting in a fully-accurate, scalable, and fault-tolerant way.
+
+Like [Distributed RPC](Distributed-RPC.html), transactional topologies aren't so much a feature of Storm as they are a higher level abstraction built on top of Storm's primitives of streams, spouts, bolts, and topologies.
+
+This page explains the transactional topology abstraction, how to use the API, and provides details as to its implementation.
+
+## Concepts
+
+Let's build up to Storm's abstraction for transactional topologies one step at a time, starting with the simplest possible approach and iterating on the design until we arrive at Storm's.
+
+### Design 1
+
+The core idea behind transactional topologies is to provide a _strong ordering_ on the processing of data. The simplest manifestation of this, and the first design we'll look at, is processing the tuples one at a time and not moving on to the next tuple until the current tuple has been successfully processed by the topology.
+
+Each tuple is associated with a transaction id. If the tuple fails and needs to be replayed, then it is emitted with the exact same transaction id. A transaction id is an integer that increments for every tuple, so the first tuple will have transaction id `1`, the second id `2`, and so on.
+
+The strong ordering of tuples gives you the capability to achieve exactly-once semantics even in the case of tuple replay. Let's look at an example of how you would do this.
+
+Suppose you want to do a global count of the tuples in the stream. Instead of storing just the count in the database, you instead store the count and the latest transaction id together as one value in the database. When your code updates the count in the db, it should update the count *only if the transaction id in the database differs from the transaction id for the tuple currently being processed*. Consider the two cases:
+
+1. *The transaction id in the database is different than the current transaction id:* Because of the strong ordering of transactions, we know for sure that the current tuple isn't represented in that count. So we can safely increment the count and update the transaction id.
+2. *The transaction id is the same as the current transaction id:* Then we know that this tuple is already incorporated into the count and can skip the update. The tuple must have failed after updating the database but before reporting success back to Storm.
+
+This logic and the strong ordering of transactions ensures that the count in the database will be accurate even if tuples are replayed.  Credit for this trick of storing a transaction id in the database along with the value goes to the Kafka devs, particularly [this design document](http://incubator.apache.org/kafka/07/design.html).
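+
+Here's a minimal sketch of that trick, assuming a hypothetical `Value` class with `long count` and `BigInteger txid` fields and a key-value `DATABASE` store (along the lines of the `TransactionalGlobalCount` example shown later on this page):
+
+```java
+void countTuple(BigInteger tupleTxid) {
+    Value val = DATABASE.get(COUNT_KEY);
+    if (val == null || !val.txid.equals(tupleTxid)) {
+        // case 1: a different (or no) txid is stored, so this tuple hasn't
+        // been counted yet -- increment the count and record the txid
+        Value newval = new Value();
+        newval.count = (val == null ? 0 : val.count) + 1;
+        newval.txid = tupleTxid;
+        DATABASE.put(COUNT_KEY, newval);
+    }
+    // case 2: the stored txid matches, meaning the increment already
+    // happened before success was reported -- skip the update
+}
+```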
+
+Furthermore, notice that the topology can safely update many sources of state in the same transaction and achieve exactly-once semantics. If there's a failure, any updates that already succeeded will be skipped on the retry, and any updates that failed will properly retry. For example, if you were processing a stream of tweeted urls, you could update a database that stores a tweet count for each url as well as a database that stores a tweet count for each domain.
+
+There is a significant problem, though, with this design of processing one tuple at a time. Having to wait for each tuple to be _completely processed_ before moving on to the next one is horribly inefficient. It entails a huge number of database calls (at least one per tuple), and this design makes very little use of the parallelization capabilities of Storm. So it isn't very scalable.
+
+### Design 2
+
+Instead of processing one tuple at a time, a better approach is to process a batch of tuples for each transaction. So if you're doing a global count, you would increment the count by the number of tuples in the entire batch. If a batch fails, you replay the exact batch that failed. Instead of assigning a transaction id to each tuple, you assign a transaction id to each batch, and the processing of the batches is strongly ordered. Here's a diagram of this design:
+
+![Storm cluster](images/transactional-batches.png)
+
+So if you're processing 1000 tuples per batch, your application will do 1000x fewer database operations than design 1. Additionally, it takes advantage of Storm's parallelization capabilities, as the computation for each batch can be parallelized.
+
+While this design is significantly better than design 1, it's still not as resource-efficient as it could be. The workers in the topology spend a lot of time sitting idle waiting for the other portions of the computation to finish. For example, in a topology like this:
+
+![Storm cluster](images/transactional-design-2.png)
+
+After bolt 1 finishes its portion of the processing, it will be idle until the rest of the bolts finish and the next batch can be emitted from the spout.
+
+### Design 3 (Storm's design)
+
+A key realization is that not all the work for processing batches of tuples needs to be strongly ordered. For example, when computing a global count, there are two parts to the computation:
+
+1. Computing the partial count for the batch
+2. Updating the global count in the database with the partial count
+
+The computation of #2 needs to be strongly ordered across the batches, but there's no reason you shouldn't be able to _pipeline_ the computation of the batches by computing #1 for many batches in parallel. So while batch 1 is working on updating the database, batches 2 through 10 can compute their partial counts.
+
+Storm accomplishes this distinction by breaking the computation of a batch into two phases:
+
+1. The processing phase: this is the phase that can be done in parallel for many batches
+2. The commit phase: The commit phases for batches are strongly ordered. So the commit for batch 2 is not done until the commit for batch 1 has been successful.
+
+The two phases together are called a "transaction". Many batches can be in the processing phase at a given moment, but only one batch can be in the commit phase. If there's any failure in the processing or commit phase for a batch, the entire transaction is replayed (both phases).
+
+## Design details
+
+When using transactional topologies, Storm does the following for you:
+
+1. *Manages state:* Storm stores in Zookeeper all the state necessary to do transactional topologies. This includes the current transaction id as well as the metadata defining the parameters for each batch.
+2. *Coordinates the transactions:* Storm will manage everything necessary to determine which transactions should be processing or committing at any point.
+3. *Fault detection:* Storm leverages the acking framework to efficiently determine when a batch has been successfully processed, successfully committed, or has failed. Storm will then replay batches appropriately. You don't have to do any acking or anchoring -- Storm manages all of this for you.
+4. *First class batch processing API*: Storm layers an API on top of regular bolts to allow for batch processing of tuples. Storm manages all the coordination for determining when a task has received all the tuples for that particular transaction. Storm will also take care of cleaning up any accumulated state for each transaction (like the partial counts).
+
+Finally, another thing to note is that transactional topologies require a source queue that can replay an exact batch of messages. Technologies like [Kestrel](https://github.com/robey/kestrel) can't do this. [Apache Kafka](http://incubator.apache.org/kafka/index.html) is a perfect fit for this kind of spout, and [storm-kafka](https://github.com/nathanmarz/storm-contrib/tree/master/storm-kafka) in [storm-contrib](https://github.com/nathanmarz/storm-contrib) contains a transactional spout implementation for Kafka.
+
+## The basics through example
+
+You build transactional topologies by using [TransactionalTopologyBuilder](javadocs/backtype/storm/transactional/TransactionalTopologyBuilder.html). Here's the transactional topology definition for a topology that computes the global count of tuples from the input stream. This code comes from [TransactionalGlobalCount](https://github.com/nathanmarz/storm-starter/blob/master/src/jvm/storm/starter/TransactionalGlobalCount.java) in storm-starter.
+
+```java
+MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
+TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 3);
+builder.setBolt("partial-count", new BatchCount(), 5)
+        .shuffleGrouping("spout");
+builder.setBolt("sum", new UpdateGlobalCount())
+        .globalGrouping("partial-count");
+```
+
+`TransactionalTopologyBuilder` takes as input in the constructor an id for the transactional topology, an id for the spout within the topology, a transactional spout, and optionally the parallelism for the transactional spout. The id for the transactional topology is used to store state about the progress of the topology in Zookeeper, so that if you restart the topology it will continue where it left off.
+
+A transactional topology has a single `TransactionalSpout` that is defined in the constructor of `TransactionalTopologyBuilder`. In this example, `MemoryTransactionalSpout` is used, which reads in data from an in-memory partitioned source of data (the `DATA` variable). The second argument defines the fields for the data, and the third argument specifies the maximum number of tuples to emit from each partition per batch of tuples. The interface for defining your own transactional spouts is discussed later on in this tutorial.
+
+Now on to the bolts. This topology parallelizes the computation of the global count. The first bolt, `BatchCount`, randomly partitions the input stream using a shuffle grouping and emits the count for each partition. The second bolt, `UpdateGlobalCount`, does a global grouping and sums together the partial counts to get the count for the batch. It then updates the global count in the database if necessary.
+
+Here's the definition of `BatchCount`:
+
+```java
+public static class BatchCount extends BaseBatchBolt {
+    Object _id;
+    BatchOutputCollector _collector;
+
+    int _count = 0;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
+        _collector = collector;
+        _id = id;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+        _count++;
+    }
+
+    @Override
+    public void finishBatch() {
+        _collector.emit(new Values(_id, _count));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("id", "count"));
+    }
+}
+```
+
+A new instance of this object is created for every batch that's being processed. The actual bolt this runs within is called [BatchBoltExecutor](https://github.com/apache/incubator-storm/blob/0.7.0/src/jvm/backtype/storm/coordination/BatchBoltExecutor.java) and manages the creation and cleanup for these objects.
+
+The `prepare` method parameterizes this batch bolt with the Storm config, the topology context, an output collector, and the id for this batch of tuples. In the case of transactional topologies, the id will be a [TransactionAttempt](javadocs/backtype/storm/transactional/TransactionAttempt.html) object. The batch bolt abstraction can be used in Distributed RPC as well which uses a different type of id for the batches. `BatchBolt` can actually be parameterized with the type of the id, so if you only intend to use the batch bolt for transactional topologies, you can extend `BaseTransactionalBolt` which has this definition:
+
+```java
+public abstract class BaseTransactionalBolt extends BaseBatchBolt<TransactionAttempt> {
+}
+```
+
+All tuples emitted within a transactional topology must have the `TransactionAttempt` as the first field of the tuple. This lets Storm identify which tuples belong to which batches. So when you emit tuples you need to make sure to meet this requirement.
+
+The `TransactionAttempt` contains two values: the "transaction id" and the "attempt id". The "transaction id" is the unique id chosen for this batch and is the same no matter how many times the batch is replayed. The "attempt id" is a unique id for this particular batch of tuples and lets Storm distinguish tuples from different emissions of the same batch. Without the attempt id, Storm could confuse a replay of a batch with tuples from a prior time that batch was emitted. This would be disastrous.
+
+The transaction id increases by 1 for every batch emitted. So the first batch has id "1", the second has id "2", and so on.
+
+The `execute` method is called for every tuple in the batch. You should accumulate state for the batch in a local instance variable every time this method is called. The `BatchCount` bolt increments a local counter variable for every tuple.
+
+Finally, `finishBatch` is called when the task has received all tuples intended for it for this particular batch. `BatchCount` emits the partial count to the output stream when this method is called.
+
+Here's the definition of `UpdateGlobalCount`:
+
+```java
+public static class UpdateGlobalCount extends BaseTransactionalBolt implements ICommitter {
+    TransactionAttempt _attempt;
+    BatchOutputCollector _collector;
+
+    int _sum = 0;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, TransactionAttempt attempt) {
+        _collector = collector;
+        _attempt = attempt;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+        _sum+=tuple.getInteger(1);
+    }
+
+    @Override
+    public void finishBatch() {
+        Value val = DATABASE.get(GLOBAL_COUNT_KEY);
+        Value newval;
+        if(val == null || !val.txid.equals(_attempt.getTransactionId())) {
+            newval = new Value();
+            newval.txid = _attempt.getTransactionId();
+            if(val==null) {
+                newval.count = _sum;
+            } else {
+                newval.count = _sum + val.count;
+            }
+            DATABASE.put(GLOBAL_COUNT_KEY, newval);
+        } else {
+            newval = val;
+        }
+        _collector.emit(new Values(_attempt, newval.count));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("id", "sum"));
+    }
+}
+```
+
+`UpdateGlobalCount` is specific to transactional topologies so it extends `BaseTransactionalBolt`. In the `execute` method, `UpdateGlobalCount` accumulates the count for this batch by summing together the partial counts. The interesting stuff happens in `finishBatch`.
+
+First, notice that this bolt implements the `ICommitter` interface. This tells Storm that the `finishBatch` method of this bolt should be part of the commit phase of the transaction. So calls to `finishBatch` for this bolt will be strongly ordered by transaction id (calls to `execute` on the other hand can happen during either the processing or commit phases). An alternative way to mark a bolt as a committer is to use the `setCommitterBolt` method in `TransactionalTopologyBuilder` instead of `setBolt`.
+
+The code for `finishBatch` in `UpdateGlobalCount` gets the current value from the database and compares its transaction id to the transaction id for this batch. If they are the same, it does nothing. Otherwise, it increments the value in the database by the partial count for this batch.
+
+A more involved transactional topology example that updates multiple databases idempotently can be found in storm-starter in the [TransactionalWords](https://github.com/nathanmarz/storm-starter/blob/master/src/jvm/storm/starter/TransactionalWords.java) class.
+
+## Transactional Topology API
+
+This section outlines the different pieces of the transactional topology API.
+
+### Bolts
+
+There are three kinds of bolts possible in a transactional topology:
+
+1. [BasicBolt](javadocs/backtype/storm/topology/base/BaseBasicBolt.html): This bolt doesn't deal with batches of tuples and just emits tuples based on a single tuple of input.
+2. [BatchBolt](javadocs/backtype/storm/topology/base/BaseBatchBolt.html): This bolt processes batches of tuples. `execute` is called for each tuple, and `finishBatch` is called when the batch is complete.
+3. BatchBolts that are marked as committers: The only difference between this bolt and a regular batch bolt is when `finishBatch` is called. A committer bolt has `finishBatch` called during the commit phase. The commit phase is guaranteed to occur only after all prior batches have successfully committed, and it will be retried until all bolts in the topology succeed in committing the batch. There are two ways to make a `BatchBolt` a committer: by having the `BatchBolt` implement the [ICommitter](javadocs/backtype/storm/transactional/ICommitter.html) marker interface, or by using the `setCommitterBolt` method in `TransactionalTopologyBuilder`, as shown in the sketch below.
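+
+For example, a sketch of the builder approach, reusing the global count topology from earlier:
+
+```java
+// equivalent to having UpdateGlobalCount implement the ICommitter marker interface
+builder.setCommitterBolt("sum", new UpdateGlobalCount())
+        .globalGrouping("partial-count");
+```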
+
+#### Processing phase vs. commit phase in bolts
+
+To nail down the difference between the processing phase and commit phase of a transaction, let's look at an example topology:
+
+![Storm cluster](images/transactional-commit-flow.png)
+
+In this topology, only the bolts with a red outline are committers.
+
+During the processing phase, bolt A will process the complete batch from the spout, call `finishBatch` and send its tuples to bolts B and C. Bolt B is a committer so it will process all the tuples, but `finishBatch` won't be called. Bolt C also will not have `finishBatch` called, because it doesn't know if it has received all the tuples from Bolt B yet (since Bolt B is waiting for the transaction to commit). Finally, Bolt D will receive any tuples Bolt C emitted during invocations of its `execute` method.
+
+When the batch commits, `finishBatch` is called on Bolt B. Once it finishes, Bolt C can now detect that it has received all the tuples and will call `finishBatch`. Finally, Bolt D will receive its complete batch and call `finishBatch`.
+
+Notice that even though Bolt D is a committer, it doesn't have to wait for a second commit message when it receives the whole batch. Since it receives the whole batch during the commit phase, it goes ahead and completes the transaction.
+
+Committer bolts act just like batch bolts during the commit phase. The only difference between committer bolts and batch bolts is that committer bolts will not call `finishBatch` during the processing phase of a transaction.
+
+#### Acking
+
+Notice that you don't have to do any acking or anchoring when working with transactional topologies. Storm manages all of that under the hood. The acking strategy is heavily optimized.
+
+#### Failing a transaction
+
+When using regular bolts, you can call the `fail` method on `OutputCollector` to fail the tuple trees of which that tuple is a member. Since transactional topologies hide the acking framework from you, they provide a different mechanism to fail a batch (and cause the batch to be replayed). Just throw a [FailedException](javadocs/backtype/storm/topology/FailedException.html). Unlike regular exceptions, this will only cause that particular batch to replay and will not crash the process.
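+
+A minimal sketch inside a committer bolt's `finishBatch` (the `commitToDatabase` call is a hypothetical stand-in for your own commit step):
+
+```java
+@Override
+public void finishBatch() {
+    try {
+        commitToDatabase(_attempt, _sum); // hypothetical external commit
+    } catch (Exception e) {
+        // replays just this batch; unlike other exceptions, it won't crash the worker
+        throw new FailedException(e);
+    }
+}
+```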
+
+### Transactional spout
+
+The `TransactionalSpout` interface is completely different from a regular `Spout` interface. A `TransactionalSpout` implementation emits batches of tuples and must ensure that the same batch of tuples is always emitted for the same transaction id.
+
+A transactional spout looks like this while a topology is executing:
+
+![Storm cluster](images/transactional-spout-structure.png)
+
+The coordinator on the left is a regular Storm spout that emits a tuple whenever a batch should be emitted for a transaction. The emitters execute as a regular Storm bolt and are responsible for emitting the actual tuples for the batch. The emitters subscribe to the "batch emit" stream of the coordinator using an all grouping.
+
+Because a `TransactionalSpout` needs to be idempotent with respect to the tuples it emits, it is required to store a small amount of state, which is kept in Zookeeper.
+
+The details of implementing a `TransactionalSpout` are in [the Javadoc](javadocs/backtype/storm/transactional/ITransactionalSpout.html).
+
+#### Partitioned Transactional Spout
+
+A common kind of transactional spout is one that reads the batches from a set of partitions across many queue brokers. For example, this is how [TransactionalKafkaSpout](https://github.com/nathanmarz/storm-contrib/blob/master/storm-kafka/src/jvm/storm/kafka/TransactionalKafkaSpout.java) works. An `IPartitionedTransactionalSpout` automates the bookkeeping work of managing the state for each partition to ensure idempotent replayability. See [the Javadoc](javadocs/backtype/storm/transactional/partitioned/IPartitionedTransactionalSpout.html) for more details.
+
+### Configuration
+
+There are two important bits of configuration for transactional topologies:
+
+1. *Zookeeper:* By default, transactional topologies will store state in the same Zookeeper instance as used to manage the Storm cluster. You can override this with the "transactional.zookeeper.servers" and "transactional.zookeeper.port" configs.
+2. *Number of active batches permissible at once:* You must set a limit to the number of batches that can be processed at once. You configure this using the "topology.max.spout.pending" config. If you don't set this config, it will default to 1.
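+
+As a sketch, both can be set through the topology config (the Zookeeper hosts below are placeholders):
+
+```java
+Config conf = new Config();
+conf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, Arrays.asList("zk1.example.com", "zk2.example.com"));
+conf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, 2181);
+conf.setMaxSpoutPending(3); // up to 3 batches in the processing phase at once
+```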
+
+## What if you can't emit the same batch of tuples for a given transaction id?
+
+So far the discussion around transactional topologies has assumed that you can always emit the exact same batch of tuples for the same transaction id. So what do you do if this is not possible?
+
+Consider an example of when this is not possible. Suppose you are reading tuples from a partitioned message broker (the stream is partitioned across many machines), and a single transaction will include tuples from all the individual machines. Now suppose one of the nodes goes down at the same time that a transaction fails. Without that node, it is impossible to replay the same batch of tuples you just emitted for that transaction id. The processing in your topology will halt, as it's unable to replay the identical batch. The only possible solution is to emit a different batch for that transaction id than you emitted before. Is it possible to still achieve exactly-once messaging semantics even if the batches change?
+
+It turns out that you can still achieve exactly-once messaging semantics in your processing with a non-idempotent transactional spout, although this requires a bit more work on your part in developing the topology.
+
+If a batch can change for a given transaction id, then the logic we've been using so far of "skip the update if the transaction id in the database is the same as the id for the current transaction" is no longer valid. This is because the current batch is different than the batch for the last time the transaction was committed, so the result will not necessarily be the same. You can fix this problem by storing a little bit more state in the database. Let's again use the example of storing a global count in the database and suppose the partial count for the batch is stored in the `partialCount` variable.
+
+Rather than storing a value in the database that looks like this:
+
+```java
+class Value {
+  Object count;
+  BigInteger txid;
+}
+```
+
+For non-idempotent transactional spouts, you should instead store a value that looks like this:
+
+```java
+class Value {
+  Object count;
+  BigInteger txid;
+  Object prevCount;
+}
+```
+
+The logic for the update is as follows:
+
+1. If the transaction id for the current batch is the same as the transaction id in the database, set `val.count = val.prevCount + partialCount`.
+2. Otherwise, set `val.prevCount = val.count`, `val.count = val.count + partialCount` and `val.txid = batchTxid`.
+
+This logic works because once you commit a particular transaction id for the first time, all prior transaction ids will never be committed again.
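+
+Here's a sketch of that update rule, reusing the hypothetical `DATABASE` and `GLOBAL_COUNT_KEY` from the earlier example and using `long` counts for simplicity:
+
+```java
+void updateOpaque(BigInteger batchTxid, long partialCount) {
+    Value val = DATABASE.get(GLOBAL_COUNT_KEY);
+    if (val != null && val.txid.equals(batchTxid)) {
+        // same txid: the batch contents may differ from the last attempt,
+        // so recompute from the value that preceded this transaction
+        val.count = val.prevCount + partialCount;
+    } else {
+        long prev = (val == null) ? 0 : val.count;
+        val = new Value();
+        val.prevCount = prev;
+        val.count = prev + partialCount;
+        val.txid = batchTxid;
+    }
+    DATABASE.put(GLOBAL_COUNT_KEY, val);
+}
+```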
+
+There are a few more subtle aspects of transactional topologies that make opaque transactional spouts possible.
+
+When a transaction fails, all subsequent transactions in the processing phase are considered failed as well. Each of those transactions will be re-emitted and reprocessed. Without this behavior, the following situation could happen:
+
+1. Transaction A emits tuples 1-50
+2. Transaction B emits tuples 51-100
+3. Transaction A fails
+4. Transaction A emits tuples 1-40
+5. Transaction A commits
+6. Transaction B commits
+7. Transaction C emits tuples 101-150
+
+In this scenario, tuples 41-50 are skipped. By failing all subsequent transactions, this would happen instead:
+
+1. Transaction A emits tuples 1-50
+2. Transaction B emits tuples 51-100
+3. Transaction A fails (and causes Transaction B to fail)
+4. Transaction A emits tuples 1-40
+5. Transaction B emits tuples 41-90
+6. Transaction A commits
+7. Transaction B commits
+8. Transaction C emits tuples 91-140
+
+By failing all subsequent transactions on failure, no tuples are skipped. This also shows that a requirement of transactional spouts is that they always emit where the last transaction left off.
+
+A non-idempotent transactional spout is more concisely referred to as an "OpaqueTransactionalSpout" (opaque is the opposite of idempotent). [IOpaquePartitionedTransactionalSpout](javadocs/backtype/storm/transactional/partitioned/IOpaquePartitionedTransactionalSpout.html) is an interface for implementing opaque partitioned transactional spouts, of which [OpaqueTransactionalKafkaSpout](https://github.com/nathanmarz/storm-contrib/blob/kafka0.7/storm-kafka/src/jvm/storm/kafka/OpaqueTransactionalKafkaSpout.java) is an example. `OpaqueTransactionalKafkaSpout` can withstand losing individual Kafka nodes without sacrificing accuracy as long as you use the update strategy as explained in this section.
+
+## Implementation
+
+The implementation for transactional topologies is very elegant. Managing the commit protocol, detecting failures, and pipelining batches seem complex, but everything turns out to be a straightforward mapping to Storm's primitives.
+
+Here's how the data flow of a transactional spout works:
+
+1. Transactional spout is a subtopology consisting of a coordinator spout and an emitter bolt
+2. The coordinator is a regular spout with a parallelism of 1
+3. The emitter is a bolt with a parallelism of P, connected to the coordinator's "batch" stream using an all grouping
+4. When the coordinator determines it's time to enter the processing phase for a transaction, it emits a tuple containing the TransactionAttempt and the metadata for that transaction to the "batch" stream
+5. Because of the all grouping, every single emitter task receives the notification that it's time to emit its portion of the tuples for that transaction attempt
+6. Storm automatically manages the anchoring/acking necessary throughout the whole topology to determine when a transaction has completed the processing phase. The key here is that *the root tuple was created by the coordinator*, so the coordinator will receive an "ack" if the processing phase succeeds, and a "fail" if it doesn't succeed for any reason (failure or timeout).
+7. If the processing phase succeeds, and all prior transactions have successfully committed, the coordinator emits a tuple containing the TransactionAttempt to the "commit" stream.
+8. All committing bolts subscribe to the commit stream using an all grouping, so that they will all receive a notification when the commit happens.
+9. Like the processing phase, the coordinator uses the acking framework to determine whether the commit phase succeeded or not. If it receives an "ack", it marks that transaction as complete in zookeeper.
+
+More notes:
+
+- Transactional spouts are a sub-topology consisting of a spout and a bolt
+  - the spout is the coordinator and contains a single task
+  - the bolt is the emitter
+  - the bolt subscribes to the coordinator with an all grouping
+  - serialization of metadata is handled by Kryo. Kryo is initialized ONLY with the registrations defined in the component configuration for the transactional spout
+- the coordinator uses the acking framework to determine when a batch has been successfully processed, and then to determine when a batch has been successfully committed.
+- state is stored in zookeeper using RotatingTransactionalState
+- committing bolts subscribe to the coordinator's commit stream using an all grouping
+- CoordinatedBolt is used to detect when a bolt has received all the tuples for a particular batch.
+  - this is the same abstraction that is used in DRPC
+  - for committing bolts, it waits to receive a tuple from the coordinator's commit stream before calling finishBatch
+  - so it can't call finishBatch until it's received all tuples from all subscribed components AND it's received the commit stream tuple (for committers). This ensures that it can't prematurely call finishBatch

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Trident-API-Overview.md
----------------------------------------------------------------------
diff --git a/docs/Trident-API-Overview.md b/docs/Trident-API-Overview.md
new file mode 100644
index 0000000..3b68645
--- /dev/null
+++ b/docs/Trident-API-Overview.md
@@ -0,0 +1,311 @@
+---
+layout: documentation
+---
+# Trident API overview
+
+The core data model in Trident is the "Stream", processed as a series of batches. A stream is partitioned among the nodes in the cluster, and operations applied to a stream are applied in parallel across each partition.
+
+There are five kinds of operations in Trident:
+
+1. Operations that apply locally to each partition and cause no network transfer
+2. Repartitioning operations that repartition a stream but otherwise don't change the contents (involves network transfer)
+3. Aggregation operations that do network transfer as part of the operation
+4. Operations on grouped streams
+5. Merges and joins
+
+## Partition-local operations
+
+Partition-local operations involve no network transfer and are applied to each batch partition independently.
+
+### Functions
+
+A function takes in a set of input fields and emits zero or more tuples as output. The fields of the output tuple are appended to the original input tuple in the stream. If a function emits no tuples, the original input tuple is filtered out. Otherwise, the input tuple is duplicated for each output tuple. Suppose you have this function:
+
+```java
+public class MyFunction extends BaseFunction {
+    public void execute(TridentTuple tuple, TridentCollector collector) {
+        for(int i=0; i < tuple.getInteger(0); i++) {
+            collector.emit(new Values(i));
+        }
+    }
+}
+```
+
+Now suppose you have a stream in the variable "mystream" with the fields ["a", "b", "c"] with the following tuples:
+
+```
+[1, 2, 3]
+[4, 1, 6]
+[3, 0, 8]
+```
+
+If you run this code:
+
+```java
+mystream.each(new Fields("b"), new MyFunction(), new Fields("d")))
+```
+
+The resulting tuples would have fields ["a", "b", "c", "d"] and look like this:
+
+```
+[1, 2, 3, 0]
+[1, 2, 3, 1]
+[4, 1, 6, 0]
+```
+
+### Filters
+
+Filters take in a tuple as input and decide whether or not to keep that tuple. Suppose you had this filter:
+
+```java
+public class MyFilter extends BaseFilter {
+    public boolean isKeep(TridentTuple tuple) {
+        return tuple.getInteger(0) == 1 && tuple.getInteger(1) == 2;
+    }
+}
+```
+
+Now suppose you had these tuples with fields ["a", "b", "c"]:
+
+```
+[1, 2, 3]
+[2, 1, 1]
+[2, 3, 4]
+```
+
+If you ran this code:
+
+```java
+mystream.each(new Fields("b", "a"), new MyFilter())
+```
+
+The resulting tuples would be:
+
+```
+[2, 1, 1]
+```
+
+### partitionAggregate
+
+partitionAggregate runs a function on each partition of a batch of tuples. Unlike functions, the tuples emitted by partitionAggregate replace the input tuples given to it. Consider this example:
+
+```java
+mystream.partitionAggregate(new Fields("b"), new Sum(), new Fields("sum"))
+```
+
+Suppose the input stream contained fields ["a", "b"] and the following partitions of tuples:
+
+```
+Partition 0:
+["a", 1]
+["b", 2]
+
+Partition 1:
+["a", 3]
+["c", 8]
+
+Partition 2:
+["e", 1]
+["d", 9]
+["d", 10]
+```
+
+Then the output stream of that code would contain these tuples with one field called "sum":
+
+```
+Partition 0:
+[3]
+
+Partition 1:
+[11]
+
+Partition 2:
+[20]
+```
+
+There are three different interfaces for defining aggregators: CombinerAggregator, ReducerAggregator, and Aggregator.
+
+Here's the interface for CombinerAggregator:
+
+```java
+public interface CombinerAggregator<T> extends Serializable {
+    T init(TridentTuple tuple);
+    T combine(T val1, T val2);
+    T zero();
+}
+```
+
+A CombinerAggregator returns a single tuple with a single field as output. CombinerAggregators run the init function on each input tuple and use the combine function to combine values until there's only one value left. If there are no tuples in the partition, the CombinerAggregator emits the output of the zero function. For example, here's the implementation of Count:
+
+```java
+public class Count implements CombinerAggregator<Long> {
+    public Long init(TridentTuple tuple) {
+        return 1L;
+    }
+
+    public Long combine(Long val1, Long val2) {
+        return val1 + val2;
+    }
+
+    public Long zero() {
+        return 0L;
+    }
+}
+```
+
+The benefits of CombinerAggregators are seen when you use them with the aggregate method instead of partitionAggregate. In that case, Trident automatically optimizes the computation by doing partial aggregations before transferring tuples over the network.
+
+A ReducerAggregator has the following interface:
+
+```java
+public interface ReducerAggregator<T> extends Serializable {
+    T init();
+    T reduce(T curr, TridentTuple tuple);
+}
+```
+
+A ReducerAggregator produces an initial value with init, and then it iterates on that value for each input tuple to produce a single tuple with a single value as output. For example, here's how you would define Count as a ReducerAggregator:
+
+```java
+public class Count implements ReducerAggregator<Long> {
+    public Long init() {
+        return 0L;
+    }
+    
+    public Long reduce(Long curr, TridentTuple tuple) {
+        return curr + 1;
+    }
+}
+```
+
+ReducerAggregator can also be used with persistentAggregate, as you'll see later.
+
+The most general interface for performing aggregations is Aggregator, which looks like this:
+
+```java
+public interface Aggregator<T> extends Operation {
+    T init(Object batchId, TridentCollector collector);
+    void aggregate(T state, TridentTuple tuple, TridentCollector collector);
+    void complete(T state, TridentCollector collector);
+}
+```
+
+Aggregators can emit any number of tuples with any number of fields. They can emit tuples at any point during execution. Aggregators execute in the following way:
+
+1. The init method is called before processing the batch. The return value of init is an Object that will represent the state of the aggregation and will be passed into the aggregate and complete methods.
+2. The aggregate method is called for each input tuple in the batch partition. This method can update the state and optionally emit tuples.
+3. The complete method is called when all tuples for the batch partition have been processed by aggregate. 
+
+Here's how you would implement Count as an Aggregator:
+
+```java
+public class CountAgg extends BaseAggregator<CountState> {
+    static class CountState {
+        long count = 0;
+    }
+
+    public CountState init(Object batchId, TridentCollector collector) {
+        return new CountState();
+    }
+
+    public void aggregate(CountState state, TridentTuple tuple, TridentCollector collector) {
+        state.count+=1;
+    }
+
+    public void complete(CountState state, TridentCollector collector) {
+        collector.emit(new Values(state.count));
+    }
+}
+```
+
+Sometimes you want to execute multiple aggregators at the same time. This is called chaining and can be accomplished like this:
+
+```java
+mystream.chainedAgg()
+        .partitionAggregate(new Count(), new Fields("count"))
+        .partitionAggregate(new Fields("b"), new Sum(), new Fields("sum"))
+        .chainEnd()
+```
+
+This code will run the Count and Sum aggregators on each partition. The output will contain a single tuple with the fields ["count", "sum"].
+
+### stateQuery and partitionPersist
+
+stateQuery and partitionPersist query and update sources of state, respectively. You can read about how to use them on [Trident state doc](Trident-state.html).
+
+### projection
+
+The projection method on Stream keeps only the fields specified in the operation. If you had a Stream with fields ["a", "b", "c", "d"] and you ran this code:
+
+```java
+mystream.project(new Fields("b", "d"))
+```
+
+The output stream would contain only the fields ["b", "d"].
+
+
+## Repartitioning operations
+
+Repartitioning operations run a function to change how the tuples are partitioned across tasks. The number of partitions can also change as a result of repartitioning (for example, if the parallelism hint is greater after repartitioning). Repartitioning requires network transfer. Here are the repartitioning functions:
+
+1. shuffle: Uses a random round robin algorithm to evenly redistribute tuples across all target partitions
+2. broadcast: Every tuple is replicated to all target partitions. This can be useful during DRPC – for example, if you need to do a stateQuery on every partition of data.
+3. partitionBy: partitionBy takes in a set of fields and does semantic partitioning based on that set of fields. The fields are hashed and modded by the number of target partitions to select the target partition. partitionBy guarantees that the same set of fields always goes to the same target partition.
+4. global: All tuples are sent to the same partition. The same partition is chosen for all batches in the stream.
+5. batchGlobal: All tuples in the batch are sent to the same partition. Different batches in the stream may go to different partitions. 
+6. partition: This method takes in a custom partitioning function that implements backtype.storm.grouping.CustomStreamGrouping, as in the sketch below
+
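+As a rough, minimal sketch of such a custom partitioner (the class name and the route-everything-to-the-first-task rule are purely illustrative):
+
+```java
+public class FirstTaskGrouping implements CustomStreamGrouping {
+    private List<Integer> targetTasks;
+
+    public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
+        // remember which tasks this grouping may route tuples to
+        this.targetTasks = targetTasks;
+    }
+
+    public List<Integer> chooseTasks(int taskId, List<Object> values) {
+        // illustrative rule: send every tuple to the first target task
+        return Arrays.asList(targetTasks.get(0));
+    }
+}
+```
+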
+## Aggregation operations
+
+Trident has aggregate and persistentAggregate methods for doing aggregations on Streams. aggregate is run on each batch of the stream in isolation, while persistentAggregate aggregates across all tuples in all batches of the stream and stores the result in a source of state.
+
+Running aggregate on a Stream does a global aggregation. When you use a ReducerAggregator or an Aggregator, the stream is first repartitioned into a single partition, and then the aggregation function is run on that partition. When you use a CombinerAggregator, on the other hand, first Trident will compute partial aggregations of each partition, then repartition to a single partition, and then finish the aggregation after the network transfer. CombinerAggregators are far more efficient and should be used when possible.
+
+Here's an example of using aggregate to get a global count for a batch:
+
+```java
+mystream.aggregate(new Count(), new Fields("count"))
+```
+
+Like partitionAggregate, aggregators for aggregate can be chained. However, if you chain a CombinerAggregator with a non-CombinerAggregator, Trident is unable to do the partial aggregation optimization.
+
+You can read more about how to use persistentAggregate in the [Trident state doc](Trident-state.html).
+
+## Operations on grouped streams
+
+The groupBy operation repartitions the stream by doing a partitionBy on the specified fields, and then within each partition groups tuples together whose group fields are equal. For example, here's an illustration of a groupBy operation:
+
+![Grouping](images/grouping.png)
+
+If you run aggregators on a grouped stream, the aggregation will be run within each group instead of against the whole batch. persistentAggregate can also be run on a GroupedStream, in which case the results will be stored in a [MapState](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/storm/trident/state/map/MapState.java) with the key being the grouping fields. You can read more about persistentAggregate in the [Trident state doc](Trident-state.html).
+
+Like regular streams, aggregators on grouped streams can be chained.
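+
+For example, a sketch of a per-word count on a grouped stream, reusing the Count aggregator defined earlier (the field names are illustrative):
+
+```java
+mystream.groupBy(new Fields("word"))
+        .aggregate(new Count(), new Fields("count"))
+```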
+
+## Merges and joins
+
+The last part of the API is combining different streams together. The simplest way to combine streams is to merge them into one stream. You can do that with the TridentTopology#merge method, like so:
+
+```java
+topology.merge(stream1, stream2, stream3);
+```
+
+Trident will name the output fields of the new, merged stream as the output fields of the first stream.
+
+Another way to combine streams is with a join. A standard join, like the kind from SQL, requires finite input, so it doesn't make sense for infinite streams. Joins in Trident only apply within each small batch that comes off of the spout. 
+
+Here's an example join between a stream containing fields ["key", "val1", "val2"] and another stream containing ["x", "val1"]:
+
+```java
+topology.join(stream1, new Fields("key"), stream2, new Fields("x"), new Fields("key", "a", "b", "c"));
+```
+
+This joins stream1 and stream2 together using "key" and "x" as the join fields for each respective stream. Then, Trident requires that all the output fields of the new stream be named, since the input streams could have overlapping field names. The tuples emitted from the join will contain:
+
+1. First, the list of join fields. In this case, "key" corresponds to "key" from stream1 and "x" from stream2.
+2. Next, a list of all non-join fields from all streams, in order of how the streams were passed to the join method. In this case, "a" and "b" correspond to "val1" and "val2" from stream1, and "c" corresponds to "val1" from stream2.
+
+When a join happens between streams originating from different spouts, those spouts will be synchronized with how they emit batches. That is, a batch of processing will include tuples from each spout.
+
+You might be wondering – how do you do something like a "windowed join", where tuples from one side of the join are joined against the last hour of tuples from the other side?
+
+To do this, you would make use of partitionPersist and stateQuery. The last hour of tuples from one side of the join would be stored and rotated in a source of state, keyed by the join field. Then the stateQuery would do lookups by the join field to perform the "join".

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Trident-spouts.md
----------------------------------------------------------------------
diff --git a/docs/Trident-spouts.md b/docs/Trident-spouts.md
new file mode 100644
index 0000000..92330a7
--- /dev/null
+++ b/docs/Trident-spouts.md
@@ -0,0 +1,42 @@
+---
+layout: documentation
+---
+# Trident spouts
+
+Like in the vanilla Storm API, spouts are the source of streams in a Trident topology. On top of the vanilla Storm spouts, Trident exposes additional APIs for more sophisticated spouts.
+
+There is an inextricable link between how you source your data streams and how you update state (e.g. databases) based on those data streams. See [Trident state doc](Trident-state.html) for an explanation of this – understanding this link is imperative for understanding the spout options available.
+
+Regular Storm spouts will be non-transactional spouts in a Trident topology. To use a regular Storm IRichSpout, create the stream like this in a TridentTopology:
+
+```java
+TridentTopology topology = new TridentTopology();
+topology.newStream("myspoutid", new MyRichSpout());
+```
+
+All spouts in a Trident topology are required to be given a unique identifier for the stream – this identifier must be unique across all topologies run on the cluster. Trident will use this identifier to store metadata about what the spout has consumed in Zookeeper, including the txid and any metadata associated with the spout.
+
+You can configure the Zookeeper storage of spout metadata via the following configuration options:
+
+1. `transactional.zookeeper.servers`: A list of Zookeeper hostnames 
+2. `transactional.zookeeper.port`: The port of the Zookeeper cluster
+3. `transactional.zookeeper.root`: The root dir in Zookeeper where metadata is stored. Metadata will be stored at the path `<root path>/<spout id>`
+
+## Pipelining
+
+By default, Trident processes a single batch at a time, waiting for the batch to succeed or fail before trying another batch. You can get significantly higher throughput – and lower latency for each batch – by pipelining the batches. You configure the maximum number of batches to be processed simultaneously with the "topology.max.spout.pending" property. 
+
+Even while processing multiple batches simultaneously, Trident will order any state updates taking place in the topology among batches. For example, suppose you're doing a global count aggregation into a database. The idea is that while you're updating the count in the database for batch 1, you can still be computing the partial counts for batches 2 through 10. Trident won't move on to the state updates for batch 2 until the state updates for batch 1 have succeeded. This is essential for achieving exactly-once processing semantics, as outlined in [Trident state doc](Trident-state.html).
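+
+As a minimal sketch, setting this property when submitting the topology might look like this (the value 20 is an illustrative choice, not a recommendation):
+
+```java
+Config conf = new Config();
+conf.setMaxSpoutPending(20); // allow up to 20 batches to be processed simultaneously
+StormSubmitter.submitTopology("mytopology", conf, topology.build());
+```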
+
+## Trident spout types
+
+The following spout APIs are available:
+
+1. [ITridentSpout](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/storm/trident/spout/ITridentSpout.java): The most general API that can support transactional or opaque transactional semantics. Generally you'll use one of the partitioned flavors of this API rather than this one directly.
+2. [IBatchSpout](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/storm/trident/spout/IBatchSpout.java): A non-transactional spout that emits batches of tuples at a time
+3. [IPartitionedTridentSpout](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/storm/trident/spout/IPartitionedTridentSpout.java): A transactional spout that reads from a partitioned data source (like a cluster of Kafka servers)
+4. [IOpaquePartitionedTridentSpout](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/storm/trident/spout/IOpaquePartitionedTridentSpout.java): An opaque transactional spout that reads from a partitioned data source
+
+And, as mentioned at the beginning of this tutorial, you can use regular IRichSpouts as well.
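+
+As a rough sketch of the simplest of these, here is what a trivial IBatchSpout might look like; it emits the same fixed batch every time, purely for illustration (the class name, sentences, and field name are made up):
+
+```java
+public class FixedSentenceSpout implements IBatchSpout {
+    public void open(Map conf, TopologyContext context) {
+    }
+
+    public void emitBatch(long batchId, TridentCollector collector) {
+        // a real spout would emit a different batch per batchId
+        collector.emit(new Values("the cow jumped over the moon"));
+        collector.emit(new Values("the man went to the store"));
+    }
+
+    public void ack(long batchId) {
+        // non-transactional: nothing to clean up when a batch succeeds
+    }
+
+    public void close() {
+    }
+
+    public Map getComponentConfiguration() {
+        return null;
+    }
+
+    public Fields getOutputFields() {
+        return new Fields("sentence");
+    }
+}
+```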
+ 
+

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Trident-state.md
----------------------------------------------------------------------
diff --git a/docs/Trident-state.md b/docs/Trident-state.md
new file mode 100644
index 0000000..2ace8c8
--- /dev/null
+++ b/docs/Trident-state.md
@@ -0,0 +1,330 @@
+---
+layout: documentation
+---
+# State in Trident
+
+Trident has first-class abstractions for reading from and writing to stateful sources. The state can either be internal to the topology – e.g., kept in-memory and backed by HDFS – or externally stored in a database like Memcached or Cassandra. There's no difference in the Trident API for either case.
+
+Trident manages state in a fault-tolerant way so that state updates are idempotent in the face of retries and failures. This lets you reason about Trident topologies as if each message were processed exactly-once.
+
+There are various levels of fault-tolerance possible when doing state updates. Before getting to those, let's look at an example that illustrates the tricks necessary to achieve exactly-once semantics. Suppose that you're doing a count aggregation of your stream and want to store the running count in a database. Now suppose you store in the database a single value representing the count, and every time you process a new tuple you increment the count.
+
+When failures occur, tuples will be replayed. This brings up a problem when doing state updates (or anything with side effects) – you have no idea if you've ever successfully updated the state based on this tuple before. Perhaps you never processed the tuple before, in which case you should increment the count. Perhaps you've processed the tuple and successfully incremented the count, but the tuple failed processing in another step. In this case, you should not increment the count. Or perhaps you saw the tuple before but got an error when updating the database. In this case, you *should* update the database.
+
+By just storing the count in the database, you have no idea whether or not this tuple has been processed before. So you need more information in order to make the right decision. Trident provides the following semantics which are sufficient for achieving exactly-once processing semantics:
+
+1. Tuples are processed as small batches (see [the tutorial](Trident-tutorial.html))
+2. Each batch of tuples is given a unique id called the "transaction id" (txid). If the batch is replayed, it is given the exact same txid.
+3. State updates are ordered among batches. That is, the state updates for batch 3 won't be applied until the state updates for batch 2 have succeeded.
+
+With these primitives, your State implementation can detect whether or not the batch of tuples has been processed before and take the appropriate action to update the state in a consistent way. The action you take depends on the exact semantics provided by your input spouts as to what's in each batch. There are three kinds of spouts possible with respect to fault-tolerance: "non-transactional", "transactional", and "opaque transactional". Likewise, there are three kinds of state possible with respect to fault-tolerance: "non-transactional", "transactional", and "opaque transactional". Let's take a look at each spout type and see what kind of fault-tolerance you can achieve with each.
+
+## Transactional spouts
+
+Remember, Trident processes tuples as small batches with each batch being given a unique transaction id. The properties of spouts vary according to the guarantees they can provide as to what's in each batch. A transactional spout has the following properties:
+
+1. Batches for a given txid are always the same. Replays of batches for a txid will emit the exact same set of tuples as the first time that batch was emitted for that txid.
+2. There's no overlap between batches of tuples (tuples are in one batch or another, never multiple).
+3. Every tuple is in a batch (no tuples are skipped)
+
+This is a pretty easy type of spout to understand: the stream is divided into fixed batches that never change. storm-contrib has [an implementation of a transactional spout](https://github.com/nathanmarz/storm-contrib/blob/{{page.version}}/storm-kafka/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java) for Kafka.
+
+You might be wondering – why wouldn't you just always use a transactional spout? They're simple and easy to understand. One reason you might not use one is because they're not necessarily very fault-tolerant. For example, the way TransactionalTridentKafkaSpout works is that the batch for a txid will contain tuples from all the Kafka partitions for a topic. Once a batch has been emitted, any time that batch is re-emitted in the future the exact same set of tuples must be emitted to meet the semantics of transactional spouts. Now suppose a batch is emitted from TransactionalTridentKafkaSpout, the batch fails to process, and at the same time one of the Kafka nodes goes down. You're now incapable of replaying the same batch as you did before (since the node is down and some partitions for the topic are unavailable), and processing will halt. 
+
+This is why "opaque transactional" spouts exist – they are fault-tolerant to losing source nodes while still allowing you to achieve exactly-once processing semantics. We'll cover those spouts in the next section though.
+
+(One side note – once Kafka supports replication, it will be possible to have transactional spouts that are fault-tolerant to node failure, but that feature does not exist yet.)
+
+Before we get to "opaque transactional" spouts, let's look at how you would design a State implementation that has exactly-once semantics for transactional spouts. This State type is called a "transactional state" and takes advantage of the fact that any given txid is always associated with the exact same set of tuples.
+
+Suppose your topology computes word count and you want to store the word counts in a key/value database. The key will be the word, and the value will contain the count. You've already seen that storing just the count as the value isn't sufficient to know whether you've processed a batch of tuples before. Instead, what you can do is store the transaction id with the count in the database as an atomic value. Then, when updating the count, you can just compare the transaction id in the database with the transaction id for the current batch. If they're the same, you skip the update – because of the strong ordering, you know for sure that the value in the database incorporates the current batch. If they're different, you increment the count. This logic works because the batch for a txid never changes, and Trident ensures that state updates are ordered among batches.
+
+Consider this example of why it works. Suppose you are processing txid 3 which consists of the following batch of tuples:
+
+```
+["man"]
+["man"]
+["dog"]
+```
+
+Suppose the database currently holds the following key/value pairs:
+
+```
+man => [count=3, txid=1]
+dog => [count=4, txid=3]
+apple => [count=10, txid=2]
+```
+
+The txid associated with "man" is txid 1. Since the current txid is 3, you know for sure that this batch of tuples is not represented in that count. So you can go ahead and increment the count by 2 and update the txid. On the other hand, the txid for "dog" is the same as the current txid. So you know for sure that the increment from the current batch is already represented in the database for the "dog" key. So you can skip the update. After completing updates, the database looks like this:
+
+```
+man => [count=5, txid=3]
+dog => [count=4, txid=3]
+apple => [count=10, txid=2]
+```
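+
+Sketched in code, the transactional update rule looks something like this. TransactionalValue is Trident's class for pairing a value with its txid; the `db` client and the `word`, `currentTxid`, and `batchCount` variables are hypothetical stand-ins:
+
+```java
+TransactionalValue<Long> stored = db.get(word);
+if (stored == null) {
+    db.put(word, new TransactionalValue<Long>(currentTxid, batchCount));
+} else if (!currentTxid.equals(stored.getTxid())) {
+    // different txid: the stored count cannot include this batch, so apply the increment
+    db.put(word, new TransactionalValue<Long>(currentTxid, stored.getVal() + batchCount));
+}
+// same txid: the stored count already reflects this exact batch, so skip the update
+```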
+
+Let's now look at opaque transactional spouts and how to design states for that type of spout.
+
+## Opaque transactional spouts
+
+As described before, an opaque transactional spout cannot guarantee that the batch of tuples for a txid remains constant. An opaque transactional spout has the following property:
+
+1. Every tuple is *successfully* processed in exactly one batch. However, it's possible for a tuple to fail to process in one batch and then succeed to process in a later batch.
+
+[OpaqueTridentKafkaSpout](https://github.com/nathanmarz/storm-contrib/blob/{{page.version}}/storm-kafka/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java) is a spout that has this property and is fault-tolerant to losing Kafka nodes. Whenever it's time for OpaqueTridentKafkaSpout to emit a batch, it emits tuples starting from where the last batch finished emitting. This ensures that no tuple is ever skipped or successfully processed by multiple batches.
+
+With opaque transactional spouts, it's no longer possible to use the trick of skipping state updates if the transaction id in the database is the same as the transaction id for the current batch. This is because the batch may have changed between state updates.
+
+What you can do is store more state in the database. Rather than store a value and transaction id in the database, you instead store a value, transaction id, and the previous value in the database. Let's again use the example of storing a count in the database. Suppose the partial count for your batch is "2" and it's time to apply a state update. Suppose the value in the database looks like this:
+
+```
+{ value = 4,
+  prevValue = 1,
+  txid = 2
+}
+```
+
+Suppose your current txid is 3, different from what's in the database. In this case, you set "prevValue" equal to "value", increment "value" by your partial count, and update the txid. The new database value will look like this:
+
+```
+{ value = 6,
+  prevValue = 4,
+  txid = 3
+}
+```
+
+Now suppose your current txid is 2, equal to what's in the database. Now you know that the "value" in the database contains an update from a previous batch for your current txid, but that batch may have been different so you have to ignore it. What you do in this case is increment "prevValue" by your partial count to compute the new "value". You then set the value in the database to this:
+
+```
+{ value = 3,
+  prevValue = 1,
+  txid = 2
+}
+```
+
+This works because of the strong ordering of batches provided by Trident. Once Trident moves on to a new batch for state updates, it will never go back to a previous batch. And since opaque transactional spouts guarantee no overlap between batches – that each tuple is successfully processed by one batch – you can safely update based on the previous value.
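+
+A sketch of the opaque update rule, using Trident's OpaqueValue to hold the (value, prevValue, txid) triple; as above, `db`, `key`, `currentTxid`, and `partialCount` are hypothetical, and null handling is simplified:
+
+```java
+OpaqueValue<Long> stored = db.get(key);
+long newValue, newPrev;
+if (stored == null) {
+    newPrev = 0;
+    newValue = partialCount;
+} else if (currentTxid.equals(stored.getCurrTxid())) {
+    // same txid: the stored value may reflect a different batch, so rebase on prevValue
+    newPrev = stored.getPrev();
+    newValue = stored.getPrev() + partialCount;
+} else {
+    // new txid: the stored value is final for all earlier batches
+    newPrev = stored.getCurr();
+    newValue = stored.getCurr() + partialCount;
+}
+db.put(key, new OpaqueValue<Long>(currentTxid, newValue, newPrev));
+```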
+
+## Non-transactional spouts
+
+Non-transactional spouts don't provide any guarantees about what's in each batch. A non-transactional spout might provide at-most-once processing, in which case tuples are not retried after failed batches. Or it might provide at-least-once processing, where tuples can be processed successfully by multiple batches. There's no way to achieve exactly-once semantics for this kind of spout.
+
+## Summary of spout and state types
+
+This diagram shows which combinations of spouts / states enable exactly-once messaging semantics:
+
+![Spouts vs States](images/spout-vs-state.png)
+
+Opaque transactional states have the strongest fault-tolerance, but this comes at the cost of needing to store the txid and two values in the database. Transactional states require less state in the database, but only work with transactional spouts. Finally, non-transactional states require the least state in the database but cannot achieve exactly-once semantics.
+
+The state and spout types you choose are a tradeoff between fault-tolerance and storage costs, and ultimately your application requirements will determine which combination is right for you.
+
+## State APIs
+
+You've seen the intricacies of what it takes to achieve exactly-once semantics. The nice thing about Trident is that it internalizes all the fault-tolerance logic within the State – as a user you don't have to deal with comparing txids, storing multiple values in the database, or anything like that. You can write code like this:
+
+```java
+TridentTopology topology = new TridentTopology();        
+TridentState wordCounts =
+      topology.newStream("spout1", spout)
+        .each(new Fields("sentence"), new Split(), new Fields("word"))
+        .groupBy(new Fields("word"))
+        .persistentAggregate(MemcachedState.opaque(serverLocations), new Count(), new Fields("count"))                
+        .parallelismHint(6);
+```
+
+All the logic necessary to manage opaque transactional state logic is internalized in the MemcachedState.opaque call. Additionally, updates are automatically batched to minimize roundtrips to the database.
+
+The base State interface just has two methods:
+
+```java
+public interface State {
+    void beginCommit(Long txid); // can be null for things like partitionPersist occurring off a DRPC stream
+    void commit(Long txid);
+}
+```
+
+You're told when a state update is beginning, when a state update is ending, and you're given the txid in each case. Trident assumes nothing about how your state works, what kind of methods there are to update it, and what kind of methods there are to read from it.
+
+Suppose you have a home-grown database that contains user location information and you want to be able to access it from Trident. Your State implementation would have methods for getting and setting user information:
+
+```java
+public class LocationDB implements State {
+    public void beginCommit(Long txid) {    
+    }
+    
+    public void commit(Long txid) {    
+    }
+    
+    public void setLocation(long userId, String location) {
+      // code to access database and set location
+    }
+    
+    public String getLocation(long userId) {
+      // code to get location from database
+      return null; // placeholder for the database read
+    }
+}
+```
+
+You then provide Trident a StateFactory that can create instances of your State object within Trident tasks. The StateFactory for your LocationDB might look something like this:
+
+```java
+public class LocationDBFactory implements StateFactory {
+   public State makeState(Map conf, int partitionIndex, int numPartitions) {
+      return new LocationDB();
+   } 
+}
+```
+
+Trident provides the QueryFunction interface for writing Trident operations that query a source of state, and the StateUpdater interface for writing Trident operations that update a source of state. For example, let's write an operation "QueryLocation" that queries the LocationDB for the locations of users. Let's start off with how you would use it in a topology. Let's say this topology consumes an input stream of userids:
+
+```java
+TridentTopology topology = new TridentTopology();
+TridentState locations = topology.newStaticState(new LocationDBFactory());
+topology.newStream("myspout", spout)
+        .stateQuery(locations, new Fields("userid"), new QueryLocation(), new Fields("location"))
+```
+
+Now let's take a look at what the implementation of QueryLocation would look like:
+
+```java
+public class QueryLocation extends BaseQueryFunction<LocationDB, String> {
+    public List<String> batchRetrieve(LocationDB state, List<TridentTuple> inputs) {
+        List<String> ret = new ArrayList<String>();
+        for(TridentTuple input: inputs) {
+            ret.add(state.getLocation(input.getLong(0)));
+        }
+        return ret;
+    }
+
+    public void execute(TridentTuple tuple, String location, TridentCollector collector) {
+        collector.emit(new Values(location));
+    }    
+}
+```
+
+QueryFunctions execute in two steps. First, Trident collects a batch of reads together and passes them to batchRetrieve. In this case, batchRetrieve will receive multiple user ids. batchRetrieve is expected to return a list of results that's the same size as the list of input tuples. The first element of the result list corresponds to the result for the first input tuple, the second is the result for the second input tuple, and so on.
+
+You can see that this code doesn't take advantage of the batching that Trident does, since it just queries the LocationDB one userid at a time. So a better way to write the LocationDB would be like this:
+
+```java
+public class LocationDB implements State {
+    public void beginCommit(Long txid) {    
+    }
+    
+    public void commit(Long txid) {    
+    }
+    
+    public void setLocationsBulk(List<Long> userIds, List<String> locations) {
+      // set locations in bulk
+    }
+    
+    public List<String> bulkGetLocations(List<Long> userIds) {
+      // get locations in bulk
+      return null; // placeholder for the bulk database read
+    }
+}
+```
+
+Then, you can write the QueryLocation function like this:
+
+```java
+public class QueryLocation extends BaseQueryFunction<LocationDB, String> {
+    public List<String> batchRetrieve(LocationDB state, List<TridentTuple> inputs) {
+        List<Long> userIds = new ArrayList<Long>();
+        for(TridentTuple input: inputs) {
+            userIds.add(input.getLong(0));
+        }
+        return state.bulkGetLocations(userIds);
+    }
+
+    public void execute(TridentTuple tuple, String location, TridentCollector collector) {
+        collector.emit(new Values(location));
+    }    
+}
+```
+
+This code will be much more efficient by reducing roundtrips to the database. 
+
+To update state, you make use of the StateUpdater interface. Here's a StateUpdater that updates a LocationDB with new location information:
+
+```java
+public class LocationUpdater extends BaseStateUpdater<LocationDB> {
+    public void updateState(LocationDB state, List<TridentTuple> tuples, TridentCollector collector) {
+        List<Long> ids = new ArrayList<Long>();
+        List<String> locations = new ArrayList<String>();
+        for(TridentTuple t: tuples) {
+            ids.add(t.getLong(0));
+            locations.add(t.getString(1));
+        }
+        state.setLocationsBulk(ids, locations);
+    }
+}
+```
+
+Here's how you would use this operation in a Trident topology:
+
+```java
+TridentTopology topology = new TridentTopology();
+TridentState locations = 
+    topology.newStream("locations", locationsSpout)
+        .partitionPersist(new LocationDBFactory(), new Fields("userid", "location"), new LocationUpdater())
+```
+
+The partitionPersist operation updates a source of state. The StateUpdater receives the State and a batch of tuples with updates to that State. This code just grabs the userids and locations from the input tuples and does a bulk set into the State. 
+
+partitionPersist returns a TridentState object representing the location db being updated by the Trident topology. You could then use this state in stateQuery operations elsewhere in the topology. 
+
+You can also see that StateUpdaters are given a TridentCollector. Tuples emitted to this collector go to the "new values stream". In this case, there's nothing interesting to emit to that stream, but if you were doing something like updating counts in a database, you could emit the updated counts to that stream. You can then get access to the new values stream for further processing via the TridentState#newValuesStream method.
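+
+As a sketch, consuming the new values stream from the word count example might look like this (LogFilter is a hypothetical filter that just logs its input):
+
+```java
+TridentState wordCounts =
+      topology.newStream("spout1", spout)
+        .each(new Fields("sentence"), new Split(), new Fields("word"))
+        .groupBy(new Fields("word"))
+        .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"));
+
+wordCounts.newValuesStream()
+          .each(new Fields("count"), new LogFilter()); // LogFilter is hypothetical
+```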
+
+## persistentAggregate
+
+Trident has another method for updating States called persistentAggregate. You've seen this used in the streaming word count example, shown again below:
+
+```java
+TridentTopology topology = new TridentTopology();        
+TridentState wordCounts =
+      topology.newStream("spout1", spout)
+        .each(new Fields("sentence"), new Split(), new Fields("word"))
+        .groupBy(new Fields("word"))
+        .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
+```
+
+persistentAggregate is an additional abstraction built on top of partitionPersist that knows how to take a Trident aggregator and use it to apply updates to the source of state. In this case, since this is a grouped stream, Trident expects the state you provide to implement the "MapState" interface. The grouping fields will be the keys in the state, and the aggregation result will be the values in the state. The "MapState" interface looks like this:
+
+```java
+public interface MapState<T> extends State {
+    List<T> multiGet(List<List<Object>> keys);
+    List<T> multiUpdate(List<List<Object>> keys, List<ValueUpdater> updaters);
+    void multiPut(List<List<Object>> keys, List<T> vals);
+}
+```
+
+When you do aggregations on non-grouped streams (a global aggregation), Trident expects your State object to implement the "Snapshottable" interface:
+
+```java
+public interface Snapshottable<T> extends State {
+    T get();
+    T update(ValueUpdater updater);
+    void set(T o);
+}
+```
+
+[MemoryMapState](https://github.com/apache/incubator-storm/blob/{{page.version}}/storm-core/src/jvm/storm/trident/testing/MemoryMapState.java) and [MemcachedState](https://github.com/nathanmarz/trident-memcached/blob/master/src/jvm/trident/memcached/MemcachedState.java) each implement both of these interfaces.
+
+## Implementing Map States
+
+Trident makes it easy to implement MapStates, doing almost all the work for you. The OpaqueMap, TransactionalMap, and NonTransactionalMap classes each implement the respective fault-tolerance logic. You simply provide these classes with an IBackingMap implementation that knows how to do multiGets and multiPuts of the respective key/values. IBackingMap looks like this:
+
+```java
+public interface IBackingMap<T> {
+    List<T> multiGet(List<List<Object>> keys); 
+    void multiPut(List<List<Object>> keys, List<T> vals); 
+}
+```
+
+OpaqueMaps will call multiPut with [OpaqueValue](https://github.com/apache/incubator-storm/blob/{{page.version}}/storm-core/src/jvm/storm/trident/state/OpaqueValue.java)s for the vals, TransactionalMaps will give [TransactionalValue](https://github.com/apache/incubator-storm/blob/{{page.version}}/storm-core/src/jvm/storm/trident/state/TransactionalValue.java)s for the vals, and NonTransactionalMaps will just pass the objects from the topology through.
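+
+For illustration, here is a minimal, non-durable IBackingMap sketch backed by an in-memory HashMap (a toy example, not a production state). Wrapping it, e.g. with OpaqueMap's build factory, would layer the opaque fault-tolerance logic on top of it:
+
+```java
+public class InMemoryBackingMap<T> implements IBackingMap<T> {
+    private final Map<List<Object>, T> db = new HashMap<List<Object>, T>();
+
+    public List<T> multiGet(List<List<Object>> keys) {
+        List<T> ret = new ArrayList<T>(keys.size());
+        for(List<Object> key: keys) {
+            ret.add(db.get(key)); // null when the key has never been written
+        }
+        return ret;
+    }
+
+    public void multiPut(List<List<Object>> keys, List<T> vals) {
+        for(int i = 0; i < keys.size(); i++) {
+            db.put(keys.get(i), vals.get(i));
+        }
+    }
+}
+```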
+
+Trident also provides the [CachedMap](https://github.com/apache/incubator-storm/blob/{{page.version}}/storm-core/src/jvm/storm/trident/state/map/CachedMap.java) class to do automatic LRU caching of map key/vals.
+
+Finally, Trident provides the [SnapshottableMap](https://github.com/apache/incubator-storm/blob/{{page.version}}/storm-core/src/jvm/storm/trident/state/map/SnapshottableMap.java) class that turns a MapState into a Snapshottable object, by storing global aggregations into a fixed key.
+
+Take a look at the implementation of [MemcachedState](https://github.com/nathanmarz/trident-memcached/blob/master/src/jvm/trident/memcached/MemcachedState.java) to see how all these utilities can be put together to make a high performance MapState implementation. MemcachedState allows you to choose between opaque transactional, transactional, and non-transactional semantics.


[10/16] storm git commit: STORM-1617: Release Specific Documentation 0.9.x

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Multilang-protocol.md
----------------------------------------------------------------------
diff --git a/docs/Multilang-protocol.md b/docs/Multilang-protocol.md
new file mode 100644
index 0000000..a3cb22c
--- /dev/null
+++ b/docs/Multilang-protocol.md
@@ -0,0 +1,221 @@
+---
+layout: documentation
+---
+This page explains the multilang protocol as of Storm 0.7.1. Versions prior to 0.7.1 used a somewhat different protocol, documented [here](Storm-multi-language-protocol-(versions-0.7.0-and-below\).html).
+
+# Storm Multi-Language Protocol
+
+## Shell Components
+
+Support for multiple languages is implemented via the ShellBolt,
+ShellSpout, and ShellProcess classes.  These classes implement the
+IBolt and ISpout interfaces and the protocol for executing a script or
+program via the shell using Java's ProcessBuilder class.
+
+## Output fields
+
+Output fields are part of the Thrift definition of the topology. This means that when you use multilang in Java, you need to create a bolt that extends ShellBolt, implements IRichBolt, and declares the fields in `declareOutputFields` (similarly for ShellSpout).
+
+You can learn more about this on [Concepts](Concepts.html).
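+
+As a minimal sketch, such a shell bolt declaration might look like this (the script name splitwords.py and the output field are made-up examples; the script itself would live in the resources/ directory described below):
+
+```java
+public class SplitWordsBolt extends ShellBolt implements IRichBolt {
+    public SplitWordsBolt() {
+        super("python", "splitwords.py"); // command and script to execute
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        // output fields are declared on the Java side, not by the script
+        declarer.declare(new Fields("word"));
+    }
+
+    public Map<String, Object> getComponentConfiguration() {
+        return null;
+    }
+}
+```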
+
+## Protocol Preamble
+
+A simple protocol is implemented via the STDIN and STDOUT of the
+executed script or program. All data exchanged with the process is
+encoded in JSON, making support possible for pretty much any language.
+
+# Packaging Your Stuff
+
+To run a shell component on a cluster, the scripts that are shelled
+out to must be in the `resources/` directory within the jar submitted
+to the master.
+
+However, during development or testing on a local machine, the resources
+directory just needs to be on the classpath.
+
+## The Protocol
+
+Notes:
+
+* Both ends of this protocol use a line-reading mechanism, so be sure to
+trim off newlines from the input and to append them to your output.
+* All JSON inputs and outputs are terminated by a single line containing "end". Note that this delimiter is not itself JSON encoded.
+* The bullet points below are written from the perspective of the script writer's
+STDIN and STDOUT.
+
+### Initial Handshake
+
+The initial handshake is the same for both types of shell components:
+
+* STDIN: Setup info. This is a JSON object with the Storm configuration, Topology context, and a PID directory, like this:
+
+```
+{
+    "conf": {
+        "topology.message.timeout.secs": 3,
+        // etc
+    },
+    "context": {
+        "task->component": {
+            "1": "example-spout",
+            "2": "__acker",
+            "3": "example-bolt"
+        },
+        "taskid": 3
+    },
+    "pidDir": "..."
+}
+```
+
+Your script should create an empty file named with its PID in this directory. For example,
+if the PID is 1234, an empty file named 1234 is created in the directory. This
+file lets the supervisor know the PID so it can shut down the process later on.
+
+* STDOUT: Your PID, in a JSON object, like `{"pid": 1234}`. The shell component will log the PID to its log.
+
+What happens next depends on the type of component:
+
+### Spouts
+
+Shell spouts are synchronous. The rest happens in a while(true) loop:
+
+* STDIN: Either a next, ack, or fail command.
+
+"next" is the equivalent of ISpout's `nextTuple`. It looks like:
+
+```
+{"command": "next"}
+```
+
+"ack" looks like:
+
+```
+{"command": "ack", "id": "1231231"}
+```
+
+"fail" looks like:
+
+```
+{"command": "fail", "id": "1231231"}
+```
+
+* STDOUT: The results of your spout for the previous command. This can
+  be a sequence of emits and logs.
+
+An emit looks like:
+
+```
+{
+	"command": "emit",
+	// The id for the tuple. Leave this out for an unreliable emit. The id can
+    // be a string or a number.
+	"id": "1231231",
+	// The id of the stream this tuple was emitted to. Leave this empty to emit to default stream.
+	"stream": "1",
+	// If doing an emit direct, indicate the task to send the tuple to
+	"task": 9,
+	// All the values in this tuple
+	"tuple": ["field1", 2, 3]
+}
+```
+
+If not doing an emit direct, you will immediately receive the task ids to which the tuple was emitted on STDIN as a JSON array.
+
+A "log" will log a message in the worker log. It looks like:
+
+```
+{
+	"command": "log",
+	// the message to log
+	"msg": "hello world!"
+}
+```
+
+* STDOUT: a "sync" command ends the sequence of emits and logs. It looks like:
+
+```
+{"command": "sync"}
+```
+
+After you sync, ShellSpout will not read your output until it sends another next, ack, or fail command.
+
+Note that, similarly to ISpout, all of the spouts in the worker will be locked up after a next, ack, or fail, until you sync. Also like ISpout, if you have no tuples to emit for a next, you should sleep for a small amount of time before syncing. ShellSpout will not automatically sleep for you.
+
+
+### Bolts
+
+The shell bolt protocol is asynchronous. You will receive tuples on STDIN as soon as they are available, and you may emit, ack, fail, and log at any time by writing to STDOUT, as follows:
+
+* STDIN: A tuple! This is a JSON encoded structure like this:
+
+```
+{
+    // The tuple's id - this is a string to support languages lacking 64-bit precision
+	"id": "-6955786537413359385",
+	// The id of the component that created this tuple
+	"comp": "1",
+	// The id of the stream this tuple was emitted to
+	"stream": "1",
+	// The id of the task that created this tuple
+	"task": 9,
+	// All the values in this tuple
+	"tuple": ["snow white and the seven dwarfs", "field2", 3]
+}
+```
+
+* STDOUT: An ack, fail, emit, or log. Emits look like:
+
+```
+{
+	"command": "emit",
+	// The ids of the tuples this output tuple should be anchored to
+	"anchors": ["1231231", "-234234234"],
+	// The id of the stream this tuple was emitted to. Leave this empty to emit to default stream.
+	"stream": "1",
+	// If doing an emit direct, indicate the task to send the tuple to
+	"task": 9,
+	// All the values in this tuple
+	"tuple": ["field1", 2, 3]
+}
+```
+
+If not doing an emit direct, you will receive the task ids to which
+the tuple was emitted on STDIN as a JSON array. Note that, due to the
+asynchronous nature of the shell bolt protocol, when you read after
+emitting, you may not receive the task ids. You may instead read the
+task ids for a previous emit or a new tuple to process. You will
+receive the task id lists in the same order as their corresponding
+emits, however.
+
+An ack looks like:
+
+```
+{
+	"command": "ack",
+	// the id of the tuple to ack
+	"id": "123123"
+}
+```
+
+A fail looks like:
+
+```
+{
+	"command": "fail",
+	// the id of the tuple to fail
+	"id": "123123"
+}
+```
+
+A "log" will log a message in the worker log. It looks like:
+
+```
+{
+	"command": "log",
+	// the message to log
+	"msg": "hello world!"
+}
+```
+
+* Note that, as of version 0.7.1, there is no longer any need for a
+  shell bolt to 'sync'.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Powered-By.md
----------------------------------------------------------------------
diff --git a/docs/Powered-By.md b/docs/Powered-By.md
new file mode 100644
index 0000000..7fcc034
--- /dev/null
+++ b/docs/Powered-By.md
@@ -0,0 +1,1028 @@
+---
+layout: documentation
+---
+Want to be added to this page? Send an email [here](mailto:nathan.marz@gmail.com).
+
+<table>
+
+<tr>
+<td>
+<a href="http://groupon.com">Groupon</a>
+</td>
+<td>
+<p>
+At Groupon we use Storm to build real-time data integration systems. Storm helps us analyze, clean, normalize, and resolve large amounts of non-unique data points with low latency and high throughput.
+</p>
+</td>
+</tr>
+
+<tr>
+<td><a href="http://www.weather.com/">The Weather Channel</a></td>
+<td>
+<p>At Weather Channel we use several Storm topologies to ingest and persist weather data. Each topology is responsible for fetching one dataset from an internal or external network (the Internet), reshaping the records for use by our company, and persisting the records to relational databases. It is particularly useful to have an automatic mechanism for repeating attempts to download and manipulate the data when there is a hiccup.</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.fullcontact.com/">FullContact</a>
+</td>
+<td>
+<p>
+At FullContact we currently use Storm as the backbone of the system which synchronizes our Cloud Address Book with third party services such as Google Contacts and Salesforce. We also use it to provide real-time support for our contact graph analysis and federated contact search systems.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://twitter.com">Twitter</a>
+</td>
+<td>
+<p>
+Storm powers a wide variety of Twitter systems, with applications including discovery, realtime analytics, personalization, search, revenue optimization, and many more. Storm integrates with the rest of Twitter's infrastructure, including database systems (Cassandra, Memcached, etc), the messaging infrastructure, Mesos, and the monitoring/alerting systems. Storm's isolation scheduler makes it easy to use the same cluster both for production applications and in-development applications, and it provides a sane way to do capacity planning.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.yahoo.com">Yahoo!</a>
+</td>
+<td>
+<p>
+Yahoo! is developing a next generation platform that enables the convergence of big-data and low-latency processing. While Hadoop is our primary technology for batch processing, Storm empowers stream/micro-batch processing of user events, content feeds, and application logs. 
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.yahoo.co.jp/">Yahoo! JAPAN</a>
+</td>
+<td>
+<p>
+Yahoo! JAPAN is a leading web portal in Japan. Storm applications process various streaming data such as logs and social data. We use Storm to feed content, monitor systems, detect trending topics, and crawl websites.
+</p>
+</td>
+</tr>
+
+
+<tr>
+<td>
+<a href="http://www.webmd.com">WebMD</a>
+</td>
+<td>
+<p>
+We use Storm to power our Medscape Medpulse mobile application, which allows medical professionals to follow important medical trends with Medscape's curated Today on Twitter feed and selection of blogs. A Storm topology captures and processes tweets with the Twitter streaming API, enhances tweets with metadata and images, does real-time NLP, and executes several business rules. Storm also monitors a selection of blogs in order to give our customers real-time updates.  We also use Storm for internal data pipelines to do ETL and for our internal marketing platform where time and freshness are essential.
+</p>
+<p>
+We also use Storm to power our search indexing process.  We continue to discover new use cases for Storm, and it has become one of the core components in our technology stack.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.spotify.com">Spotify</a>
+</td>
+<td>
+<p>
+Spotify serves streaming music to over 10 million subscribers and 40 million active users. Storm powers a wide range of real-time features at Spotify, including music recommendation, monitoring, analytics, and ads targeting. Together with Kafka, memcached, Cassandra, and netty-zmtp based messaging, Storm enables us to build low-latency fault-tolerant distributed systems with ease.
+</p>
+</td>
+</tr>
+
+
+<tr>
+<td>
+<a href="http://www.infochimps.com">Infochimps</a>
+</td>
+<td>
+<p>
+Infochimps uses Storm as part of its Big Data Enterprise Cloud. Specifically, it uses Storm as the basis for one of three of its cloud data services - namely, Data Delivery Services (DDS), which uses Storm to provide a fault-tolerant and linearly scalable enterprise data collection, transport, and complex in-stream processing cloud service. 
+</p>
+
+<p>
+In much the same way that Hadoop provides batch ETL and large-scale batch analytical processing, the Data Delivery Service provides real-time ETL and large-scale real-time analytical processing — the perfect complement to Hadoop (or in some cases, what you needed instead of Hadoop).
+</p>
+
+<p>
+DDS uses both Storm and Kafka along with a host of additional technologies to provide an enterprise-class real-time stream processing solution with features including:
+</p>
+
+<ul>
+<li>
+Integration connections to any variety of data sources in a way that is robust yet non-invasive
+</li>
+<li>
+Optimizations for highly scalable, reliable data import and distributed ETL (extract, transform, load), fulfilling data transport needs
+</li>
+<li>
+Developer tools for rapid development of decorators, which perform the real-time stream processing
+</li>
+<li>
+Guaranteed delivery framework and data failover snapshots to send processed data to analytics systems, databases, file systems, and applications with extreme reliability
+</li>
+<li>
+Rapid solution development and deployment, along with our expert Big Data methodology and best practices
+</li>
+</ul>
+
+<p>Infochimps has extensive experience in deploying its DDS to power large-scale clickstream web data flows, massive Twitter stream processes, Foursquare event processing, customer purchase data, product pricing data, and more.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://healthmarketscience.com/">Health Market Science</a>
+</td>
+<td>
+<p>
+Health Market Science (HMS) provides data management as a service for the healthcare industry.  Storm is at the core of the HMS big data platform, functioning as the data ingestion mechanism that orchestrates the data flow across multiple persistence mechanisms, which allows HMS to deliver Master Data Management (MDM) and analytics capabilities for a wide range of healthcare needs: compliance, integrity, data quality, and operational decision support.
+</p>
+</td>
+</tr>
+
+
+<tr>
+<td>
+<a href="http://cerner.com/">Cerner</a>
+</td>
+<td>
+<p>
+Cerner is a leader in health care information technology. We have been using Storm since its release to process massive amounts of clinical data in real-time. Storm integrates well in our architecture, allowing us to quickly provide clinicians with the data they need to make medical decisions.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.aeris.com/">Aeris Communications</a>
+</td>
+<td>
+<p>
+Aeris Communications has the only cellular network that was designed and built exclusively for machines. Our ability to provide scalable, reliable real-time analytics - powered by Storm - for machine to machine (M2M) communication offers immense value to our customers. We have been using Storm in production since Q1 of 2013.
+</p>
+</td>
+</tr>
+
+
+
+<tr>
+<td>
+<a href="http://flipboard.com/">Flipboard</a>
+</td>
+<td>
+<p>
+Flipboard is the worldʼs first social magazine, a single place to keep up with everything you care about and collect it in ways that reflect you. Inspired by the beauty and ease of print media, Flipboard is designed so you can easily flip through news from around the world or stories from right at home, helping people find the one thing that can inform, entertain or even inspire them every day.
+</p>
+<p>
+We are using Storm across a wide range of our services from content search, to realtime analytics, to generating custom magazine feeds. We then integrate Storm across our infrastructure within systems like ElasticSearch, HBase, Hadoop and HDFS to create a highly scalable data platform.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.rubiconproject.com/">Rubicon Project</a>
+</td>
+<td>
+<p>
+Storm is being used in production mode at the Rubicon Project to analyze the results of auctions of ad impressions on its RTB exchange as they occur.  It is currently processing around 650 million auction results in three data centers daily (with 3 separate Storm clusters). One simple application is identifying new creatives (ads) in real time for ad quality purposes.  A more sophisticated application is an "Inventory Valuation Service" that uses DRPC to return appraisals of new impressions before the auction takes place.  The appraisals are used for various optimization problems, such as deciding whether to auction an impression or skip it when close to maximum capacity.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.ooyala.com/">Ooyala</a>
+</td>
+<td>
+<p>
+Ooyala powers personalized multi-screen video experiences for some of the world's largest networks, brands and media companies. We provide all the technology and tools our customers need to manage, distribute and monetize digital video content at a global scale.
+</p>
+
+<p>
+At the core of our technology is an analytics engine that processes over two billion analytics events each day, derived from nearly 200 million viewers worldwide who watch video on an Ooyala-powered player.
+</p>
+
+<p>
+Ooyala will be deploying Storm in production to give our customers real-time streaming analytics on consumer viewing behavior and digital content trends. Storm enables us to rapidly mine one of the world's largest online video data sets to deliver up-to-the-minute business intelligence ranging from real-time viewing patterns to personalized content recommendations to dynamic programming guides and dozens of other insights for maximizing revenue with online video.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.taobao.com/index_global.php">Taobao</a>
+</td>
+<td>
+<p>
+We compute statistics over logs and extract useful information from them in almost real-time with Storm.  Logs are read from Kafka-like persistent message queues into spouts, then processed and emitted over the topologies to compute desired results, which are then stored into distributed databases to be used elsewhere. Input log counts vary from 2 million to 1.5 billion per day, with sizes up to 2 terabytes among the projects.  The main challenge here is not only real-time processing of a big data set; storing and persisting the results is also a challenge and needs careful design and implementation.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.alibaba.com/">Alibaba</a>
+</td>
+<td>
+<p>
+Alibaba is the leading B2B e-commerce website in the world. We use Storm to process application logs and data changes in databases to supply realtime stats for data apps.
+</p>
+</td>
+</tr>
+
+
+<tr>
+<td>
+<a href="http://iQIYI.COM">iQIYI</a>
+</td>
+<td>
+<p>
+iQIYI is China's largest online video platform. We are using Storm in our video advertising system, video recommendation system, log analysis system and many other scenarios. Now we have several standalone Storm clusters, and we also have Storm clusters on Mesos and on Yarn. Kafka-Storm integration and Storm–HBase integration are quite common in our production environment. We have great interest in new developments around integrating Storm with other applications, like HBase, HDFS and Kafka.
+</p>
+</td>
+</tr>
+
+
+<tr>
+<td>
+<a href="http://www.baidu.com/">Baidu</a>
+</td>
+<td>
+<p>
+Baidu offers top search technology services for websites, audio files and images. My group uses Storm to process search logs to supply realtime stats (PV, ar-time and so on).
+This project helps Ops determine and monitor service status, and can do great things in the future.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.yelp.com/">Yelp</a>
+</td>
+<td>
+<p>
+Yelp is using Storm with <a href="http://pyleus.org/">Pyleus</a> to build a platform for developers to consume and process high throughput streams of data in real time. We have ongoing projects to use Storm and Pyleus for overhauling our internal application metrics pipeline, building an automated Python profile analysis system, and for general ETL operations. As its support for non-JVM components matures, we hope to make Storm the standard way of processing streaming data at Yelp.
+</p>
+</td>
+</tr>
+
+
+<tr>
+<td>
+<a href="http://www.klout.com/">Klout</a>
+</td>
+<td>
+<p>
+Klout helps everyone discover and be recognized for their influence by analyzing engagement with their content across social networks. Our analysis powers a daily Klout Score on a scale from 1-100 that shows how much influence social media users have and on what topics. We are using Storm to develop a realtime scoring and moments generation pipeline. Leveraging Storm's intuitive Trident abstraction, we are able to create complex topologies which stream data from our network collectors via Kafka, process it, and write it out to HDFS.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.loggly.com">Loggly</a>
+</td>
+<td>
+<p>
+Loggly is the world's most popular cloud-based log management service. It helps DevOps and technical teams make sense of the massive quantity of logs produced by a growing number of cloud-centric applications, in order to solve operational problems faster. Storm is the heart of our ingestion pipeline, where it filters, parses and analyzes billions of log events all day, every day, in real time.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://premise.is/">premise.is</a>
+</td>
+<td>
+<p>
+We're building a platform for alternative, bottom-up, high-granularity econometric data capture, particularly targeting opaque developing economies (i.e., Argentina might lie about their inflation statistics, but their black market certainly doesn't). Basically we get to funnel hedge fund money into improving global economic transparency. 
+</p>
+<p>
+We've been using Storm in production since January 2012 as a streaming, time-indexed web crawl + extraction + machine learning-based semantic markup flow (about 60 physical nodes comparable to m1.large; generating a modest 25GB/hr incremental). We wanted to have an end-to-end push-based system where new inputs get percolated through the topology in realtime and appear on the website, with no batch jobs required in between steps. Storm has been really integral to realizing this goal.
+</p>
+</td>
+</tr>
+
+
+
+<tr>
+<td>
+<a href="http://www.wego.com/">Wego</a>
+</td>
+<td>
+<p>Wego is one of the world's most comprehensive travel metasearch engines, operating in 42 markets worldwide and used by millions of travelers to save time, pay less and travel more. We compare and display real-time flight and hotel pricing and availability from hundreds of leading travel sites around the world on one simple screen.</p>
+
+<p>At the heart of our products, Storm helps us stream real-time metasearch data from our partners to end users. Since data comes from many sources and with different timing, Storm's topology concept naturally solves concurrency issues while helping us continuously merge, slice and clean all the data. Additionally, with a few tricks and the tools provided in Storm, we can easily apply incremental updates to improve the flow of our data (1-5GB/minute).</p>
+ 
+<p>With its simplicity, scalability, and flexibility, Storm not only improves our current products but, more importantly, changes the way we work with data. Instead of keeping data static and crunching it once in a while, we constantly move data around, making use of different technologies, evaluating new ideas and building new products. We stream critical data to memory for fast access while continuously crunching and directing huge amounts of data into various engines so that we can evaluate and make use of it instantly. Previously, this kind of system required setting up and maintaining quite a few components, but with Storm all we need is half a day of coding and a few seconds to deploy. In this sense we never think of Storm as serving our products but rather as evolving them.</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://rocketfuel.com/">RocketFuel</a>
+</td>
+<td>
+<p>
+At Rocket Fuel (an ad network) we are building a real-time platform on top of Storm which mirrors the time-critical workflows of our existing Hadoop-based ETL pipeline. This platform tracks impressions, clicks, conversions, bid requests, etc. in real time. We are using Kafka as the message queue. To start with, we are pushing per-minute aggregations directly to MySQL, but we plan to go finer than one minute and may bring HBase into the picture to handle the increased write load.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://quicklizard.com/">QuickLizard</a>
+</td>
+<td>
+<p>
+QuickLizard builds an automated pricing solution for companies with many products in their catalogs. Prices are influenced by multiple factors internal and external to the company.
+</p>
+
+<p>
+Currently we use Storm to choose products that need to be priced. We receive a real-time stream of events from client sites and filter it down to a much lighter stream of products to be processed by our price-recommendation procedures.
+</p>
+
+<p>
+We also plan to use Storm for real-time data mining model calculation, matching products described on competitor sites to client products.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://spider.io/">spider.io</a>
+</td>
+<td>
+<p>
+At spider.io we've been using Storm as a core part of our classification engine since October 2011. We run Storm topologies to combine, analyse and classify real-time streams of internet traffic, to identify suspicious or undesirable website activity. Over the past 7 months we've expanded our use of Storm, so it now manages most of our real-time processing. Our classifications are displayed in a custom analytics dashboard, where Storm's distributed remote procedure call interface is used to gather data from our database and metadata services. DRPC allows us to increase the responsiveness of our user interface by distributing processing across a cluster of Amazon EC2 instances.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://8digits.com/">8digits</a>
+</td>
+<td>
+<p>
+At 8digits, we are using Storm in our analytics engine, which is one of the most crucial parts of our infrastructure. We utilize several multi-core cloud servers to run a real-time system making several complex calculations. Storm is a proven, solid and powerful framework for most big-data problems.
+</p>
+</td>
+</tr>
+
+
+
+<tr>
+<td>
+<a href="https://www.alipay.com/">Alipay</a>
+</td>
+<td>
+<p>
+Alipay is China's leading third-party online payment platform. We are using Storm in many scenarios:
+</p>
+
+<ol>
+<li>
+Calculating realtime trade quantity, trade amount, top-N seller trading information, and user registration counts; more than 100 million messages per day.
+</li>
+<li>
+Log processing: more than 6TB of data per day.
+</li>
+</ol>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://navisite.com/">NaviSite</a>
+</td>
+<td>
+<p>
+We are using Storm as part of our server event log monitoring/auditing system.  We send log messages from thousands of servers into a RabbitMQ cluster and then use Storm to check each message against a set of regular expressions.  If there is a match (&lt; 1% of messages), the message is sent to a bolt that stores the data in a Mongo database.  Right now we are handling a load of around 5-10k messages per second; however, we have tested our existing RabbitMQ + Storm clusters up to about 50k per second.  We have plans to do real-time intrusion detection as an enhancement to the current log message reporting system.
+</p>
+
+<p>
+We have Storm deployed on the NaviSite Cloud platform.  We have a ZK cluster of 3 small VMs, 1 Nimbus VM and 16 dual core/4GB VMs as supervisors.
+</p>
+</td>
+</tr>
+
+
+<tr>
+<td>
+<a href="http://www.paywithglyph.com">Glyph</a>
+</td>
+<td>
+<p>
+Glyph is in the business of providing credit card rewards intelligence to consumers. At the point of sale, Glyph suggests to its users the best cards to use at a given merchant location to earn maximum rewards. Glyph also suggests which cards a user should carry to earn maximum rewards based on their personal spending habits. Glyph provides this information by retrieving and analyzing credit card transactions from banks. Storm is used in Glyph to perform this retrieval and analysis in realtime. We are using Memcached in conjunction with Storm for handling sessions. We are impressed by how Storm makes high availability and reliability of Glyph services possible. We are now using Storm and Clojure in building Glyph data analytics and insights services. We have open-sourced a node-drpc wrapper module for easy Storm DRPC integration with NodeJS.
+</p>
+</td>
+</tr>
+<tr>
+<td>
+<a href="http://heartbyte.com/">Heartbyte</a>
+</td>
+<td>
+<p>
+At Heartbyte, Storm is a central piece of our realtime audience participation platform.  We are often required to process a 'vote' per second from hundreds of thousands of mobile devices simultaneously and process / aggregate all of the data within a second.  Further, we are finding that Storm is a great alternative to other ingest tools for Hadoop/HBase, which we use for batch processing after our events conclude.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://2lemetry.com/">2lemetry</a>
+</td>
+<td>
+<p>
+2lemetry uses Storm to power its real-time analytics on top of the m2m.io offering. 2lemetry is partnered with Sprint, Verizon, AT&T, and Arrow Electronics to power IoT applications worldwide. Some of 2lemetry's larger projects include RTX, Kontron, and Intel. 2lemetry also works with many professional sporting teams to parse data in real time. 2lemetry receives events for every touch of the ball in every MLS soccer match. Storm is used to look for trends, like passing tendencies, as they develop during the game.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.nodeable.com/">Nodeable</a>
+</td>
+<td>
+<p>
+Nodeable uses Storm to deliver real-time continuous computation of the data we consume. Storm has made it significantly easier for us to scale our service more efficiently while ensuring the data we deliver is timely and accurate.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="https://twitsprout.com/">TwitSprout</a>
+</td>
+<td>
+<p>
+At TwitSprout, we use Storm to analyze activity on Twitter to monitor mentions of keywords (mostly client product and brand names) and trigger alerts when activity around a certain keyword spikes above normal levels. We also use Storm to back the data behind the live-infographics we produce for events sponsored by our clients. The infographics are usually in the form of a live dashboard that helps measure the audience buzz across social media as it relates to the event in realtime.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.happyelements.com/">HappyElements</a>
+</td>
+<td>
+<p>
+<a href="http://www.happyelements.com">HappyElements</a> is a leading social game developer on Facebook and other SNS platforms. We developed a real time data analysis program based on storm to analyze user activity in real time.  Storm is very easy to use, stable, scalable and maintainable.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.idexx.com/view/xhtml/en_us/corporate/home.jsf">IDEXX Laboratories</a>
+</td>
+<td>
+<p>
+IDEXX Laboratories is the leading maker of software and diagnostic instruments for the veterinary market. We collect and analyze veterinary medical data from thousands of veterinary clinics across the US. We recently embarked on a project to upgrade our aging data processing infrastructure that was unable to keep up with the rapid increase in the volume, velocity and variety of data that we were processing.
+</p>
+
+<p>
+We are utilizing the Storm system to take in the data that is extracted from the medical records in a number of different schemas, transform it into a standard schema that we created and store it in an Oracle RDBMS database. It is basically a souped up distributed ETL system. Storm takes on the plumbing necessary for a distributed system and is very easy to write code for. The ability to create small pieces of functionality and connect them together gives us the ultimate flexibility to parallelize each of the pieces differently.
+</p>
+
+<p>
+Our current cluster consists of four supervisor machines running 110 tasks inside 32 worker processes. We run two different topologies which receive messages and communicate with each other via RabbitMQ. The whole thing is deployed on Amazon Web Services and utilizes S3 for some intermediate storage, Redis as a key/value store and Oracle RDS for RDBMS storage. The bolts are all written in Java using the Spring framework with Hibernate as an ORM.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.umeng.com/">Umeng</a>
+</td>
+<td>
+Umeng is the leading and largest provider of mobile app analytics and developer services in China. Storm powers Umeng's realtime analytics platform, processing billions of data points per day and growing. We also use Storm in other products that require realtime processing, and it has become core infrastructure in our company.
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.admaster.com.cn/">Admaster</a>
+</td>
+<td>
+<p>
+We provide monitoring and precise delivery for Internet advertising. We use Storm to do the following:
+</p>
+
+<ol>
+<li>Calculate PV, UV of every advertisement.</li>
+<li>Simple data cleaning: filtering out malformed data and cheating data (PV above a certain threshold).</li>
+</ol>
+Our cluster has 8 nodes and processes several billion messages, about 200GB, per day.
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://socialmetrix.com/en/">SocialMetrix</a>
+</td>
+<td>
+<p>
+Since its release, Storm has been a perfect fit for our real-time monitoring needs. Its powerful API and easy administration and deployment enabled us to rapidly build solutions to monitor presidential elections and several major events, and it is currently the processing core of our new product "Socialmetrix Eventia".
+</p>
+</td>
+</tr>
+
+
+<tr>
+<td>
+<a href="http://needium.com/">Needium</a>
+</td>
+<td>
+<p>
+At Needium we love Ruby and JRuby. The Storm platform offers the right balance between simplicity, flexibility and scalability. We created RedStorm, a Ruby DSL for Storm, to keep using Ruby on top of the power of Storm by leveraging Storm's JVM foundation with JRuby. We currently use Storm as our Twitter realtime data processing pipeline. We have Storm topologies for content filtering, geolocalization and classification. Storm allows us to architect our pipeline for full Twitter firehose scale.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://parse.ly/">Parse.ly</a>
+</td>
+<td>
+<p>
+Parse.ly is using Storm for its web/content analytics system. We have a home-grown data processing and storage system built with Python and Celery, with backend stores in Redis and MongoDB. We are now using Storm for real-time unique visitor counting and are exploring options for using it for some of our richer data sources such as social share data and semantic content metadata.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.parc.com/">PARC</a>
+</td>
+<td>
+<p>
+The High Performance Graph Analytics & Real-time Insights research team at PARC uses Storm as one of the building blocks of its PARC Analytics Cloud infrastructure, which comprises Nebula-based OpenStack, Hadoop, SAP HANA, Storm, PARC Graph Analytics, and a machine learning toolbox. It enables researchers to process real-time data feeds from sensors, the web, networks, social media, and security traces, and to easily ingest any other real-time data feeds of interest to PARC researchers.
+</p>
+<p>
+PARC researchers are working with a number of industry collaborators to develop new tools, algorithms, and models to analyze massive amounts of e-commerce, web clickstream, 3rd-party syndicated, cohort, and social media data streams, as well as structured data from RDBMS, NoSQL, and NewSQL systems, in near real-time. The PARC team is developing a reference architecture and benchmarks for its near real-time automated insight discovery platform, combining the power of all the above tools with PARC's applied research in machine learning, graph analytics, reasoning, clustering, and contextual recommendations. The High Performance Graph Analytics & Real-time Insights research at PARC is headed by Surendra Reddy (<http://www.linkedin.com/in/skreddy>). If you are interested in learning more about our experience using Storm, want to know more about our research, or would like to collaborate with PARC in this area, please feel free to contact sureddy@parc.com.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://gumgum.com/">GumGum</a>
+</td>
+<td>
+<p>
+GumGum, the leading in-image advertising platform for publishers and brands, uses Storm to produce real-time data. Storm and Trident-based topologies consume various ad-related events from Kafka and persist the aggregations in MySQL and HBase. This architecture will eventually replace most of our existing daily Hadoop MapReduce jobs. There are also plans for Kafka + Storm to replace our existing distributed queue processing infrastructure built with Amazon SQS.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.crowdflower.com/">CrowdFlower</a>
+</td>
+<td>
+<p>
+CrowdFlower is using Storm with Kafka to generalize our data stream
+aggregation and realtime computation infrastructure. We replaced our
+homegrown aggregation solutions with Storm because it simplified the
+creation of fault tolerant systems. We were already using Zookeeper
+and Kafka, so Storm allowed us to build more generic abstractions for
+our analytics using tools that we had already deployed and
+battle-tested in production.
+</p>
+
+<p>
+We are currently writing to DynamoDB from Storm, so we are able to
+scale our capacity quickly by bringing up additional supervisors and
+tweaking the throughput on our Dynamo tables. We look forward to
+exploring other uses for Storm in our system, especially with the
+recent release of Trident.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.dsbox.com">Digital Sandbox</a>
+</td>
+<td>
+<p>
+At Digital Sandbox we use Storm to enable our open source information feed monitoring system.  The system uses Storm to constantly monitor and pull data from structured and unstructured information sources across the internet.  For each found item, our topology applies natural language processing based concept analysis, temporal analysis, geospatial analytics and a prioritization algorithm to enable users to monitor large special events, public safety operations, and topics of interest to a multitude of individual users and teams.
+</p>
+ 
+<p>
+Our system is built using Storm for feed retrieval and annotation, Python with Flask and jQuery for business logic and web interfaces, and MongoDB for data persistence. We use NLTK for natural language processing and the WordNet, GeoNames, and OpenStreetMap databases to enable feed item concept extraction and geolocation.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://hallo.me/">Hallo</a>
+</td>
+<td>
+With several mainstream celebrities and very popular YouTubers using Hallo to communicate with their fans, we needed a good solution to notify users via push notifications and make sure that the celebrity messages were delivered to follower timelines in near realtime. Our initial approach for broadcast push notifications would take anywhere from 2 to 3 hours. After re-engineering our solution on top of Storm, that time has been cut down to 5 minutes on a very small cluster. With the user base growing and users' need for realtime communication, we are very happy knowing that we can easily scale Storm by adding nodes to maintain a baseline QoS for our users.
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://keepcon.com/">Keepcon</a>
+</td>
+<td>
+We provide moderation services for classifieds, kids' communities, newspapers, chat rooms, Facebook fan pages, YouTube channels, reviews, and all kinds of UGC. We use Storm for the integration with our clients: finding evidence within each text, persisting it to Cassandra and Elasticsearch, and sending results back to our clients.
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.visiblemeasures.com/">Visible Measures</a>
+</td>
+<td>
+<p>
+Visible Measures powers video campaigns and analytics for publishers and
+advertisers, tracking data for hundreds of millions of videos, and billions
+of views. We are using Storm to process viewing behavior data in real time and make
+the information immediately available to our customers. We read events from
+various push and pull sources, including a Kestrel queue, filter and
+enrich the events in Storm topologies, and persist the events to Redis,
+HDFS and Vertica for real-time analytics and archiving. We are currently
+experimenting with Trident topologies, and figuring out how to move more
+of our Hadoop-based batch processing into Storm.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.o2mc.eu/en/">O2mc</a>
+</td>
+<td>
+<p>
+One of the core products of O2mc is called O2mc Community. O2mc Community performs multilingual, realtime sentiment analysis with very low latency and distributes the analyzed results to numerous clients. The input is extracted from source systems like Twitter, Facebook, e-mail and many more. After the analysis has taken place on Storm, the results are streamed to any output system, ranging from HTTP streaming to clients, to direct database insertion, to an external business process engine that kickstarts a process.</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.theladders.com">The Ladders</a>
+</td>
+<td>
+<p>
+TheLadders has been committed to finding the right person for the right job since 2003. We're using Storm in a variety of ways and are happy with its versatility, robustness, and ease of development. We use Storm in conjunction with RabbitMQ for such things as sending hiring alerts: when a recruiter submits a job to our site, Storm processes that event and will aggregate jobseekers whose profiles match the position. That list is subsequently batch-processed to send an email to the list of jobseekers. We also use Storm to persist events for Business Intelligence and internal event tracking. We're continuing to find uses for Storm where fast, asynchronous, real-time event processing is a must.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://semlab.nl">SemLab</a>
+</td>
+<td>
+<p>
+SemLab develops software for knowledge discovery and information support. Our ViewerPro platform uses information extraction, natural language processing and semantic web technologies to extract structured data from unstructured sources, in domains such as financial news feeds and legal documents. We have successfully adapted ViewerPro's processing framework to run on top of Storm. The transition to Storm has made ViewerPro a much more scalable product, allowing us to process more in less time.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://visualrevenue.com/">Visual Revenue</a>
+</td>
+<td>
+<p>
+Here at Visual Revenue, we built a decision support system that helps online editors make choices on what, when, and where to promote their content in real time. Storm is the backbone of our real-time data processing and aggregation pipelines.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.peerindex.com/">PeerIndex</a>
+</td>
+<td>
+<p>
+PeerIndex is working to deliver influence at scale. PeerIndex does this by exposing services built on top of our Influence Graph: a directed graph of who is influencing whom on the web. PeerIndex gathers data from a number of social networks to create the Influence Graph. We use Storm to process our social data, to provide real-time aggregations, and to crawl the web, before storing our data in the manner most suitable for our Hadoop-based systems to batch process. Storm provided us with an intuitive API and has slotted in well with the rest of our architecture. PeerIndex looks forward to investing further resources into our Storm-based real-time analytics.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://ants.vn">ANTS.VN</a>
+</td>
+<td>
+<p>
+ANTS.VN is Vietnam's unique big-data advertising platform, combining ad serving, a real-time bidding (RTB) exchange, analytics, yield optimization, and content valuation to deliver the highest revenue across every desktop, tablet, and mobile screen. At ANTS.VN we use Storm to process large amounts of data in real time and to improve our ad quality. The platform tracks impressions, clicks, conversions, bid requests, etc. in real time. Together with Kafka, Redis, Memcached and Cassandra-based messaging, Storm enables us to build low-latency, fault-tolerant distributed systems with ease.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.wayfair.com">Wayfair</a>
+</td>
+<td>
+<p>
+At Wayfair, we use Storm as a platform to drive our core order processing pipeline as an event-driven system. Storm allows us to reliably process tens of thousands of orders daily while providing the assurance of seamless process scalability as our order load increases. Given the project's ease of use and the immense support of the community, we've managed to implement our bolts in PHP, construct a simple Puppet module for configuration management, and quickly solve arising issues. We can now focus most of our development efforts on the business layer; for more information on how we use Storm, check out <a href="http://engineering.wayfair.com/stormin-oms/">our engineering blog</a>.</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://innoquant.com/">InnoQuant</a>
+</td>
+<td>
+<p>
+At InnoQuant, we use Storm as the backbone of the real-time big data analytics engine in our MOCA platform. MOCA is a next-generation mobile-backend-as-a-service (MBaaS) platform. It provides brands and app developers with real-time in-app tracking, context-aware push messaging, user micro-segmentation based on profile, time and geo-context, as well as big data analytics. The Storm-based pipeline is fed with events captured by native mobile SDKs (iOS, Android), scales nicely with the number of connected mobile app users, delivers stream-based metrics and aggregations, and integrates with the rest of the MOCA infrastructure, including columnar storage (Cassandra) and graph storage (Titan).
+</p>
+</td>
+</tr>
+
+
+<tr>
+<td>
+<a href="http://www.fliptop.com/">Fliptop</a>
+</td>
+<td>
+<p>
+Fliptop is a customer intelligence platform that allows customers to integrate their contact and campaign data, enhance their prospects with social identities, and find their best leads and most influential customers. We have been using Storm for various tasks that require scalability and reliability, including integrating with sales/marketing platforms, appending data to contacts/leads, and computing contact/lead scores. It is one of our most robust and scalable pieces of infrastructure.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.trovit.com/">Trovit</a>
+</td>
+<td>
+<p>
+Trovit is a search engine for classified ads, present in 39 countries and different business categories (Real Estate, Cars, Jobs, Rentals, Products and Deals). Currently we use Storm to process and index ads in a distributed, low-latency fashion. Combining it with other technologies like Hadoop, HBase and Solr has allowed us to build a scalable, low-latency platform to serve search results to the end user.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.openx.com/">OpenX</a>
+</td>
+<td>
+<p>
+OpenX is a unique platform that combines ad serving, a real-time bidding (RTB) exchange, yield optimization, and content valuation to deliver the highest revenue across every desktop, tablet, and mobile screen.
+At OpenX we use Storm to process large amounts of data for real-time analytics, letting us process data in real time to improve our ad quality.
+</p>
+</td>
+</tr>
+
+
+<tr>
+<td>
+<a href="http://keen.io/">Keen IO</a>
+</td>
+<td>
+<p>
+Keen IO is an analytics backend-as-a-service. The Keen IO API makes it easy for customers to do internal analytics or expose analytics features to their customers. Keen IO uses Storm (DRPC) to query billion-event data sets at very low latencies. We also use Storm to control our ingestion pipeline, sourcing data from Kafka and storing it in Cassandra.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://liveperson.com/">LivePerson</a>
+</td>
+<td>
+<p>
+LivePerson is a provider of interaction services over the web. Interaction between an agent and a site visitor can happen via phone call, chat, banners, etc. Using Storm, LivePerson can collect and process visitor data and provide information to agents in real time about visitor behavior. Moreover, LivePerson can make better decisions about how to react to visitors in a way that best addresses their needs.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://yieldbot.com/">YieldBot</a>
+</td>
+<td>
+<p>
+Yieldbot connects ads to the real-time consumer intent streaming within premium publishers. To do this, Yieldbot leverages Storm for a wide variety of real-time processing tasks. We've open-sourced our Clojure DSL for writing Trident topologies, Marceline, which we use extensively. Events are read from Kafka, most state is stored in Cassandra, and we make heavy use of Storm's DRPC features. Our Storm use cases range from HTML processing, to hotness-style trending, to probabilistic rankings and cardinalities. Storm topologies touch virtually all of the events generated by the Yieldbot platform.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://equinix.com/">Equinix</a>
+</td>
+<td>
+<p>
+At Equinix, we use a number of Storm topologies to process and persist various data streams generated by sensors in our data centers. We also use Storm for real-time monitoring of different infrastructure components. A few other topologies process logs in real time for internal IT systems, which also provides insights into user behavior.
+</p>
+</td>
+</tr>
+
+
+<tr>
+<td>
+<a href="http://minewhat.com/">MineWhat</a>
+</td>
+<td>
+<p>
+MineWhat provides actionable analytics for ecommerce, spanning every SKU, brand and category in the store. We use Storm to process raw clickstream ingestion from Kafka and compute live analytics. Storm topologies power our complex product-to-user interaction analysis. Storm's multi-language feature is really kick-ass; we have bolts written in Node.js, Python and Ruby. Storm has been on our production site since Nov 2012.
+</p>
+</td>
+</tr>
+
+
+<tr>
+<td>
+<a href="http://www.360.cn/">Qihoo 360</a>
+</td>
+<td>
+<p>
+360 has deployed about 50 realtime applications on top of Storm, including web page analysis, log processing, image processing, voice processing, etc.
+</p>
+<p>
+The use of Storm at 360 is a bit special, since we deploy Storm on thousands of servers that are not dedicated to Storm. Storm uses little CPU/memory/network resource on each server, yet these Storm clusters leverage the idle resources of the servers, at nearly zero cost, to provide great realtime computing power. It's amazing.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.holidaycheck.com/">HolidayCheck</a>
+</td>
+<td>
+<p>
+HolidayCheck is an online travel site and agency available in 10
+languages worldwide visited by 30 million people a month.
+We use Storm to deliver real-time hotel and holiday package offers
+from multiple providers - reservation systems and affiliate travel
+networks - in a low latency fashion based on user-selected criteria.
+In further reservation steps we use DRPC for vacancy checks and
+bookings of chosen offers. Along with Storm in the system for offers
+delivery we use Scala, Akka, Hazelcast, Drools and MongoDB. Real-time
+offer stream is delivered outside of the system back to the front-end
+via websocket connections.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://dataminelab.com/">DataMine Lab</a>
+</td>
+<td>
+<p>
+DataMine Lab is a consulting company integrating Storm into its
+portfolio of technologies. Storm powers a range of our customers'
+systems, allowing us to build real-time analytics on tens of millions
+of visitors to the advertising platforms we helped to create. Together
+with Redis, Cassandra and Hadoop, Storm allows us to provide a
+real-time distributed data platform at a global scale.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.wizecommerce.com/">Wize Commerce</a>
+</td>
+<td>
+<p>
+Wize Commerce® is the smartest way to grow your digital business. For over ten years, we have been helping clients maximize their revenue and traffic using optimization technologies that operate at massive scale, and across digital ecosystems. We own and operate leading comparison shopping engines including Nextag®, PriceMachine™, and <a href="http://guenstiger.de">guenstiger.de</a>, and provide services to a wide ecosystem of partner sites that use our e-commerce platform. These sites together drive over $1B in annual merchant sales.
+</p>
+<p>
+We use Storm to power our core platform infrastructure, and it has become a vital component of our search indexing system and Cassandra storage. Along with Kafka, Storm has reduced our end-to-end latencies from several hours to a few minutes. As the operator of some of the largest comparison shopping sites, pushing price updates to the live site is very important to us, and Storm helps a lot in achieving that. We have been using Storm extensively in production since Q1 2013.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://metamarkets.com">Metamarkets</a>
+</td>
+<td>
+<p>At Metamarkets, Apache Storm is used to process real-time event data streamed from Apache Kafka message brokers, and then to load that data into a <a href="http://druid.io">Druid cluster</a>, the low-latency data store at the heart of our real-time analytics service. Our Storm topologies perform various operations, ranging from simple filtering of "outdated" events, to transformations such as ID-to-name lookups, to complex multi-stream joins. Since our service is intended to respond to ad-hoc queries within seconds of ingesting events, the speed, flexibility, and robustness of those topologies make Storm a key piece of our real-time stack.</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.mightytravels.com">Mighty Travels</a>
+</td>
+<td>
+<p>We are using Storm to process real-time search data streams and
+application logs. The part we like best about Storm is the ease of
+scaling up, basically just by throwing more machines at it.</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.polecat.co">Polecat</a>
+</td>
+<td>
+<p>Polecat's digital analysis platform, MeaningMine, allows users to search all online news, blogs and social media in real time and run bespoke analysis in order to inform corporate strategy and decision making for some of the world's largest companies and governmental organisations.</p>
+<p>
+Polecat uses Storm to run an application we've called the 'Data Munger'.  We run many different topologies on a multi-host Storm cluster to process the tens of millions of online articles and posts that we collect each day.  Storm handles our analysis of these documents so that we can provide insight on realtime data to our clients.  We output our results from Storm into one of many large Apache Solr clusters for our end-user applications to query (Polecat is also a contributor to Solr).  We first started developing our app to run on Storm back in June 2012 and it has been live since roughly September 2012.  We've found Storm to be an excellent fit for our needs here, and we've always found it extremely robust and fast.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="https://www.skylight.io/">Skylight by Tilde</a>
+</td>
+<td>
+<p>Skylight is a production profiler for Ruby on Rails apps that focuses on providing detailed information about your running application that you can explore in an intuitive way. We use Storm to process traces from our agent into data structures that we can slice and dice for you in our web app.</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.ad4game.com/">Ad4Game</a>
+</td>
+<td>
+<p>We are an advertising network and we use Storm to calculate priorities in real time to know which ads to show for which website, visitor and country.</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.impetus.com/">Impetus Technologies</a>
+</td>
+<td>
+<p>StreamAnalytix, a product of Impetus Technologies, enables enterprises to analyze and respond to events in real time at Big Data scale. Based on Apache Storm, StreamAnalytix is designed to rapidly build and deploy streaming analytics applications for any industry vertical, any data format, and any use case. This high-performance, scalable platform comes with a pre-integrated package of components like Cassandra, Storm, Kafka and more. In addition, it brings together the proven open source technology stack with Hadoop and NoSQL to provide massive scalability, dynamic data pipelines, and a visual designer for rapid application development.</p>
+<p>
+Through StreamAnalytix, users can ingest, store and analyze millions of events per second and discover exceptions, patterns, and trends through live dashboards. It also provides seamless integration with an indexing store (Elasticsearch) and NoSQL databases (HBase, Cassandra, and Oracle NoSQL) for writing data in real time. With the use of Storm, the product delivers high-business-value solutions such as log analytics, streaming ETL, deep social listening, real-time marketing, business process acceleration and predictive maintenance.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.akazoo.com/en">Akazoo</a>
+</td>
+<td>
+<p>
+Akazoo is a platform providing music streaming services.  Storm is the backbone of all our real-time analytical processing. We use it for tracking and analyzing application events and for various other tasks, including recommendations and parallel task execution.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.mapillary.com">Mapillary</a>
+</td>
+<td>
+<p>
+At Mapillary we use Storm for a wide variety of tasks. Having a system that is 100% based on Kafka input, Storm, and Trident makes reasoning about our data a breeze.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.gutscheinrausch.de/">Gutscheinrausch.de</a>
+</td>
+<td>
+<p>
+We recently upgraded our existing IT infrastructure, using Storm as one of our main tools.
+Each day we collect sales, clicks, visits and various other ecommerce metrics from many different systems (webpages, affiliate reports, networks, tracking scripts, etc.). We process this continually generated data using Storm before entering it into the backend systems for further use.
+</p>
+<p>
+Using Storm we were able to decouple our heterogeneous frontend systems from our backends and take load off the data warehouse applications by feeding in pre-processed data. This way we can easily collect and process all data and then run realtime OLAP queries using our proprietary data warehouse technology.
+</p>
+<p>
+We are mostly impressed by the high speed, low maintenance approach Storm has provided us with. Also being able to easily scale up the system using more machines is a big plus. Since we're a small team it allows us to focus more on our core business instead of the underlying technology. You could say it has taken our hearts by storm!
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.appriver.com">AppRiver</a>
+</td>
+<td>
+<p>
+We are using Storm to track internet threats from varied sources around the web.  It is always fast and reliable.
+</p>
+</td>
+</tr>
+
+<tr>
+<td>
+<a href="http://www.mercadolibre.com/">MercadoLibre</a>
+</td>
+<td>
+</td>
+</tr>
+
+
+</table>

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Project-ideas.md
----------------------------------------------------------------------
diff --git a/docs/Project-ideas.md b/docs/Project-ideas.md
new file mode 100644
index 0000000..aa022ea
--- /dev/null
+++ b/docs/Project-ideas.md
@@ -0,0 +1,6 @@
+---
+layout: documentation
+---
+ * **DSLs for non-JVM languages:** These DSLs should be all-inclusive and not require any Java for the creation of topologies, spouts, or bolts. Since topologies are [Thrift](http://thrift.apache.org/) structs, Nimbus is a Thrift service, and bolts can be written in any language, this is possible.
+ * **Online machine learning algorithms:** Something like [Mahout](http://mahout.apache.org/) but for online algorithms
+ * **Suite of performance benchmarks:** These benchmarks should test Storm's performance on CPU and IO intensive workloads. There should be benchmarks for different classes of applications, such as stream processing (where throughput is the priority) and distributed RPC (where latency is the priority). 

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/README.md
----------------------------------------------------------------------
diff --git a/docs/README.md b/docs/README.md
index c8ec761..b26d3ff 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,12 +1,13 @@
 # Apache Storm Website and Documentation
-This is the source for the Storm website and documentation. It is statically generated using [jekyll](http://jekyllrb.com).
+This is the source for the Release specific part of the Apache Storm website and documentation. It is statically generated using [jekyll](http://jekyllrb.com).
 
 ## Generate Javadoc
 
 You have to generate javadoc in the project root before generating the document site.
 
 ```
-mvn clean install -Pdist # you may skip tests with `-DskipTests=true` to save time
+mvn javadoc:javadoc
+mvn javadoc:aggregate -DreportOutputDirectory=./docs/ -DdestDir=javadocs
 ```
 
 You need to create a distribution package with a gpg certificate. Please refer [here](https://github.com/apache/storm/blob/master/DEVELOPER.md#packaging).
@@ -30,18 +31,31 @@ Point your browser to http://localhost:4000
 
 By default, jekyll will generate the site in a `_site` directory.
 
+This will only show the portion of the documentation that is specific to this release.
 
-## Publishing the Website
-In order to publish the website, you must have committer access to Storm's subversion repository.
+## Adding a new release to the website
+In order to add a new release, you must have committer access to Storm's subversion repository at https://svn.apache.org/repos/asf/storm/site.
 
-The Storm website is published using Apache svnpubsub. Any changes committed to subversion will be automatically published to storm.apache.org.
-
-To publish changes, tell jekyll to generate the site in the `publish` directory of subversion, then commit the changes:
+Release documentation is placed under the releases directory, in a subdirectory named after the release version.  Most metadata about the release will be generated automatically from the name by a jekyll plugin, or can be supplied by placing it in the _data/releases.yml file.
 
+To create a new release, run the following from the main git directory:
 
 ```
+mvn javadoc:javadoc
+mvn javadoc:aggregate -DreportOutputDirectory=./docs/ -DdestDir=javadocs
 cd docs
-jekyll build -d /path/to/svn/repo/publish
-cd /path/to/svn/repo/publish
+mkdir ${path_to_svn}/releases/${release_name}
+cp -r *.md images/ javadocs/ ${path_to_svn}/releases/${release_name}
+cd ${path_to_svn}
+svn add releases/${release_name}
+svn commit
+```
+
+To publish a new release, run:
+
+```
+cd ${path_to_svn}
+jekyll build -d publish/
+svn add publish/ #Add any new files
 svn commit
 ```

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Rationale.md
----------------------------------------------------------------------
diff --git a/docs/Rationale.md b/docs/Rationale.md
new file mode 100644
index 0000000..214266e
--- /dev/null
+++ b/docs/Rationale.md
@@ -0,0 +1,31 @@
+---
+layout: documentation
+---
+The past decade has seen a revolution in data processing. MapReduce, Hadoop, and related technologies have made it possible to store and process data at scales previously unthinkable. Unfortunately, these data processing technologies are not realtime systems, nor are they meant to be. There's no hack that will turn Hadoop into a realtime system; realtime data processing has a fundamentally different set of requirements than batch processing.
+
+However, realtime data processing at massive scale is becoming more and more of a requirement for businesses. The lack of a "Hadoop of realtime" has become the biggest hole in the data processing ecosystem.
+
+Storm fills that hole.
+
+Before Storm, you would typically have to manually build a network of queues and workers to do realtime processing. Workers would process messages off a queue, update databases, and send new messages to other queues for further processing. Unfortunately, this approach has serious limitations:
+
+1. **Tedious**: You spend most of your development time configuring where to send messages, deploying workers, and deploying intermediate queues. The realtime processing logic that you care about corresponds to a relatively small percentage of your codebase.
+2. **Brittle**: There's little fault-tolerance. You're responsible for keeping each worker and queue up.
+3. **Painful to scale**: When the message throughput gets too high for a single worker or queue, you need to partition how the data is spread around. You need to reconfigure the other workers to know the new locations to send messages. This introduces moving parts and new pieces that can fail.
+
+Although the queues and workers paradigm breaks down for large numbers of messages, message processing is clearly the fundamental paradigm for realtime computation. The question is: how do you do it in a way that doesn't lose data, scales to huge volumes of messages, and is dead-simple to use and operate?
+
+Storm satisfies these goals. 
+
+## Why Storm is important
+
+Storm exposes a set of primitives for doing realtime computation. Just as MapReduce greatly eases the writing of parallel batch processing, Storm's primitives greatly ease the writing of parallel realtime computation.
+
+The key properties of Storm are:
+
+1. **Extremely broad set of use cases**: Storm can be used for processing messages and updating databases (stream processing), doing a continuous query on data streams and streaming the results into clients (continuous computation), parallelizing an intense query like a search query on the fly (distributed RPC), and more. Storm's small set of primitives satisfy a stunning number of use cases.
+2. **Scalable**: Storm scales to massive numbers of messages per second. To scale a topology, all you have to do is add machines and increase the parallelism settings of the topology. As an example of Storm's scale, one of Storm's initial applications processed 1,000,000 messages per second on a 10 node cluster, including hundreds of database calls per second as part of the topology. Storm's usage of Zookeeper for cluster coordination makes it scale to much larger cluster sizes.
+3. **Guarantees no data loss**: A realtime system must have strong guarantees about data being successfully processed. A system that drops data has a very limited set of use cases. Storm guarantees that every message will be processed, and this is in direct contrast with other systems like S4. 
+4. **Extremely robust**: Unlike systems like Hadoop, which are notorious for being difficult to manage, Storm clusters just work. It is an explicit goal of the Storm project to make the user experience of managing Storm clusters as painless as possible.
+5. **Fault-tolerant**: If there are faults during execution of your computation, Storm will reassign tasks as necessary. Storm makes sure that a computation can run forever (or until you kill the computation).
+6. **Programming language agnostic**: Robust and scalable realtime processing shouldn't be limited to a single platform. Storm topologies and processing components can be defined in any language, making Storm accessible to nearly anyone.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Running-topologies-on-a-production-cluster.md
----------------------------------------------------------------------
diff --git a/docs/Running-topologies-on-a-production-cluster.md b/docs/Running-topologies-on-a-production-cluster.md
new file mode 100644
index 0000000..248c929
--- /dev/null
+++ b/docs/Running-topologies-on-a-production-cluster.md
@@ -0,0 +1,75 @@
+---
+layout: documentation
+---
+Running topologies on a production cluster is similar to running in [Local mode](Local-mode.html). Here are the steps:
+
+1) Define the topology (Use [TopologyBuilder](javadocs/backtype/storm/topology/TopologyBuilder.html) if defining using Java)
+
+2) Use [StormSubmitter](javadocs/backtype/storm/StormSubmitter.html) to submit the topology to the cluster. `StormSubmitter` takes as input the name of the topology, a configuration for the topology, and the topology itself. For example:
+
+```java
+Config conf = new Config();
+conf.setNumWorkers(20);
+conf.setMaxSpoutPending(5000);
+StormSubmitter.submitTopology("mytopology", conf, topology);
+```
+
+3) Create a jar containing your code and all the dependencies of your code (except for Storm -- the Storm jars will be added to the classpath on the worker nodes).
+
+If you're using Maven, the [Maven Assembly Plugin](http://maven.apache.org/plugins/maven-assembly-plugin/) can do the packaging for you. Just add this to your pom.xml:
+
+```xml
+  <plugin>
+    <artifactId>maven-assembly-plugin</artifactId>
+    <configuration>
+      <descriptorRefs>  
+        <descriptorRef>jar-with-dependencies</descriptorRef>
+      </descriptorRefs>
+      <archive>
+        <manifest>
+          <mainClass>com.path.to.main.Class</mainClass>
+        </manifest>
+      </archive>
+    </configuration>
+  </plugin>
+```
+Then run `mvn assembly:assembly` to get an appropriately packaged jar. Make sure you [exclude](http://maven.apache.org/plugins/maven-assembly-plugin/examples/single/including-and-excluding-artifacts.html) the Storm jars since the cluster already has Storm on the classpath.
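+
+For example, a minimal sketch of declaring the Storm dependency with `provided` scope so that it stays out of the assembled jar (the version shown is illustrative; use the one matching your cluster):
+
+```xml
+<dependency>
+  <groupId>org.apache.storm</groupId>
+  <artifactId>storm-core</artifactId>
+  <version>0.10.0</version>
+  <!-- provided scope keeps storm-core out of the jar-with-dependencies -->
+  <scope>provided</scope>
+</dependency>
+```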
+
+4) Submit the topology to the cluster using the `storm` client, specifying the path to your jar, the classname to run, and any arguments it will use:
+
+`storm jar path/to/allmycode.jar org.me.MyTopology arg1 arg2 arg3`
+
+`storm jar` will submit the jar to the cluster and configure the `StormSubmitter` class to talk to the right cluster. In this example, after uploading the jar `storm jar` calls the main function on `org.me.MyTopology` with the arguments "arg1", "arg2", and "arg3".
+
+You can find out how to configure your `storm` client to talk to a Storm cluster on [Setting up development environment](Setting-up-development-environment.html).
+
+### Common configurations
+
+There are a variety of configurations you can set per topology. A list of all the configurations you can set can be found [here](javadocs/backtype/storm/Config.html). The ones prefixed with "TOPOLOGY" can be overridden on a topology-specific basis (the other ones are cluster configurations and cannot be overridden). Here are some common ones that are set for a topology:
+
+1. **Config.TOPOLOGY_WORKERS**: This sets the number of worker processes to use to execute the topology. For example, if you set this to 25, there will be 25 Java processes across the cluster executing all the tasks. If you had a combined parallelism of 150 across all components in the topology, each worker process will have 6 tasks running within it as threads.
+2. **Config.TOPOLOGY_ACKERS**: This sets the number of tasks that will track tuple trees and detect when a spout tuple has been fully processed. Ackers are an integral part of Storm's reliability model and you can read more about them on [Guaranteeing message processing](Guaranteeing-message-processing.html).
+3. **Config.TOPOLOGY_MAX_SPOUT_PENDING**: This sets the maximum number of spout tuples that can be pending on a single spout task at once (pending means the tuple has not been acked or failed yet). It is highly recommended you set this config to prevent queue explosion.
+4. **Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS**: This is the maximum amount of time a spout tuple has to be fully completed before it is considered failed. This value defaults to 30 seconds, which is sufficient for most topologies. See [Guaranteeing message processing](Guaranteeing-message-processing.html) for more information on how Storm's reliability model works.
+5. **Config.TOPOLOGY_SERIALIZATIONS**: You can register more serializers to Storm using this config so that you can use custom types within tuples.
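+
+As a rough illustration, several of these can be set through the corresponding helper methods on `Config` (a minimal sketch; the values shown are arbitrary):
+
+```java
+import backtype.storm.Config;
+import backtype.storm.StormSubmitter;
+
+Config conf = new Config();
+conf.setNumWorkers(25);          // Config.TOPOLOGY_WORKERS
+conf.setNumAckers(4);            // Config.TOPOLOGY_ACKERS
+conf.setMaxSpoutPending(5000);   // Config.TOPOLOGY_MAX_SPOUT_PENDING
+conf.setMessageTimeoutSecs(60);  // Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS
+StormSubmitter.submitTopology("mytopology", conf, topology);
+```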
+
+
+### Killing a topology
+
+To kill a topology, simply run:
+
+`storm kill {stormname}`
+
+Give the same name to `storm kill` as you used when submitting the topology.
+
+Storm won't kill the topology immediately. Instead, it deactivates all the spouts so that they don't emit any more tuples, and then Storm waits `Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS` seconds before destroying all the workers. This gives the topology enough time to complete any tuples it was processing when it got killed.
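+
+If you want a different wait time for a particular kill, the `storm kill` command also accepts a `-w` flag to override the timeout, e.g. `storm kill mytopology -w 10`.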
+
+### Updating a running topology
+
+To update a running topology, the only option currently is to kill the current topology and resubmit a new one. A planned feature is to implement a `storm swap` command that swaps a running topology with a new one, ensuring minimal downtime and no chance of both topologies processing tuples at the same time. 
+
+### Monitoring topologies
+
+The best place to monitor a topology is using the Storm UI. The Storm UI provides information about errors happening in tasks and fine-grained stats on the throughput and latency performance of each component of each running topology.
+
+You can also look at the worker logs on the cluster machines.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/SECURITY.md
----------------------------------------------------------------------
diff --git a/docs/SECURITY.md b/docs/SECURITY.md
new file mode 100644
index 0000000..495061a
--- /dev/null
+++ b/docs/SECURITY.md
@@ -0,0 +1,79 @@
+---
+title: Running Apache Storm Securely
+layout: documentation
+documentation: true
+---
+# Running Apache Storm Securely
+
+The current release of Apache Storm offers no authentication or authorization.
+It does not encrypt any data being sent across the network, and does not
+attempt to restrict access to data stored on the local file system or in
+Apache Zookeeper.  As such, there are a number of precautions you may want
+to take outside of Storm itself to be sure Storm is running securely.
+
+The exact details of how to set up these precautions vary a lot and are beyond
+the scope of this document.
+
+## Network Security
+
+It is generally a good idea to enable a firewall and restrict incoming network
+connections to only those originating from the cluster itself and from trusted
+hosts and services; a complete list of the ports Storm uses is below.
+
+If the data your cluster is processing is sensitive, it might be best to set up
+IPsec to encrypt all traffic being sent between the hosts in the cluster.
+
+### Ports
+
+| Default Port | Storm Config | Client Hosts/Processes | Server |
+|--------------|--------------|------------------------|--------|
+| 2181 | `storm.zookeeper.port` | Nimbus, Supervisors, and Worker processes | Zookeeper |
+| 6627 | `nimbus.thrift.port` | Storm clients, Supervisors, and UI | Nimbus |
+| 8080 | `ui.port` | Client Web Browsers | UI |
+| 8000 | `logviewer.port` | Client Web Browsers | Logviewer |
+| 3772 | `drpc.port` | External DRPC Clients | DRPC |
+| 3773 | `drpc.invocations.port` | Worker Processes | DRPC |
+| 670{0,1,2,3} | `supervisor.slots.ports` | Worker Processes | Worker Processes |
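+
+As an illustration of the firewall advice above, a hypothetical pair of iptables rules restricting the Nimbus Thrift port to a trusted cluster subnet (the subnet shown is an assumption; adapt it to your network):
+
+```
+# Allow Nimbus Thrift traffic from the cluster subnet only
+iptables -A INPUT -p tcp --dport 6627 -s 10.0.0.0/24 -j ACCEPT
+iptables -A INPUT -p tcp --dport 6627 -j DROP
+```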
+
+### UI/Logviewer
+
+The UI and logviewer processes provide a way to not only see what a cluster is
+doing, but also manipulate running topologies.  In general these processes should
+not be exposed except to users of the cluster.  It is often simplest to restrict
+these ports to only accept connections from local hosts, and then front them with another web server,
+like Apache httpd, that can authenticate/authorize incoming connections and
+proxy the connection to the storm process.  To make this work, the UI process must have
+`logviewer.port` set to the port of the proxy in its storm.yaml, while the logviewers
+must have it set to the actual port that they are going to bind to, as sketched below.
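+
+A minimal storm.yaml sketch of this split, assuming a hypothetical authenticating proxy listening on port 9999 on each worker host:
+
+```yaml
+# storm.yaml on the host running the UI: point log links at the proxy
+logviewer.port: 9999
+```
+
+```yaml
+# storm.yaml on each worker host: the port the logviewer actually binds to
+logviewer.port: 8000
+```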
+
+### Nimbus
+
+Nimbus's Thrift port should be locked down, as it can be used to control the entire
+cluster, including running arbitrary user code on different nodes in the cluster.
+Ideally access to it is restricted to nodes within the cluster and possibly some gateway
+nodes that allow authorized users to log into them and run storm client commands.
+
+### DRPC
+
+Each DRPC server has two different ports.  The invocations port is accessed by worker
+processes within the cluster.  The other port is accessed by external clients that
+want to query the topology.  The external port should be restricted to the hosts that
+should be allowed to issue queries.
+
+### Supervisors
+
+Supervisors are only clients, not servers, and as such don't need special restrictions.
+
+### Workers
+
+Worker processes receive data from each other.  There is the option to encrypt this data using
+Blowfish by setting `topology.tuple.serializer` to `backtype.storm.security.serialization.BlowfishTupleSerializer`
+and setting `topology.tuple.serializer.blowfish.key` to a secret key you want your topology to use, as sketched below.
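+
+A minimal sketch of setting this in the topology config (the key value here is a placeholder; use your own secret):
+
+```java
+Config conf = new Config();
+conf.put("topology.tuple.serializer",
+         "backtype.storm.security.serialization.BlowfishTupleSerializer");
+conf.put("topology.tuple.serializer.blowfish.key", "0123456789abcdef"); // hex-encoded secret key (example only)
+```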
+
+### Zookeeper
+
+Zookeeper uses other ports for communication within the ensemble, the details of which
+are beyond the scope of this document.  You should look at restricting Zookeeper access
+as well, because Storm does not set up any ACLs for the data it writes to Zookeeper.
+


[16/16] storm git commit: STORM-1617: 0.10.x release docs

Posted by bo...@apache.org.
STORM-1617: 0.10.x release docs


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/7e959832
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/7e959832
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/7e959832

Branch: refs/heads/0.10.x-branch
Commit: 7e95983258c3112eacfa76895784372cd2cfa5e0
Parents: cf0cdbb
Author: Robert (Bobby) Evans <ev...@yahoo-inc.com>
Authored: Sat Mar 19 12:33:31 2016 -0500
Committer: Robert (Bobby) Evans <ev...@yahoo-inc.com>
Committed: Sat Mar 19 12:33:31 2016 -0500

----------------------------------------------------------------------
 docs/Clojure-DSL.md                             |   8 +-
 docs/Command-line-client.md                     |   8 +-
 docs/Common-patterns.md                         |  18 +-
 docs/Concepts.md                                |  26 +-
 docs/Configuration.md                           |   6 +-
 docs/Contributing-to-Storm.md                   |   4 +-
 docs/Creating-a-new-Storm-project.md            |   8 +-
 docs/DSLs-and-multilang-adapters.md             |   5 +-
 docs/Daemon-Fault-Tolerance.md                  |  30 +
 ...Defining-a-non-jvm-language-dsl-for-storm.md |   4 +-
 docs/Distributed-RPC.md                         |   4 +-
 docs/FAQ.md                                     |  14 +-
 docs/Guaranteeing-message-processing.md         |  10 +-
 docs/Hooks.md                                   |   4 +-
 docs/Implementation-docs.md                     |   3 +-
 docs/Kestrel-and-Storm.md                       |   4 +-
 docs/Lifecycle-of-a-topology.md                 |  72 +-
 docs/Local-mode.md                              |   2 +
 docs/Maven.md                                   |  52 +-
 docs/Message-passing-implementation.md          |  36 +-
 docs/Metrics.md                                 |  20 +-
 docs/Multilang-protocol.md                      |  76 +-
 docs/Rationale.md                               |   2 +
 ...unning-topologies-on-a-production-cluster.md |   4 +-
 docs/SECURITY.md                                | 459 +++++++++-
 docs/STORM-UI-REST-API.md                       |   2 +-
 docs/Serialization.md                           |   8 +-
 docs/Setting-up-a-Storm-cluster.md              |  46 +-
 docs/Setting-up-development-environment.md      |  16 +-
 docs/Spout-implementations.md                   |   2 +
 docs/Structure-of-the-codebase.md               |  92 +-
 docs/Support-for-non-java-languages.md          |   2 +
 docs/Transactional-topologies.md                |  14 +-
 docs/Trident-API-Overview.md                    | 228 ++++-
 docs/Trident-spouts.md                          |  10 +-
 docs/Trident-state.md                           |  15 +-
 docs/Trident-tutorial.md                        |   5 +-
 docs/Troubleshooting.md                         |  40 +-
 docs/Tutorial.md                                |  22 +-
 ...nding-the-parallelism-of-a-Storm-topology.md |  30 +-
 docs/Using-non-JVM-languages-with-Storm.md      |   1 +
 docs/flux.md                                    | 835 +++++++++++++++++++
 docs/images/topology.png                        | Bin 64740 -> 23147 bytes
 docs/index.md                                   | 118 ++-
 docs/storm-eventhubs.md                         |  40 +
 docs/storm-hbase.md                             | 241 ++++++
 docs/storm-hdfs.md                              | 368 ++++++++
 docs/storm-hive.md                              | 111 +++
 docs/storm-jdbc.md                              | 285 +++++++
 docs/storm-kafka.md                             | 287 +++++++
 docs/storm-redis.md                             | 258 ++++++
 51 files changed, 3599 insertions(+), 356 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Clojure-DSL.md
----------------------------------------------------------------------
diff --git a/docs/Clojure-DSL.md b/docs/Clojure-DSL.md
index b3109fa..234febe 100644
--- a/docs/Clojure-DSL.md
+++ b/docs/Clojure-DSL.md
@@ -1,7 +1,9 @@
 ---
+title: Clojure DSL
 layout: documentation
+documentation: true
 ---
-Storm comes with a Clojure DSL for defining spouts, bolts, and topologies. The Clojure DSL has access to everything the Java API exposes, so if you're a Clojure user you can code Storm topologies without touching Java at all. The Clojure DSL is defined in the source in the [backtype.storm.clojure](https://github.com/apache/incubator-storm/blob/0.5.3/src/clj/backtype/storm/clojure.clj) namespace.
+Storm comes with a Clojure DSL for defining spouts, bolts, and topologies. The Clojure DSL has access to everything the Java API exposes, so if you're a Clojure user you can code Storm topologies without touching Java at all. The Clojure DSL is defined in the source in the [backtype.storm.clojure]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/clojure.clj) namespace.
 
 This page outlines all the pieces of the Clojure DSL, including:
 
@@ -15,7 +17,7 @@ This page outlines all the pieces of the Clojure DSL, including:
 
 To define a topology, use the `topology` function. `topology` takes in two arguments: a map of "spout specs" and a map of "bolt specs". Each spout and bolt spec wires the code for the component into the topology by specifying things like inputs and parallelism.
 
-Let's take a look at an example topology definition [from the storm-starter project](https://github.com/nathanmarz/storm-starter/blob/master/src/clj/storm/starter/clj/word_count.clj):
+Let's take a look at an example topology definition [from the storm-starter project]({{page.git-blob-base}}/examples/storm-starter/src/clj/storm/starter/clj/word_count.clj):
 
 ```clojure
 (topology
@@ -201,7 +203,7 @@ The signature for `defspout` looks like the following:
 
 If you leave out the option map, it defaults to {:prepare true}. The output declaration for `defspout` has the same syntax as `defbolt`.
 
-Here's an example `defspout` implementation from [storm-starter](https://github.com/nathanmarz/storm-starter/blob/master/src/clj/storm/starter/clj/word_count.clj):
+Here's an example `defspout` implementation from [storm-starter]({{page.git-blob-base}}/examples/storm-starter/src/clj/storm/starter/clj/word_count.clj):
 
 ```clojure
 (defspout sentence-spout ["sentence"]

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Command-line-client.md
----------------------------------------------------------------------
diff --git a/docs/Command-line-client.md b/docs/Command-line-client.md
index 0e645d7..4634467 100644
--- a/docs/Command-line-client.md
+++ b/docs/Command-line-client.md
@@ -1,7 +1,9 @@
 ---
+title: Command Line Client
 layout: documentation
+documentation: true
 ---
-This page describes all the commands that are possible with the "storm" command line client. To learn how to set up your "storm" client to talk to a remote cluster, follow the instructions in [Setting up development environment](Setting-up-a-development-environment.html).
+This page describes all the commands that are possible with the "storm" command line client. To learn how to set up your "storm" client to talk to a remote cluster, follow the instructions in [Setting up development environment](Setting-up-development-environment.html).
 
 These commands are:
 
@@ -45,12 +47,14 @@ Deactivates the specified topology's spouts.
 
 ### rebalance
 
-Syntax: `storm rebalance topology-name [-w wait-time-secs]`
+Syntax: `storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]*`
 
 Sometimes you may wish to spread out where the workers for a topology are running. For example, let's say you have a 10 node cluster running 4 workers per node, and then let's say you add another 10 nodes to the cluster. You may wish to have Storm spread out the workers for the running topology so that each node runs 2 workers. One way to do this is to kill the topology and resubmit it, but Storm provides a "rebalance" command that offers an easier way to do this.
 
 Rebalance will first deactivate the topology for the duration of the message timeout (overridable with the -w flag) and then redistribute the workers evenly around the cluster. The topology will then return to its previous state of activation (so a deactivated topology will still be deactivated and an activated topology will go back to being activated).
 
+The rebalance command can also be used to change the parallelism of a running topology. Use the -n switch to change the number of workers and the -e switch to change the number of executors for a given component, as in the example below.
+
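+For example, to give a hypothetical topology `mytopology` 5 workers, 3 executors for its `blue-spout`, and 10 executors for its `yellow-bolt`:
+
+`storm rebalance mytopology -n 5 -e blue-spout=3 -e yellow-bolt=10`
+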
 ### repl
 
 Syntax: `storm repl`

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Common-patterns.md
----------------------------------------------------------------------
diff --git a/docs/Common-patterns.md b/docs/Common-patterns.md
index 3f8c979..1c97f6d 100644
--- a/docs/Common-patterns.md
+++ b/docs/Common-patterns.md
@@ -1,5 +1,7 @@
 ---
+title: Common Topology Patterns
 layout: documentation
+documentation: true
 ---
 
 This page lists a variety of common patterns in Storm topologies.
@@ -62,14 +64,26 @@ A common continuous computation done on Storm is a "streaming top N" of some sor
 This approach obviously doesn't scale to large streams since the entire stream has to go through one task. A better way to do the computation is to do many top N's in parallel across partitions of the stream, and then merge those top N's together to get the global top N. The pattern looks like this:
 
 ```java
-builder.setBolt("rank", new RankObjects(), parallellism)
+builder.setBolt("rank", new RankObjects(), parallelism)
   .fieldsGrouping("objects", new Fields("value"));
 builder.setBolt("merge", new MergeObjects())
   .globalGrouping("rank");
 ```
 
-This pattern works because of the fields grouping done by the first bolt which gives the partitioning you need for this to be semantically correct. You can see an example of this pattern in storm-starter [here](https://github.com/nathanmarz/storm-starter/blob/master/src/jvm/storm/starter/RollingTopWords.java).
+This pattern works because of the fields grouping done by the first bolt which gives the partitioning you need for this to be semantically correct. You can see an example of this pattern in storm-starter [here]({{page.git-blob-base}}/examples/storm-starter/src/jvm/storm/starter/RollingTopWords.java).
 
+If, however, you have a known skew in the data being processed, it can be advantageous to use partialKeyGrouping instead of fieldsGrouping.  This will distribute the load for each key across two downstream bolts instead of a single one.
+
+```java
+builder.setBolt("count", new CountObjects(), parallelism)
+  .partialKeyGrouping("objects", new Fields("value"));
+builder.setBolt("rank", new AggregateCountsAndRank(), parallelism)
+  .fieldsGrouping("count", new Fields("key"));
+builder.setBolt("merge", new MergeRanksObjects())
+  .globalGrouping("rank");
+``` 
+
+The topology needs an extra layer of processing to aggregate the partial counts from the upstream bolts, but this layer only processes aggregated values, so it is not subject to the load caused by the skewed data. You can see an example of this pattern in storm-starter [here]({{page.git-blob-base}}/examples/storm-starter/src/jvm/storm/starter/SkewedRollingTopWords.java).
 
 ### TimeCacheMap for efficiently keeping a cache of things that have been recently updated
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Concepts.md
----------------------------------------------------------------------
diff --git a/docs/Concepts.md b/docs/Concepts.md
index 33779f2..01dbd11 100644
--- a/docs/Concepts.md
+++ b/docs/Concepts.md
@@ -1,5 +1,7 @@
 ---
+title: Concepts
 layout: documentation
+documentation: true
 ---
 
 This page lists the main concepts of Storm and links to resources where you can find more information. The concepts discussed are:
@@ -35,14 +37,12 @@ Every stream is given an id when declared. Since single-stream spouts and bolts
 * [Tuple](javadocs/backtype/storm/tuple/Tuple.html): streams are composed of tuples
 * [OutputFieldsDeclarer](javadocs/backtype/storm/topology/OutputFieldsDeclarer.html): used to declare streams and their schemas
 * [Serialization](Serialization.html): Information about Storm's dynamic typing of tuples and declaring custom serializations
-* [ISerialization](javadocs/backtype/storm/serialization/ISerialization.html): custom serializers must implement this interface
-* [CONFIG.TOPOLOGY_SERIALIZATIONS](javadocs/backtype/storm/Config.html#TOPOLOGY_SERIALIZATIONS): custom serializers can be registered using this configuration
 
 ### Spouts
 
 A spout is a source of streams in a topology. Generally spouts will read tuples from an external source and emit them into the topology (e.g. a Kestrel queue or the Twitter API). Spouts can either be __reliable__ or __unreliable__. A reliable spout is capable of replaying a tuple if it failed to be processed by Storm, whereas an unreliable spout forgets about the tuple as soon as it is emitted.
 
-Spouts can emit more than one stream. To do so, declare multiple streams using the `declareStream` method of [OutputFieldsDeclarer](javadocs/backtype/storm/topology/OutputFieldsDeclarer.html) and specify the stream to emit to when using the `emit` method on [SpoutOutputCollector](javadocs/backtype/storm/spout/SpoutOutputCollector.html). 
+Spouts can emit more than one stream. To do so, declare multiple streams using the `declareStream` method of [OutputFieldsDeclarer](javadocs/backtype/storm/topology/OutputFieldsDeclarer.html) and specify the stream to emit to when using the `emit` method on [SpoutOutputCollector](javadocs/backtype/storm/spout/SpoutOutputCollector.html).
 
 The main method on spouts is `nextTuple`. `nextTuple` either emits a new tuple into the topology or simply returns if there are no new tuples to emit. It is imperative that `nextTuple` does not block for any spout implementation, because Storm calls all the spout methods on the same thread.
 
@@ -50,7 +50,7 @@ The other main methods on spouts are `ack` and `fail`. These are called when Sto
 
 **Resources:**
 
-* [IRichSpout](javadocs/backtype/storm/topology/IRichSpout.html): this is the interface that spouts must implement. 
+* [IRichSpout](javadocs/backtype/storm/topology/IRichSpout.html): this is the interface that spouts must implement.
 * [Guaranteeing message processing](Guaranteeing-message-processing.html)
 
 ### Bolts
@@ -61,7 +61,7 @@ Bolts can do simple stream transformations. Doing complex stream transformations
 
 Bolts can emit more than one stream. To do so, declare multiple streams using the `declareStream` method of [OutputFieldsDeclarer](javadocs/backtype/storm/topology/OutputFieldsDeclarer.html) and specify the stream to emit to when using the `emit` method on [OutputCollector](javadocs/backtype/storm/task/OutputCollector.html).
 
-When you declare a bolt's input streams, you always subscribe to specific streams of another component. If you want to subscribe to all the streams of another component, you have to subscribe to each one individually. [InputDeclarer](javadocs/backtype/storm/topology/InputDeclarer.html) has syntactic sugar for subscribing to streams declared on the default stream id. Saying `declarer.shuffleGrouping("1")` subscribes to the default stream on component "1" and is equivalent to `declarer.shuffleGrouping("1", DEFAULT_STREAM_ID)`. 
+When you declare a bolt's input streams, you always subscribe to specific streams of another component. If you want to subscribe to all the streams of another component, you have to subscribe to each one individually. [InputDeclarer](javadocs/backtype/storm/topology/InputDeclarer.html) has syntactic sugar for subscribing to streams declared on the default stream id. Saying `declarer.shuffleGrouping("1")` subscribes to the default stream on component "1" and is equivalent to `declarer.shuffleGrouping("1", DEFAULT_STREAM_ID)`.
 
 The main method in bolts is the `execute` method which takes in as input a new tuple. Bolts emit new tuples using the [OutputCollector](javadocs/backtype/storm/task/OutputCollector.html) object. Bolts must call the `ack` method on the `OutputCollector` for every tuple they process so that Storm knows when tuples are completed (and can eventually determine that it's safe to ack the original spout tuples). For the common case of processing an input tuple, emitting 0 or more tuples based on that tuple, and then acking the input tuple, Storm provides an [IBasicBolt](javadocs/backtype/storm/topology/IBasicBolt.html) interface which does the acking automatically.
 
@@ -78,21 +78,21 @@ Its perfectly fine to launch new threads in bolts that do processing asynchronou
 
 Part of defining a topology is specifying for each bolt which streams it should receive as input. A stream grouping defines how that stream should be partitioned among the bolt's tasks.
 
-There are seven built-in stream groupings in Storm, and you can implement a custom stream grouping by implementing the [CustomStreamGrouping](javadocs/backtype/storm/grouping/CustomStreamGrouping.html) interface:
+There are eight built-in stream groupings in Storm, and you can implement a custom stream grouping by implementing the [CustomStreamGrouping](javadocs/backtype/storm/grouping/CustomStreamGrouping.html) interface (a short usage sketch follows the list):
 
 1. **Shuffle grouping**: Tuples are randomly distributed across the bolt's tasks in a way such that each bolt is guaranteed to get an equal number of tuples.
 2. **Fields grouping**: The stream is partitioned by the fields specified in the grouping. For example, if the stream is grouped by the "user-id" field, tuples with the same "user-id" will always go to the same task, but tuples with different "user-id"'s may go to different tasks.
-3. **All grouping**: The stream is replicated across all the bolt's tasks. Use this grouping with care.
-4. **Global grouping**: The entire stream goes to a single one of the bolt's tasks. Specifically, it goes to the task with the lowest id.
-5. **None grouping**: This grouping specifies that you don't care how the stream is grouped. Currently, none groupings are equivalent to shuffle groupings. Eventually though, Storm will push down bolts with none groupings to execute in the same thread as the bolt or spout they subscribe from (when possible).
-6. **Direct grouping**: This is a special kind of grouping. A stream grouped this way means that the __producer__ of the tuple decides which task of the consumer will receive this tuple. Direct groupings can only be declared on streams that have been declared as direct streams. Tuples emitted to a direct stream must be emitted using one of the [emitDirect](javadocs/backtype/storm/task/OutputCollector.html#emitDirect(int, int, java.util.List) methods. A bolt can get the task ids of its consumers by either using the provided [TopologyContext](javadocs/backtype/storm/task/TopologyContext.html) or by keeping track of the output of the `emit` method in [OutputCollector](javadocs/backtype/storm/task/OutputCollector.html) (which returns the task ids that the tuple was sent to).  
-7. **Local or shuffle grouping**: If the target bolt has one or more tasks in the same worker process, tuples will be shuffled to just those in-process tasks. Otherwise, this acts like a normal shuffle grouping.
+3. **Partial Key grouping**: The stream is partitioned by the fields specified in the grouping, like the Fields grouping, but are load balanced between two downstream bolts, which provides better utilization of resources when the incoming data is skewed. [This paper](https://melmeric.files.wordpress.com/2014/11/the-power-of-both-choices-practical-load-balancing-for-distributed-stream-processing-engines.pdf) provides a good explanation of how it works and the advantages it provides.
+4. **All grouping**: The stream is replicated across all the bolt's tasks. Use this grouping with care.
+5. **Global grouping**: The entire stream goes to a single one of the bolt's tasks. Specifically, it goes to the task with the lowest id.
+6. **None grouping**: This grouping specifies that you don't care how the stream is grouped. Currently, none groupings are equivalent to shuffle groupings. Eventually though, Storm will push down bolts with none groupings to execute in the same thread as the bolt or spout they subscribe from (when possible).
+7. **Direct grouping**: This is a special kind of grouping. A stream grouped this way means that the __producer__ of the tuple decides which task of the consumer will receive this tuple. Direct groupings can only be declared on streams that have been declared as direct streams. Tuples emitted to a direct stream must be emitted using one of the [emitDirect](javadocs/backtype/storm/task/OutputCollector.html#emitDirect(int, int, java.util.List)) methods. A bolt can get the task ids of its consumers by either using the provided [TopologyContext](javadocs/backtype/storm/task/TopologyContext.html) or by keeping track of the output of the `emit` method in [OutputCollector](javadocs/backtype/storm/task/OutputCollector.html) (which returns the task ids that the tuple was sent to).
+8. **Local or shuffle grouping**: If the target bolt has one or more tasks in the same worker process, tuples will be shuffled to just those in-process tasks. Otherwise, this acts like a normal shuffle grouping.
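+
+A short usage sketch of some common groupings; the component classes (`SentenceSpout`, `SplitBolt`, `CountBolt`, `ReportBolt`) are hypothetical:
+
+```java
+TopologyBuilder builder = new TopologyBuilder();
+builder.setSpout("sentences", new SentenceSpout(), 2);
+builder.setBolt("split", new SplitBolt(), 4)
+       .shuffleGrouping("sentences");                // shuffle grouping
+builder.setBolt("count", new CountBolt(), 4)
+       .fieldsGrouping("split", new Fields("word")); // fields grouping
+builder.setBolt("report", new ReportBolt())
+       .globalGrouping("count");                     // global grouping
+```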
 
 **Resources:**
 
 * [TopologyBuilder](javadocs/backtype/storm/topology/TopologyBuilder.html): use this class to define topologies
 * [InputDeclarer](javadocs/backtype/storm/topology/InputDeclarer.html): this object is returned whenever `setBolt` is called on `TopologyBuilder` and is used for declaring a bolt's input streams and how those streams should be grouped
-* [CoordinatedBolt](javadocs/backtype/storm/task/CoordinatedBolt.html): this bolt is useful for distributed RPC topologies and makes heavy use of direct streams and direct groupings
 
 ### Reliability
 
@@ -104,7 +104,7 @@ This is all explained in much more detail in [Guaranteeing message processing](G
 
 ### Tasks
 
-Each spout or bolt executes as many tasks across the cluster. Each task corresponds to one thread of execution, and stream groupings define how to send tuples from one set of tasks to another set of tasks. You set the parallelism for each spout or bolt in the `setSpout` and `setBolt` methods of [TopologyBuilder](javadocs/backtype/storm/topology/TopologyBuilder.html). 
+Each spout or bolt executes as some number of tasks across the cluster. Each task corresponds to one thread of execution, and stream groupings define how to send tuples from one set of tasks to another set of tasks. You set the parallelism for each spout or bolt in the `setSpout` and `setBolt` methods of [TopologyBuilder](javadocs/backtype/storm/topology/TopologyBuilder.html).
 
 ### Workers
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Configuration.md
----------------------------------------------------------------------
diff --git a/docs/Configuration.md b/docs/Configuration.md
index 8e8ca77..83f4ef7 100644
--- a/docs/Configuration.md
+++ b/docs/Configuration.md
@@ -1,9 +1,11 @@
 ---
+title: Configuration
 layout: documentation
+documentation: true
 ---
 Storm has a variety of configurations for tweaking the behavior of nimbus, supervisors, and running topologies. Some configurations are system configurations and cannot be modified on a topology by topology basis, whereas other configurations can be modified per topology. 
 
-Every configuration has a default value defined in [defaults.yaml](https://github.com/apache/incubator-storm/blob/master/conf/defaults.yaml) in the Storm codebase. You can override these configurations by defining a storm.yaml in the classpath of Nimbus and the supervisors. Finally, you can define a topology-specific configuration that you submit along with your topology when using [StormSubmitter](javadocs/backtype/storm/StormSubmitter.html). However, the topology-specific configuration can only override configs prefixed with "TOPOLOGY".
+Every configuration has a default value defined in [defaults.yaml]({{page.git-blob-base}}/conf/defaults.yaml) in the Storm codebase. You can override these configurations by defining a storm.yaml in the classpath of Nimbus and the supervisors. Finally, you can define a topology-specific configuration that you submit along with your topology when using [StormSubmitter](javadocs/backtype/storm/StormSubmitter.html). However, the topology-specific configuration can only override configs prefixed with "TOPOLOGY".
 
 Storm 0.7.0 and onwards lets you override configuration on a per-bolt/per-spout basis. The only configurations that can be overridden this way are:
 
@@ -23,7 +25,7 @@ The preference order for configuration values is defaults.yaml < storm.yaml < to
 **Resources:**
 
 * [Config](javadocs/backtype/storm/Config.html): a listing of all configurations as well as a helper class for creating topology specific configurations
-* [defaults.yaml](https://github.com/apache/incubator-storm/blob/master/conf/defaults.yaml): the default values for all configurations
+* [defaults.yaml]({{page.git-blob-base}}/conf/defaults.yaml): the default values for all configurations
 * [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html): explains how to create and configure a Storm cluster
 * [Running topologies on a production cluster](Running-topologies-on-a-production-cluster.html): lists useful configurations when running topologies on a cluster
 * [Local mode](Local-mode.html): lists useful configurations when using local mode

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Contributing-to-Storm.md
----------------------------------------------------------------------
diff --git a/docs/Contributing-to-Storm.md b/docs/Contributing-to-Storm.md
index dff23fb..fdc5835 100644
--- a/docs/Contributing-to-Storm.md
+++ b/docs/Contributing-to-Storm.md
@@ -1,5 +1,7 @@
 ---
+title: Contributing
 layout: documentation
+documentation: true
 ---
 
 ### Getting started with contributing
@@ -12,7 +14,7 @@ The [Implementation docs](Implementation-docs.html) section of the wiki gives de
 
 ### Contribution process
 
-Contributions to the Storm codebase should be sent as GitHub pull requests. If there's any problems to the pull request we can iterate on it using GitHub's commenting features.
+Contributions to the Storm codebase should be sent as [GitHub](https://github.com/apache/storm) pull requests. If there are any problems with the pull request, we can iterate on it using GitHub's commenting features.
 
 For small patches, feel free to submit pull requests directly for them. For larger contributions, please use the following process. The idea behind this process is to prevent any wasted work and catch design issues early on:
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Creating-a-new-Storm-project.md
----------------------------------------------------------------------
diff --git a/docs/Creating-a-new-Storm-project.md b/docs/Creating-a-new-Storm-project.md
index feb49b8..35ab1eb 100644
--- a/docs/Creating-a-new-Storm-project.md
+++ b/docs/Creating-a-new-Storm-project.md
@@ -1,18 +1,18 @@
 ---
+title: Creating a New Storm Project
 layout: documentation
+documentation: true
 ---
 This page outlines how to set up a Storm project for development. The steps are:
 
 1. Add Storm jars to classpath
 2. If using multilang, add multilang dir to classpath
 
-Follow along to see how to set up the [storm-starter](http://github.com/nathanmarz/storm-starter) project in Eclipse.
+Follow along to see how to set up the [storm-starter]({{page.git-blob-base}}/examples/storm-starter) project in Eclipse.
 
 ### Add Storm jars to classpath
 
-You'll need the Storm jars on your classpath to develop Storm topologies. Using [Maven](Maven.html) is highly recommended. [Here's an example](https://github.com/nathanmarz/storm-starter/blob/master/m2-pom.xml) of how to setup your pom.xml for a Storm project. If you don't want to use Maven, you can include the jars from the Storm release on your classpath. 
-
-[storm-starter](http://github.com/nathanmarz/storm-starter) uses [Leiningen](http://github.com/technomancy/leiningen) for build and dependency resolution. You can install leiningen by downloading [this script](https://raw.github.com/technomancy/leiningen/stable/bin/lein), placing it on your path, and making it executable. To retrieve the dependencies for Storm, simply run `lein deps` in the project root.
+You'll need the Storm jars on your classpath to develop Storm topologies. Using [Maven](Maven.html) is highly recommended. [Here's an example]({{page.git-blob-base}}/examples/storm-starter/pom.xml) of how to set up your pom.xml for a Storm project. If you don't want to use Maven, you can include the jars from the Storm release on your classpath.
 
 To set up the classpath in Eclipse, create a new Java project, include `src/jvm/` as a source path, and make sure all the jars in `lib/` and `lib/dev/` are in the `Referenced Libraries` section of the project.
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/DSLs-and-multilang-adapters.md
----------------------------------------------------------------------
diff --git a/docs/DSLs-and-multilang-adapters.md b/docs/DSLs-and-multilang-adapters.md
index 31bd453..0ed5450 100644
--- a/docs/DSLs-and-multilang-adapters.md
+++ b/docs/DSLs-and-multilang-adapters.md
@@ -1,9 +1,10 @@
 ---
+title: Storm DSLs and Multi-Lang Adapters
 layout: documentation
+documentation: true
 ---
 * [Scala DSL](https://github.com/velvia/ScalaStorm)
 * [JRuby DSL](https://github.com/colinsurprenant/redstorm)
 * [Clojure DSL](Clojure-DSL.html)
 * [Storm/Esper integration](https://github.com/tomdz/storm-esper): Streaming SQL on top of Storm
-* [io-storm](https://github.com/gphat/io-storm): Perl multilang adapter
-* [storm-php](https://github.com/lazyshot/storm-php): PHP multilang adapter
+* [io-storm](https://github.com/dan-blanchard/io-storm): Perl multilang adapter

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Daemon-Fault-Tolerance.md
----------------------------------------------------------------------
diff --git a/docs/Daemon-Fault-Tolerance.md b/docs/Daemon-Fault-Tolerance.md
new file mode 100644
index 0000000..1af681a
--- /dev/null
+++ b/docs/Daemon-Fault-Tolerance.md
@@ -0,0 +1,30 @@
+---
+title: Daemon Fault Tolerance
+layout: documentation
+documentation: true
+---
+Storm has several different daemon processes: Nimbus, which schedules workers; supervisors, which launch and kill workers; the log viewer, which gives access to logs; and the UI, which shows the status of a cluster.
+
+## What happens when a worker dies?
+
+When a worker dies, the supervisor will restart it. If it continuously fails on startup and is unable to heartbeat to Nimbus, Nimbus will reschedule the worker.
+
+## What happens when a node dies?
+
+The tasks assigned to that machine will time-out and Nimbus will reassign those tasks to other machines.
+
+## What happens when Nimbus or Supervisor daemons die?
+
+The Nimbus and Supervisor daemons are designed to be fail-fast (process self-destructs whenever any unexpected situation is encountered) and stateless (all state is kept in Zookeeper or on disk). As described in [Setting up a Storm cluster](Setting-up-a-Storm-cluster.html), the Nimbus and Supervisor daemons must be run under supervision using a tool like daemontools or monit. So if the Nimbus or Supervisor daemons die, they restart like nothing happened.
+
+Most notably, no worker processes are affected by the death of Nimbus or the Supervisors. This is in contrast to Hadoop, where if the JobTracker dies, all the running jobs are lost. 
+
+## Is Nimbus a single point of failure?
+
+If you lose the Nimbus node, the workers will still continue to function. Additionally, supervisors will continue to restart workers if they die. However, without Nimbus, workers won't be reassigned to other machines when necessary (like if you lose a worker machine). 
+
+So the answer is that Nimbus is "sort of" a SPOF. In practice, it's not a big deal since nothing catastrophic happens when the Nimbus daemon dies. There are plans to make Nimbus highly available in the future.
+
+## How does Storm guarantee data processing?
+
+Storm provides mechanisms to guarantee data processing even if nodes die or messages are lost. See [Guaranteeing message processing](Guaranteeing-message-processing.html) for the details.

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Defining-a-non-jvm-language-dsl-for-storm.md
----------------------------------------------------------------------
diff --git a/docs/Defining-a-non-jvm-language-dsl-for-storm.md b/docs/Defining-a-non-jvm-language-dsl-for-storm.md
index f52f4ab..7096a43 100644
--- a/docs/Defining-a-non-jvm-language-dsl-for-storm.md
+++ b/docs/Defining-a-non-jvm-language-dsl-for-storm.md
@@ -1,7 +1,9 @@
 ---
+title: Defining a Non-JVM DSL for Storm
 layout: documentation
+documentation: true
 ---
-The right place to start to learn how to make a non-JVM DSL for Storm is [storm-core/src/storm.thrift](https://github.com/apache/incubator-storm/blob/master/storm-core/src/storm.thrift). Since Storm topologies are just Thrift structures, and Nimbus is a Thrift daemon, you can create and submit topologies in any language.
+The right place to start to learn how to make a non-JVM DSL for Storm is [storm-core/src/storm.thrift]({{page.git-blob-base}}/storm-core/src/storm.thrift). Since Storm topologies are just Thrift structures, and Nimbus is a Thrift daemon, you can create and submit topologies in any language.
 
 When you create the Thrift structs for spouts and bolts, the code for the spout or bolt is specified in the ComponentObject struct:
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Distributed-RPC.md
----------------------------------------------------------------------
diff --git a/docs/Distributed-RPC.md b/docs/Distributed-RPC.md
index fc75ee4..4af2702 100644
--- a/docs/Distributed-RPC.md
+++ b/docs/Distributed-RPC.md
@@ -1,5 +1,7 @@
 ---
+title: Distributed RPC
 layout: documentation
+documentation: true
 ---
 The idea behind distributed RPC (DRPC) is to parallelize the computation of really intense functions on the fly using Storm. The Storm topology takes in as input a stream of function arguments, and it emits an output stream of the results for each of those function calls. 
 
@@ -116,7 +118,7 @@ The reach of a URL is the number of unique people exposed to a URL on Twitter. T
 
 A single reach computation can involve thousands of database calls and tens of millions of follower records during the computation. It's a really, really intense computation. As you're about to see, implementing this function on top of Storm is dead simple. On a single machine, reach can take minutes to compute; on a Storm cluster, you can compute reach for even the hardest URLs in a couple seconds.
 
-A sample reach topology is defined in storm-starter [here](https://github.com/nathanmarz/storm-starter/blob/master/src/jvm/storm/starter/ReachTopology.java). Here's how you define the reach topology:
+A sample reach topology is defined in storm-starter [here]({{page.git-blob-base}}/examples/storm-starter/src/jvm/storm/starter/ReachTopology.java). Here's how you define the reach topology:
 
 ```java
 LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("reach");

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/FAQ.md
----------------------------------------------------------------------
diff --git a/docs/FAQ.md b/docs/FAQ.md
index 8ff7a6f..127c95c 100644
--- a/docs/FAQ.md
+++ b/docs/FAQ.md
@@ -1,5 +1,7 @@
 ---
+title: FAQ
 layout: documentation
+documentation: true
 ---
 
 ## Best Practices
@@ -26,7 +28,7 @@ layout: documentation
 
 ### Halp! I cannot see:
 
-* **my logs** Logs by default go to $STORM_HOME/logs. Check that you have write permissions to that directory. They are configured in the logback/cluster.xml (0.9) and log4j/*.properties in earlier versions.
+* **my logs** Logs by default go to $STORM_HOME/logs. Check that you have write permissions to that directory. They are configured in log4j2/{cluster, worker}.xml.
 * **final JVM settings** Add the `-XX:+PrintFlagsFinal` commandline option in the childopts (see the conf file)
 * **final Java system properties** Add `Properties props = System.getProperties(); props.list(System.out);` near where you build your topology.
 
@@ -60,6 +62,10 @@ You can join streams with join, merge or multiReduce.
 
 At time of writing, you can't emit to multiple output streams from Trident -- see [STORM-68](https://issues.apache.org/jira/browse/STORM-68)
 
+### Why am I getting a NotSerializableException/IllegalStateException when my topology is being started up?
+
+Within the Storm lifecycle, the topology is instantiated and then serialized to byte format to be stored in ZooKeeper, prior to the topology being executed. Within this step, if a spout or bolt within the topology has an initialized unserializable property, serialization will fail. If there is a need for a field that is unserializable, initialize it within the bolt's prepare method (or the spout's open method), which is run after the topology is delivered to the worker, as sketched below.
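+
+A minimal sketch of deferring an unserializable field to `prepare`; `SomeUnserializableClient` is a hypothetical stand-in for your own client class:
+
+```java
+public class DatabaseBolt extends BaseRichBolt {
+    // transient and left null until prepare(), so topology serialization succeeds
+    private transient SomeUnserializableClient client;
+    private OutputCollector collector;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+        this.collector = collector;
+        // Safe: prepare() runs on the worker, after the bolt has been deserialized.
+        this.client = new SomeUnserializableClient();
+    }
+
+    @Override
+    public void execute(Tuple input) {
+        client.write(input.getValue(0));
+        collector.ack(input);
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        // this bolt emits no streams
+    }
+}
+```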
+
 ## Spouts
 
 ### What is a coordinator, and why are there several?
@@ -72,11 +78,11 @@ You should only store static data, and as little of it as possible, into the met
 
 ### How often is the 'emitPartitionBatchNew' function called?
 
-Since the MBC is the actual spout, all the tuples in a batch are just members of its tupletree. That means storm's "max spout pending" config effectively defines the number of concurrent batches trident runs. The MBC emits a new batch if it has fewer than max-spending tuples pending and if at least one [trident batch interval](https://github.com/apache/incubator-storm/blob/master/conf/defaults.yaml#L115)'s worth of seconds has passed since the last batch.
+Since the MBC is the actual spout, all the tuples in a batch are just members of its tupletree. That means Storm's "max spout pending" config effectively defines the number of concurrent batches trident runs. The MBC emits a new batch if it has fewer than max-spout-pending tuples pending and if at least one [trident batch interval]({{page.git-blob-base}}/conf/defaults.yaml#L115)'s worth of seconds has passed since the last batch.
 
 ### If nothing was emitted does Trident slow down the calls?
 
-Yes, there's a pluggable "spout wait strategy"; the default is to sleep for a [configurable amount of time](https://github.com/apache/incubator-storm/blob/master/conf/defaults.yaml#L110)
+Yes, there's a pluggable "spout wait strategy"; the default is to sleep for a [configurable amount of time]({{page.git-blob-base}}/conf/defaults.yaml#L110)
 
 ### OK, then what is the trident batch interval for?
 
@@ -107,7 +113,7 @@ You can't change the overall batch size once generated, but you can change the n
 
 ### How do I aggregate events by time?
 
-If have records with an immutable timestamp, and you would like to count, average or otherwise aggregate them into discrete time buckets, Trident is an excellent and scalable solution.
+If you have records with an immutable timestamp, and you would like to count, average or otherwise aggregate them into discrete time buckets, Trident is an excellent and scalable solution.
 
 Write an `Each` function that turns the timestamp into a time bucket: if the bucket size was "by hour", then the timestamp `2013-08-08 12:34:56` would be mapped to the `2013-08-08 12:00:00` time bucket, and so would everything else in the twelve o'clock hour. Then group on that timebucket and use a grouped persistentAggregate. The persistentAggregate uses a local cacheMap backed by a data store. Groups with many records require very few reads from the data store, and use efficient bulk reads and writes; as long as your data feed is relatively prompt Trident will make very efficient use of memory and network. Even if a server drops off line for a day, then delivers that full day's worth of data in a rush, the old results will be calmly retrieved and updated -- and without interfering with calculating the current results.
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Guaranteeing-message-processing.md
----------------------------------------------------------------------
diff --git a/docs/Guaranteeing-message-processing.md b/docs/Guaranteeing-message-processing.md
index 91d4384..932ff0d 100644
--- a/docs/Guaranteeing-message-processing.md
+++ b/docs/Guaranteeing-message-processing.md
@@ -1,7 +1,10 @@
 ---
+title: Guaranteeing Message Processing
 layout: documentation
+documentation: true
 ---
-Storm guarantees that each message coming off a spout will be fully processed. This page describes how Storm accomplishes this guarantee and what you have to do as a user to benefit from Storm's reliability capabilities.
+Storm offers several different levels of guaranteed message processing, including best effort, at least once, and exactly once through [Trident](Trident-tutorial.html).
+This page describes how Storm can guarantee at-least-once processing.
 
 ### What does it mean for a message to be "fully processed"?
 
@@ -129,12 +132,11 @@ In contrast, bolts that do aggregations or joins may delay acking a tuple until
 
 ### How do I make my applications work correctly given that tuples can be replayed?
 
-As always in software design, the answer is "it depends." Storm 0.7.0 introduced the "transactional topologies" feature, which enables you to get fully fault-tolerant exactly-once messaging semantics for most computations. Read more about transactional topologies [here](Transactional-topologies.html). 
-
+As always in software design, the answer is "it depends." If you really want exactly-once semantics, use the [Trident](Trident-tutorial.html) API. In some cases, like with a lot of analytics, dropping data is OK, so you can disable fault tolerance by setting the number of acker tasks to 0 via [Config.TOPOLOGY_ACKERS](javadocs/backtype/storm/Config.html#TOPOLOGY_ACKERS), as sketched below. But in some cases you want to be sure that everything was processed at least once and nothing was dropped. This is especially useful if all operations are idempotent or if deduping can happen afterwards.
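+
+A minimal sketch of disabling ackers in the topology config:
+
+```java
+Config conf = new Config();
+conf.setNumAckers(0); // no tuple tracking: best-effort delivery, lowest overhead
+```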
 
 ### How does Storm implement reliability in an efficient way?
 
-A Storm topology has a set of special "acker" tasks that track the DAG of tuples for every spout tuple. When an acker sees that a DAG is complete, it sends a message to the spout task that created the spout tuple to ack the message. You can set the number of acker tasks for a topology in the topology configuration using [Config.TOPOLOGY_ACKERS](javadocs/backtype/storm/Config.html#TOPOLOGY_ACKERS). Storm defaults TOPOLOGY_ACKERS to one task -- you will need to increase this number for topologies processing large amounts of messages. 
+A Storm topology has a set of special "acker" tasks that track the DAG of tuples for every spout tuple. When an acker sees that a DAG is complete, it sends a message to the spout task that created the spout tuple to ack the message. You can set the number of acker tasks for a topology in the topology configuration using [Config.TOPOLOGY_ACKERS](javadocs/backtype/storm/Config.html#TOPOLOGY_ACKERS). Storm defaults TOPOLOGY_ACKERS to one task per worker.
 
 The best way to understand Storm's reliability implementation is to look at the lifecycle of tuples and tuple DAGs. When a tuple is created in a topology, whether in a spout or a bolt, it is given a random 64 bit id. These ids are used by ackers to track the tuple DAG for every spout tuple.
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Hooks.md
----------------------------------------------------------------------
diff --git a/docs/Hooks.md b/docs/Hooks.md
index bbe87a9..01cfa92 100644
--- a/docs/Hooks.md
+++ b/docs/Hooks.md
@@ -1,7 +1,9 @@
 ---
+title: Hooks
 layout: documentation
+documentation: true
 ---
 Storm provides hooks with which you can insert custom code to run on any number of events within Storm. You create a hook by extending the [BaseTaskHook](javadocs/backtype/storm/hooks/BaseTaskHook.html) class and overriding the appropriate method for the event you want to catch. There are two ways to register your hook:
 
-1. In the open method of your spout or prepare method of your bolt using the [TopologyContext#addTaskHook](javadocs/backtype/storm/task/TopologyContext.html) method.
+1. In the open method of your spout or prepare method of your bolt using the [TopologyContext#addTaskHook](javadocs/backtype/storm/task/TopologyContext.html#addTaskHook) method (see the sketch after this list).
 2. Through the Storm configuration using the ["topology.auto.task.hooks"](javadocs/backtype/storm/Config.html#TOPOLOGY_AUTO_TASK_HOOKS) config. These hooks are automatically registered in every spout or bolt, and are useful for doing things like integrating with a custom monitoring system.
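+
+A minimal sketch of a hook and its registration; `LoggingTaskHook` is a hypothetical example class:
+
+```java
+public class LoggingTaskHook extends BaseTaskHook {
+    @Override
+    public void emit(EmitInfo info) {
+        // called every time this task emits a tuple
+        System.out.println("emitted to stream " + info.stream + ": " + info.values);
+    }
+}
+
+// inside your bolt's prepare (or spout's open):
+context.addTaskHook(new LoggingTaskHook());
+```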

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Implementation-docs.md
----------------------------------------------------------------------
diff --git a/docs/Implementation-docs.md b/docs/Implementation-docs.md
index f01083a..15f088e 100644
--- a/docs/Implementation-docs.md
+++ b/docs/Implementation-docs.md
@@ -1,12 +1,13 @@
 ---
+title: Storm Internal Implementation
 layout: documentation
+documentation: true
 ---
 This section of the wiki is dedicated to explaining how Storm is implemented. You should have a good grasp of how to use Storm before reading these sections. 
 
 - [Structure of the codebase](Structure-of-the-codebase.html)
 - [Lifecycle of a topology](Lifecycle-of-a-topology.html)
 - [Message passing implementation](Message-passing-implementation.html)
-- [Acking framework implementation](Acking-framework-implementation.html)
 - [Metrics](Metrics.html)
 - How transactional topologies work
    - subtopology for TransactionalSpout

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Kestrel-and-Storm.md
----------------------------------------------------------------------
diff --git a/docs/Kestrel-and-Storm.md b/docs/Kestrel-and-Storm.md
index e16b0d9..cb80139 100644
--- a/docs/Kestrel-and-Storm.md
+++ b/docs/Kestrel-and-Storm.md
@@ -1,11 +1,13 @@
 ---
+title: Storm and Kestrel
 layout: documentation
+documentation: true
 ---
 This page explains how to use Storm to consume items from a Kestrel cluster.
 
 ## Preliminaries
 ### Storm
-This tutorial uses examples from the [storm-kestrel](https://github.com/nathanmarz/storm-kestrel) project and the [storm-starter](https://github.com/nathanmarz/storm-starter) project. It's recommended that you clone those projects and follow along with the examples. Read [Setting up development environment](https://github.com/apache/incubator-storm/wiki/Setting-up-development-environment) and [Creating a new Storm project](https://github.com/apache/incubator-storm/wiki/Creating-a-new-Storm-project) to get your machine set up.
+This tutorial uses examples from the [storm-kestrel](https://github.com/nathanmarz/storm-kestrel) project and the [storm-starter](http://github.com/apache/storm/blob/{{page.version}}/examples/storm-starter) project. It's recommended that you clone those projects and follow along with the examples. Read [Setting up development environment](Setting-up-development-environment.html) and [Creating a new Storm project](Creating-a-new-Storm-project.html) to get your machine set up.
 ### Kestrel
 It assumes you are able to run a Kestrel server locally, as described [here](https://github.com/nathanmarz/storm-kestrel).
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Lifecycle-of-a-topology.md
----------------------------------------------------------------------
diff --git a/docs/Lifecycle-of-a-topology.md b/docs/Lifecycle-of-a-topology.md
index 4919be8..6436206 100644
--- a/docs/Lifecycle-of-a-topology.md
+++ b/docs/Lifecycle-of-a-topology.md
@@ -1,5 +1,7 @@
 ---
+title: Lifecycle of a Storm Topology
 layout: documentation
+documentation: true
 ---
 (**NOTE**: this page is based on the 0.7.1 code; many things have changed since then, including a split between tasks and executors, and a reorganization of the code under `storm-core/src` rather than `src/`.)
 
@@ -7,74 +9,74 @@ This page explains in detail the lifecycle of a topology from running the "storm
 
 First a couple of important notes about topologies:
 
-1. The actual topology that runs is different than the topology the user specifies. The actual topology has implicit streams and an implicit "acker" bolt added to manage the acking framework (used to guarantee data processing). The implicit topology is created via the [system-topology!](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/common.clj#L188) function.
+1. The actual topology that runs is different than the topology the user specifies. The actual topology has implicit streams and an implicit "acker" bolt added to manage the acking framework (used to guarantee data processing). The implicit topology is created via the [system-topology!](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/common.clj#L188) function.
 2. `system-topology!` is used in two places:
-  - when Nimbus is creating tasks for the topology [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L316)
-  - in the worker so it knows where it needs to route messages to [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L90)
+  - when Nimbus is creating tasks for the topology [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L316)
+  - in the worker so it knows where it needs to route messages to [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L90)
 
 ## Starting a topology
 
-- "storm jar" command executes your class with the specified arguments. The only special thing that "storm jar" does is set the "storm.jar" environment variable for use by `StormSubmitter` later. [code](https://github.com/apache/incubator-storm/blob/0.7.1/bin/storm#L101)
+- "storm jar" command executes your class with the specified arguments. The only special thing that "storm jar" does is set the "storm.jar" environment variable for use by `StormSubmitter` later. [code](https://github.com/apache/storm/blob/0.7.1/bin/storm#L101)
 - When your code uses `StormSubmitter.submitTopology`, `StormSubmitter` takes the following actions:
-  - First, `StormSubmitter` uploads the jar if it hasn't been uploaded before. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/jvm/backtype/storm/StormSubmitter.java#L83)
-    - Jar uploading is done via Nimbus's Thrift interface [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/storm.thrift#L200)
+  - First, `StormSubmitter` uploads the jar if it hasn't been uploaded before. [code](https://github.com/apache/storm/blob/0.7.1/src/jvm/backtype/storm/StormSubmitter.java#L83)
+    - Jar uploading is done via Nimbus's Thrift interface [code](https://github.com/apache/storm/blob/0.7.1/src/storm.thrift#L200)
     - `beginFileUpload` returns a path in Nimbus's inbox
     - 15 kilobytes are uploaded at a time through `uploadChunk`
     - `finishFileUpload` is called when it's finished uploading
-    - Here is Nimbus's implementation of those Thrift methods: [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L694)
-  - Second, `StormSubmitter` calls `submitTopology` on the Nimbus thrift interface [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/jvm/backtype/storm/StormSubmitter.java#L60)
+    - Here is Nimbus's implementation of those Thrift methods: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L694)
+  - Second, `StormSubmitter` calls `submitTopology` on the Nimbus thrift interface [code](https://github.com/apache/storm/blob/0.7.1/src/jvm/backtype/storm/StormSubmitter.java#L60)
     - The topology config is serialized using JSON (JSON is used so that writing DSL's in any language is as easy as possible)
     - Notice that the Thrift `submitTopology` call takes in the Nimbus inbox path where the jar was uploaded
 
-- Nimbus receives the topology submission. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L639)
-- Nimbus normalizes the topology configuration. The main purpose of normalization is to ensure that every single task will have the same serialization registrations, which is critical for getting serialization working correctly. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L557) 
-- Nimbus sets up the static state for the topology [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L661)
+- Nimbus receives the topology submission. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L639)
+- Nimbus normalizes the topology configuration. The main purpose of normalization is to ensure that every single task will have the same serialization registrations, which is critical for getting serialization working correctly. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L557)
+- Nimbus sets up the static state for the topology [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L661)
     - Jars and configs are kept on local filesystem because they're too big for Zookeeper. The jar and configs are copied into the path {nimbus local dir}/stormdist/{topology id}
     - `setup-storm-static` writes task -> component mapping into ZK
     - `setup-heartbeats` creates a ZK "directory" in which tasks can heartbeat
-- Nimbus calls `mk-assignment` to assign tasks to machines [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L458) 
-    - Assignment record definition is here: [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/common.clj#L25)
+- Nimbus calls `mk-assignment` to assign tasks to machines [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L458)
+    - Assignment record definition is here: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/common.clj#L25)
     - Assignment contains:
       - `master-code-dir`: used by supervisors to download the correct jars/configs for the topology from Nimbus
       - `task->node+port`: Map from a task id to the worker that task should be running on. (A worker is identified by a node/port pair)
      - `node->host`: A map from node id to hostname. This is used so workers know which machines to connect to in order to communicate with other workers. Node ids are used to identify supervisors so that multiple supervisors can be run on one machine. One place this is done is with Mesos integration.
       - `task->start-time-secs`: Contains a map from task id to the timestamp at which Nimbus launched that task. This is used by Nimbus when monitoring topologies, as tasks are given a longer timeout to heartbeat when they're first launched (the launch timeout is configured by "nimbus.task.launch.secs" config)
-- Once topologies are assigned, they're initially in a deactivated mode. `start-storm` writes data into Zookeeper so that the cluster knows the topology is active and can start emitting tuples from spouts. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L504)
+- Once topologies are assigned, they're initially in a deactivated mode. `start-storm` writes data into Zookeeper so that the cluster knows the topology is active and can start emitting tuples from spouts. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L504)
 
 - TODO cluster state diagram (show all nodes and what's kept everywhere)
 
 - Supervisor runs two functions in the background:
-    - `synchronize-supervisor`: This is called whenever assignments in Zookeeper change and also every 10 seconds. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/supervisor.clj#L241)
-      - Downloads code from Nimbus for topologies assigned to this machine for which it doesn't have the code yet. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/supervisor.clj#L258)
-      - Writes into local filesystem what this node is supposed to be running. It writes a map from port -> LocalAssignment. LocalAssignment contains a topology id as well as the list of task ids for that worker. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/supervisor.clj#L13)
-    - `sync-processes`: Reads from the LFS what `synchronize-supervisor` wrote and compares that to what's actually running on the machine. It then starts/stops worker processes as necessary to synchronize. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/supervisor.clj#L177)
+    - `synchronize-supervisor`: This is called whenever assignments in Zookeeper change and also every 10 seconds. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/supervisor.clj#L241)
+      - Downloads code from Nimbus for topologies assigned to this machine for which it doesn't have the code yet. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/supervisor.clj#L258)
+      - Writes into local filesystem what this node is supposed to be running. It writes a map from port -> LocalAssignment. LocalAssignment contains a topology id as well as the list of task ids for that worker. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/supervisor.clj#L13)
+    - `sync-processes`: Reads from the LFS what `synchronize-supervisor` wrote and compares that to what's actually running on the machine. It then starts/stops worker processes as necessary to synchronize. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/supervisor.clj#L177)
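+
+Together these two functions form a desired-state/actual-state reconciliation loop. A simplified, illustrative sketch of the pattern in Java (invented names; not the actual Clojure implementation):
+
+```java
+import java.util.HashMap;
+import java.util.Map;
+
+// Illustrative: reconcile the assignments synchronize-supervisor wrote to
+// local disk against the workers actually running on this node.
+public class SyncProcessesSketch {
+    static void sync(Map<Integer, String> desired,   // port -> topology id
+                     Map<Integer, String> running) { // port -> topology id
+        for (Map.Entry<Integer, String> e : desired.entrySet()) {
+            String current = running.get(e.getKey());
+            if (!e.getValue().equals(current)) {
+                if (current != null) stopWorker(e.getKey());
+                startWorker(e.getKey(), e.getValue());
+            }
+        }
+        for (Integer port : new HashMap<>(running).keySet()) {
+            if (!desired.containsKey(port)) stopWorker(port); // no longer assigned
+        }
+    }
+    static void startWorker(int port, String topologyId) { /* launch worker JVM */ }
+    static void stopWorker(int port) { /* kill worker process */ }
+}
+```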
     
-- Worker processes start up through the `mk-worker` function [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L67)
-  - Worker connects to other workers and starts a thread to monitor for changes. So if a worker gets reassigned, the worker will automatically reconnect to the other worker's new location. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L123)
-  - Monitors whether a topology is active or not and stores that state in the `storm-active-atom` variable. This variable is used by tasks to determine whether or not to call `nextTuple` on the spouts. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L155)
-  - The worker launches the actual tasks as threads within it [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L178)
-- Tasks are set up through the `mk-task` function [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L160)
-  - Tasks set up routing function which takes in a stream and an output tuple and returns a list of task ids to send the tuple to [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L207) (there's also a 3-arity version used for direct streams)
-  - Tasks set up the spout-specific or bolt-specific code with [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L241)
+- Worker processes start up through the `mk-worker` function [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L67)
+  - Worker connects to other workers and starts a thread to monitor for changes. So if a worker gets reassigned, the worker will automatically reconnect to the other worker's new location. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L123)
+  - Monitors whether a topology is active or not and stores that state in the `storm-active-atom` variable. This variable is used by tasks to determine whether or not to call `nextTuple` on the spouts. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L155)
+  - The worker launches the actual tasks as threads within it [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L178)
+- Tasks are set up through the `mk-task` function [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L160)
+  - Tasks set up a routing function that takes in a stream and an output tuple and returns a list of task ids to send the tuple to [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L207) (there's also a 3-arity version used for direct streams; a conceptual sketch follows this list)
+  - Tasks set up the spout-specific or bolt-specific code with [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L241)
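+
+Conceptually, the routing function above has the following shape (an illustrative sketch, not Storm's code; the grouping shown is a simple shuffle):
+
+```java
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+// Illustrative sketch of a tasks-fn: (stream, tuple) -> target task ids.
+public class RoutingSketch {
+    private final Map<String, List<Integer>> streamToTasks; // consumer tasks per stream
+    private final Random random = new Random();
+
+    RoutingSketch(Map<String, List<Integer>> streamToTasks) {
+        this.streamToTasks = streamToTasks;
+    }
+
+    List<Integer> tasksFor(String stream, List<Object> tuple) {
+        List<Integer> candidates = streamToTasks.get(stream);
+        List<Integer> out = new ArrayList<>();
+        if (candidates != null && !candidates.isEmpty()) {
+            // Shuffle grouping: pick one consumer task at random. A fields
+            // grouping would hash the grouping fields of the tuple instead.
+            out.add(candidates.get(random.nextInt(candidates.size())));
+        }
+        return out;
+    }
+}
+```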
    
 ## Topology Monitoring
 
 - Nimbus monitors the topology during its lifetime
-   - Schedules recurring task on the timer thread to check the topologies [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L623)
-   - Nimbus's behavior is represented as a finite state machine [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L98)
-   - The "monitor" event is called on a topology every "nimbus.monitor.freq.secs", which calls `reassign-topology` through `reassign-transition` [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L497)
+   - Schedules recurring task on the timer thread to check the topologies [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L623)
+   - Nimbus's behavior is represented as a finite state machine [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L98)
+   - The "monitor" event is called on a topology every "nimbus.monitor.freq.secs", which calls `reassign-topology` through `reassign-transition` [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L497)
    - `reassign-topology` calls `mk-assignments`, the same function used to assign the topology the first time. `mk-assignments` is also capable of incrementally updating a topology
       - `mk-assignments` checks heartbeats and reassigns workers as necessary
       - Any reassignments change the state in ZK, which will trigger supervisors to synchronize and start/stop workers
       
 ## Killing a topology
 
-- "storm kill" command runs this code which just calls the Nimbus Thrift interface to kill the topology: [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/command/kill_topology.clj)
-- Nimbus receives the kill command [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L671)
-- Nimbus applies the "kill" transition to the topology [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L676)
-- The kill transition function changes the status of the topology to "killed" and schedules the "remove" event to run "wait time seconds" in the future. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L63)
+- "storm kill" command runs this code which just calls the Nimbus Thrift interface to kill the topology: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/command/kill_topology.clj)
+- Nimbus receives the kill command [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L671)
+- Nimbus applies the "kill" transition to the topology [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L676)
+- The kill transition function changes the status of the topology to "killed" and schedules the "remove" event to run "wait time seconds" in the future. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L63)
    - The wait time defaults to the topology message timeout but can be overridden with the -w flag in the "storm kill" command
   - This causes the topology to be deactivated for the wait time before it's actually shut down. This gives the topology a chance to finish processing what it's currently processing before shutting down the workers.
-   - Changing the status during the kill transition ensures that the kill protocol is fault-tolerant to Nimbus crashing. On startup, if the status of the topology is "killed", Nimbus schedules the remove event to run "wait time seconds" in the future [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L111)
-- Removing a topology cleans out the assignment and static information from ZK [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L116)
-- A separate cleanup thread runs the `do-cleanup` function which will clean up the heartbeat dir and the jars/configs stored locally. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L577)
+   - Changing the status during the kill transition ensures that the kill protocol is fault-tolerant to Nimbus crashing. On startup, if the status of the topology is "killed", Nimbus schedules the remove event to run "wait time seconds" in the future [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L111)
+- Removing a topology cleans out the assignment and static information from ZK [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L116)
+- A separate cleanup thread runs the `do-cleanup` function which will clean up the heartbeat dir and the jars/configs stored locally. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/nimbus.clj#L577)
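+
+The deactivate-now, remove-later trick is easy to picture with a scheduler (an illustrative sketch of the pattern, not Nimbus's code):
+
+```java
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+// Illustrative: mark the topology "killed" immediately, then schedule the
+// actual removal after the wait time so in-flight tuples can drain.
+public class KillSketch {
+    static final ScheduledExecutorService timer =
+            Executors.newSingleThreadScheduledExecutor();
+
+    static void kill(String topologyId, int waitSecs) {
+        setStatus(topologyId, "killed"); // persisted, so it survives a Nimbus restart
+        timer.schedule(() -> remove(topologyId), waitSecs, TimeUnit.SECONDS);
+    }
+
+    static void setStatus(String id, String status) { /* write status to ZK */ }
+    static void remove(String id) { /* clean assignment and static state */ }
+}
+```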

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Local-mode.md
----------------------------------------------------------------------
diff --git a/docs/Local-mode.md b/docs/Local-mode.md
index 1f98e36..f871a73 100644
--- a/docs/Local-mode.md
+++ b/docs/Local-mode.md
@@ -1,5 +1,7 @@
 ---
+title: Local Mode
 layout: documentation
+documentation: true
 ---
 Local mode simulates a Storm cluster in process and is useful for developing and testing topologies. Running topologies in local mode is similar to running topologies [on a cluster](Running-topologies-on-a-production-cluster.html). 
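+
+A minimal sketch of local-mode usage (the spout/bolt wiring is elided; fill in your own components):
+
+```java
+import backtype.storm.Config;
+import backtype.storm.LocalCluster;
+import backtype.storm.topology.TopologyBuilder;
+
+public class LocalModeExample {
+    public static void main(String[] args) throws Exception {
+        TopologyBuilder builder = new TopologyBuilder();
+        // ... set spouts and bolts as you would for a real topology ...
+        Config conf = new Config();
+        conf.setDebug(true);
+
+        // Simulates a Storm cluster inside this process.
+        LocalCluster cluster = new LocalCluster();
+        cluster.submitTopology("test", conf, builder.createTopology());
+        Thread.sleep(10000);           // let the topology run for ten seconds
+        cluster.killTopology("test");
+        cluster.shutdown();
+    }
+}
+```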
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Maven.md
----------------------------------------------------------------------
diff --git a/docs/Maven.md b/docs/Maven.md
index 85828da..0c09c2c 100644
--- a/docs/Maven.md
+++ b/docs/Maven.md
@@ -1,56 +1,22 @@
 ---
+title: Maven
 layout: documentation
+documentation: true
 ---
-To develop topologies, you'll need the Storm jars on your classpath. You should either include the unpacked jars in the classpath for your project or use Maven to include Storm as a development dependency. Storm is hosted on Clojars (a Maven repository). To include Storm in your project as a development dependency, add the following to your pom.xml:
+To develop topologies, you'll need the Storm jars on your classpath. You should either include the unpacked jars in the classpath for your project or use Maven to include Storm as a development dependency. Storm is hosted on Maven Central. To include Storm in your project as a development dependency, add the following to your pom.xml:
 
-```xml
-<repository>
-  <id>clojars.org</id>
-  <url>http://clojars.org/repo</url>
-</repository>
-```
 
 ```xml
 <dependency>
-  <groupId>storm</groupId>
-  <artifactId>storm</artifactId>
-  <version>0.7.2</version>
-  <scope>test</scope>
+  <groupId>org.apache.storm</groupId>
+  <artifactId>storm-core</artifactId>
+  <version>{{page.version}}</version>
+  <scope>provided</scope>
 </dependency>
 ```
 
-[Here's an example](https://github.com/nathanmarz/storm-starter/blob/master/m2-pom.xml) of a pom.xml for a Storm project.
-
-If Maven isn't your thing, check out [leiningen](https://github.com/technomancy/leiningen). Leiningen is a build tool for Clojure, but it can be used for pure Java projects as well. Leiningen makes builds and dependency management using Maven dead-simple. Here's an example project.clj for a pure-Java Storm project:
-
-```clojure
-(defproject storm-starter "0.0.1-SNAPSHOT"
-  :java-source-path "src/jvm"
-  :javac-options {:debug "true" :fork "true"}
-  :jvm-opts ["-Djava.library.path=/usr/local/lib:/opt/local/lib:/usr/lib"]
-  :dependencies []
-  :dev-dependencies [
-                     [storm "0.7.2"]
-                     ])
-```
-
-You can fetch dependencies using `lein deps`, build the project with `lein compile`, and make a jar suitable for submitting to a cluster with `lein uberjar`.
-
-### Using Storm as a library
-
-If you want to use Storm as a library (e.g., use the Distributed RPC client) and have the Storm dependency jars be distributed with your application, there's a separate Maven dependency called "storm/storm-lib". The only difference between this dependency and the usual "storm/storm" is that storm-lib does not have any logging configured.
+[Here's an example]({{page.git-blob-base}}/examples/storm-starter/pom.xml) of a pom.xml for a Storm project.
 
 ### Developing Storm
 
-You will want to
-
-	bash ./bin/install_zmq.sh   # install the jzmq dependency
-	lein sub install
-
-Build javadocs with
-
-	bash ./bin/javadoc.sh
-
-### Building a Storm Release
-
-Use the file `bin/build_release.sh` to make a zipfile like the ones you would download (and like what the bin files require in order to run daemons).
+Please refer to [DEVELOPER.md]({{page.git-blob-base}}/DEVELOPER.md) for more details.

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Message-passing-implementation.md
----------------------------------------------------------------------
diff --git a/docs/Message-passing-implementation.md b/docs/Message-passing-implementation.md
index f22a5aa..a17f66a 100644
--- a/docs/Message-passing-implementation.md
+++ b/docs/Message-passing-implementation.md
@@ -1,28 +1,30 @@
 ---
+title: Message Passing Implementation
 layout: documentation
+documentation: true
 ---
 (Note: this walkthrough is out of date as of 0.8.0. 0.8.0 revamped the message passing infrastructure to be based on the Disruptor)
 
 This page walks through how emitting and transferring tuples works in Storm.
 
 - Worker is responsible for message transfer
-   - `refresh-connections` is called every "task.refresh.poll.secs" or whenever assignment in ZK changes. It manages connections to other workers and maintains a mapping from task -> worker [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L123)
-   - Provides a "transfer function" that is used by tasks to send tuples to other tasks. The transfer function takes in a task id and a tuple, and it serializes the tuple and puts it onto a "transfer queue". There is a single transfer queue for each worker. [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L56)
-   - The serializer is thread-safe [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/jvm/backtype/storm/serialization/KryoTupleSerializer.java#L26)
-   - The worker has a single thread which drains the transfer queue and sends the messages to other workers [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L185)
-   - Message sending happens through this protocol: [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/messaging/protocol.clj)
-   - The implementation for distributed mode uses ZeroMQ [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/messaging/zmq.clj)
-   - The implementation for local mode uses in memory Java queues (so that it's easy to use Storm locally without needing to get ZeroMQ installed) [code](https://github.com/apache/incubator-storm/blob/0.7.1/src/clj/backtype/storm/messaging/local.clj)
+   - `refresh-connections` is called every "task.refresh.poll.secs" or whenever assignment in ZK changes. It manages connections to other workers and maintains a mapping from task -> worker [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L123)
+   - Provides a "transfer function" that is used by tasks to send tuples to other tasks. The transfer function takes in a task id and a tuple, and it serializes the tuple and puts it onto a "transfer queue". There is a single transfer queue for each worker. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L56)
+   - The serializer is thread-safe [code](https://github.com/apache/storm/blob/0.7.1/src/jvm/backtype/storm/serialization/KryoTupleSerializer.java#L26)
+   - The worker has a single thread which drains the transfer queue and sends the messages to other workers [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L185)
+   - Message sending happens through this protocol: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/messaging/protocol.clj)
+   - The implementation for distributed mode uses ZeroMQ [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/messaging/zmq.clj)
+   - The implementation for local mode uses in memory Java queues (so that it's easy to use Storm locally without needing to get ZeroMQ installed) [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/messaging/local.clj)
 - Receiving messages in tasks works differently in local mode and distributed mode
-   - In local mode, the tuple is sent directly to an in-memory queue for the receiving task [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/messaging/local.clj#L21)
-   - In distributed mode, each worker listens on a single TCP port for incoming messages and then routes those messages in-memory to tasks. The TCP port is called a "virtual port", because it receives [task id, message] and then routes it to the actual task. [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/worker.clj#L204)
-      - The virtual port implementation is here: [code](https://github.com/apache/incubator-storm/blob/master/src/clj/zilch/virtual_port.clj)
-      - Tasks listen on an in-memory ZeroMQ port for messages from the virtual port [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/task.clj#L201)
-        - Bolts listen here: [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/task.clj#L489)
-        - Spouts listen here: [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/task.clj#L382)
+   - In local mode, the tuple is sent directly to an in-memory queue for the receiving task [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/messaging/local.clj#L21)
+   - In distributed mode, each worker listens on a single TCP port for incoming messages and then routes those messages in-memory to tasks. The TCP port is called a "virtual port", because it receives [task id, message] and then routes it to the actual task. [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/worker.clj#L204)
+      - The virtual port implementation is here: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/zilch/virtual_port.clj)
+      - Tasks listen on an in-memory ZeroMQ port for messages from the virtual port [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L201)
+        - Bolts listen here: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L489)
+        - Spouts listen here: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L382)
 - Tasks are responsible for message routing. A tuple is emitted either to a direct stream (where the task id is specified) or a regular stream. In direct streams, the message is only sent if that bolt subscribes to that direct stream. In regular streams, the stream grouping functions are used to determine the task ids to send the tuple to.
-  - Tasks have a routing map from {stream id} -> {component id} -> {stream grouping function} [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/task.clj#L198)
-  - The "tasks-fn" returns the task ids to send the tuples to for either regular stream emit or direct stream emit [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/task.clj#L207)
+  - Tasks have a routing map from {stream id} -> {component id} -> {stream grouping function} [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L198)
+  - The "tasks-fn" returns the task ids to send the tuples to for either regular stream emit or direct stream emit [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L207)
   - After getting the output task ids, bolts and spouts use the transfer-fn provided by the worker to actually transfer the tuples
-      - Bolt transfer code here: [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/task.clj#L429)
-      - Spout transfer code here: [code](https://github.com/apache/incubator-storm/blob/master/src/clj/backtype/storm/daemon/task.clj#L329)
+      - Bolt transfer code here: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L429)
+      - Spout transfer code here: [code](https://github.com/apache/storm/blob/0.7.1/src/clj/backtype/storm/daemon/task.clj#L329)
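+
+The worker-side transfer mechanics boil down to a queue plus a single drainer thread. An illustrative Java sketch of that shape (invented names; not Storm's code):
+
+```java
+import java.util.concurrent.LinkedBlockingQueue;
+
+// Illustrative: tasks enqueue (taskId, serialized tuple) pairs; one thread
+// drains the queue and sends each message to the right worker connection.
+public class TransferSketch {
+    static class OutboundMessage {
+        final int taskId;
+        final byte[] payload;
+        OutboundMessage(int taskId, byte[] payload) {
+            this.taskId = taskId;
+            this.payload = payload;
+        }
+    }
+
+    private final LinkedBlockingQueue<OutboundMessage> transferQueue =
+            new LinkedBlockingQueue<>();
+
+    // The "transfer function" handed to tasks.
+    void transfer(int taskId, byte[] serializedTuple) {
+        transferQueue.offer(new OutboundMessage(taskId, serializedTuple));
+    }
+
+    void startDrainer() {
+        Thread drainer = new Thread(() -> {
+            try {
+                while (true) {
+                    OutboundMessage m = transferQueue.take();
+                    send(m.taskId, m.payload); // ZeroMQ in distributed mode
+                }
+            } catch (InterruptedException ignored) { }
+        });
+        drainer.setDaemon(true);
+        drainer.start();
+    }
+
+    void send(int taskId, byte[] payload) { /* look up the worker for taskId and write */ }
+}
+```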

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Metrics.md
----------------------------------------------------------------------
diff --git a/docs/Metrics.md b/docs/Metrics.md
index f43f8c7..b2521b1 100644
--- a/docs/Metrics.md
+++ b/docs/Metrics.md
@@ -1,5 +1,7 @@
 ---
+title: Storm Metrics
 layout: documentation
+documentation: true
 ---
 Storm exposes a metrics interface to report summary statistics across the full topology.
 It's used internally to track the numbers you see in the Nimbus UI console: counts of executes and acks; average process latency per bolt; worker heap usage; and so forth.
@@ -10,13 +12,13 @@ Metrics have to implement just one method, `getValueAndReset` -- do any remainin
 
 Storm gives you these metric types:
 
-* [AssignableMetric]() -- set the metric to the explicit value you supply. Useful if it's an external value or in the case that you are already calculating the summary statistic yourself.
-* [CombinedMetric](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/metric/api/CombinedMetric.java) -- generic interface for metrics that can be updated associatively. 
-* [CountMetric](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/metric/api/CountMetric.java) -- a running total of the supplied values. Call `incr()` to increment by one, `incrBy(n)` to add/subtract the given number.
-  - [MultiCountMetric](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/metric/api/MultiCountMetric.java) -- a hashmap of count metrics.
-* [ReducedMetric](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/metric/api/ReducedMetric.java)
-  - [MeanReducer](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/metric/api/MeanReducer.java) -- track a running average of values given to its `reduce()` method. (It accepts `Double`, `Integer` or `Long` values, and maintains the internal average as a `Double`.) Despite his reputation, the MeanReducer is actually a pretty nice guy in person.
-  - [MultiReducedMetric](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/metric/api/MultiReducedMetric.java) -- a hashmap of reduced metrics.
+* [AssignableMetric]({{page.git-blob-base}}/storm-core/src/jvm/backtype/storm/metric/api/AssignableMetric.java) -- set the metric to the explicit value you supply. Useful if it's an external value or in the case that you are already calculating the summary statistic yourself.
+* [CombinedMetric]({{page.git-blob-base}}/storm-core/src/jvm/backtype/storm/metric/api/CombinedMetric.java) -- generic interface for metrics that can be updated associatively. 
+* [CountMetric]({{page.git-blob-base}}/storm-core/src/jvm/backtype/storm/metric/api/CountMetric.java) -- a running total of the supplied values. Call `incr()` to increment by one, `incrBy(n)` to add/subtract the given number.
+  - [MultiCountMetric]({{page.git-blob-base}}/storm-core/src/jvm/backtype/storm/metric/api/MultiCountMetric.java) -- a hashmap of count metrics.
+* [ReducedMetric]({{page.git-blob-base}}/storm-core/src/jvm/backtype/storm/metric/api/ReducedMetric.java)
+  - [MeanReducer]({{page.git-blob-base}}/storm-core/src/jvm/backtype/storm/metric/api/MeanReducer.java) -- track a running average of values given to its `reduce()` method. (It accepts `Double`, `Integer` or `Long` values, and maintains the internal average as a `Double`.) Despite his reputation, the MeanReducer is actually a pretty nice guy in person.
+  - [MultiReducedMetric]({{page.git-blob-base}}/storm-core/src/jvm/backtype/storm/metric/api/MultiReducedMetric.java) -- a hashmap of reduced metrics.
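+
+Registering one of these in a bolt is a one-liner in `prepare` (a minimal sketch; the `60` is the time-bucket size in seconds after which `getValueAndReset` is called):
+
+```java
+import java.util.Map;
+
+import backtype.storm.metric.api.CountMetric;
+import backtype.storm.task.OutputCollector;
+import backtype.storm.task.TopologyContext;
+import backtype.storm.topology.OutputFieldsDeclarer;
+import backtype.storm.topology.base.BaseRichBolt;
+import backtype.storm.tuple.Tuple;
+
+public class CountingBolt extends BaseRichBolt {
+    private transient CountMetric executedCount;
+    private OutputCollector collector;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+        this.collector = collector;
+        // Reported (and reset) every 60 seconds.
+        executedCount = context.registerMetric("executed_count", new CountMetric(), 60);
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+        executedCount.incr();
+        collector.ack(tuple);
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        // no output streams in this sketch
+    }
+}
+```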
 
 
 ### Metric Consumer
@@ -28,7 +30,7 @@ Storm gives you these metric types:
 
 ### Builtin Metrics
 
-The [builtin metrics](https://github.com/apache/incubator-storm/blob/46c3ba7/storm-core/src/clj/backtype/storm/daemon/builtin_metrics.clj) instrument Storm itself.
+The [builtin metrics]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/daemon/builtin_metrics.clj) instrument Storm itself.
 
-[builtin_metrics.clj](https://github.com/apache/incubator-storm/blob/46c3ba7/storm-core/src/clj/backtype/storm/daemon/builtin_metrics.clj) sets up data structures for the built-in metrics, and facade methods that the other framework components can use to update them. The metrics themselves are calculated in the calling code -- see for example [`ack-spout-msg`](https://github.com/apache/incubator-storm/blob/46c3ba7/storm-core/src/clj/backtype/storm/daemon/executor.clj#358)  in `clj/b/s/daemon/daemon/executor.clj`
+[builtin_metrics.clj]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/daemon/builtin_metrics.clj) sets up data structures for the built-in metrics, and facade methods that the other framework components can use to update them. The metrics themselves are calculated in the calling code -- see for example [`ack-spout-msg`]({{page.git-blob-base}}/storm-core/src/clj/backtype/storm/daemon/executor.clj#L358) in `clj/b/s/daemon/executor.clj`
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Multilang-protocol.md
----------------------------------------------------------------------
diff --git a/docs/Multilang-protocol.md b/docs/Multilang-protocol.md
index a3cb22c..2a90059 100644
--- a/docs/Multilang-protocol.md
+++ b/docs/Multilang-protocol.md
@@ -1,5 +1,7 @@
 ---
+title: Multi-Lang Protocol
 layout: documentation
+documentation: true
 ---
 This page explains the multilang protocol as of Storm 0.7.1. Versions prior to 0.7.1 used a somewhat different protocol, documented [here](Storm-multi-language-protocol-(versions-0.7.0-and-below\).html).
 
@@ -47,7 +49,7 @@ STDIN and STDOUT.
 
 The initial handshake is the same for both types of shell components:
 
-* STDIN: Setup info. This is a JSON object with the Storm configuration, Topology context, and a PID directory, like this:
+* STDIN: Setup info. This is a JSON object with the Storm configuration, a PID directory, and a topology context, like this:
 
 ```
 {
@@ -55,15 +57,37 @@ The initial handshake is the same for both types of shell components:
         "topology.message.timeout.secs": 3,
         // etc
     },
+    "pidDir": "...",
     "context": {
         "task->component": {
             "1": "example-spout",
             "2": "__acker",
-            "3": "example-bolt"
+            "3": "example-bolt1",
+            "4": "example-bolt2"
         },
-        "taskid": 3
-    },
-    "pidDir": "..."
+        "taskid": 3,
+        // Everything below this line is only available in Storm 0.10.0+
+        "componentid": "example-bolt1",
+        "stream->target->grouping": {
+            "default": {
+                "example-bolt2": {
+                    "type": "SHUFFLE"
+                }
+            }
+        },
+        "streams": ["default"],
+        "stream->outputfields": {"default": ["word"]},
+        "source->stream->grouping": {
+            "example-spout": {
+                "default": {
+                    "type": "FIELDS",
+                    "fields": ["word"]
+                }
+            }
+        },
+        "source->stream->fields": {
+            "example-spout": {
+                "default": ["word"]
+            }
+        }
+    }
 }
 ```
 
@@ -71,6 +95,15 @@ Your script should create an empty file named with its PID in this directory. e.
 the PID is 1234, so an empty file named 1234 is created in the directory. This
 file lets the supervisor know the PID so it can shut down the process later on.
 
+As of Storm 0.10.0, the context sent by Storm to shell components has been
+enhanced substantially to include all aspects of the topology context available
+to JVM components.  One key addition is the ability to determine a shell
+component's source and targets (i.e., inputs and outputs) in the topology via
+the `stream->target->grouping` and `source->stream->grouping` dictionaries.  At
+the innermost level of these nested dictionaries, groupings are represented as
+a dictionary that minimally has a `type` key, but can also have a `fields` key
+to specify which fields are involved in a `FIELDS` grouping.
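+
+For example, a multilang library that has parsed the setup JSON into nested maps (with whatever JSON parser it uses) could discover its downstream targets like this (an illustrative sketch; the names are invented):
+
+```java
+import java.util.Map;
+
+public class ContextInspector {
+    // Illustrative: walk stream -> target -> grouping from the parsed setup JSON.
+    @SuppressWarnings("unchecked")
+    static void printTargets(Map<String, Object> context) {
+        Map<String, Object> streams =
+                (Map<String, Object>) context.get("stream->target->grouping");
+        for (Map.Entry<String, Object> stream : streams.entrySet()) {
+            Map<String, Object> targets = (Map<String, Object>) stream.getValue();
+            for (Map.Entry<String, Object> target : targets.entrySet()) {
+                Map<String, Object> grouping = (Map<String, Object>) target.getValue();
+                System.out.println("stream " + stream.getKey() + " -> "
+                        + target.getKey() + " (" + grouping.get("type") + ")");
+            }
+        }
+    }
+}
+```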
+
 * STDOUT: Your PID, in a JSON object, like `{"pid": 1234}`. The shell component will log the PID to its log.
 
 What happens next depends on the type of component:
@@ -219,3 +252,36 @@ A "log" will log a message in the worker log. It looks like:
 
 * Note that, as of version 0.7.1, there is no longer any need for a
   shell bolt to 'sync'.
+
+### Handling Heartbeats (0.9.3 and later)
+
+As of Storm 0.9.3, heartbeats have been added between ShellSpout/ShellBolt and their
+multi-lang subprocesses to detect hanging/zombie subprocesses. Any library
+for interfacing with Storm via multi-lang must take the following actions
+regarding heartbeats:
+
+#### Spout
+
+Shell spouts are synchronous, so subprocesses always send `sync` commands at the
+end of `next()`; as a result, you should not need to do anything special to support
+heartbeats for spouts. That said, you must not let subprocesses sleep longer than
+the worker timeout during `next()`.
+
+#### Bolt
+
+Shell bolts are asynchronous, so a ShellBolt will send heartbeat tuples to its
+subprocess periodically. A heartbeat tuple looks like:
+
+```
+{
+	"id": "-6955786537413359385",
+	"comp": "1",
+	"stream": "__heartbeat",
+	// this shell bolt's system task id
+	"task": -1,
+	"tuple": []
+}
+```
+
+When a subprocess receives a heartbeat tuple, it must send a `sync` command back
+to the ShellBolt.
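+
+The subprocess side can be written in any language; here is a minimal sketch in Java for concreteness (it assumes each incoming message is JSON terminated by an `end` line, as elsewhere in this protocol, and uses a naive string check where a real library would parse the JSON):
+
+```java
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+
+public class HeartbeatSketch {
+    public static void main(String[] args) throws Exception {
+        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
+        StringBuilder message = new StringBuilder();
+        String line;
+        while ((line = in.readLine()) != null) {
+            if (!"end".equals(line)) { message.append(line); continue; }
+            String json = message.toString();
+            message.setLength(0);
+            if (json.contains("\"stream\": \"__heartbeat\"")) {
+                // Answer the heartbeat so the ShellBolt knows we're alive.
+                System.out.println("{\"command\": \"sync\"}");
+                System.out.println("end");
+                System.out.flush();
+            }
+            // ... otherwise handle the tuple normally ...
+        }
+    }
+}
+```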

http://git-wip-us.apache.org/repos/asf/storm/blob/7e959832/docs/Rationale.md
----------------------------------------------------------------------
diff --git a/docs/Rationale.md b/docs/Rationale.md
index 214266e..45ff396 100644
--- a/docs/Rationale.md
+++ b/docs/Rationale.md
@@ -1,5 +1,7 @@
 ---
+title: Rationale
 layout: documentation
+documentation: true
 ---
 The past decade has seen a revolution in data processing. MapReduce, Hadoop, and related technologies have made it possible to store and process data at scales previously unthinkable. Unfortunately, these data processing technologies are not realtime systems, nor are they meant to be. There's no hack that will turn Hadoop into a realtime system; realtime data processing has a fundamentally different set of requirements than batch processing.
 


[09/16] storm git commit: STORM-1617: Release Specific Documentation 0.9.x

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/STORM-UI-REST-API.md
----------------------------------------------------------------------
diff --git a/docs/STORM-UI-REST-API.md b/docs/STORM-UI-REST-API.md
new file mode 100644
index 0000000..2109ab2
--- /dev/null
+++ b/docs/STORM-UI-REST-API.md
@@ -0,0 +1,678 @@
+---
+title: Storm UI REST API
+layout: documentation
+documentation: true
+---
+
+# Storm UI REST API
+
+The Storm UI daemon provides a REST API that allows you to interact with a Storm cluster: you can retrieve metrics
+data and configuration information, and perform management operations such as starting or stopping topologies.
+
+
+# Data format
+
+The REST API returns JSON responses and supports JSONP.
+Clients can pass a `callback` query parameter to have the JSON wrapped in the given callback function.
+
+
+# Using the UI REST API
+
+_Note: Clients should ignore undocumented elements in the JSON response, because_
+_future versions of Storm may drop them._
+
+
+## REST API Base URL
+
+The REST API is part of the UI daemon of Storm (started by `storm ui`) and thus runs on the same host and port as the
+Storm UI (the UI daemon is often run on the same host as the Nimbus daemon).  The port is configured by `ui.port`,
+which is set to `8080` by default (see [defaults.yaml](conf/defaults.yaml)).
+
+The API base URL would thus be:
+
+    http://<ui-host>:<ui-port>/api/v1/...
+
+You can use a tool such as `curl` to talk to the REST API:
+
+    # Request the cluster configuration.
+    # Note: We assume ui.port is configured to the default value of 8080.
+    $ curl http://<ui-host>:8080/api/v1/cluster/configuration
+
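+The same request from Java, using only the standard library (a minimal sketch; substitute your UI host and port):
+
+```java
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.net.HttpURLConnection;
+import java.net.URL;
+
+public class ClusterConfigClient {
+    public static void main(String[] args) throws Exception {
+        // Assumes the UI daemon runs on localhost with the default ui.port.
+        URL url = new URL("http://localhost:8080/api/v1/cluster/configuration");
+        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+        conn.setRequestMethod("GET");
+        try (BufferedReader in = new BufferedReader(
+                new InputStreamReader(conn.getInputStream()))) {
+            String line;
+            while ((line = in.readLine()) != null) {
+                System.out.println(line); // raw JSON response
+            }
+        }
+    }
+}
+```
+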
+## Impersonating a user in a secure environment
+
+In a secure environment an authenticated user can impersonate another user. To impersonate a user, the caller must pass
+a `doAsUser` parameter or header whose value is the user the request should be performed as. Please see SECURITY.MD
+to learn more about how to set up impersonation ACLs and authorization. The REST API uses the same configs and ACLs
+that are used by Nimbus.
+
+Examples:
+
+```no-highlight
+ 1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1425844354\?doAsUser=testUSer1
+ 2. curl 'http://localhost:8080/api/v1/topology/wordcount-1-1425844354/activate' -X POST -H 'doAsUser:testUSer1'
+```
+
+## GET Operations
+
+### /api/v1/cluster/configuration (GET)
+
+Returns the cluster configuration.
+
+Sample response (does not include all the data fields):
+
+```json
+  {
+    "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
+    "topology.tick.tuple.freq.secs": null,
+    "topology.builtin.metrics.bucket.size.secs": 60,
+    "topology.fall.back.on.java.serialization": true,
+    "topology.max.error.report.per.interval": 5,
+    "zmq.linger.millis": 5000,
+    "topology.skip.missing.kryo.registrations": false,
+    "storm.messaging.netty.client_worker_threads": 1,
+    "ui.childopts": "-Xmx768m",
+    "storm.zookeeper.session.timeout": 20000,
+    "nimbus.reassign": true,
+    "topology.trident.batch.emit.interval.millis": 500,
+    "storm.messaging.netty.flush.check.interval.ms": 10,
+    "nimbus.monitor.freq.secs": 10,
+    "logviewer.childopts": "-Xmx128m",
+    "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
+    "topology.executor.send.buffer.size": 1024,
+    }
+```
+
+### /api/v1/cluster/summary (GET)
+
+Returns cluster summary information such as Nimbus uptime or the number of supervisors.
+
+Response fields:
+
+|Field  |Value|Description
+|---	|---	|---
+|stormVersion|String| Storm version|
+|nimbusUptime|String| Shows how long Nimbus has been running|
+|supervisors|Integer| Number of supervisors running|
+|topologies| Integer| Number of topologies running| 
+|slotsTotal| Integer|Total number of available worker slots|
+|slotsUsed| Integer| Number of worker slots used|
+|slotsFree| Integer |Number of worker slots available|
+|executorsTotal| Integer |Total number of executors|
+|tasksTotal| Integer |Total tasks|
+
+Sample response:
+
+```json
+   {
+    "stormVersion": "0.9.2-incubating-SNAPSHOT",
+    "nimbusUptime": "3m 53s",
+    "supervisors": 1,
+    "slotsTotal": 4,
+    "slotsUsed": 3,
+    "slotsFree": 1,
+    "executorsTotal": 28,
+    "tasksTotal": 28
+    }
+```
+
+### /api/v1/supervisor/summary (GET)
+
+Returns summary information for all supervisors.
+
+Response fields:
+
+|Field  |Value|Description|
+|---	|---	|---
+|id| String | Supervisor's id|
+|host| String| Supervisor's host name|
+|uptime| String| Shows how long the supervisor has been running|
+|slotsTotal| Integer| Total number of available worker slots for this supervisor|
+|slotsUsed| Integer| Number of worker slots used on this supervisor|
+
+Sample response:
+
+```json
+{
+    "supervisors": [
+        {
+            "id": "0b879808-2a26-442b-8f7d-23101e0c3696",
+            "host": "10.11.1.7",
+            "uptime": "5m 58s",
+            "slotsTotal": 4,
+            "slotsUsed": 3
+        }
+    ]
+}
+```
+
+### /api/v1/topology/summary (GET)
+
+Returns summary information for all topologies.
+
+Response fields:
+
+|Field  |Value | Description|
+|---	|---	|---
+|id| String| Topology Id|
+|name| String| Topology Name|
+|status| String| Topology Status|
+|uptime| String| Shows how long the topology has been running|
+|tasksTotal| Integer |Total number of tasks for this topology|
+|workersTotal| Integer |Number of workers used for this topology|
+|executorsTotal| Integer |Number of executors used for this topology|
+
+Sample response:
+
+```json
+{
+    "topologies": [
+        {
+            "id": "WordCount3-1-1402960825",
+            "name": "WordCount3",
+            "status": "ACTIVE",
+            "uptime": "6m 5s",
+            "tasksTotal": 28,
+            "workersTotal": 3,
+            "executorsTotal": 28
+        }
+    ]
+}
+```
+
+### /api/v1/topology/:id (GET)
+
+Returns topology information and statistics. Substitute id with the topology id.
+
+Request parameters:
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|window    |String. Default value :all-time| Window duration for metrics in seconds|
+|sys       |String. Values 1 or 0. Default value 0| Controls whether system stats are included in the response|
+
+
+Response fields:
+
+|Field  |Value |Description|
+|---	|---	|---
+|id| String| Topology Id|
+|name| String |Topology Name|
+|uptime| String |How long the topology has been running|
+|status| String |Current status of the topology, e.g. "ACTIVE"|
+|tasksTotal| Integer |Total number of tasks for this topology|
+|workersTotal| Integer |Number of workers used for this topology|
+|executorsTotal| Integer |Number of executors used for this topology|
+|msgTimeout| Integer | Number of seconds a tuple has before the spout considers it failed |
+|windowHint| String | window param value in "hh mm ss" format. Default value is "All Time"|
+|topologyStats| Array | Array of all the topology related stats per time window|
+|topologyStats.windowPretty| String |Duration passed in HH:MM:SS format|
+|topologyStats.window| String |User requested time window for metrics|
+|topologyStats.emitted| Long |Number of messages emitted in given window|
+|topologyStats.transferred| Long |Number of messages transferred in given window|
+|topologyStats.completeLatency| String (double value returned in String format) |Total latency for processing the message|
+|topologyStats.acked| Long |Number of messages acked in given window|
+|topologyStats.failed| Long |Number of messages failed in given window|
+|spouts| Array | Array of all the spout components in the topology|
+|spouts.spoutId| String |Spout id|
+|spouts.executors| Integer |Number of executors for the spout|
+|spouts.emitted| Long |Number of messages emitted in given window |
+|spouts.completeLatency| String (double value returned in String format) |Total latency for processing the message|
+|spouts.transferred| Long |Total number of messages transferred in given window|
+|spouts.tasks| Integer |Total number of tasks for the spout|
+|spouts.lastError| String |Shows the last error that happened in a spout|
+|spouts.errorLapsedSecs| Integer | Number of seconds elapsed since the last error happened in a spout|
+|spouts.errorWorkerLogLink| String | Link to the worker log that reported the exception |
+|spouts.acked| Long |Number of messages acked|
+|spouts.failed| Long |Number of messages failed|
+|bolts| Array | Array of bolt components in the topology|
+|bolts.boltId| String |Bolt id|
+|bolts.capacity| String (double value returned in String format) |This value indicates (number of messages executed * average execute latency) / time window|
+|bolts.processLatency| String (double value returned in String format)  |Average time of the bolt to ack a message after it was received|
+|bolts.executeLatency| String (double value returned in String format) |Average time to run the execute method of the bolt|
+|bolts.executors| Integer |Number of executor tasks in the bolt component|
+|bolts.tasks| Integer |Number of instances of bolt|
+|bolts.acked| Long |Number of tuples acked by the bolt|
+|bolts.failed| Long |Number of tuples failed by the bolt|
+|bolts.lastError| String |Shows the last error that occurred in the bolt|
+|bolts.errorLapsedSecs| Integer |Number of seconds elapsed since the last error happened in a bolt|
+|bolts.errorWorkerLogLink| String | Link to the worker log that reported the exception |
+|bolts.emitted| Long |Number of tuples emitted|
+
+Examples:
+
+```no-highlight
+ 1. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825
+ 2. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825?sys=1
+ 3. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825?window=600
+```
+
+Sample response:
+
+```json
+ {
+    "name": "WordCount3",
+    "id": "WordCount3-1-1402960825",
+    "workersTotal": 3,
+    "window": "600",
+    "status": "ACTIVE",
+    "tasksTotal": 28,
+    "executorsTotal": 28,
+    "uptime": "29m 19s",
+    "msgTimeout": 30,
+    "windowHint": "10m 0s",
+    "topologyStats": [
+        {
+            "windowPretty": "10m 0s",
+            "window": "600",
+            "emitted": 397960,
+            "transferred": 213380,
+            "completeLatency": "0.000",
+            "acked": 213460,
+            "failed": 0
+        },
+        {
+            "windowPretty": "3h 0m 0s",
+            "window": "10800",
+            "emitted": 1190260,
+            "transferred": 638260,
+            "completeLatency": "0.000",
+            "acked": 638280,
+            "failed": 0
+        },
+        {
+            "windowPretty": "1d 0h 0m 0s",
+            "window": "86400",
+            "emitted": 1190260,
+            "transferred": 638260,
+            "completeLatency": "0.000",
+            "acked": 638280,
+            "failed": 0
+        },
+        {
+            "windowPretty": "All time",
+            "window": ":all-time",
+            "emitted": 1190260,
+            "transferred": 638260,
+            "completeLatency": "0.000",
+            "acked": 638280,
+            "failed": 0
+        }
+    ],
+    "spouts": [
+        {
+            "executors": 5,
+            "emitted": 28880,
+            "completeLatency": "0.000",
+            "transferred": 28880,
+            "acked": 0,
+            "spoutId": "spout",
+            "tasks": 5,
+            "lastError": "",
+            "errorLapsedSecs": null,
+            "failed": 0
+        }
+    ],
+        "bolts": [
+        {
+            "executors": 12,
+            "emitted": 184580,
+            "transferred": 0,
+            "acked": 184640,
+            "executeLatency": "0.048",
+            "tasks": 12,
+            "executed": 184620,
+            "processLatency": "0.043",
+            "boltId": "count",
+            "lastError": "",
+            "errorLapsedSecs": null,
+            "capacity": "0.003",
+            "failed": 0
+        },
+        {
+            "executors": 8,
+            "emitted": 184500,
+            "transferred": 184500,
+            "acked": 28820,
+            "executeLatency": "0.024",
+            "tasks": 8,
+            "executed": 28780,
+            "processLatency": "2.112",
+            "boltId": "split",
+            "lastError": "",
+            "errorLapsedSecs": null,
+            "capacity": "0.000",
+            "failed": 0
+        }
+    ],
+    "configuration": {
+        "storm.id": "WordCount3-1-1402960825",
+        "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
+        "topology.tick.tuple.freq.secs": null,
+        "topology.builtin.metrics.bucket.size.secs": 60,
+        "topology.fall.back.on.java.serialization": true,
+        "topology.max.error.report.per.interval": 5,
+        "zmq.linger.millis": 5000,
+        "topology.skip.missing.kryo.registrations": false,
+        "storm.messaging.netty.client_worker_threads": 1,
+        "ui.childopts": "-Xmx768m",
+        "storm.zookeeper.session.timeout": 20000,
+        "nimbus.reassign": true,
+        "topology.trident.batch.emit.interval.millis": 500,
+        "storm.messaging.netty.flush.check.interval.ms": 10,
+        "nimbus.monitor.freq.secs": 10,
+        "logviewer.childopts": "-Xmx128m",
+        "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
+        "topology.executor.send.buffer.size": 1024,
+        "storm.local.dir": "storm-local",
+        "storm.messaging.netty.buffer_size": 5242880,
+        "supervisor.worker.start.timeout.secs": 120,
+        "topology.enable.message.timeouts": true,
+        "nimbus.cleanup.inbox.freq.secs": 600,
+        "nimbus.inbox.jar.expiration.secs": 3600,
+        "drpc.worker.threads": 64,
+        "topology.worker.shared.thread.pool.size": 4,
+        "nimbus.host": "hw10843.local",
+        "storm.messaging.netty.min_wait_ms": 100,
+        "storm.zookeeper.port": 2181,
+        "transactional.zookeeper.port": null,
+        "topology.executor.receive.buffer.size": 1024,
+        "transactional.zookeeper.servers": null,
+        "storm.zookeeper.root": "/storm",
+        "storm.zookeeper.retry.intervalceiling.millis": 30000,
+        "supervisor.enable": true,
+        "storm.messaging.netty.server_worker_threads": 1
+    }
+}
+```
+
+
+### /api/v1/topology/:id/component/:component (GET)
+
+Returns detailed metrics and executor information.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|component |String (required)| Component Id |
+|window    |String. Default value :all-time| Window duration for metrics in seconds|
+|sys       |String. Values 1 or 0. Default value 0| Controls whether system stats are included in the response|
+
+Response fields:
+
+|Field  |Value |Description|
+|---	|---	|---
+|id   | String | Component id|
+|name | String | Topology name|
+|componentType | String | component type: SPOUT or BOLT|
+|windowHint| String | window param value in "hh mm ss" format. Default value is "All Time"|
+|executors| Integer |Number of executor tasks in the component|
+|componentErrors| Array of Errors | List of component errors|
+|componentErrors.time| Long | Timestamp when the exception occurred |
+|componentErrors.errorHost| String | host name for the error|
+|componentErrors.errorPort| String | port for the error|
+|componentErrors.error| String |Shows the error that happened in a component|
+|componentErrors.errorLapsedSecs| Integer | Number of seconds elapsed since the error happened in a component |
+|componentErrors.errorWorkerLogLink| String | Link to the worker log that reported the exception |
+|topologyId| String | Topology id|
+|tasks| Integer |Number of instances of component|
+|window    |String. Default value "All Time" | window duration for metrics in seconds|
+|spoutSummary or boltStats| Array |Array of component stats. **Please note this element tag can be spoutSummary or boltStats depending on the componentType**|
+|spoutSummary.windowPretty| String |Duration passed in HH:MM:SS format|
+|spoutSummary.window| String | window duration for metrics in seconds|
+|spoutSummary.emitted| Long |Number of messages emitted in given window |
+|spoutSummary.completeLatency| String (double value returned in String format) |Total latency for processing the message|
+|spoutSummary.transferred| Long |Total number of messages transferred in given window|
+|spoutSummary.acked| Long |Number of messages acked|
+|spoutSummary.failed| Long |Number of messages failed|
+|boltStats.windowPretty| String |Duration passed in HH:MM:SS format|
+|boltStats.window| String | window duration for metrics in seconds|
+|boltStats.transferred| Long |Total number of messages transferred in given window|
+|boltStats.processLatency| String (double value returned in String format)  |Average time of the bolt to ack a message after it was received|
+|boltStats.acked| Long |Number of messages acked|
+|boltStats.failed| Long |Number of messages failed|
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/component/spout
+2. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/component/spout?sys=1
+3. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/component/spout?window=600
+```
+
+Sample response:
+
+```json
+{
+    "name": "WordCount3",
+    "id": "spout",
+    "componentType": "spout",
+    "windowHint": "10m 0s",
+    "executors": 5,
+    "componentErrors":[{"time": 1406006074000,
+                        "errorHost": "10.11.1.70",
+                        "errorPort": 6701,
+                        "errorWorkerLogLink": "http://10.11.1.7:8000/log?file=worker-6701.log",
+                        "errorLapsedSecs": 16,
+                        "error": "java.lang.RuntimeException: java.lang.StringIndexOutOfBoundsException: Some Error\n\tat backtype.storm.utils.DisruptorQueue.consumeBatchToCursor(DisruptorQueue.java:128)\n\tat backtype.storm.utils.DisruptorQueue.consumeBatchWhenAvailable(DisruptorQueue.java:99)\n\tat backtype.storm.disruptor$consume_batch_when_available.invoke(disruptor.clj:80)\n\tat backtype...more.."
+    }],
+    "topologyId": "WordCount3-1-1402960825",
+    "tasks": 5,
+    "window": "600",
+    "spoutSummary": [
+        {
+            "windowPretty": "10m 0s",
+            "window": "600",
+            "emitted": 28500,
+            "transferred": 28460,
+            "completeLatency": "0.000",
+            "acked": 0,
+            "failed": 0
+        },
+        {
+            "windowPretty": "3h 0m 0s",
+            "window": "10800",
+            "emitted": 127640,
+            "transferred": 127440,
+            "completeLatency": "0.000",
+            "acked": 0,
+            "failed": 0
+        },
+        {
+            "windowPretty": "1d 0h 0m 0s",
+            "window": "86400",
+            "emitted": 127640,
+            "transferred": 127440,
+            "completeLatency": "0.000",
+            "acked": 0,
+            "failed": 0
+        },
+        {
+            "windowPretty": "All time",
+            "window": ":all-time",
+            "emitted": 127640,
+            "transferred": 127440,
+            "completeLatency": "0.000",
+            "acked": 0,
+            "failed": 0
+        }
+    ],
+    "outputStats": [
+        {
+            "stream": "__metrics",
+            "emitted": 40,
+            "transferred": 0,
+            "completeLatency": "0",
+            "acked": 0,
+            "failed": 0
+        },
+        {
+            "stream": "default",
+            "emitted": 28460,
+            "transferred": 28460,
+            "completeLatency": "0",
+            "acked": 0,
+            "failed": 0
+        }
+    ],
+    "executorStats": [
+        {
+            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6701.log",
+            "emitted": 5720,
+            "port": 6701,
+            "completeLatency": "0.000",
+            "transferred": 5720,
+            "host": "10.11.1.7",
+            "acked": 0,
+            "uptime": "43m 4s",
+            "id": "[24-24]",
+            "failed": 0
+        },
+        {
+            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6703.log",
+            "emitted": 5700,
+            "port": 6703,
+            "completeLatency": "0.000",
+            "transferred": 5700,
+            "host": "10.11.1.7",
+            "acked": 0,
+            "uptime": "42m 57s",
+            "id": "[25-25]",
+            "failed": 0
+        },
+        {
+            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6702.log",
+            "emitted": 5700,
+            "port": 6702,
+            "completeLatency": "0.000",
+            "transferred": 5680,
+            "host": "10.11.1.7",
+            "acked": 0,
+            "uptime": "42m 57s",
+            "id": "[26-26]",
+            "failed": 0
+        },
+        {
+            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6701.log",
+            "emitted": 5700,
+            "port": 6701,
+            "completeLatency": "0.000",
+            "transferred": 5680,
+            "host": "10.11.1.7",
+            "acked": 0,
+            "uptime": "43m 4s",
+            "id": "[27-27]",
+            "failed": 0
+        },
+        {
+            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6703.log",
+            "emitted": 5680,
+            "port": 6703,
+            "completeLatency": "0.000",
+            "transferred": 5680,
+            "host": "10.11.1.7",
+            "acked": 0,
+            "uptime": "42m 57s",
+            "id": "[28-28]",
+            "failed": 0
+        }
+    ]
+}
+```
+
+## POST Operations
+
+### /api/v1/topology/:id/activate (POST)
+
+Activates a topology.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+
+Sample Response:
+
+```json
+{"topologyOperation":"activate","topologyId":"wordcount-1-1420308665","status":"success"}
+```
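+
+The same call from Java (a minimal sketch; the topology id below is the one from the sample response and must be replaced with a real id from `/api/v1/topology/summary`):
+
+```java
+import java.net.HttpURLConnection;
+import java.net.URL;
+
+public class ActivateTopology {
+    public static void main(String[] args) throws Exception {
+        URL url = new URL(
+                "http://localhost:8080/api/v1/topology/wordcount-1-1420308665/activate");
+        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+        conn.setRequestMethod("POST");
+        System.out.println("HTTP " + conn.getResponseCode()); // expect 200 on success
+    }
+}
+```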
+
+
+### /api/v1/topology/:id/deactivate (POST)
+
+Deactivates a topology.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+
+Sample Response:
+
+```json
+{"topologyOperation":"deactivate","topologyId":"wordcount-1-1420308665","status":"success"}
+```
+
+
+### /api/v1/topology/:id/rebalance/:wait-time (POST)
+
+Rebalances a topology.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|wait-time |String (required)| Wait time before rebalance happens |
+|rebalanceOptions| Json (optional) | topology rebalance options |
+
+
+Sample rebalanceOptions json:
+
+```json
+{"rebalanceOptions" : {"numWorkers" : 2, "executors" : {"spout" :4, "count" : 10}}, "callback" : "foo"}
+```
+
+Examples:
+
+```no-highlight
+curl -i -b ~/cookiejar.txt -c ~/cookiejar.txt -X POST \
+  -H "Content-Type: application/json" \
+  -d '{"rebalanceOptions": {"numWorkers": 2, "executors": {"spout": 5, "split": 7, "count": 5}}, "callback": "foo"}' \
+  http://localhost:8080/api/v1/topology/wordcount-1-1420308665/rebalance/0
+```
+
+Sample Response:
+
+```json
+{"topologyOperation":"rebalance","topologyId":"wordcount-1-1420308665","status":"success"}
+```
+
+
+
+### /api/v1/topology/:id/kill/:wait-time (POST)
+
+Kills a topology.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|wait-time |String (required)| Wait time before the topology is killed |
+
+Caution: Small wait times (0-5 seconds) may increase the probability of triggering the bug reported in
+[STORM-112](https://issues.apache.org/jira/browse/STORM-112), which may result in broken Supervisor
+daemons.
+
+Sample Response:
+
+```json
+{"topologyOperation":"kill","topologyId":"wordcount-1-1420308665","status":"success"}
+```
+
+## API errors
+
+The API returns a 500 HTTP status code in case of any error.
+
+Sample response:
+
+```json
+{
+  "error": "Internal Server Error",
+  "errorMessage": "java.lang.NullPointerException\n\tat clojure.core$name.invoke(core.clj:1505)\n\tat backtype.storm.ui.core$component_page.invoke(core.clj:752)\n\tat backtype.storm.ui.core$fn__7766.invoke(core.clj:782)\n\tat compojure.core$make_route$fn__5755.invoke(core.clj:93)\n\tat compojure.core$if_route$fn__5743.invoke(core.clj:39)\n\tat compojure.core$if_method$fn__5736.invoke(core.clj:24)\n\tat compojure.core$routing$fn__5761.invoke(core.clj:106)\n\tat clojure.core$some.invoke(core.clj:2443)\n\tat compojure.core$routing.doInvoke(core.clj:106)\n\tat clojure.lang.RestFn.applyTo(RestFn.java:139)\n\tat clojure.core$apply.invoke(core.clj:619)\n\tat compojure.core$routes$fn__5765.invoke(core.clj:111)\n\tat ring.middleware.reload$wrap_reload$fn__6880.invoke(reload.clj:14)\n\tat backtype.storm.ui.core$catch_errors$fn__7800.invoke(core.clj:836)\n\tat ring.middleware.keyword_params$wrap_keyword_params$fn__6319.invoke(keyword_params.clj:27)\n\tat ring.middleware.nested_params$wrap_nest
 ed_params$fn__6358.invoke(nested_params.clj:65)\n\tat ring.middleware.params$wrap_params$fn__6291.invoke(params.clj:55)\n\tat ring.middleware.multipart_params$wrap_multipart_params$fn__6386.invoke(multipart_params.clj:103)\n\tat ring.middleware.flash$wrap_flash$fn__6675.invoke(flash.clj:14)\n\tat ring.middleware.session$wrap_session$fn__6664.invoke(session.clj:43)\n\tat ring.middleware.cookies$wrap_cookies$fn__6595.invoke(cookies.clj:160)\n\tat ring.adapter.jetty$proxy_handler$fn__6112.invoke(jetty.clj:16)\n\tat ring.adapter.jetty.proxy$org.mortbay.jetty.handler.AbstractHandler$0.handle(Unknown Source)\n\tat org.mortbay.jetty.handler.HandlerWrapper.handle(HandlerWrapper.java:152)\n\tat org.mortbay.jetty.Server.handle(Server.java:326)\n\tat org.mortbay.jetty.HttpConnection.handleRequest(HttpConnection.java:542)\n\tat org.mortbay.jetty.HttpConnection$RequestHandler.headerComplete(HttpConnection.java:928)\n\tat org.mortbay.jetty.HttpParser.parseNext(HttpParser.java:549)\n\tat org.mortb
 ay.jetty.HttpParser.parseAvailable(HttpParser.java:212)\n\tat org.mortbay.jetty.HttpConnection.handle(HttpConnection.java:404)\n\tat org.mortbay.jetty.bio.SocketConnector$Connection.run(SocketConnector.java:228)\n\tat org.mortbay.thread.QueuedThreadPool$PoolThread.run(QueuedThreadPool.java:582)\n"
+}
+```

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Serialization-(prior-to-0.6.0).md
----------------------------------------------------------------------
diff --git a/docs/Serialization-(prior-to-0.6.0).md b/docs/Serialization-(prior-to-0.6.0).md
new file mode 100644
index 0000000..e4a0d4f
--- /dev/null
+++ b/docs/Serialization-(prior-to-0.6.0).md
@@ -0,0 +1,50 @@
+---
+layout: documentation
+---
+Tuples can contain objects of any type. Since Storm is a distributed system, it needs to know how to serialize and deserialize objects when they're passed between tasks. By default, Storm can serialize ints, shorts, longs, floats, doubles, bools, bytes, strings, and byte arrays, but if you want to use another type in your tuples, you'll need to implement a custom serializer.
+
+### Dynamic typing
+
+There are no type declarations for fields in a Tuple. You put objects in fields and Storm figures out the serialization dynamically. Before we get to the interface for serialization, let's spend a moment understanding why Storm's tuples are dynamically typed.
+
+Adding static typing to tuple fields would add a large amount of complexity to Storm's API. Hadoop, for example, statically types its keys and values but requires a huge amount of annotations on the part of the user. Hadoop's API is a burden to use and the "type safety" isn't worth it. Dynamic typing is simply easier to use.
+
+Further than that, it's not possible to statically type Storm's tuples in any reasonable way. Suppose a Bolt subscribes to multiple streams. The tuples from all those streams may have different types across the fields. When a Bolt receives a `Tuple` in `execute`, that tuple could have come from any stream and so could have any combination of types. There might be some reflection magic you can do to declare a different method for every tuple stream a bolt subscribes to, but Storm opts for the simpler, straightforward approach of dynamic typing.
+
+Finally, another reason for using dynamic typing is so Storm can be used in a straightforward manner from dynamically typed languages like Clojure and JRuby.
+
+### Custom serialization
+
+Let's dive into Storm's API for defining custom serializations. There are two steps you need to take as a user to create a custom serialization: implement the serializer, and register the serializer to Storm.
+
+#### Creating a serializer
+
+Custom serializers implement the [ISerialization](javadocs/backtype/storm/serialization/ISerialization.html) interface. Implementations specify how to serialize and deserialize types into a binary format.
+
+The interface looks like this:
+
+```java
+public interface ISerialization<T> {
+    public boolean accept(Class c);
+    public void serialize(T object, DataOutputStream stream) throws IOException;
+    public T deserialize(DataInputStream stream) throws IOException;
+}
+```
+
+Storm uses the `accept` method to determine if a type can be serialized by this serializer. Remember, Storm's tuples are dynamically typed so Storm determines what serializer to use at runtime.
+
+`serialize` writes the object out to the output stream in binary format. The field must be written in a way such that it can be deserialized later. For example, if you're writing out a list of objects, you'll need to write out the size of the list first so that you know how many elements to deserialize.
+
+`deserialize` reads the serialized object off of the stream and returns it.
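+
+For illustration, here's a minimal sketch of a serializer for a hypothetical `Money` class that holds a single `long` field of cents (both the class and the serializer are invented for this example):
+
+```java
+import backtype.storm.serialization.ISerialization;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+public class MoneySerialization implements ISerialization<Money> {
+    public boolean accept(Class c) {
+        // Storm calls this at runtime to check whether this serializer handles the type
+        return Money.class.equals(c);
+    }
+
+    public void serialize(Money object, DataOutputStream stream) throws IOException {
+        // write fields in a fixed order so deserialize can read them back the same way
+        stream.writeLong(object.getCents());
+    }
+
+    public Money deserialize(DataInputStream stream) throws IOException {
+        return new Money(stream.readLong());
+    }
+}
+```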
+
+You can see example serialization implementations in the source for [SerializationFactory](https://github.com/apache/incubator-storm/blob/0.5.4/src/jvm/backtype/storm/serialization/SerializationFactory.java).
+
+#### Registering a serializer
+
+Once you create a serializer, you need to tell Storm it exists. This is done through the Storm configuration (See [Concepts](Concepts.html) for information about how configuration works in Storm). You can register serializations either through the config given when submitting a topology or in the storm.yaml files across your cluster.
+
+Serializer registrations are done through the Config.TOPOLOGY_SERIALIZATIONS config, which is simply a list of serialization class names.
+
+Storm provides helpers for registering serializers in a topology config. The [Config](javadocs/backtype/storm/Config.html) class has a method called `addSerialization` that takes in a serializer class to add to the config.
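+
+For example, registering the hypothetical `MoneySerialization` from above when submitting a topology might look like this (a sketch; building the topology itself is omitted):
+
+```java
+Config conf = new Config();
+conf.addSerialization(MoneySerialization.class);
+// submit with the config so the cluster knows about the serializer
+StormSubmitter.submitTopology("my-topology", conf, topology);
+```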
+
+There's an advanced config called Config.TOPOLOGY_SKIP_MISSING_SERIALIZATIONS. If you set this to true, Storm will ignore any serializations that are registered but do not have their code available on the classpath. Otherwise, Storm will throw errors when it can't find a serialization. This is useful if you run many topologies on a cluster that each have different serializations, but you want to declare all the serializations across all topologies in the `storm.yaml` files.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Serialization.md
----------------------------------------------------------------------
diff --git a/docs/Serialization.md b/docs/Serialization.md
new file mode 100644
index 0000000..4c271b4
--- /dev/null
+++ b/docs/Serialization.md
@@ -0,0 +1,60 @@
+---
+layout: documentation
+---
+This page is about how the serialization system in Storm works for versions 0.6.0 and onwards. Storm used a different serialization system prior to 0.6.0, which is documented on [Serialization (prior to 0.6.0)](Serialization-\(prior-to-0.6.0\).html).
+
+Tuples can contain objects of any type. Since Storm is a distributed system, it needs to know how to serialize and deserialize objects when they're passed between tasks.
+
+Storm uses [Kryo](http://code.google.com/p/kryo/) for serialization. Kryo is a flexible and fast serialization library that produces small serializations.
+
+By default, Storm can serialize primitive types, strings, byte arrays, ArrayList, HashMap, HashSet, and the Clojure collection types. If you want to use another type in your tuples, you'll need to register a custom serializer.
+
+### Dynamic typing
+
+There are no type declarations for fields in a Tuple. You put objects in fields and Storm figures out the serialization dynamically. Before we get to the interface for serialization, let's spend a moment understanding why Storm's tuples are dynamically typed.
+
+Adding static typing to tuple fields would add a large amount of complexity to Storm's API. Hadoop, for example, statically types its keys and values but requires a huge amount of annotations on the part of the user. Hadoop's API is a burden to use and the "type safety" isn't worth it. Dynamic typing is simply easier to use.
+
+Further than that, it's not possible to statically type Storm's tuples in any reasonable way. Suppose a Bolt subscribes to multiple streams. The tuples from all those streams may have different types across the fields. When a Bolt receives a `Tuple` in `execute`, that tuple could have come from any stream and so could have any combination of types. There might be some reflection magic you can do to declare a different method for every tuple stream a bolt subscribes to, but Storm opts for the simpler, straightforward approach of dynamic typing.
+
+Finally, another reason for using dynamic typing is so Storm can be used in a straightforward manner from dynamically typed languages like Clojure and JRuby.
+
+### Custom serialization
+
+As mentioned, Storm uses Kryo for serialization. To implement custom serializers, you need to register new serializers with Kryo. It's highly recommended that you read over [Kryo's home page](http://code.google.com/p/kryo/) to understand how it handles custom serialization.
+
+Adding custom serializers is done through the "topology.kryo.register" property in your topology config. It takes a list of registrations, where each registration can take one of two forms:
+
+1. The name of a class to register. In this case, Storm will use Kryo's `FieldsSerializer` to serialize the class. This may or may not be optimal for the class -- see the Kryo docs for more details.
+2. A map from the name of a class to register to an implementation of [com.esotericsoftware.kryo.Serializer](http://code.google.com/p/kryo/source/browse/trunk/src/com/esotericsoftware/kryo/Serializer.java).
+
+Let's look at an example.
+
+```yaml
+topology.kryo.register:
+  - com.mycompany.CustomType1
+  - com.mycompany.CustomType2: com.mycompany.serializer.CustomType2Serializer
+  - com.mycompany.CustomType3
+```
+
+`com.mycompany.CustomType1` and `com.mycompany.CustomType3` will use the `FieldsSerializer`, whereas `com.mycompany.CustomType2` will use `com.mycompany.serializer.CustomType2Serializer` for serialization.
+
+Storm provides helpers for registering serializers in a topology config. The [Config](javadocs/backtype/storm/Config.html) class has a method called `registerSerialization` that takes in a registration to add to the config.
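+
+For example, the YAML registrations above could equivalently be added in code when building the topology config (a sketch; the `com.mycompany` classes stand in for your own types):
+
+```java
+Config conf = new Config();
+conf.registerSerialization(CustomType1.class);
+// register a custom Kryo serializer for CustomType2
+conf.registerSerialization(CustomType2.class, CustomType2Serializer.class);
+conf.registerSerialization(CustomType3.class);
+StormSubmitter.submitTopology("my-topology", conf, builder.createTopology());
+```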
+
+There's an advanced config called `Config.TOPOLOGY_SKIP_MISSING_KRYO_REGISTRATIONS`. If you set this to true, Storm will ignore any serializations that are registered but do not have their code available on the classpath. Otherwise, Storm will throw errors when it can't find a serialization. This is useful if you run many topologies on a cluster that each have different serializations, but you want to declare all the serializations across all topologies in the `storm.yaml` files.
+
+### Java serialization
+
+If Storm encounters a type for which it doesn't have a serialization registered, it will use Java serialization if possible. If the object can't be serialized with Java serialization, then Storm will throw an error.
+
+Beware that Java serialization is extremely expensive, both in terms of CPU cost as well as the size of the serialized object. It is highly recommended that you register custom serializers when you put the topology in production. The Java serialization behavior is there so that it's easy to prototype new topologies.
+
+You can turn off the behavior to fall back on Java serialization by setting the `Config.TOPOLOGY_FALL_BACK_ON_JAVA_SERIALIZATION` config to false.
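+
+For instance, a minimal sketch of disabling the fallback in a topology config:
+
+```java
+Config conf = new Config();
+// unregistered types will now cause an error instead of falling back to Java serialization
+conf.put(Config.TOPOLOGY_FALL_BACK_ON_JAVA_SERIALIZATION, false);
+```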
+
+### Component-specific serialization registrations
+
+Storm 0.7.0 lets you set component-specific configurations (read more about this at [Configuration](Configuration.html)). Of course, if one component defines a serialization, that serialization will need to be available to other bolts -- otherwise they won't be able to receive messages from that component!
+
+When a topology is submitted, a single set of serializations is chosen to be used by all components in the topology for sending messages. This is done by merging the component-specific serializer registrations with the regular set of serialization registrations. If two components define serializers for the same class, one of the serializers is chosen arbitrarily.
+
+To force a serializer for a particular class if there's a conflict between two component-specific registrations, just define the serializer you want to use in the topology-specific configuration. The topology-specific configuration has precedence over component-specific configurations for serialization registrations.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Serializers.md
----------------------------------------------------------------------
diff --git a/docs/Serializers.md b/docs/Serializers.md
new file mode 100644
index 0000000..071c885
--- /dev/null
+++ b/docs/Serializers.md
@@ -0,0 +1,4 @@
+---
+layout: documentation
+---
+* [storm-json](https://github.com/rapportive-oss/storm-json): Simple JSON serializer for Storm

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Setting-up-a-Storm-cluster.md
----------------------------------------------------------------------
diff --git a/docs/Setting-up-a-Storm-cluster.md b/docs/Setting-up-a-Storm-cluster.md
new file mode 100644
index 0000000..e139523
--- /dev/null
+++ b/docs/Setting-up-a-Storm-cluster.md
@@ -0,0 +1,83 @@
+---
+layout: documentation
+---
+This page outlines the steps for getting a Storm cluster up and running. If you're on AWS, you should check out the [storm-deploy](https://github.com/nathanmarz/storm-deploy/wiki) project. [storm-deploy](https://github.com/nathanmarz/storm-deploy/wiki) completely automates the provisioning, configuration, and installation of Storm clusters on EC2. It also sets up Ganglia for you so you can monitor CPU, disk, and network usage.
+
+If you run into difficulties with your Storm cluster, first check the [Troubleshooting](Troubleshooting.html) page for a solution. Otherwise, email the mailing list.
+
+Here's a summary of the steps for setting up a Storm cluster:
+
+1. Set up a Zookeeper cluster
+2. Install dependencies on Nimbus and worker machines
+3. Download and extract a Storm release to Nimbus and worker machines
+4. Fill in mandatory configurations into storm.yaml
+5. Launch daemons under supervision using the "storm" script and a supervisor of your choice
+
+### Set up a Zookeeper cluster
+
+Storm uses Zookeeper for coordinating the cluster. Zookeeper **is not** used for message passing, so the load Storm places on Zookeeper is quite low. Single node Zookeeper clusters should be sufficient for most cases, but if you want failover or are deploying large Storm clusters you may want larger Zookeeper clusters. Instructions for deploying Zookeeper are [here](http://zookeeper.apache.org/doc/r3.3.3/zookeeperAdmin.html). 
+
+A few notes about Zookeeper deployment:
+
+1. It's critical that you run Zookeeper under supervision, since Zookeeper is fail-fast and will exit the process if it encounters any error case. See [here](http://zookeeper.apache.org/doc/r3.3.3/zookeeperAdmin.html#sc_supervision) for more details. 
+2. It's critical that you set up a cron to compact Zookeeper's data and transaction logs. The Zookeeper daemon does not do this on its own, and if you don't set up a cron, Zookeeper will quickly run out of disk space. See [here](http://zookeeper.apache.org/doc/r3.3.3/zookeeperAdmin.html#sc_maintenance) for more details.
+
+### Install dependencies on Nimbus and worker machines
+
+Next you need to install Storm's dependencies on Nimbus and the worker machines. These are:
+
+1. Java 6
+2. Python 2.6.6
+
+These are the versions of the dependencies that have been tested with Storm. Storm may or may not work with different versions of Java and/or Python.
+
+
+### Download and extract a Storm release to Nimbus and worker machines
+
+Next, download a Storm release and extract the zip file somewhere on Nimbus and each of the worker machines. The Storm releases can be downloaded [from here](http://github.com/apache/incubator-storm/downloads).
+
+### Fill in mandatory configurations into storm.yaml
+
+The Storm release contains a file at `conf/storm.yaml` that configures the Storm daemons. You can see the default configuration values [here](https://github.com/apache/incubator-storm/blob/master/conf/defaults.yaml). storm.yaml overrides anything in defaults.yaml. There are a few configurations that are mandatory to get a working cluster:
+
+1) **storm.zookeeper.servers**: This is a list of the hosts in the Zookeeper cluster for your Storm cluster. It should look something like:
+
+```yaml
+storm.zookeeper.servers:
+  - "111.222.333.444"
+  - "555.666.777.888"
+```
+
+If the port that your Zookeeper cluster uses is different from the default, you should set **storm.zookeeper.port** as well.
+
+2) **storm.local.dir**: The Nimbus and Supervisor daemons require a directory on the local disk to store small amounts of state (like jars, confs, and things like that). You should create that directory on each machine, give it proper permissions, and then fill in the directory location using this config. For example:
+
+```yaml
+storm.local.dir: "/mnt/storm"
+```
+
+3) **nimbus.host**: The worker nodes need to know which machine is the master in order to download topology jars and confs. For example:
+
+```yaml
+nimbus.host: "111.222.333.44"
+```
+
+4) **supervisor.slots.ports**: For each worker machine, you configure how many workers run on that machine with this config. Each worker uses a single port for receiving messages, and this setting defines which ports are open for use. If you define five ports here, then Storm will allocate up to five workers to run on this machine. If you define three ports, Storm will only run up to three. By default, this setting is configured to run 4 workers on the ports 6700, 6701, 6702, and 6703. For example:
+
+```yaml
+supervisor.slots.ports:
+    - 6700
+    - 6701
+    - 6702
+    - 6703
+```
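+
+Putting the mandatory settings together, a minimal `conf/storm.yaml` might look like this (hosts and paths are placeholders):
+
+```yaml
+storm.zookeeper.servers:
+  - "111.222.333.444"
+storm.local.dir: "/mnt/storm"
+nimbus.host: "111.222.333.44"
+supervisor.slots.ports:
+    - 6700
+    - 6701
+    - 6702
+    - 6703
+```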
+
+### Launch daemons under supervision using the "storm" script and a supervisor of your choice
+
+The last step is to launch all the Storm daemons. It is critical that you run each of these daemons under supervision. Storm is a __fail-fast__ system, which means the processes will halt whenever an unexpected error is encountered. Storm is designed so that it can safely halt at any point and recover correctly when the process is restarted. This is why Storm keeps no state in-process -- if Nimbus or the Supervisors restart, the running topologies are unaffected. Here's how to run the Storm daemons:
+
+1. **Nimbus**: Run the command "bin/storm nimbus" under supervision on the master machine.
+2. **Supervisor**: Run the command "bin/storm supervisor" under supervision on each worker machine. The supervisor daemon is responsible for starting and stopping worker processes on that machine.
+3. **UI**: Run the Storm UI (a site you can access from the browser that gives diagnostics on the cluster and topologies) by running the command "bin/storm ui" under supervision. The UI can be accessed by navigating your web browser to http://{nimbus host}:8080. 
+
+As you can see, running the daemons is very straightforward. The daemons will log to the logs/ directory under wherever you extracted the Storm release.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Setting-up-a-Storm-project-in-Eclipse.md
----------------------------------------------------------------------
diff --git a/docs/Setting-up-a-Storm-project-in-Eclipse.md b/docs/Setting-up-a-Storm-project-in-Eclipse.md
new file mode 100644
index 0000000..5137cd9
--- /dev/null
+++ b/docs/Setting-up-a-Storm-project-in-Eclipse.md
@@ -0,0 +1 @@
+- fill me in
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Setting-up-development-environment.md
----------------------------------------------------------------------
diff --git a/docs/Setting-up-development-environment.md b/docs/Setting-up-development-environment.md
new file mode 100644
index 0000000..07ba670
--- /dev/null
+++ b/docs/Setting-up-development-environment.md
@@ -0,0 +1,39 @@
+---
+layout: documentation
+---
+This page outlines what you need to do to get a Storm development environment set up. In summary, the steps are:
+
+1. Download a [Storm release](/releases.html), unpack it, and put the unpacked `bin/` directory on your PATH
+2. To be able to start and stop topologies on a remote cluster, put the cluster information in `~/.storm/storm.yaml`
+
+More detail on each of these steps is below.
+
+### What is a development environment?
+
+Storm has two modes of operation: local mode and remote mode. In local mode, you can develop and test topologies completely in process on your local machine. In remote mode, you submit topologies for execution on a cluster of machines.
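+
+For example, here's a minimal sketch of running a topology in local mode (using the built-in `TestWordSpout` as a stand-in for a real topology):
+
+```java
+import backtype.storm.Config;
+import backtype.storm.LocalCluster;
+import backtype.storm.testing.TestWordSpout;
+import backtype.storm.topology.TopologyBuilder;
+
+public class LocalModeExample {
+    public static void main(String[] args) throws Exception {
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout("words", new TestWordSpout(), 1);
+
+        // LocalCluster simulates a Storm cluster in-process -- no daemons needed
+        LocalCluster cluster = new LocalCluster();
+        cluster.submitTopology("test", new Config(), builder.createTopology());
+
+        Thread.sleep(10000);   // let the topology run for a bit
+        cluster.killTopology("test");
+        cluster.shutdown();
+    }
+}
+```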
+
+A Storm development environment has everything installed so that you can develop and test Storm topologies in local mode, package topologies for execution on a remote cluster, and submit/kill topologies on a remote cluster.
+
+Let's quickly go over the relationship between your machine and a remote cluster. A Storm cluster is managed by a master node called "Nimbus". Your machine communicates with Nimbus to submit code (packaged as a jar) and topologies for execution on the cluster, and Nimbus will take care of distributing that code around the cluster and assigning workers to run your topology. Your machine uses a command line client called `storm` to communicate with Nimbus. The `storm` client is only used for remote mode; it is not used for developing and testing topologies in local mode.
+
+### Installing a Storm release locally
+
+If you want to be able to submit topologies to a remote cluster from your machine, you should install a Storm release locally. Installing a Storm release will give you the `storm` client that you can use to interact with remote clusters. To install Storm locally, download a release [from here](/releases.html) and unzip it somewhere on your computer. Then add the unpacked `bin/` directory onto your `PATH` and make sure the `bin/storm` script is executable.
+
+Installing a Storm release locally is only for interacting with remote clusters. For developing and testing topologies in local mode, it is recommended that you use Maven to include Storm as a dev dependency for your project. You can read more about using Maven for this purpose on [Maven](Maven.html). 
+
+### Starting and stopping topologies on a remote cluster
+
+The previous step installed the `storm` client on your machine, which is used to communicate with remote Storm clusters. Now you need to tell the client which Storm cluster to talk to. To do this, put the host address of the master in the `~/.storm/storm.yaml` file. It should look something like this:
+
+```
+nimbus.host: "123.45.678.890"
+```
+
+Alternatively, if you use the [storm-deploy](https://github.com/nathanmarz/storm-deploy) project to provision Storm clusters on AWS, it will automatically set up your ~/.storm/storm.yaml file. You can manually attach to a Storm cluster (or switch between multiple clusters) using the "attach" command, like so:
+
+```
+lein run :deploy --attach --name mystormcluster
+```
+
+More information is on the storm-deploy [wiki](https://github.com/nathanmarz/storm-deploy/wiki).

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Spout-implementations.md
----------------------------------------------------------------------
diff --git a/docs/Spout-implementations.md b/docs/Spout-implementations.md
new file mode 100644
index 0000000..10ddd42
--- /dev/null
+++ b/docs/Spout-implementations.md
@@ -0,0 +1,8 @@
+---
+layout: documentation
+---
+* [storm-kestrel](https://github.com/nathanmarz/storm-kestrel): Adapter to use Kestrel as a spout
+* [storm-amqp-spout](https://github.com/rapportive-oss/storm-amqp-spout): Adapter to use AMQP source as a spout
+* [storm-jms](https://github.com/ptgoetz/storm-jms): Adapter to use a JMS source as a spout
+* [storm-redis-pubsub](https://github.com/sorenmacbeth/storm-redis-pubsub): A spout that subscribes to a Redis pubsub stream
+* [storm-beanstalkd-spout](https://github.com/haitaoyao/storm-beanstalkd-spout): A spout that subscribes to a beanstalkd queue

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Storm-multi-language-protocol-(versions-0.7.0-and-below).md
----------------------------------------------------------------------
diff --git a/docs/Storm-multi-language-protocol-(versions-0.7.0-and-below).md b/docs/Storm-multi-language-protocol-(versions-0.7.0-and-below).md
new file mode 100644
index 0000000..1d4422f
--- /dev/null
+++ b/docs/Storm-multi-language-protocol-(versions-0.7.0-and-below).md
@@ -0,0 +1,122 @@
+---
+layout: documentation
+---
+This page explains the multilang protocol for versions 0.7.0 and below. The protocol changed in version 0.7.1.
+
+# Storm Multi-Language Protocol
+
+## The ShellBolt
+
+Support for multiple languages is implemented via the ShellBolt class. This
+class implements the IBolt interface and implements the protocol for
+executing a script or program via the shell using Java's ProcessBuilder class.
+
+## Output fields
+
+Output fields are part of the Thrift definition of the topology. This means that when you use multilang, you need to create a bolt in Java that extends ShellBolt, implements IRichBolt, and declares the fields in `declareOutputFields`.
+You can learn more about this on [Concepts](Concepts.html).
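+
+For example, the Java side of a multilang bolt looks something like this (a sketch modeled on the storm-starter word count example; `splitsentence.py` is a placeholder for your script, and exact interface details vary slightly across Storm versions):
+
+```java
+import backtype.storm.task.ShellBolt;
+import backtype.storm.topology.IRichBolt;
+import backtype.storm.topology.OutputFieldsDeclarer;
+import backtype.storm.tuple.Fields;
+
+public class SplitSentence extends ShellBolt implements IRichBolt {
+    public SplitSentence() {
+        // the script is run from the resources/ directory of the submitted jar
+        super("python", "splitsentence.py");
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        // output fields must be declared on the Java side, in the Thrift topology
+        declarer.declare(new Fields("word"));
+    }
+}
+```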
+
+## Protocol Preamble
+
+A simple protocol is implemented via the STDIN and STDOUT of the executed
+script or program. A mix of simple strings and JSON encoded data is exchanged
+with the process, making it possible to support pretty much any language.
+
+# Packaging Your Stuff
+
+To run a ShellBolt on a cluster, the scripts that are shelled out to must be
+in the `resources/` directory within the jar submitted to the master.
+
+However, during development or testing on a local machine, the resources
+directory just needs to be on the classpath.
+
+## The Protocol
+
+Notes:
+* Both ends of this protocol use a line-reading mechanism, so be sure to
+trim off newlines from the input and to append them to your output.
+* All JSON inputs and outputs are terminated by a single line containing "end".
+* The bullet points below are written from the perspective of the script writer's
+STDIN and STDOUT.
+
+
+* Your script will be executed by the Bolt.
+* STDIN: A string representing a path. This is a PID directory.
+Your script should create an empty file named with its PID in this directory. E.g., if
+the PID is 1234, an empty file named 1234 is created in the directory. This
+file lets the supervisor know the PID so it can shut down the process later on.
+* STDOUT: Your PID. This is not JSON encoded, just a string. ShellBolt will log the PID to its log.
+* STDIN: (JSON) The Storm configuration.  Various settings and properties.
+* STDIN: (JSON) The Topology context
+* The rest happens in a while(true) loop
+* STDIN: A tuple! This is a JSON encoded structure like this:
+
+```
+{
+    // The tuple's id
+	"id": -6955786537413359385,
+	// The id of the component that created this tuple
+	"comp": 1,
+	// The id of the stream this tuple was emitted to
+	"stream": 1,
+	// The id of the task that created this tuple
+	"task": 9,
+	// All the values in this tuple
+	"tuple": ["snow white and the seven dwarfs", "field2", 3]
+}
+```
+
+* STDOUT: The results of your bolt, JSON encoded. This can be a sequence of acks, fails, emits, and/or logs. Emits look like:
+
+```
+{
+	"command": "emit",
+	// The ids of the tuples this output tuple should be anchored to
+	"anchors": [1231231, -234234234],
+	// The id of the stream this tuple was emitted to. Leave this empty to emit to default stream.
+	"stream": 1,
+	// If doing an emit direct, indicate the task to send the tuple to
+	"task": 9,
+	// All the values in this tuple
+	"tuple": ["field1", 2, 3]
+}
+```
+
+An ack looks like:
+
+```
+{
+	"command": "ack",
+	// the id of the tuple to ack
+	"id": 123123
+}
+```
+
+A fail looks like:
+
+```
+{
+	"command": "fail",
+	// the id of the tuple to fail
+	"id": 123123
+}
+```
+
+A "log" will log a message in the worker log. It looks like:
+
+```
+{
+	"command": "log",
+	// the message to log
+	"msg": "hello world!"
+
+}
+```
+
+* STDOUT: emit "sync" as a single line by itself when the bolt has finished emitting/acking/failing and is ready for the next input
+
+### sync
+
+Note: This command is not JSON encoded; it is sent as a simple string.
+
+This lets the parent bolt know that the script has finished processing and is ready for another tuple.

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Structure-of-the-codebase.md
----------------------------------------------------------------------
diff --git a/docs/Structure-of-the-codebase.md b/docs/Structure-of-the-codebase.md
new file mode 100644
index 0000000..8ac66f4
--- /dev/null
+++ b/docs/Structure-of-the-codebase.md
@@ -0,0 +1,140 @@
+---
+layout: documentation
+---
+There are three distinct layers to Storm's codebase.
+
+First, Storm was designed from the very beginning to be compatible with multiple languages. Nimbus is a Thrift service and topologies are defined as Thrift structures. The usage of Thrift allows Storm to be used from any language.
+
+Second, all of Storm's interfaces are specified as Java interfaces. So even though there's a lot of Clojure in Storm's implementation, all usage must go through the Java API. This means that every feature of Storm is always available via Java.
+
+Third, Storm's implementation is largely in Clojure. Line-wise, Storm is about half Java code, half Clojure code. But Clojure is much more expressive, so in reality the great majority of the implementation logic is in Clojure. 
+
+The following sections explain each of these layers in more detail.
+
+### storm.thrift
+
+The first place to look to understand the structure of Storm's codebase is the [storm.thrift](https://github.com/apache/incubator-storm/blob/master/storm-core/src/storm.thrift) file.
+
+Storm uses [this fork](https://github.com/nathanmarz/thrift/tree/storm) of Thrift (branch 'storm') to produce the generated code. This "fork" is actually Thrift 7 with all the Java packages renamed to be `org.apache.thrift7`. Otherwise, it's identical to Thrift 7. This fork was done because of the lack of backwards compatibility in Thrift and the need for many people to use other versions of Thrift in their Storm topologies.
+
+Every spout or bolt in a topology is given a user-specified identifier called the "component id". The component id is used to specify subscriptions from a bolt to the output streams of other spouts or bolts. A [StormTopology](https://github.com/apache/incubator-storm/blob/master/storm-core/src/storm.thrift#L91) structure contains a map from component id to component for each type of component (spouts and bolts).
+
+Spouts and bolts have the same Thrift definition, so let's just take a look at the [Thrift definition for bolts](https://github.com/apache/incubator-storm/blob/master/storm-core/src/storm.thrift#L79). It contains a `ComponentObject` struct and a `ComponentCommon` struct.
+
+The `ComponentObject` defines the implementation for the bolt. It can be one of three types:
+
+1. A serialized java object (that implements [IBolt](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/task/IBolt.java))
+2. A `ShellComponent` object that indicates the implementation is in another language. Specifying a bolt this way will cause Storm to instantiate a [ShellBolt](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/task/ShellBolt.java) object to handle the communication between the JVM-based worker process and the non-JVM-based implementation of the component.
+3. A `JavaObject` structure which tells Storm the classname and constructor arguments to use to instantiate that bolt. This is useful if you want to define a topology in a non-JVM language. This way, you can make use of JVM-based spouts and bolts without having to create and serialize a Java object yourself.
+
+`ComponentCommon` defines everything else for this component. This includes:
+
+1. What streams this component emits and the metadata for each stream (whether it's a direct stream, the fields declaration)
+2. What streams this component consumes (specified as a map from component_id:stream_id to the stream grouping to use)
+3. The parallelism for this component
+4. The component-specific [configuration](https://github.com/apache/incubator-storm/wiki/Configuration) for this component
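+
+For reference, `ComponentCommon` looks roughly like this in storm.thrift (a paraphrase; see the linked file for the authoritative definition):
+
+```
+struct ComponentCommon {
+  1: required map<GlobalStreamId, Grouping> inputs;
+  2: required map<string, StreamInfo> streams; // key is stream id
+  3: optional i32 parallelism_hint;
+  4: optional string json_conf; // component-specific configuration
+}
+```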
+
+Note that spout structures also have a `ComponentCommon` field, and so spouts can also have declarations to consume other input streams. Yet the Storm Java API does not provide a way for spouts to consume other streams, and if you put any input declarations there for a spout you would get an error when you tried to submit the topology. The reason that spouts have an input declarations field is not for users to use, but for Storm itself to use. Storm adds implicit streams and bolts to the topology to set up the [acking framework](https://github.com/apache/incubator-storm/wiki/Guaranteeing-message-processing), and two of these implicit streams are from the acker bolt to each spout in the topology. The acker sends "ack" or "fail" messages along these streams whenever a tuple tree is detected to be completed or failed. The code that transforms the user's topology into the runtime topology is located [here](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/common.clj#L279).
+
+### Java interfaces
+
+The interfaces for Storm are generally specified as Java interfaces. The main interfaces are:
+
+1. [IRichBolt](javadocs/backtype/storm/topology/IRichBolt.html)
+2. [IRichSpout](javadocs/backtype/storm/topology/IRichSpout.html)
+3. [TopologyBuilder](javadocs/backtype/storm/topology/TopologyBuilder.html)
+
+The strategy for the majority of the interfaces is to:
+
+1. Specify the interface using a Java interface
+2. Provide a base class that provides default implementations when appropriate
+
+You can see this strategy at work with the [BaseRichSpout](javadocs/backtype/storm/topology/base/BaseRichSpout.html) class. 
+
+Spouts and bolts are serialized into the Thrift definition of the topology as described above. 
+
+One subtle aspect of the interfaces is the difference between `IBolt` and `ISpout` vs. `IRichBolt` and `IRichSpout`. The main difference between them is the addition of the `declareOutputFields` method in the "Rich" versions of the interfaces. The reason for the split is that the output fields declaration for each output stream needs to be part of the Thrift struct (so it can be specified from any language), but as a user you want to be able to declare the streams as part of your class. What `TopologyBuilder` does when constructing the Thrift representation is call `declareOutputFields` to get the declaration and convert it into the Thrift structure. The conversion happens [at this portion](https://github.com/apache/incubator-storm/blob/master/storm-core/src/jvm/backtype/storm/topology/TopologyBuilder.java#L205) of the `TopologyBuilder` code. 
+
+
+### Implementation
+
+Specifying all the functionality via Java interfaces ensures that every feature of Storm is available via Java. Moreover, the focus on Java interfaces ensures that the user experience from Java-land is pleasant as well.
+
+The implementation of Storm, on the other hand, is primarily in Clojure. While the codebase is about 50% Java and 50% Clojure in terms of LOC, most of the implementation logic is in Clojure. There are two notable exceptions: the [DRPC](https://github.com/apache/incubator-storm/wiki/Distributed-RPC) and [transactional topologies](https://github.com/apache/incubator-storm/wiki/Transactional-topologies) implementations. These are implemented purely in Java, to serve as an illustration of how to implement a higher level abstraction on Storm. The DRPC and transactional topologies implementations are in the [backtype.storm.coordination](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/coordination), [backtype.storm.drpc](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/drpc), and [backtype.storm.transactional](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/transactional) packages.
+
+Here's a summary of the purpose of the main Java packages and Clojure namespaces:
+
+#### Java packages
+
+[backtype.storm.coordination](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/coordination): Implements the pieces required to coordinate batch-processing on top of Storm, which both DRPC and transactional topologies use. `CoordinatedBolt` is the most important class here.
+
+[backtype.storm.drpc](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/drpc): Implementation of the DRPC higher level abstraction
+
+[backtype.storm.generated](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/generated): The generated Thrift code for Storm (generated using [this fork](https://github.com/nathanmarz/thrift) of Thrift, which simply renames the packages to org.apache.thrift7 to avoid conflicts with other Thrift versions)
+
+[backtype.storm.grouping](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/grouping): Contains interface for making custom stream groupings
+
+[backtype.storm.hooks](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/hooks): Interfaces for hooking into various events in Storm, such as when tasks emit tuples, when tuples are acked, etc. User guide for hooks is [here](https://github.com/apache/incubator-storm/wiki/Hooks).
+
+[backtype.storm.serialization](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/serialization): Implementation of how Storm serializes/deserializes tuples. Built on top of [Kryo](http://code.google.com/p/kryo/).
+
+[backtype.storm.spout](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/spout): Definition of spout and associated interfaces (like the `SpoutOutputCollector`). Also contains `ShellSpout` which implements the protocol for defining spouts in non-JVM languages.
+
+[backtype.storm.task](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/task): Definition of bolt and associated interfaces (like `OutputCollector`). Also contains `ShellBolt` which implements the protocol for defining bolts in non-JVM languages. Finally, `TopologyContext` is defined here as well, which is provided to spouts and bolts so they can get data about the topology and its execution at runtime.
+
+[backtype.storm.testing](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/testing): Contains a variety of test bolts and utilities used in Storm's unit tests.
+
+[backtype.storm.topology](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/topology): Java layer over the underlying Thrift structure to provide a clean, pure-Java API to Storm (users don't have to know about Thrift). `TopologyBuilder` is here as well as the helpful base classes for the different spouts and bolts. The slightly-higher level `IBasicBolt` interface is here, which is a simpler way to write certain kinds of bolts.
+
+[backtype.storm.transactional](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/transactional): Implementation of transactional topologies.
+
+[backtype.storm.tuple](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/tuple): Implementation of Storm's tuple data model.
+
+[backtype.storm.utils](https://github.com/apache/incubator-storm/tree/master/storm-core/src/jvm/backtype/storm/utils): Data structures and miscellaneous utilities used throughout the codebase.
+
+
+#### Clojure namespaces
+
+[backtype.storm.bootstrap](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/bootstrap.clj): Contains a helpful macro to import all the classes and namespaces that are used throughout the codebase.
+
+[backtype.storm.clojure](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/clojure.clj): Implementation of the Clojure DSL for Storm.
+
+[backtype.storm.cluster](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/cluster.clj): All Zookeeper logic used in Storm daemons is encapsulated in this file. This code manages how cluster state (like what tasks are running where, what spout/bolt each task runs as) is mapped to the Zookeeper "filesystem" API.
+
+[backtype.storm.command.*](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/command): These namespaces implement various commands for the `storm` command line client. These implementations are very short.
+
+[backtype.storm.config](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/config.clj): Implementation of config reading/parsing code for Clojure. Also has utility functions for determining what local path nimbus/supervisor/daemons should be using for various things. e.g. the `master-inbox` function will return the local path that Nimbus should use when jars are uploaded to it.
+
+[backtype.storm.daemon.acker](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/acker.clj): Implementation of the "acker" bolt, which is a key part of how Storm guarantees data processing.
+
+[backtype.storm.daemon.common](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/common.clj): Implementation of common functions used in Storm daemons, like getting the id for a topology based on the name, mapping a user's topology into the one that actually executes (with implicit acking streams and acker bolt added - see `system-topology!` function), and definitions for the various heartbeat and other structures persisted by Storm.
+
+[backtype.storm.daemon.drpc](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/drpc.clj): Implementation of the DRPC server for use with DRPC topologies.
+
+[backtype.storm.daemon.nimbus](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/nimbus.clj): Implementation of Nimbus.
+
+[backtype.storm.daemon.supervisor](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/supervisor.clj): Implementation of Supervisor.
+
+[backtype.storm.daemon.task](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/task.clj): Implementation of an individual task for a spout or bolt. Handles message routing, serialization, stats collection for the UI, as well as the spout-specific and bolt-specific execution implementations.
+
+[backtype.storm.daemon.worker](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/daemon/worker.clj): Implementation of a worker process (which will contain many tasks within). Implements message transferring and task launching.
+
+[backtype.storm.event](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/event.clj): Implements a simple asynchronous function executor. Used in various places in Nimbus and Supervisor to make functions execute serially to avoid any race conditions.
+
+[backtype.storm.log](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/log.clj): Defines the functions used to log messages to log4j.
+
+[backtype.storm.messaging.*](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/messaging): Defines a higher level interface to implementing point to point messaging. In local mode Storm uses in-memory Java queues to do this; on a cluster, it uses ZeroMQ. The generic interface is defined in protocol.clj.
+
+[backtype.storm.stats](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/stats.clj): Implementation of stats rollup routines used when sending stats to ZK for use by the UI. Does things like windowed and rolling aggregations at multiple granularities.
+
+[backtype.storm.testing](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/testing.clj): Implementation of facilities used to test Storm topologies. Includes time simulation, `complete-topology` for running a fixed set of tuples through a topology and capturing the output, tracker topologies for having fine grained control over detecting when a cluster is "idle", and other utilities.
+
+[backtype.storm.thrift](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/thrift.clj): Clojure wrappers around the generated Thrift API to make working with Thrift structures more pleasant.
+
+[backtype.storm.timer](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/timer.clj): Implementation of a background timer to execute functions in the future or on a recurring interval. Storm couldn't use the [Timer](http://docs.oracle.com/javase/1.4.2/docs/api/java/util/Timer.html) class because it needed integration with time simulation in order to be able to unit test Nimbus and the Supervisor.
+
+[backtype.storm.ui.*](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/ui): Implementation of the Storm UI. Completely independent from the rest of the codebase, it uses the Nimbus Thrift API to get data.
+
+[backtype.storm.util](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/util.clj): Contains generic utility functions used throughout the code base.
+ 
+[backtype.storm.zookeeper](https://github.com/apache/incubator-storm/blob/master/storm-core/src/clj/backtype/storm/zookeeper.clj): Clojure wrapper around the Zookeeper API that implements some higher-level operations like "mkdirs" and "delete-recursive".

http://git-wip-us.apache.org/repos/asf/storm/blob/cf0cdbb0/docs/Support-for-non-java-languages.md
----------------------------------------------------------------------
diff --git a/docs/Support-for-non-java-languages.md b/docs/Support-for-non-java-languages.md
new file mode 100644
index 0000000..724d106
--- /dev/null
+++ b/docs/Support-for-non-java-languages.md
@@ -0,0 +1,7 @@
+---
+layout: documentation
+---
+* [Scala DSL](https://github.com/velvia/ScalaStorm)
+* [JRuby DSL](https://github.com/colinsurprenant/storm-jruby)
+* [Clojure DSL](Clojure-DSL.html)
+* [io-storm](https://github.com/gphat/io-storm): Perl multilang adapter