Posted to commits@distributedlog.apache.org by si...@apache.org on 2016/09/13 07:34:31 UTC

[13/23] incubator-distributedlog git commit: DL-3: Move distributedlog website to apache

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/js/bootstrap/tab.js
----------------------------------------------------------------------
diff --git a/docs/js/bootstrap/tab.js b/docs/js/bootstrap/tab.js
new file mode 100755
index 0000000..7d533e8
--- /dev/null
+++ b/docs/js/bootstrap/tab.js
@@ -0,0 +1,155 @@
+/* ========================================================================
+ * Bootstrap: tab.js v3.3.6
+ * http://getbootstrap.com/javascript/#tabs
+ * ========================================================================
+ * Copyright 2011-2015 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // TAB CLASS DEFINITION
+  // ====================
+
+  var Tab = function (element) {
+    // jscs:disable requireDollarBeforejQueryAssignment
+    this.element = $(element)
+    // jscs:enable requireDollarBeforejQueryAssignment
+  }
+
+  Tab.VERSION = '3.3.6'
+
+  Tab.TRANSITION_DURATION = 150
+
+  Tab.prototype.show = function () {
+    var $this    = this.element
+    var $ul      = $this.closest('ul:not(.dropdown-menu)')
+    var selector = $this.data('target')
+
+    if (!selector) {
+      selector = $this.attr('href')
+      selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7
+    }
+
+    if ($this.parent('li').hasClass('active')) return
+
+    var $previous = $ul.find('.active:last a')
+    var hideEvent = $.Event('hide.bs.tab', {
+      relatedTarget: $this[0]
+    })
+    var showEvent = $.Event('show.bs.tab', {
+      relatedTarget: $previous[0]
+    })
+
+    $previous.trigger(hideEvent)
+    $this.trigger(showEvent)
+
+    if (showEvent.isDefaultPrevented() || hideEvent.isDefaultPrevented()) return
+
+    var $target = $(selector)
+
+    this.activate($this.closest('li'), $ul)
+    this.activate($target, $target.parent(), function () {
+      $previous.trigger({
+        type: 'hidden.bs.tab',
+        relatedTarget: $this[0]
+      })
+      $this.trigger({
+        type: 'shown.bs.tab',
+        relatedTarget: $previous[0]
+      })
+    })
+  }
+
+  Tab.prototype.activate = function (element, container, callback) {
+    var $active    = container.find('> .active')
+    var transition = callback
+      && $.support.transition
+      && ($active.length && $active.hasClass('fade') || !!container.find('> .fade').length)
+
+    function next() {
+      $active
+        .removeClass('active')
+        .find('> .dropdown-menu > .active')
+          .removeClass('active')
+        .end()
+        .find('[data-toggle="tab"]')
+          .attr('aria-expanded', false)
+
+      element
+        .addClass('active')
+        .find('[data-toggle="tab"]')
+          .attr('aria-expanded', true)
+
+      if (transition) {
+        element[0].offsetWidth // reflow for transition
+        element.addClass('in')
+      } else {
+        element.removeClass('fade')
+      }
+
+      if (element.parent('.dropdown-menu').length) {
+        element
+          .closest('li.dropdown')
+            .addClass('active')
+          .end()
+          .find('[data-toggle="tab"]')
+            .attr('aria-expanded', true)
+      }
+
+      callback && callback()
+    }
+
+    $active.length && transition ?
+      $active
+        .one('bsTransitionEnd', next)
+        .emulateTransitionEnd(Tab.TRANSITION_DURATION) :
+      next()
+
+    $active.removeClass('in')
+  }
+
+
+  // TAB PLUGIN DEFINITION
+  // =====================
+
+  function Plugin(option) {
+    return this.each(function () {
+      var $this = $(this)
+      var data  = $this.data('bs.tab')
+
+      if (!data) $this.data('bs.tab', (data = new Tab(this)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  var old = $.fn.tab
+
+  $.fn.tab             = Plugin
+  $.fn.tab.Constructor = Tab
+
+
+  // TAB NO CONFLICT
+  // ===============
+
+  $.fn.tab.noConflict = function () {
+    $.fn.tab = old
+    return this
+  }
+
+
+  // TAB DATA-API
+  // ============
+
+  var clickHandler = function (e) {
+    e.preventDefault()
+    Plugin.call($(this), 'show')
+  }
+
+  $(document)
+    .on('click.bs.tab.data-api', '[data-toggle="tab"]', clickHandler)
+    .on('click.bs.tab.data-api', '[data-toggle="pill"]', clickHandler)
+
+}(jQuery);

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/js/bootstrap/tooltip.js
----------------------------------------------------------------------
diff --git a/docs/js/bootstrap/tooltip.js b/docs/js/bootstrap/tooltip.js
new file mode 100755
index 0000000..7094b34
--- /dev/null
+++ b/docs/js/bootstrap/tooltip.js
@@ -0,0 +1,514 @@
+/* ========================================================================
+ * Bootstrap: tooltip.js v3.3.6
+ * http://getbootstrap.com/javascript/#tooltip
+ * Inspired by the original jQuery.tipsy by Jason Frame
+ * ========================================================================
+ * Copyright 2011-2015 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // TOOLTIP PUBLIC CLASS DEFINITION
+  // ===============================
+
+  var Tooltip = function (element, options) {
+    this.type       = null
+    this.options    = null
+    this.enabled    = null
+    this.timeout    = null
+    this.hoverState = null
+    this.$element   = null
+    this.inState    = null
+
+    this.init('tooltip', element, options)
+  }
+
+  Tooltip.VERSION  = '3.3.6'
+
+  Tooltip.TRANSITION_DURATION = 150
+
+  Tooltip.DEFAULTS = {
+    animation: true,
+    placement: 'top',
+    selector: false,
+    template: '<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',
+    trigger: 'hover focus',
+    title: '',
+    delay: 0,
+    html: false,
+    container: false,
+    viewport: {
+      selector: 'body',
+      padding: 0
+    }
+  }
+
+  Tooltip.prototype.init = function (type, element, options) {
+    this.enabled   = true
+    this.type      = type
+    this.$element  = $(element)
+    this.options   = this.getOptions(options)
+    this.$viewport = this.options.viewport && $($.isFunction(this.options.viewport) ? this.options.viewport.call(this, this.$element) : (this.options.viewport.selector || this.options.viewport))
+    this.inState   = { click: false, hover: false, focus: false }
+
+    if (this.$element[0] instanceof document.constructor && !this.options.selector) {
+      throw new Error('`selector` option must be specified when initializing ' + this.type + ' on the window.document object!')
+    }
+
+    var triggers = this.options.trigger.split(' ')
+
+    for (var i = triggers.length; i--;) {
+      var trigger = triggers[i]
+
+      if (trigger == 'click') {
+        this.$element.on('click.' + this.type, this.options.selector, $.proxy(this.toggle, this))
+      } else if (trigger != 'manual') {
+        var eventIn  = trigger == 'hover' ? 'mouseenter' : 'focusin'
+        var eventOut = trigger == 'hover' ? 'mouseleave' : 'focusout'
+
+        this.$element.on(eventIn  + '.' + this.type, this.options.selector, $.proxy(this.enter, this))
+        this.$element.on(eventOut + '.' + this.type, this.options.selector, $.proxy(this.leave, this))
+      }
+    }
+
+    this.options.selector ?
+      (this._options = $.extend({}, this.options, { trigger: 'manual', selector: '' })) :
+      this.fixTitle()
+  }
+
+  Tooltip.prototype.getDefaults = function () {
+    return Tooltip.DEFAULTS
+  }
+
+  Tooltip.prototype.getOptions = function (options) {
+    options = $.extend({}, this.getDefaults(), this.$element.data(), options)
+
+    if (options.delay && typeof options.delay == 'number') {
+      options.delay = {
+        show: options.delay,
+        hide: options.delay
+      }
+    }
+
+    return options
+  }
+
+  Tooltip.prototype.getDelegateOptions = function () {
+    var options  = {}
+    var defaults = this.getDefaults()
+
+    this._options && $.each(this._options, function (key, value) {
+      if (defaults[key] != value) options[key] = value
+    })
+
+    return options
+  }
+
+  Tooltip.prototype.enter = function (obj) {
+    var self = obj instanceof this.constructor ?
+      obj : $(obj.currentTarget).data('bs.' + this.type)
+
+    if (!self) {
+      self = new this.constructor(obj.currentTarget, this.getDelegateOptions())
+      $(obj.currentTarget).data('bs.' + this.type, self)
+    }
+
+    if (obj instanceof $.Event) {
+      self.inState[obj.type == 'focusin' ? 'focus' : 'hover'] = true
+    }
+
+    if (self.tip().hasClass('in') || self.hoverState == 'in') {
+      self.hoverState = 'in'
+      return
+    }
+
+    clearTimeout(self.timeout)
+
+    self.hoverState = 'in'
+
+    if (!self.options.delay || !self.options.delay.show) return self.show()
+
+    self.timeout = setTimeout(function () {
+      if (self.hoverState == 'in') self.show()
+    }, self.options.delay.show)
+  }
+
+  Tooltip.prototype.isInStateTrue = function () {
+    for (var key in this.inState) {
+      if (this.inState[key]) return true
+    }
+
+    return false
+  }
+
+  Tooltip.prototype.leave = function (obj) {
+    var self = obj instanceof this.constructor ?
+      obj : $(obj.currentTarget).data('bs.' + this.type)
+
+    if (!self) {
+      self = new this.constructor(obj.currentTarget, this.getDelegateOptions())
+      $(obj.currentTarget).data('bs.' + this.type, self)
+    }
+
+    if (obj instanceof $.Event) {
+      self.inState[obj.type == 'focusout' ? 'focus' : 'hover'] = false
+    }
+
+    if (self.isInStateTrue()) return
+
+    clearTimeout(self.timeout)
+
+    self.hoverState = 'out'
+
+    if (!self.options.delay || !self.options.delay.hide) return self.hide()
+
+    self.timeout = setTimeout(function () {
+      if (self.hoverState == 'out') self.hide()
+    }, self.options.delay.hide)
+  }
+
+  Tooltip.prototype.show = function () {
+    var e = $.Event('show.bs.' + this.type)
+
+    if (this.hasContent() && this.enabled) {
+      this.$element.trigger(e)
+
+      var inDom = $.contains(this.$element[0].ownerDocument.documentElement, this.$element[0])
+      if (e.isDefaultPrevented() || !inDom) return
+      var that = this
+
+      var $tip = this.tip()
+
+      var tipId = this.getUID(this.type)
+
+      this.setContent()
+      $tip.attr('id', tipId)
+      this.$element.attr('aria-describedby', tipId)
+
+      if (this.options.animation) $tip.addClass('fade')
+
+      var placement = typeof this.options.placement == 'function' ?
+        this.options.placement.call(this, $tip[0], this.$element[0]) :
+        this.options.placement
+
+      var autoToken = /\s?auto?\s?/i
+      var autoPlace = autoToken.test(placement)
+      if (autoPlace) placement = placement.replace(autoToken, '') || 'top'
+
+      $tip
+        .detach()
+        .css({ top: 0, left: 0, display: 'block' })
+        .addClass(placement)
+        .data('bs.' + this.type, this)
+
+      this.options.container ? $tip.appendTo(this.options.container) : $tip.insertAfter(this.$element)
+      this.$element.trigger('inserted.bs.' + this.type)
+
+      var pos          = this.getPosition()
+      var actualWidth  = $tip[0].offsetWidth
+      var actualHeight = $tip[0].offsetHeight
+
+      if (autoPlace) {
+        var orgPlacement = placement
+        var viewportDim = this.getPosition(this.$viewport)
+
+        placement = placement == 'bottom' && pos.bottom + actualHeight > viewportDim.bottom ? 'top'    :
+                    placement == 'top'    && pos.top    - actualHeight < viewportDim.top    ? 'bottom' :
+                    placement == 'right'  && pos.right  + actualWidth  > viewportDim.width  ? 'left'   :
+                    placement == 'left'   && pos.left   - actualWidth  < viewportDim.left   ? 'right'  :
+                    placement
+
+        $tip
+          .removeClass(orgPlacement)
+          .addClass(placement)
+      }
+
+      var calculatedOffset = this.getCalculatedOffset(placement, pos, actualWidth, actualHeight)
+
+      this.applyPlacement(calculatedOffset, placement)
+
+      var complete = function () {
+        var prevHoverState = that.hoverState
+        that.$element.trigger('shown.bs.' + that.type)
+        that.hoverState = null
+
+        if (prevHoverState == 'out') that.leave(that)
+      }
+
+      $.support.transition && this.$tip.hasClass('fade') ?
+        $tip
+          .one('bsTransitionEnd', complete)
+          .emulateTransitionEnd(Tooltip.TRANSITION_DURATION) :
+        complete()
+    }
+  }
+
+  Tooltip.prototype.applyPlacement = function (offset, placement) {
+    var $tip   = this.tip()
+    var width  = $tip[0].offsetWidth
+    var height = $tip[0].offsetHeight
+
+    // manually read margins because getBoundingClientRect includes difference
+    var marginTop = parseInt($tip.css('margin-top'), 10)
+    var marginLeft = parseInt($tip.css('margin-left'), 10)
+
+    // we must check for NaN for ie 8/9
+    if (isNaN(marginTop))  marginTop  = 0
+    if (isNaN(marginLeft)) marginLeft = 0
+
+    offset.top  += marginTop
+    offset.left += marginLeft
+
+    // $.fn.offset doesn't round pixel values
+    // so we use setOffset directly with our own function B-0
+    $.offset.setOffset($tip[0], $.extend({
+      using: function (props) {
+        $tip.css({
+          top: Math.round(props.top),
+          left: Math.round(props.left)
+        })
+      }
+    }, offset), 0)
+
+    $tip.addClass('in')
+
+    // check to see if placing tip in new offset caused the tip to resize itself
+    var actualWidth  = $tip[0].offsetWidth
+    var actualHeight = $tip[0].offsetHeight
+
+    if (placement == 'top' && actualHeight != height) {
+      offset.top = offset.top + height - actualHeight
+    }
+
+    var delta = this.getViewportAdjustedDelta(placement, offset, actualWidth, actualHeight)
+
+    if (delta.left) offset.left += delta.left
+    else offset.top += delta.top
+
+    var isVertical          = /top|bottom/.test(placement)
+    var arrowDelta          = isVertical ? delta.left * 2 - width + actualWidth : delta.top * 2 - height + actualHeight
+    var arrowOffsetPosition = isVertical ? 'offsetWidth' : 'offsetHeight'
+
+    $tip.offset(offset)
+    this.replaceArrow(arrowDelta, $tip[0][arrowOffsetPosition], isVertical)
+  }
+
+  Tooltip.prototype.replaceArrow = function (delta, dimension, isVertical) {
+    this.arrow()
+      .css(isVertical ? 'left' : 'top', 50 * (1 - delta / dimension) + '%')
+      .css(isVertical ? 'top' : 'left', '')
+  }
+
+  Tooltip.prototype.setContent = function () {
+    var $tip  = this.tip()
+    var title = this.getTitle()
+
+    $tip.find('.tooltip-inner')[this.options.html ? 'html' : 'text'](title)
+    $tip.removeClass('fade in top bottom left right')
+  }
+
+  Tooltip.prototype.hide = function (callback) {
+    var that = this
+    var $tip = $(this.$tip)
+    var e    = $.Event('hide.bs.' + this.type)
+
+    function complete() {
+      if (that.hoverState != 'in') $tip.detach()
+      that.$element
+        .removeAttr('aria-describedby')
+        .trigger('hidden.bs.' + that.type)
+      callback && callback()
+    }
+
+    this.$element.trigger(e)
+
+    if (e.isDefaultPrevented()) return
+
+    $tip.removeClass('in')
+
+    $.support.transition && $tip.hasClass('fade') ?
+      $tip
+        .one('bsTransitionEnd', complete)
+        .emulateTransitionEnd(Tooltip.TRANSITION_DURATION) :
+      complete()
+
+    this.hoverState = null
+
+    return this
+  }
+
+  Tooltip.prototype.fixTitle = function () {
+    var $e = this.$element
+    if ($e.attr('title') || typeof $e.attr('data-original-title') != 'string') {
+      $e.attr('data-original-title', $e.attr('title') || '').attr('title', '')
+    }
+  }
+
+  Tooltip.prototype.hasContent = function () {
+    return this.getTitle()
+  }
+
+  Tooltip.prototype.getPosition = function ($element) {
+    $element   = $element || this.$element
+
+    var el     = $element[0]
+    var isBody = el.tagName == 'BODY'
+
+    var elRect    = el.getBoundingClientRect()
+    if (elRect.width == null) {
+      // width and height are missing in IE8, so compute them manually; see https://github.com/twbs/bootstrap/issues/14093
+      elRect = $.extend({}, elRect, { width: elRect.right - elRect.left, height: elRect.bottom - elRect.top })
+    }
+    var elOffset  = isBody ? { top: 0, left: 0 } : $element.offset()
+    var scroll    = { scroll: isBody ? document.documentElement.scrollTop || document.body.scrollTop : $element.scrollTop() }
+    var outerDims = isBody ? { width: $(window).width(), height: $(window).height() } : null
+
+    return $.extend({}, elRect, scroll, outerDims, elOffset)
+  }
+
+  Tooltip.prototype.getCalculatedOffset = function (placement, pos, actualWidth, actualHeight) {
+    return placement == 'bottom' ? { top: pos.top + pos.height,   left: pos.left + pos.width / 2 - actualWidth / 2 } :
+           placement == 'top'    ? { top: pos.top - actualHeight, left: pos.left + pos.width / 2 - actualWidth / 2 } :
+           placement == 'left'   ? { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left - actualWidth } :
+        /* placement == 'right' */ { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left + pos.width }
+
+  }
+
+  Tooltip.prototype.getViewportAdjustedDelta = function (placement, pos, actualWidth, actualHeight) {
+    var delta = { top: 0, left: 0 }
+    if (!this.$viewport) return delta
+
+    var viewportPadding = this.options.viewport && this.options.viewport.padding || 0
+    var viewportDimensions = this.getPosition(this.$viewport)
+
+    if (/right|left/.test(placement)) {
+      var topEdgeOffset    = pos.top - viewportPadding - viewportDimensions.scroll
+      var bottomEdgeOffset = pos.top + viewportPadding - viewportDimensions.scroll + actualHeight
+      if (topEdgeOffset < viewportDimensions.top) { // top overflow
+        delta.top = viewportDimensions.top - topEdgeOffset
+      } else if (bottomEdgeOffset > viewportDimensions.top + viewportDimensions.height) { // bottom overflow
+        delta.top = viewportDimensions.top + viewportDimensions.height - bottomEdgeOffset
+      }
+    } else {
+      var leftEdgeOffset  = pos.left - viewportPadding
+      var rightEdgeOffset = pos.left + viewportPadding + actualWidth
+      if (leftEdgeOffset < viewportDimensions.left) { // left overflow
+        delta.left = viewportDimensions.left - leftEdgeOffset
+      } else if (rightEdgeOffset > viewportDimensions.right) { // right overflow
+        delta.left = viewportDimensions.left + viewportDimensions.width - rightEdgeOffset
+      }
+    }
+
+    return delta
+  }
+
+  Tooltip.prototype.getTitle = function () {
+    var title
+    var $e = this.$element
+    var o  = this.options
+
+    title = $e.attr('data-original-title')
+      || (typeof o.title == 'function' ? o.title.call($e[0]) :  o.title)
+
+    return title
+  }
+
+  Tooltip.prototype.getUID = function (prefix) {
+    do prefix += ~~(Math.random() * 1000000)
+    while (document.getElementById(prefix))
+    return prefix
+  }
+
+  Tooltip.prototype.tip = function () {
+    if (!this.$tip) {
+      this.$tip = $(this.options.template)
+      if (this.$tip.length != 1) {
+        throw new Error(this.type + ' `template` option must consist of exactly 1 top-level element!')
+      }
+    }
+    return this.$tip
+  }
+
+  Tooltip.prototype.arrow = function () {
+    return (this.$arrow = this.$arrow || this.tip().find('.tooltip-arrow'))
+  }
+
+  Tooltip.prototype.enable = function () {
+    this.enabled = true
+  }
+
+  Tooltip.prototype.disable = function () {
+    this.enabled = false
+  }
+
+  Tooltip.prototype.toggleEnabled = function () {
+    this.enabled = !this.enabled
+  }
+
+  Tooltip.prototype.toggle = function (e) {
+    var self = this
+    if (e) {
+      self = $(e.currentTarget).data('bs.' + this.type)
+      if (!self) {
+        self = new this.constructor(e.currentTarget, this.getDelegateOptions())
+        $(e.currentTarget).data('bs.' + this.type, self)
+      }
+    }
+
+    if (e) {
+      self.inState.click = !self.inState.click
+      if (self.isInStateTrue()) self.enter(self)
+      else self.leave(self)
+    } else {
+      self.tip().hasClass('in') ? self.leave(self) : self.enter(self)
+    }
+  }
+
+  Tooltip.prototype.destroy = function () {
+    var that = this
+    clearTimeout(this.timeout)
+    this.hide(function () {
+      that.$element.off('.' + that.type).removeData('bs.' + that.type)
+      if (that.$tip) {
+        that.$tip.detach()
+      }
+      that.$tip = null
+      that.$arrow = null
+      that.$viewport = null
+    })
+  }
+
+
+  // TOOLTIP PLUGIN DEFINITION
+  // =========================
+
+  function Plugin(option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.tooltip')
+      var options = typeof option == 'object' && option
+
+      if (!data && /destroy|hide/.test(option)) return
+      if (!data) $this.data('bs.tooltip', (data = new Tooltip(this, options)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  var old = $.fn.tooltip
+
+  $.fn.tooltip             = Plugin
+  $.fn.tooltip.Constructor = Tooltip
+
+
+  // TOOLTIP NO CONFLICT
+  // ===================
+
+  $.fn.tooltip.noConflict = function () {
+    $.fn.tooltip = old
+    return this
+  }
+
+}(jQuery);

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/js/bootstrap/transition.js
----------------------------------------------------------------------
diff --git a/docs/js/bootstrap/transition.js b/docs/js/bootstrap/transition.js
new file mode 100755
index 0000000..fae36ed
--- /dev/null
+++ b/docs/js/bootstrap/transition.js
@@ -0,0 +1,59 @@
+/* ========================================================================
+ * Bootstrap: transition.js v3.3.6
+ * http://getbootstrap.com/javascript/#transitions
+ * ========================================================================
+ * Copyright 2011-2015 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // CSS TRANSITION SUPPORT (Shoutout: http://www.modernizr.com/)
+  // ============================================================
+
+  function transitionEnd() {
+    var el = document.createElement('bootstrap')
+
+    var transEndEventNames = {
+      WebkitTransition : 'webkitTransitionEnd',
+      MozTransition    : 'transitionend',
+      OTransition      : 'oTransitionEnd otransitionend',
+      transition       : 'transitionend'
+    }
+
+    for (var name in transEndEventNames) {
+      if (el.style[name] !== undefined) {
+        return { end: transEndEventNames[name] }
+      }
+    }
+
+    return false // explicit for ie8 (  ._.)
+  }
+
+  // http://blog.alexmaccaw.com/css-transitions
+  $.fn.emulateTransitionEnd = function (duration) {
+    var called = false
+    var $el = this
+    $(this).one('bsTransitionEnd', function () { called = true })
+    var callback = function () { if (!called) $($el).trigger($.support.transition.end) }
+    setTimeout(callback, duration)
+    return this
+  }
+
+  $(function () {
+    $.support.transition = transitionEnd()
+
+    if (!$.support.transition) return
+
+    $.event.special.bsTransitionEnd = {
+      bindType: $.support.transition.end,
+      delegateType: $.support.transition.end,
+      handle: function (e) {
+        if ($(e.target).is(this)) return e.handleObj.handler.apply(this, arguments)
+      }
+    }
+  })
+
+}(jQuery);

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/operations/bookkeeper.rst
----------------------------------------------------------------------
diff --git a/docs/operations/bookkeeper.rst b/docs/operations/bookkeeper.rst
deleted file mode 100644
index 5a35ba9..0000000
--- a/docs/operations/bookkeeper.rst
+++ /dev/null
@@ -1,193 +0,0 @@
-BookKeeper
-==========
-
-For a reliable BookKeeper service, you should deploy BookKeeper in a cluster.
-
-Run from bookkeeper source
---------------------------
-
-The version of BookKeeper that DistributedLog depends on is not the official open source version.
-It is Twitter's production version `4.3.4-TWTTR`, which is available at `https://github.com/twitter/bookkeeper`.
-We are working actively with the BookKeeper community to merge all of Twitter's changes back to the community.
-
-The major changes in Twitter's bookkeeper include:
-
-- BOOKKEEPER-670_: Long poll reads and LastAddConfirmed piggyback, to reduce tailing read latency.
-- BOOKKEEPER-759_: Delay ensemble change if it doesn't break the ack quorum constraint, to reduce write latency on bookie failures.
-- BOOKKEEPER-757_: Ledger recovery improvements, to reduce the latency of ledger recovery.
-- Misc improvements on bookie recovery and bookie storage.
-
-.. _BOOKKEEPER-670: https://issues.apache.org/jira/browse/BOOKKEEPER-670
-.. _BOOKKEEPER-759: https://issues.apache.org/jira/browse/BOOKKEEPER-759
-.. _BOOKKEEPER-757: https://issues.apache.org/jira/browse/BOOKKEEPER-757
-
-To build bookkeeper, run:
-
-1. First checkout the bookkeeper source code from twitter's branch.
-
-.. code-block:: bash
-
-    $ git clone https://github.com/twitter/bookkeeper.git bookkeeper   
-
-
-2. Build the bookkeeper package:
-
-.. code-block:: bash
-
-    $ cd bookkeeper 
-    $ mvn clean package assembly:single -DskipTests
-
-However, since `bookkeeper-server` is one of the dependencies of `distributedlog-service`,
-you could simply run bookkeeper using the same set of scripts provided in `distributedlog-service`.
-In the following sections, we describe how to run bookkeeper using the scripts provided in
-`distributedlog-service`.
-
-Run from distributedlog source
-------------------------------
-
-Build
-+++++
-
-First of all, build DistributedLog:
-
-.. code-block:: bash
-
-    $ mvn clean install -DskipTests
-
-
-Configuration
-+++++++++++++
-
-The configuration file `bookie.conf` under `distributedlog-service/conf` is a template of the production
-configuration for running a bookie node. Most of the configuration settings are good for production usage.
-You might need to adjust the following settings according to your environment and hardware platform.
-
-Port
-^^^^
-
-By default, the bookie server listens on port `3181`. You can change it to whatever port you like
-by modifying the following setting.
-
-::
-
-    bookiePort=3181
-
-
-Disks
-^^^^^
-
-You need to configure the following settings according to the disk layout of your hardware. It is recommended
-to put `journalDirectory` on a disk separate from the others for performance. It is okay to set
-`indexDirectories` to be the same as `ledgerDirectories`. However, it is recommended to put `indexDirectories`
-on an SSD drive for better performance.
-
-::
-    
-    # Directory Bookkeeper outputs its write ahead log
-    journalDirectory=/tmp/data/bk/journal
-
-    # Directory Bookkeeper outputs ledger snapshots
-    ledgerDirectories=/tmp/data/bk/ledgers
-
-    # Directory in which index files will be stored.
-    indexDirectories=/tmp/data/bk/ledgers
-
-
-To better understand how bookie nodes work, please check the bookkeeper_ website for more details.
-
-ZooKeeper
-^^^^^^^^^
-
-You need to configure the following settings to point the bookie to the zookeeper servers that it uses.
-You need to make sure `zkLedgersRootPath` exists before starting the bookies.
-
-::
-   
-    # Root zookeeper path to store ledger metadata
-    # This parameter is used by zookeeper-based ledger manager as a root znode to
-    # store all ledgers.
-    zkLedgersRootPath=/messaging/bookkeeper/ledgers
-    # A list of one or more servers on which zookeeper is running.
-    zkServers=localhost:2181
-
-
-Stats Provider
-^^^^^^^^^^^^^^
-
-Bookies use a `StatsProvider` to expose their metrics. The `StatsProvider` is a pluggable library for
-adapting to various stats collection systems. Please check :doc:`monitoring` for more details.
-
-::
-    
-    # stats provider - use the `codahale` metrics library
-    statsProviderClass=org.apache.bookkeeper.stats.CodahaleMetricsServletProvider
-
-    ### Following settings are stats provider related settings
-
-    # Exporting codahale stats in http port `9001`
-    codahaleStatsHttpPort=9001
-
-
-Index Settings
-^^^^^^^^^^^^^^
-
-- `pageSize`: size of an index page in the ledger cache, in bytes. If there is a large number
-  of ledgers and each ledger has few entries, a smaller index page size improves memory usage.
-- `pageLimit`: the maximum number of index pages in the ledger cache. If the number of index pages
-  reaches this limit, the bookie server starts swapping some ledgers from memory to disk.
-  Increase this value when swapping becomes more frequent, but make sure `pageLimit*pageSize`
-  does not exceed the JVM maximum memory, as illustrated below.
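-
-For example (illustrative numbers only, not a recommendation): with `pageSize=8192` and
-`pageLimit=131072`, the ledger cache could use up to 8192 * 131072 bytes = 1GB of memory,
-which must fit comfortably within the JVM heap configured for the bookie.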
-
-
-Journal Settings
-^^^^^^^^^^^^^^^^
-
-- `journalMaxGroupWaitMSec`: The maximum wait time for group commit. It is valid only when
-  `journalFlushWhenQueueEmpty` is false.
-- `journalFlushWhenQueueEmpty`: Flag indicating whether to flush/sync the journal. If it is `true`,
-  the bookie server syncs the journal when there are no other writes in the journal queue.
-- `journalBufferedWritesThreshold`: The maximum buffered writes for group commit, in bytes.
-  It is valid only when `journalFlushWhenQueueEmpty` is false.
-- `journalBufferedEntriesThreshold`: The maximum buffered writes for group commit, in entries.
-  It is valid only when `journalFlushWhenQueueEmpty` is false.
-
-Setting `journalFlushWhenQueueEmpty` to `true` produces low latency when the traffic is low.
-However, the latency varies a lot as the traffic increases. So it is recommended to set
-`journalMaxGroupWaitMSec`, `journalBufferedEntriesThreshold` and `journalBufferedWritesThreshold`
-to reduce the number of fsyncs made to the journal disk and achieve sustained low latency, as shown below.
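-
-For example, an illustrative group-commit configuration (assumed values, not tuned recommendations;
-adjust them for your workload and journal disks) could look like:
-
-::
-
-    journalFlushWhenQueueEmpty=false
-    journalMaxGroupWaitMSec=2
-    journalBufferedWritesThreshold=524288
-    journalBufferedEntriesThreshold=180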
-
-Thread Settings
-^^^^^^^^^^^^^^^
-
-It is recommended to configure the following settings to align with the number of CPU cores of the hardware.
-
-::
-    
-    numAddWorkerThreads=4
-    numJournalCallbackThreads=4
-    numReadWorkerThreads=4
-    numLongPollWorkerThreads=4
-
-Run 
-+++
-
-As `bookkeeper-server` is shipped as part of `distributedlog-service`, you could use the `dlog-daemon.sh`
-script to start a `bookie` as a daemon.
-
-Start the bookie:
-
-.. code-block:: bash
-
-    $ ./distributedlog-service/bin/dlog-daemon.sh start bookie --conf /path/to/bookie/conf
-
-
-Stop the bookie:
-
-.. code-block:: bash
-
-    $ ./distributedlog-service/bin/dlog-daemon.sh stop bookie
-
-
-Please check the bookkeeper_ website for more details.
-
-.. _bookkeeper: http://bookkeeper.apache.org/

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/operations/deployment.rst
----------------------------------------------------------------------
diff --git a/docs/operations/deployment.rst b/docs/operations/deployment.rst
deleted file mode 100644
index 41240db..0000000
--- a/docs/operations/deployment.rst
+++ /dev/null
@@ -1,533 +0,0 @@
-Cluster Setup & Deployment
-==========================
-
-This section describes how to run DistributedLog in `distributed` mode.
-To run a cluster with DistributedLog, you need a Zookeeper cluster and a Bookkeeper cluster.
-
-Build
------
-
-To build DistributedLog, run:
-
-.. code-block:: bash
-
-   mvn clean install -DskipTests
-
-
-Or run `./scripts/snapshot` to build the release packages from the current source. The released
-packages contain the binaries for running `distributedlog-service`, `distributedlog-benchmark`
-and `distributedlog-tutorials`.
-
-NOTE: the following instructions are run from the distributedlog source directory after running
-`mvn clean install`, and assume `DL_HOME` is the directory of the distributedlog source.
-
-Zookeeper
----------
-
-(If you already have a zookeeper cluster running, you could skip this section.)
-
-We could use `dlog-daemon.sh` and `zookeeper.conf.template` to demonstrate running a 1-node
-zookeeper ensemble locally.
-
-Create a `zookeeper.conf` from the `zookeeper.conf.template`.
-
-.. code-block:: bash
-
-    $ cp distributedlog-service/conf/zookeeper.conf.template distributedlog-service/conf/zookeeper.conf
-
-Configure the settings in `zookeeper.conf`. By default, it will use `/tmp/data/zookeeper` for storing
-the zookeeper data. Let's create the data directories for zookeeper.
-
-.. code-block:: bash
-
-    $ mkdir -p /tmp/data/zookeeper/txlog
-
-Once the data directory is created, we need to assign `myid` for this zookeeper node.
-
-.. code-block:: bash
-
-    $ echo "1" > /tmp/data/zookeeper/myid
-
-Start the zookeeper daemon using `dlog-daemon.sh`.
-
-.. code-block:: bash
-
-    $ ./distributedlog-service/bin/dlog-daemon.sh start zookeeper ${DL_HOME}/distributedlog-service/conf/zookeeper.conf
-
-You could verify the zookeeper setup using `zkshell`.
-
-.. code-block:: bash
-
-    // ./distributedlog-service/bin/dlog zkshell ${zkservers}
-    $ ./distributedlog-service/bin/dlog zkshell localhost:2181
-    Connecting to localhost:2181
-    Welcome to ZooKeeper!
-    JLine support is enabled
-
-    WATCHER::
-
-    WatchedEvent state:SyncConnected type:None path:null
-    [zk: localhost:2181(CONNECTED) 0] ls /
-    [zookeeper]
-    [zk: localhost:2181(CONNECTED) 1]
-
-Please refer to :doc:`zookeeper` for more details on setting up a zookeeper cluster.
-
-Bookkeeper
-----------
-
-(If you already have a bookkeeper cluster running, you could skip this section.)
-
-We could use `dlog-daemon.sh` and `bookie.conf.template` to demonstrate running a 3-node
-bookkeeper cluster locally.
-
-Create a `bookie.conf` from the `bookie.conf.template`. Since we are going to run a 3-node
-bookkeeper cluster locally, let's make three copies of `bookie.conf.template`.
-
-.. code-block:: bash
-
-    $ cp distributedlog-service/conf/bookie.conf.template distributedlog-service/conf/bookie-1.conf
-    $ cp distributedlog-service/conf/bookie.conf.template distributedlog-service/conf/bookie-2.conf
-    $ cp distributedlog-service/conf/bookie.conf.template distributedlog-service/conf/bookie-3.conf
-
-Configure the settings in the bookie configuration files.
-
-First of all, choose the zookeeper cluster that the bookies will use and set `zkServers` in
-the configuration files.
-
-::
-    
-    zkServers=localhost:2181
-
-Choose the zookeeper path to store bookkeeper metadata and set `zkLedgersRootPath` in the configuration
-files. Let's use `/messaging/bookkeeper/ledgers` in this instruction.
-
-::
-
-    zkLedgersRootPath=/messaging/bookkeeper/ledgers
-
-
-Format bookkeeper metadata
-++++++++++++++++++++++++++
-
-(NOTE: only format bookkeeper metadata when setting up the bookkeeper cluster for the first time.)
-
-The bookkeeper shell doesn't automatically create the `zkLedgersRootPath` when running `metaformat`,
-so use `zkshell` to create the `zkLedgersRootPath` first.
-
-::
-
-    $ ./distributedlog-service/bin/dlog zkshell localhost:2181
-    Connecting to localhost:2181
-    Welcome to ZooKeeper!
-    JLine support is enabled
-
-    WATCHER::
-
-    WatchedEvent state:SyncConnected type:None path:null
-    [zk: localhost:2181(CONNECTED) 0] create /messaging ''
-    Created /messaging
-    [zk: localhost:2181(CONNECTED) 1] create /messaging/bookkeeper ''
-    Created /messaging/bookkeeper
-    [zk: localhost:2181(CONNECTED) 2] create /messaging/bookkeeper/ledgers ''
-    Created /messaging/bookkeeper/ledgers
-    [zk: localhost:2181(CONNECTED) 3]
-
-
-Once the `zkLedgersRootPath` exists, run `metaformat` to format the bookkeeper metadata.
-
-::
-    
-    $ BOOKIE_CONF=${DL_HOME}/distributedlog-service/conf/bookie-1.conf ./distributedlog-service/bin/dlog bkshell metaformat
-    Are you sure to format bookkeeper metadata ? (Y or N) Y
-
-Add Bookies
-+++++++++++
-
-Once the bookkeeper metadata is formatted, you are ready to add bookie nodes to the cluster.
-
-Configure Ports
-^^^^^^^^^^^^^^^
-
-Configure the ports used by the bookies.
-
-bookie-1:
-
-::
-   
-    # Port that the bookie server listens on
-    bookiePort=3181
-    # Exporting codahale stats
-    codahaleStatsHttpPort=9001
-
-bookie-2:
-
-::
-   
-    # Port that the bookie server listens on
-    bookiePort=3182
-    # Exporting codahale stats
-    codahaleStatsHttpPort=9002
-
-bookie-3:
-
-::
-   
-    # Port that the bookie server listens on
-    bookiePort=3183
-    # Exporting codahale stats
-    codahaleStatsHttpPort=9003
-
-Configure Disk Layout
-^^^^^^^^^^^^^^^^^^^^^
-
-Configure the disk directories used by a bookie server by setting the following options.
-
-::
-    
-    # Directory Bookkeeper outputs its write ahead log
-    journalDirectory=/tmp/data/bk/journal
-    # Directory Bookkeeper outputs ledger snapshots
-    ledgerDirectories=/tmp/data/bk/ledgers
-    # Directory in which index files will be stored.
-    indexDirectories=/tmp/data/bk/ledgers
-
-As we are configuring a 3-node bookkeeper cluster, we modify these settings as follows:
-
-bookie-1:
-
-::
-    
-    # Directory Bookkeeper outputs its write ahead log
-    journalDirectory=/tmp/data/bk-1/journal
-    # Directory Bookkeeper outputs ledger snapshots
-    ledgerDirectories=/tmp/data/bk-1/ledgers
-    # Directory in which index files will be stored.
-    indexDirectories=/tmp/data/bk-1/ledgers
-
-bookie-2:
-
-::
-    
-    # Directory Bookkeeper outputs its write ahead log
-    journalDirectory=/tmp/data/bk-2/journal
-    # Directory Bookkeeper outputs ledger snapshots
-    ledgerDirectories=/tmp/data/bk-2/ledgers
-    # Directory in which index files will be stored.
-    indexDirectories=/tmp/data/bk-2/ledgers
-
-bookie-3:
-
-::
-    
-    # Directory Bookkeeper outputs its write ahead log
-    journalDirectory=/tmp/data/bk-3/journal
-    # Directory Bookkeeper outputs ledger snapshots
-    ledgerDirectories=/tmp/data/bk-3/ledgers
-    # Directory in which index files will be stored.
-    indexDirectories=/tmp/data/bk-3/ledgers
-
-Format bookie
-^^^^^^^^^^^^^
-
-Once the disk directories are configured correctly in the configuration file, use
-`bkshell bookieformat` to format the bookie.
-
-::
-    
-    BOOKIE_CONF=${DL_HOME}/distributedlog-service/conf/bookie-1.conf ./distributedlog-service/bin/dlog bkshell bookieformat
-    BOOKIE_CONF=${DL_HOME}/distributedlog-service/conf/bookie-2.conf ./distributedlog-service/bin/dlog bkshell bookieformat
-    BOOKIE_CONF=${DL_HOME}/distributedlog-service/conf/bookie-3.conf ./distributedlog-service/bin/dlog bkshell bookieformat
-
-
-Start bookie
-^^^^^^^^^^^^
-
-Start the bookie using `dlog-daemon.sh`.
-
-::
-    
-    SERVICE_PORT=3181 ./distributedlog-service/bin/dlog-daemon.sh start bookie --conf ${DL_HOME}/distributedlog-service/conf/bookie-1.conf
-    SERVICE_PORT=3182 ./distributedlog-service/bin/dlog-daemon.sh start bookie --conf ${DL_HOME}/distributedlog-service/conf/bookie-2.conf
-    SERVICE_PORT=3183 ./distributedlog-service/bin/dlog-daemon.sh start bookie --conf ${DL_HOME}/distributedlog-service/conf/bookie-3.conf
-    
-Verify that the bookies are set up correctly. You could simply check whether each bookie shows up under
-the zookeeper `zkLedgersRootPath`/available znode.
-
-::
-    
-    $ ./distributedlog-service/bin/dlog zkshell localhost:2181
-    Connecting to localhost:2181
-    Welcome to ZooKeeper!
-    JLine support is enabled
-
-    WATCHER::
-
-    WatchedEvent state:SyncConnected type:None path:null
-    [zk: localhost:2181(CONNECTED) 0] ls /messaging/bookkeeper/ledgers/available
-    [127.0.0.1:3181, 127.0.0.1:3182, 127.0.0.1:3183, readonly]
-    [zk: localhost:2181(CONNECTED) 1]
-
-
-Or check whether the bookie is exposing stats on its `codahaleStatsHttpPort`.
-
-::
-    
-    // ping the service
-    $ curl localhost:9001/ping
-    pong
-    // checking the stats
-    $ curl localhost:9001/metrics?pretty=true
-
-Stop bookie
-^^^^^^^^^^^
-
-Stop the bookie using `dlog-daemon.sh`.
-
-::
-    
-    $ ./distributedlog-service/bin/dlog-daemon.sh stop bookie
-    // Example:
-    $ SERVICE_PORT=3181 ./distributedlog-service/bin/dlog-daemon.sh stop bookie
-    doing stop bookie ...
-    stopping bookie
-    Shutdown is in progress... Please wait...
-    Shutdown completed.
-
-Turn bookie to readonly
-^^^^^^^^^^^^^^^^^^^^^^^
-
-Start the bookie in `readonly` mode.
-
-::
-    
-    $ SERVICE_PORT=3181 ./distributedlog-service/bin/dlog-daemon.sh start bookie --conf ${DL_HOME}/distributedlog-service/conf/bookie-1.conf --readonly
-
-Verify if the bookie is running in `readonly` mode.
-
-::
-    
-    $ ./distributedlog-service/bin/dlog zkshell localhost:2181
-    Connecting to localhost:2181
-    Welcome to ZooKeeper!
-    JLine support is enabled
-
-    WATCHER::
-
-    WatchedEvent state:SyncConnected type:None path:null
-    [zk: localhost:2181(CONNECTED) 0] ls /messaging/bookkeeper/ledgers/available
-    [127.0.0.1:3182, 127.0.0.1:3183, readonly]
-    [zk: localhost:2181(CONNECTED) 1] ls /messaging/bookkeeper/ledgers/available/readonly
-    [127.0.0.1:3181]
-    [zk: localhost:2181(CONNECTED) 2]
-
-Please refer to :doc:`bookkeeper` for more details on setting up a bookkeeper cluster.
-
-Create Namespace
-----------------
-
-After setting up a zookeeper cluster and a bookkeeper cluster, you could provision DL namespaces
-for applications to use.
-
-Provisioning a DistributedLog namespace is accomplished via the `bind` command available in the `dlog` tool.
-
-A namespace is bound by writing bookkeeper environment settings (e.g. the ledger path, bkLedgersZkPath,
-or the set of Zookeeper servers used by bookkeeper, bkZkServers) as metadata in the zookeeper path of
-the namespace DL URI. The DL library resolves the DL URI to determine which bookkeeper cluster it
-should read from and write to.
-
-The namespace binding has following features:
-
-- `Inheritance`: suppose `distributedlog://<zkservers>/messaging/distributedlog` is bound to bookkeeper
-  cluster `X`. All the streams created under `distributedlog://<zkservers>/messaging/distributedlog`,
-  will write to bookkeeper cluster `X`.
-- `Override`: suppose `distributedlog://<zkservers>/messaging/distributedlog` is bound to bookkeeper
-  cluster `X`, but you want streams under `distributedlog://<zkservers>/messaging/distributedlog/S` to write
-  to bookkeeper cluster `Y`. You could just bind `distributedlog://<zkservers>/messaging/distributedlog/S`
-  to bookkeeper cluster `Y`. The binding to `distributedlog://<zkservers>/messaging/distributedlog/S`
-  only affects streams under `distributedlog://<zkservers>/messaging/distributedlog/S`.
-
-Create the namespace binding using the `dlog` tool. For example, we create a namespace
-`distributedlog://127.0.0.1:2181/messaging/distributedlog/mynamespace` pointing to the
-bookkeeper cluster we just created above.
-
-::
-    
-    $ distributedlog-service/bin/dlog admin bind \
-        -dlzr 127.0.0.1:2181 \
-        -dlzw 127.0.0.1:2181 \
-        -s 127.0.0.1:2181 \
-        -bkzr 127.0.0.1:2181 \
-        -l /messaging/bookkeeper/ledgers \
-        -i false \
-        -r true \
-        -c \
-        distributedlog://127.0.0.1:2181/messaging/distributedlog/mynamespace
-
-    No bookkeeper is bound to distributedlog://127.0.0.1:2181/messaging/distributedlog/mynamespace
-    Created binding on distributedlog://127.0.0.1:2181/messaging/distributedlog/mynamespace.
-
-
-- Configure the zookeeper cluster used for storing DistributedLog metadata: `-dlzr` and `-dlzw`.
-  Ideally `-dlzr` and `-dlzw` would be the same as the zookeeper server in the distributedlog namespace URI.
-  However, to scale zookeeper reads, zookeeper observers are sometimes added under a different
-  domain name than the participants. In such cases, configuring `-dlzr` and `-dlzw` with different
-  zookeeper domain names helps isolate zookeeper write and read traffic.
-- Configure the zookeeper cluster used by bookkeeper for storing its metadata: `-bkzr` and `-s`.
-  Similar to `-dlzr` and `-dlzw`, you could configure the namespace to use different zookeeper
-  domain names for readers and writers to access bookkeeper metadata.
-- Configure the bookkeeper ledgers path: `-l`.
-- Configure the zookeeper path to store DistributedLog metadata: it is implicitly included as part
-  of the namespace URI.
-
-Write Proxy
------------
-
-A write proxy cluster consists of multiple write proxies. They don't store any state locally, so they
-are mostly stateless and you can run as many of them as you need.
-
-Configuration
-+++++++++++++
-
-Unlike bookkeeper, DistributedLog tries not to put any environment-related settings
-in configuration files. Environment-related settings are stored and configured via the `namespace binding`.
-The configuration file should only contain settings that are independent of the environment.
-
-There is a `write_proxy.conf` template file available under the `distributedlog-service` module.
-
-Run write proxy
-+++++++++++++++
-
-A write proxy could be started using the `dlog-daemon.sh` script under `distributedlog-service`.
-
-::
-    
-    WP_SHARD_ID=${WP_SHARD_ID} WP_SERVICE_PORT=${WP_SERVICE_PORT} WP_STATS_PORT=${WP_STATS_PORT} ./distributedlog-service/bin/dlog-daemon.sh start writeproxy
-
-- `WP_SHARD_ID`: A non-negative integer. You don't need to guarantee uniqueness of shard id, as it is just an
-  indicator to the client for routing the requests. If you are running the `write proxy` using a cluster scheduler
-  like `aurora`, you could easily obtain a shard id and use that to configure `WP_SHARD_ID`.
-- `WP_SERVICE_PORT`: The port that the write proxy listens on.
-- `WP_STATS_PORT`: The port on which the write proxy exposes stats over an HTTP endpoint.
-
-Please check `distributedlog-service/conf/dlogenv.sh` for more environment variables for configuring the write proxy.
-
-- `WP_CONF_FILE`: The path to the write proxy configuration file.
-- `WP_NAMESPACE`: The distributedlog namespace that the write proxy is serving.
-
-For example, we start 3 write proxies locally and point them at the namespace created above.
-
-::
-    
-    $ WP_SHARD_ID=1 WP_SERVICE_PORT=4181 WP_STATS_PORT=20001 ./distributedlog-service/bin/dlog-daemon.sh start writeproxy
-    $ WP_SHARD_ID=2 WP_SERVICE_PORT=4182 WP_STATS_PORT=20002 ./distributedlog-service/bin/dlog-daemon.sh start writeproxy
-    $ WP_SHARD_ID=3 WP_SERVICE_PORT=4183 WP_STATS_PORT=20003 ./distributedlog-service/bin/dlog-daemon.sh start writeproxy
-
-The write proxy will announce itself to the zookeeper path `.write_proxy` under the dl namespace path.
-
-We could verify that the write proxy is running correctly by checking the zookeeper path or checking its stats port.
-
-::
-    
-    $ ./distributedlog-service/bin/dlog zkshell localhost:2181
-    Connecting to localhost:2181
-    Welcome to ZooKeeper!
-    JLine support is enabled
-
-    WATCHER::
-
-    WatchedEvent state:SyncConnected type:None path:null
-    [zk: localhost:2181(CONNECTED) 0] ls /messaging/distributedlog/mynamespace/.write_proxy
-    [member_0000000000, member_0000000001, member_0000000002]
-
-
-::
-    
-    $ curl localhost:20001/ping
-    pong
-
-
-Add and Remove Write Proxies
-++++++++++++++++++++++++++++
-
-Removing a write proxy is pretty straightforward: just kill the process.
-
-::
-    
-    WP_SHARD_ID=1 WP_SERVICE_PORT=4181 WP_STATS_PORT=10001 ./distributedlog-service/bin/dlog-daemon.sh stop writeproxy
-
-
-Adding a new write proxy is just adding a new host and starting the write proxy
-process as described above.
-
-Write Proxy Naming
-++++++++++++++++++
-
-The `dlog-daemon.sh` script starts the write proxy by announcing it to the `.write_proxy` path under
-the dl namespace. So you could use the namespace URI in the distributedlog client builder to access the write proxy cluster.
-
-Verify the setup
-++++++++++++++++
-
-You could verify the write proxy cluster by running the tutorials against the cluster you just set up.
-
-Create streams `stream-0` through `stream-10`.
-
-::
-    
-    $ ./distributedlog-service/bin/dlog tool create -u distributedlog://127.0.0.1:2181/messaging/distributedlog/mynamespace -r stream- -e 0-10
-    You are going to create streams : [stream-0, stream-1, stream-2, stream-3, stream-4, stream-5, stream-6, stream-7, stream-8, stream-9, stream-10] (Y or N) Y
-
-
-Tail read from these streams.
-
-::
-    
-    $ ./distributedlog-tutorials/distributedlog-basic/bin/runner run com.twitter.distributedlog.basic.MultiReader distributedlog://127.0.0.1:2181/messaging/distributedlog/mynamespace stream-0,stream-1,stream-2,stream-3,stream-4,stream-5,stream-6,stream-7,stream-8,stream-9,stream-10
-
-
-Run the record generator over some of the streams.
-
-::
-    
-    $ ./distributedlog-tutorials/distributedlog-basic/bin/runner run com.twitter.distributedlog.basic.RecordGenerator 'zk!127.0.0.1:2181!/messaging/distributedlog/mynamespace/.write_proxy' stream-0 100
-    $ ./distributedlog-tutorials/distributedlog-basic/bin/runner run com.twitter.distributedlog.basic.RecordGenerator 'zk!127.0.0.1:2181!/messaging/distributedlog/mynamespace/.write_proxy' stream-1 100
-
-
-Check the terminal running `MultiReader`. You will see output similar to the following:
-
-::
-    
-    """
-    Received record DLSN{logSegmentSequenceNo=1, entryId=21044, slotId=0} from stream stream-0
-    """
-    record-1464085079105
-    """
-    Received record DLSN{logSegmentSequenceNo=1, entryId=21046, slotId=0} from stream stream-0
-    """
-    record-1464085079113
-    """
-    Received record DLSN{logSegmentSequenceNo=1, entryId=9636, slotId=0} from stream stream-1
-    """
-    record-1464085079110
-    """
-    Received record DLSN{logSegmentSequenceNo=1, entryId=21048, slotId=0} from stream stream-0
-    """
-    record-1464085079125
-    """
-    Received record DLSN{logSegmentSequenceNo=1, entryId=9638, slotId=0} from stream stream-1
-    """
-    record-1464085079121
-    """
-    Received record DLSN{logSegmentSequenceNo=1, entryId=21050, slotId=0} from stream stream-0
-    """
-    record-1464085079133
-    """
-    Received record DLSN{logSegmentSequenceNo=1, entryId=9640, slotId=0} from stream stream-1
-    """
-    record-1464085079130
-    """
-
-
-
-Please refer to :doc:`performance` for more details on tuning performance.

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/operations/docker.rst
----------------------------------------------------------------------
diff --git a/docs/operations/docker.rst b/docs/operations/docker.rst
deleted file mode 100644
index c9bd921..0000000
--- a/docs/operations/docker.rst
+++ /dev/null
@@ -1,36 +0,0 @@
-Deploying DistributedLog using docker
-======================================
-
-Prerequisites:
---------------
-1. Docker
-
-Steps:
-------
-1. Create a snapshot using
-
-   ::
-
-       ./scripts/snapshot
-
-2. Create your own docker image using
-
-   ::
-
-       docker build -t <your image name> .
-
-3. You can run the docker container using
-
-   ::
-
-       docker run -e ZK_SERVERS=<zk server list> -e DEPLOY_BK=<true|false> -e DEPLOY_WP=<true|false> <your image name>
-
-Environment variables
-----------------------
-
-The following environment variables change how the docker container runs; an example invocation follows the list.
-
-1. ZK_SERVERS: ZK servers running externally (the container does not run a zookeeper)
-2. DEPLOY_BOTH: Deploys writeproxies as well as bookies
-3. DEPLOY_WP: Flag indicating that a writeproxy needs to be deployed
-4. DEPLOY_BK: Flag indicating that a bookie needs to be deployed
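-
-For example (the image tag and zookeeper hostnames below are hypothetical, and the comma-separated list
-format for ZK_SERVERS is an assumption), deploying both bookies and write proxies against an external
-zookeeper ensemble could look like:
-
-::
-
-    docker run -e ZK_SERVERS=zk1:2181,zk2:2181,zk3:2181 -e DEPLOY_BK=true -e DEPLOY_WP=true distributedlog:latest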

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/operations/hardware.rst
----------------------------------------------------------------------
diff --git a/docs/operations/hardware.rst b/docs/operations/hardware.rst
deleted file mode 100644
index b36b1c8..0000000
--- a/docs/operations/hardware.rst
+++ /dev/null
@@ -1,120 +0,0 @@
-Hardware
-========
-
-Figure 1 describes the data flow of DistributedLog. Write traffic comes to the `Write Proxy`
-and the data is replicated `RF` (replication factor) ways to `BookKeeper`. BookKeeper
-stores the replicated data and keeps it for a given retention period. The data is
-read by the `Read Proxy` and fanned out to readers.
-
-In such a layered architecture, each layer has its own responsibilities and resource
-requirements. This makes the capacity and cost model much clearer, and users can scale
-the different layers independently.
-
-.. figure:: ../images/costmodel.png
-   :align: center
-
-   Figure 1. DistributedLog Cost Model
-
-Metrics
-~~~~~~~
-
-There are different metrics for measuring the capability of a service instance in each layer
-(e.g. a `write proxy` node, a `bookie` storage node, or a `read proxy` node). These metrics
-include `rps` (requests per second), `bps` (bits per second), the `number of streams` that an instance
-can support, and latency requirements. `bps` is the best and simplest factor for measuring the
-capability of the current distributedlog architecture.
-
-Write Proxy
-~~~~~~~~~~~
-
-Write Proxy (WP) is a stateless serving service that writes and replicates fan-in traffic into BookKeeper.
-The capability of a write proxy instance is purely dominated by the *OUTBOUND* network bandwidth,
-which is reflected as incoming `Write Throughput` and `Replication Factor`.
-
-Calculating the capacity of the Write Proxy layer (the number of write proxy instances) is pretty straightforward.
-The formula is listed below.
-
-::
-
-    Number of Write Proxies = (Write Throughput) * (Replication Factor) / (Write Proxy Outbound Bandwidth)
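-
-As a hypothetical illustration (all numbers below are assumptions, not measurements): with 1000MB/s of
-incoming write throughput, a replication factor of 3, and write proxies that can each sustain roughly
-350MB/s of outbound traffic, the formula gives the following.
-
-::
-
-    Number of Write Proxies = (1000 MB/s) * 3 / (350 MB/s per proxy) = 8.6 -> provision at least 9 (plus headroom)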
-
-As it is bandwidth bound, we'd recommend using machines that have high network bandwidth (e.g. a 10Gb NIC).
-
-The cost estimation is also straightforward.
-
-::
-
-    Bandwidth TCO ($/day/MB) = (Write Proxy TCO) / (Write Proxy Outbound Bandwidth)
-    Cost of write proxies = (Write Throughput) * (Replication Factor) * (Bandwidth TCO)
-
-CPUs
-^^^^
-
-DistributedLog is not CPU bound. You can run an instance with 8 or 12 cores just fine.
-
-Memories
-^^^^^^^^
-
-There's a fair bit of caching. Consider running with at least 8GB of memory.
-
-Disks
-^^^^^
-
-This is a stateless process; disk performance is not relevant.
-
-Network
-^^^^^^^
-
-Depending on your throughput, you might be better off running this with a 10Gb NIC. In this scenario, you can easily achieve 350MBps of writes.
-
-
-BookKeeper
-~~~~~~~~~~
-
-BookKeeper is the log segment store, which is a stateful service. There are two factors measuring the
-capability of a bookie instance: `bandwidth` and `storage`. The bandwidth is mainly dominated by the
-outbound traffic from the write proxies, which is `(Write Throughput) * (Replication Factor)`. The storage is
-mainly dominated by the traffic and the `Retention Period`.
-
-Calculating the capacity of BookKeeper (the number of bookie instances) is a bit more complicated than for the
-Write Proxy. The total number of instances is the maximum of the bookie counts calculated from
-`bandwidth` and from `storage`.
-
-::
-
-    Number of bookies based on bandwidth = (Write Throughput) * (Replication Factor) / (Bookie Inbound Bandwidth)
-    Number of bookies based on storage = (Write Throughput) * (Replication Factor) * (Retention Period) / (Bookie disk space)
-    Number of bookies = maximum((number of bookies based on bandwidth), (number of bookies based on storage))
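-
-As a hypothetical illustration (all numbers below are assumptions, not measurements): with 100MB/s of
-write throughput, a replication factor of 3, bookies with 100MB/s of usable inbound bandwidth, a 3-day
-retention period, and 24TB of usable ledger space per bookie, the formulas give the following.
-
-::
-
-    Number of bookies based on bandwidth = (100 MB/s) * 3 / (100 MB/s) = 3
-    Number of bookies based on storage   = (100 MB/s * 86400 s/day) * 3 * (3 days) / (24 TB) ~= 77.8 TB / 24 TB -> 4
-    Number of bookies = maximum(3, 4) = 4 (plus headroom for failures and maintenance)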
-
-We should consider both bandwidth and storage when choosing the hardware for bookies. There are several rules to follow:
-
-- A bookie should have multiple disks.
-- The aggregate I/O bandwidth of the disks used as journal disks should be similar to the bookie's *INBOUND* network bandwidth. For example, if you plan to use a journal disk whose I/O bandwidth is around 100MBps, a 1Gb NIC is a better choice than a 10Gb NIC.
-- The number of disks used as ledger disks should be large enough to hold the data if the retention period is typically long.
-
-The cost estimation is straightforward based on the number of bookies estimated above.
-
-::
-
-    Cost of bookies = (Number of bookies) * (Bookie TCO)
-
-Read Proxy
-~~~~~~~~~~
-
-Similar to the Write Proxy, the Read Proxy is also dominated by *OUTBOUND* bandwidth, which is reflected as incoming `Write Throughput` and `Fanout Factor`.
-
-Calculating the capacity of the Read Proxy layer (the number of read proxy instances) is also pretty straightforward.
-The formula is listed below.
-
-::
-
-    Number of Read Proxies = (Write Throughput) * (Fanout Factor) / (Read Proxy Outbound Bandwidth)
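-
-As a hypothetical illustration (all numbers below are assumptions, not measurements): with 100MB/s of
-write throughput, a fanout factor of 5, and read proxies that can each sustain roughly 350MB/s of
-outbound traffic, the formula gives the following.
-
-::
-
-    Number of Read Proxies = (100 MB/s) * 5 / (350 MB/s per proxy) = 1.4 -> provision at least 2 (plus headroom)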
-
-As it is bandwidth bound, we'd recommend using machines that have high network bandwidth (e.g., a 10Gb NIC).
-
-The cost estimation is also straightforward.
-
-::
-
-    Bandwidth TCO ($/day/MBps) = (Read Proxy TCO) / (Read Proxy Outbound Bandwidth)
-    Cost of read proxies = (Write Throughput) * (Fanout Factor) * (Bandwidth TCO)
-

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/operations/main.rst
----------------------------------------------------------------------
diff --git a/docs/operations/main.rst b/docs/operations/main.rst
deleted file mode 100644
index 6eb2a96..0000000
--- a/docs/operations/main.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-Deployment & Administration
-===========================
-
-.. toctree::
-   :maxdepth: 1
-
-   deployment
-   operations
-   performance
-   hardware
-   monitoring
-   zookeeper
-   bookkeeper

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/operations/monitoring.rst
----------------------------------------------------------------------
diff --git a/docs/operations/monitoring.rst b/docs/operations/monitoring.rst
deleted file mode 100644
index d01caf9..0000000
--- a/docs/operations/monitoring.rst
+++ /dev/null
@@ -1,378 +0,0 @@
-Monitoring
-==========
-
-DistributedLog uses the stats library provided by Apache BookKeeper for reporting metrics in
-both the server and the client. It can be configured to report stats through a pluggable stats
-provider that integrates with your monitoring system.
-
-Stats Provider
-~~~~~~~~~~~~~~
-
-A `StatsProvider` supplies stats loggers for different scopes.
-The provider is also responsible for reporting the metrics it manages.
-
-::
-
-    // Create the stats provider
-    StatsProvider statsProvider = ...;
-    // Start the stats provider
-    statsProvider.start(conf);
-    // Stop the stats provider
-    statsProvider.stop();
-
-Stats Logger
-____________
-
-A scoped `StatsLogger` records three kinds of statistics
-under a given `scope`.
-
-A `StatsLogger` can be created either by obtaining it from a stats provider with
-a scope name:
-
-::
-
-    StatsProvider statsProvider = ...;
-    StatsLogger statsLogger = statsProvider.scope("test-scope");
-
-Or by obtaining it from an existing stats logger with a sub-scope name:
-
-::
-
-    StatsLogger rootStatsLogger = ...;
-    StatsLogger subStatsLogger = rootStatsLogger.scope("sub-scope");
-
-All the metrics in a stats provider are managed in a hierarchy of scopes.
-
-::
-
-    // all stats recorded by `rootStatsLogger` are under 'root'
-    StatsLogger rootStatsLogger = statsProvider.scope("root");
-    // all stats recorded by `subStatsLogger1` are under 'root/scope1'
-    StatsLogger subStatsLogger1 = rootStatsLogger.scope("scope1");
-    // all stats recorded by `subStatsLogger2` are under 'root/scope2'
-    StatsLogger subStatsLogger2 = rootStatsLogger.scope("scope2");
-
-Counters
-++++++++
-
-A `Counter` is a cumulative metric that represents a single numerical value. A **counter**
-is typically used to count requests served, tasks completed, errors occurred, etc. Counters
-should not be used to expose current counts of items whose number can also go down, e.g.
-the number of currently running tasks. Use `Gauges` for this use case.
-
-To change a counter, use:
-
-::
-    
-    StatsLogger statsLogger = ...;
-    Counter births = statsLogger.getCounter("births");
-    // increment the counter
-    births.inc();
-    // decrement the counter
-    births.dec();
-    // change the counter by delta
-    births.add(-10);
-    // reset the counter
-    births.reset();
-
-Gauges
-++++++
-
-A `Gauge` is a metric that represents a single numerical value that can arbitrarily go up and down.
-
-Gauges are typically used for measured values like temperatures or current memory usage, but also
-"counts" that can go up and down, like the number of running tasks.
-
-To define a gauge, stick the following code somewhere in the initialization:
-
-::
-
-    final AtomicLong numPendingRequests = new AtomicLong(0L);
-    StatsLogger statsLogger = ...;
-    statsLogger.registerGauge(
-        "num_pending_requests",
-        new Gauge<Number>() {
-            @Override
-            public Number getDefaultValue() {
-                return 0;
-            }
-            @Override
-            public Number getSample() {
-                return numPendingRequests.get();
-            }
-        });
-
-The gauge must always return a numerical value when sampling.
-
-Metrics (OpStats)
-+++++++++++++++++
-
-An `OpStats` is a set of metrics that represents the statistics of an `operation`. Those metrics
-include the `success` or `failure` of the operations and their latency distribution (also known as a `Histogram`).
-It is usually used for timing.
-
-::
-
-    StatsLogger statsLogger = ...;
-    OpStatsLogger writeStats = statsLogger.getOpStatsLogger("writes");
-    long writeLatency = ...;
-
-    // register success op
-    writeStats.registerSuccessfulEvent(writeLatency);
-
-    // register failure op
-    writeStats.registerFailedEvent(writeLatency);
-
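-For instance, a write path can be timed as below. This is a minimal sketch: `doWrite()` is a
-hypothetical operation and the millisecond unit is an assumption; use whatever unit your stats
-backend expects.
-
-::
-
-    OpStatsLogger writeStats = statsLogger.getOpStatsLogger("writes");
-    long startTimeMs = System.currentTimeMillis();
-    try {
-        doWrite(); // hypothetical operation being measured
-        // record the latency of a successful write
-        writeStats.registerSuccessfulEvent(System.currentTimeMillis() - startTimeMs);
-    } catch (IOException ioe) {
-        // record the latency of a failed write
-        writeStats.registerFailedEvent(System.currentTimeMillis() - startTimeMs);
-        throw ioe;
-    }
-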
-Available Stats Providers
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-All the available stats providers are listed below:
-
-* Twitter Science Stats (deprecated)
-* Twitter Ostrich Stats (deprecated)
-* Twitter Finagle Stats
-* Codahale Stats
-
-Twitter Science Stats
-_____________________
-
-Use the following dependency to enable the Twitter Science stats provider.
-
-::
-
-   <dependency>
-     <groupId>org.apache.bookkeeper.stats</groupId>
-     <artifactId>twitter-science-provider</artifactId>
-     <version>${bookkeeper.version}</version>
-   </dependency>
-
-Construct the stats provider for clients.
-
-::
-
-    StatsProvider statsProvider = new TwitterStatsProvider();
-    DistributedLogConfiguration conf = ...;
-
-    // starts the stats provider (optional)
-    statsProvider.start(conf);
-
-    // all the dl related stats are exposed under "dlog"
-    StatsLogger statsLogger = statsProvider.getStatsLogger("dlog");
-    DistributedLogNamespace namespace = DistributedLogNamespaceBuilder.newBuilder()
-        .uri(...)
-        .conf(conf)
-        .statsLogger(statsLogger)
-        .build();
-
-    ...
-
-    // stop the stats provider (optional)
-    statsProvider.stop();
-
-
-Expose the stats collected by the stats provider by configuring the following settings:
-
-::
-
-    // enable exporting the stats
-    statsExport=true
-    // exporting the stats at port 8080
-    statsHttpPort=8080
-
-
-If exporting stats is enabled, all the stats are exported through the HTTP endpoint.
-You can curl the HTTP endpoint to check the stats.
-
-::
-
-    curl -s <host>:8080/vars
-
-
-Check ScienceStats_ for more details.
-
-.. _ScienceStats: https://github.com/twitter/commons/tree/master/src/java/com/twitter/common/stats
-
-Twitter Ostrich Stats
-_____________________
-
-Use the following dependency to enable the Twitter Ostrich stats provider.
-
-::
-
-   <dependency>
-     <groupId>org.apache.bookkeeper.stats</groupId>
-     <artifactId>twitter-ostrich-provider</artifactId>
-     <version>${bookkeeper.version}</version>
-   </dependency>
-
-Construct the stats provider for clients.
-
-::
-
-    StatsProvider statsProvider = new TwitterOstrichProvider();
-    DistributedLogConfiguration conf = ...;
-
-    // starts the stats provider (optional)
-    statsProvider.start(conf);
-
-    // all the dl related stats are exposed under "dlog"
-    StatsLogger statsLogger = statsProvider.getStatsLogger("dlog");
-    DistributedLogNamespace namespace = DistributedLogNamespaceBuilder.newBuilder()
-        .uri(...)
-        .conf(conf)
-        .statsLogger(statsLogger)
-        .build();
-
-    ...
-
-    // stop the stats provider (optional)
-    statsProvider.stop();
-
-
-Expose the stats collected by the stats provider by configuring the following settings:
-
-::
-
-    // enable exporting the stats
-    statsExport=true
-    // exporting the stats at port 8080
-    statsHttpPort=8080
-
-
-If exporting stats is enabled, all the stats are exported through the HTTP endpoint.
-You can curl the HTTP endpoint to check the stats.
-
-::
-
-    curl -s <host>:8080/stats.txt
-
-
-Check Ostrich_ for more details.
-
-.. _Ostrich: https://github.com/twitter/ostrich
-
-Twitter Finagle Metrics
-_______________________
-
-Use the following dependency to bridge the Finagle stats receiver to BookKeeper's stats provider.
-All the stats exposed by the stats provider will be collected by the Finagle stats receiver and exposed
-by Twitter's admin service.
-
-::
-
-   <dependency>
-     <groupId>org.apache.bookkeeper.stats</groupId>
-     <artifactId>twitter-finagle-provider</artifactId>
-     <version>${bookkeeper.version}</version>
-   </dependency>
-
-Construct the stats provider for clients.
-
-::
-
-    StatsReceiver statsReceiver = ...; // finagle stats receiver
-    StatsProvider statsProvider = new FinagleStatsProvider(statsReceiver);
-    DistributedLogConfiguration conf = ...;
-
-    // the stats provider does nothing on start.
-    statsProvider.start(conf);
-
-    // all the dl related stats are exposed under "dlog"
-    StatsLogger statsLogger = statsProvider.getStatsLogger("dlog");
-    DistributedLogNamespace namespace = DistributedLogNamespaceBuilder.newBuilder()
-        .uri(...)
-        .conf(conf)
-        .statsLogger(statsLogger)
-        .build();
-
-    ...
-
-    // the stats provider does nothing on stop.
-    statsProvider.stop();
-
-
-Check the `finagle metrics library`__ for more details on how to expose the stats.
-
-.. _TwitterServer: https://twitter.github.io/twitter-server/Migration.html
-
-__ TwitterServer_
-
-Codahale Metrics
-________________
-
-Use the following dependency to enable the Codahale metrics provider.
-
-::
-
-   <dependency>
-     <groupId>org.apache.bookkeeper.stats</groupId>
-     <artifactId>codahale-metrics-provider</artifactId>
-     <version>${bookkeeper.version}</version>
-   </dependency>
-
-Construct the stats provider for clients.
-
-::
-
-    StatsProvider statsProvider = new CodahaleMetricsProvider();
-    DistributedLogConfiguration conf = ...;
-
-    // starts the stats provider (optional)
-    statsProvider.start(conf);
-
-    // all the dl related stats are exposed under "dlog"
-    StatsLogger statsLogger = statsProvider.getStatsLogger("dlog");
-    DistributedLogNamespace namespace = DistributedLogNamespaceBuilder.newBuilder()
-        .uri(...)
-        .conf(conf)
-        .statsLogger(statsLogger)
-        .build();
-
-    ...
-
-    // stop the stats provider (optional)
-    statsProvider.stop();
-
-
-Expose the stats collected by the stats provider in different ways by configuring the following settings.
-Check Codahale_ for how to configure report endpoints.
-
-::
-
-    // How frequently to report the stats
-    codahaleStatsOutputFrequencySeconds=...
-    // The prefix string of codahale stats
-    codahaleStatsPrefix=...
-
-    //
-    // Report Endpoints
-    //
-
-    // expose the stats to Graphite
-    codahaleStatsGraphiteEndpoint=...
-    // expose the stats to CSV files
-    codahaleStatsCSVEndpoint=...
-    // expose the stats to Slf4j logging
-    codahaleStatsSlf4jEndpoint=...
-    // expose the stats to JMX endpoint
-    codahaleStatsJmxEndpoint=...
-
-
-Check Codahale_ for more details.
-
-.. _Codahale: https://dropwizard.github.io/metrics/3.1.0/
-
-Enable Stats Provider on Bookie Servers
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The stats provider used by *Bookie Servers* is configured by setting the following option.
-
-::
-
-    // class of stats provider
-    statsProviderClass="org.apache.bookkeeper.stats.CodahaleMetricsProvider"
-
-Metrics
-~~~~~~~
-
-Check the :doc:`../references/metrics` reference page for the metrics exposed by DistributedLog.

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/operations/operations.rst
----------------------------------------------------------------------
diff --git a/docs/operations/operations.rst b/docs/operations/operations.rst
deleted file mode 100644
index 6a8061e..0000000
--- a/docs/operations/operations.rst
+++ /dev/null
@@ -1,204 +0,0 @@
-DistributedLog Operations
-=========================
-
-Feature Provider
-~~~~~~~~~~~~~~~~
-
-DistributedLog uses a `feature-provider` library provided by Apache BookKeeper for managing features
-dynamically at runtime. It is a feature-flag_ system used to proportionally control what features
-are enabled for the system. In other words, it is a way of altering a system's behavior without
-restarting it. It can be used during all stages of development; its most visible use case is in
-production. For instance, during a production release, you can enable or disable individual features and
-control the data flow through the system, thereby minimizing the risk of system failure in real time.
-
-.. _feature-flag: https://en.wikipedia.org/wiki/Feature_toggle
-
-This `feature-provider` interface is pluggable and easy to integrate with any configuration management
-system.
-
-API
-___
-
-`FeatureProvider` is a provider that manages features under different scopes. The provider is responsible
-for loading features dynamically at runtime. A `Feature` is a numeric flag that controls what percentage
-of this feature is available to the system - the number is called `availability`.
-
-::
-
-    Feature.name() => returns the name of this feature
-    Feature.availability() => returns the availability of this feature
-    Feature.isAvailable() => returns true if its availability is larger than 0; otherwise false
-
-
-It is easy to obtain a feature from the provider by just providing a feature name.
-
-::
-
-    FeatureProvider provider = ...;
-    Feature feature = provider.getFeature("feature1"); // returns the feature named 'feature1'
-
-    
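-Applications typically use the availability value to proportionally gate a code path. The snippet
-below is a minimal sketch, assuming the availability is expressed as a percentage in the range
-[0, 100]; the feature name and code paths are placeholders.
-
-::
-
-    Feature feature = provider.getFeature("feature1");
-    // enable the new code path for roughly `availability` percent of the calls
-    if (ThreadLocalRandom.current().nextInt(100) < feature.availability()) {
-        // feature-enabled code path
-    } else {
-        // fallback code path
-    }
-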
-The `FeatureProvider` can be scoped to allow creating features in a hierarchical way. For example, if a system
-is composed of two subsystems, *cache* and *storage*, the features belonging to
-different subsystems can be created under different scopes.
-
-::
-
-    FeatureProvider provider = ...;
-    FeatureProvider cacheFeatureProvider = provider.scope("cache");
-    FeatureProvider storageFeatureProvider = provider.scope("storage");
-    Feature writeThroughFeature = cacheFeatureProvider.getFeature("write_through");
-    Feature duralWriteFeature = storageFeatureProvider.getFeature("dural_write");
-
-    // so the available features under `provider` are: (assume scopes are separated by '.')
-    // - 'cache.write_through'
-    // - 'storage.dural_write'
-
-
-The feature provider can be passed to `DistributedLogNamespaceBuilder` when building the namespace,
-so that it is used to control the features exposed under the `DistributedLogNamespace`.
-
-::
-
-    FeatureProvider rootProvider = ...;
-    FeatureProvider dlFeatureProvider = rootProvider.scope("dlog");
-    DistributedLogNamespace namespace = DistributedLogNamespaceBuilder.newBuilder()
-        .uri(uri)
-        .conf(conf)
-        .featureProvider(dlFeatureProvider)
-        .build();
-
-
-The feature provider is loaded by reflection on the distributedlog write proxy server. You can specify
-the feature provider class name as below. Otherwise `DefaultFeatureProvider` is used, which disables
-all the features by default.
-
-::
-
-    featureProviderClass=com.twitter.distributedlog.feature.DynamicConfigurationFeatureProvider
-
-
-
-Configuration Based Feature Provider
-____________________________________
-
-Besides `DefaultFeatureProvider`, distributedlog also provides a file-based feature provider that loads
-the features from properties files.
-
-All the features and their availabilities are configured in properties file format. For example,
-
-::
-
-    cache.write_through=100
-    storage.dural_write=0
-
-
-You can configure `featureProviderClass` in the distributedlog configuration file by setting it to
-`com.twitter.distributedlog.feature.DynamicConfigurationFeatureProvider` to enable the file-based feature
-provider. The feature provider loads the features from two files: a base config file configured
-by `fileFeatureProviderBaseConfigPath`, and an overlay config file configured by
-`fileFeatureProviderOverlayConfigPath`. The current implementation doesn't differentiate these two files
-much, other than that the `overlay` config overrides the settings in the `base` config. It is recommended
-to keep a base config file storing the default availability values for your system and to dynamically
-adjust the availability values in the overlay config file.
-
-::
-
-    featureProviderClass=com.twitter.distributedlog.feature.DynamicConfigurationFeatureProvider
-    fileFeatureProviderBaseConfigPath=/path/to/base/config
-    fileFeatureProviderOverlayConfigPath=/path/to/overlay/config
-    // how frequent we reload the config files
-    dynamicConfigReloadIntervalSec=60
-
-
-Available Features
-__________________
-
-Check the :doc:`../references/features` reference page for the features exposed by DistributedLog.
-
-`dlog`
-~~~~~~
-
-A CLI is provided for inspecting DistributedLog streams and metadata.
-
-.. code:: bash
-
-   dlog
-   JMX enabled by default
-   Usage: dlog <command>
-   where command is one of:
-       local               Run distributedlog sandbox
-       example             Run distributedlog example
-       tool                Run distributedlog tool
-       proxy_tool          Run distributedlog proxy tool to interact with proxies
-       balancer            Run distributedlog balancer
-       admin               Run distributedlog admin tool
-       help                This help message
-
-   or command is the full name of a class with a defined main() method.
-
-   Environment variables:
-       DLOG_LOG_CONF        Log4j configuration file (default $HOME/src/distributedlog/distributedlog-service/conf/log4j.properties)
-       DLOG_EXTRA_OPTS      Extra options to be passed to the jvm
-       DLOG_EXTRA_CLASSPATH Add extra paths to the dlog classpath
-
-These variables can also be set in conf/dlogenv.sh
-
-Create a stream
-_______________
-
-To create a stream:
-
-.. code:: bash
-
-   dlog tool create -u <DL URI> -r <STREAM PREFIX> -e <STREAM EXPRESSION>
-
-
-List the streams
-________________
-
-To list all the streams under a given DistributedLog namespace:
-
-.. code:: bash
-
-   dlog tool list -u <DL URI>
-
-Show stream's information
-_________________________
-
-To view the metadata associated with a stream:
-
-.. code:: bash
-
-   dlog tool show -u <DL URI> -s <STREAM NAME>
-
-
-Dump a stream
-_____________
-
-To dump the items inside a stream:
-
-.. code:: bash
-
-   dlog tool dump -u <DL URI> -s <STREAM NAME> -o <START TXN ID> -l <NUM RECORDS>
-
-Delete a stream
-_______________
-
-To delete a stream, run:
-
-.. code:: bash
-
-   dlog tool delete -u <DL URI> -s <STREAM NAME>
-
-
-Truncate a stream
-_________________
-
-Truncate the streams under a given DistributedLog namespace. You could specify a filter to match the streams that you want to truncate.
-
-There is a difference between the ``truncate`` and ``delete`` commands. When you issue a ``truncate``, the data is purged without removing the streams. A ``delete`` deletes the stream itself. You can pass the flag ``-delete`` to the ``truncate`` command to also delete the streams.
-
-.. code:: bash
-
-   dlog tool truncate -u <DL URI>

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/operations/performance.rst
----------------------------------------------------------------------
diff --git a/docs/operations/performance.rst b/docs/operations/performance.rst
deleted file mode 100644
index caac8ad..0000000
--- a/docs/operations/performance.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Performance Tuning
-==================
-
-(describe how to tune performance, critical settings)

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/operations/vagrant.rst
----------------------------------------------------------------------
diff --git a/docs/operations/vagrant.rst b/docs/operations/vagrant.rst
deleted file mode 100644
index 70e6fb0..0000000
--- a/docs/operations/vagrant.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-Sample Deployment using vagrant
-================================
-
-This file explains vagrant deployment.
-
-Prerequisites
---------------
-1. Vagrant: From https://www.vagrantup.com/downloads.html
-2. vagrant-hostmanager plugin: From https://github.com/devopsgroup-io/vagrant-hostmanager
-
-Steps
------
-1. Create a snapshot using ./scripts/snapshot
-2. Run vagrant up from the root directory of the enlistment
-3. Vagrant brings up the zookeepers with machine names zk1, zk2, ... and IP addresses 192.168.50.11, 192.168.50.12, ....
-4. Vagrant brings up the bookies with machine names node1, node2, ... and IP addresses 192.168.50.51, 192.168.50.52, ....
-5. The script will also start writeproxies at distributedlog://$PUBLIC_ZOOKEEPER_ADDRESSES/messaging/distributedlog/mynamespace as the namespace. If you want it to point to a different namespace/port/shard, please update the ./vagrant/bk.sh script. 
-6. If you want to run the client on the host machine, please add these node names and their IP addresses to the /etc/hosts file on the host.

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/operations/zookeeper.rst
----------------------------------------------------------------------
diff --git a/docs/operations/zookeeper.rst b/docs/operations/zookeeper.rst
deleted file mode 100644
index a0d65a5..0000000
--- a/docs/operations/zookeeper.rst
+++ /dev/null
@@ -1,88 +0,0 @@
-ZooKeeper
-=========
-
-To run a DistributedLog ensemble, you'll need a set of ZooKeeper
-nodes. There are no constraints on the number of ZooKeeper nodes you
-need. One node is enough to run your cluster, but for reliability
-purposes, you should run at least 3 nodes.
-
-Version
--------
-
-DistributedLog leverages ZooKeeper `multi` operations for metadata updates,
-so the minimum version of ZooKeeper is 3.4.*. We recommend running the stable
-ZooKeeper version `3.4.8`.
-
-Run ZooKeeper from distributedlog source
-----------------------------------------
-
-Since `zookeeper` is one of the dependencies of `distributedlog-service`, you can simply
-run `zookeeper` servers using the same set of scripts provided in `distributedlog-service`.
-The following sections describe how to run zookeeper using the scripts provided
-in `distributedlog-service`.
-
-Build
-+++++
-
-First of all, build DistributedLog:
-
-.. code-block:: bash
-
-    $ mvn clean install -DskipTests
-
-Configuration
-+++++++++++++
-
-The configuration file `zookeeper.conf.template` under `distributedlog-service/conf` is a template of
-production configuration to run a zookeeper node. Most of the configuration settings are good for
-production usage. You might need to configure following settings according to your environment and
-hardware platform.
-
-Ensemble
-^^^^^^^^
-
-You need to configure the zookeeper servers that form this ensemble as below:
-
-::
-    
-    server.1=127.0.0.1:2710:3710:participant;0.0.0.0:2181
-
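-For a cluster of 3 zookeeper nodes, the ensemble section would look similar to the following
-(the hostnames here are placeholders for your own machines):
-
-::
-
-    server.1=zk1.example.com:2710:3710:participant;0.0.0.0:2181
-    server.2=zk2.example.com:2710:3710:participant;0.0.0.0:2181
-    server.3=zk3.example.com:2710:3710:participant;0.0.0.0:2181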
-
-Please check zookeeper_ website for more configurations.
-
-Disks
-^^^^^
-
-You need to configure the following settings according to the disk layout of your hardware.
-It is recommended to put `dataLogDir` on a separate disk from the others for performance.
-
-::
-    
-    # the directory where the snapshot is stored.
-    dataDir=/tmp/data/zookeeper
-    
-    # where the transaction logs are written
-    dataLogDir=/tmp/data/zookeeper/txlog
-
-
-Run
-+++
-
-As `zookeeper` is shipped as part of `distributedlog-service`, you can use the `dlog-daemon.sh`
-script to start `zookeeper` as a daemon.
-
-Start the zookeeper:
-
-.. code-block:: bash
-
-    $ ./distributedlog-service/bin/dlog-daemon.sh start zookeeper /path/to/zookeeper.conf
-
-Stop the zookeeper:
-
-.. code-block:: bash
-
-    $ ./distributedlog-service/bin/dlog-daemon.sh stop zookeeper
-
-Please check zookeeper_ website for more details.
-
-.. _zookeeper: http://zookeeper.apache.org/

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/performance/main.rst
----------------------------------------------------------------------
diff --git a/docs/performance/main.rst b/docs/performance/main.rst
index 59820c2..6dd70c7 100644
--- a/docs/performance/main.rst
+++ b/docs/performance/main.rst
@@ -1,3 +1,7 @@
+---
+layout: default
+---
+
 Performance
 ===========
 

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/references/configuration.rst
----------------------------------------------------------------------
diff --git a/docs/references/configuration.rst b/docs/references/configuration.rst
deleted file mode 100644
index 53f684d..0000000
--- a/docs/references/configuration.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-Configuration Settings
-======================

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/references/features.rst
----------------------------------------------------------------------
diff --git a/docs/references/features.rst b/docs/references/features.rst
deleted file mode 100644
index 45c92d8..0000000
--- a/docs/references/features.rst
+++ /dev/null
@@ -1,30 +0,0 @@
-Features
-========
-
-BookKeeper Features
--------------------
-
-*<scope>* is the scope value of the FeatureProvider passed to the BookKeeperClient builder. In the DistributedLog write proxy, the *<scope>* is 'bkc'.
-
-- *<scope>.repp_disable_durability_enforcement*: Feature to disable durability enforcement in the region-aware data placement policy. It is a feature that applies to the global replicated log only. If the availability value is larger than zero, the region-aware data placement policy will *NOT* enforce region-wise durability. For example, say a *Log* is writing to regions A, B, C with write quorum size *15* and ack quorum size *9*. If the availability value of this feature is zero, it requires *9*
-  acknowledgements from bookies in at least two regions. If the availability value of this feature is larger than zero, the enforcement is *disabled* and a write can be acknowledged after receiving *9* acknowledgements from any regions. By default the availability is zero. Turn this feature on to tolerate multiple region failures.
-
-- *<scope>.disable_ensemble_change*: Feature to disable ensemble changes on DistributedLog writers. If the availability value of this feature is larger than zero, ensemble changes are disabled on writers. It can be used to tolerate a zookeeper outage.
-
-- *<scope>.<region>.disallow_bookie_placement*: Feature to disallow choosing a bookie replacement from a given *region* during ensemble changes. It is a feature that applies to the global replicated log. If the availability value is larger than zero, the writer (write proxy) will stop choosing bookies from *<region>* during ensemble changes. It is useful for blacking out a region dynamically.
-
-DistributedLog Features
------------------------
-
-*<scope>* is the scope value of the FeatureProvider passed to the DistributedLogNamespace builder. In the DistributedLog write proxy, the *<scope>* is 'dl'.
-
-- *<scope>.disable_logsegment_rolling*: Feature to disable log segment rolling. If the availability value is larger than zero, the writer (write proxy) will stop rolling to new log segments and keep writing to the current log segments. It is a useful feature for tolerating a zookeeper outage.
-
-- *<scope>.disable_write_limit*: Feature to disable write limiting. If the availability value is larger than zero, the writer (write proxy) will disable write limiting. It is used to control write limiting dynamically.
-
-Write Proxy Features
---------------------
-
-- *region_stop_accept_new_stream*: Feature to disable accepting new streams in the current region. It is a feature that applies to the global replicated log only. If the availability value is larger than zero, the write proxies will stop accepting new streams and throw a RegionAvailable exception to the client. The client then knows this region has stopped accepting new streams and is forced to send requests to other regions. It is a feature used for ownership failover between regions.
-- *service_rate_limit_disabled*: Feature to disable service rate limiting. If the availability value is larger than zero, the write proxies will disable rate limiting.
-- *service_checksum_disabled*: Feature to disable service request checksum validation. If the availability value is larger than zero, the write proxies will disable request checksum validation.

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/b169db04/docs/references/main.rst
----------------------------------------------------------------------
diff --git a/docs/references/main.rst b/docs/references/main.rst
deleted file mode 100644
index 5b65d87..0000000
--- a/docs/references/main.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-References
-===========
-
-This page keeps references on configuration settings, metrics and features that exposed in DistributedLog.
-
-.. toctree::
-   :maxdepth: 2
-
-   configuration
-   metrics
-   features